Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/system.c2
-rw-r--r--drivers/atm/Kconfig1
-rw-r--r--drivers/atm/atmtcp.c6
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/he.c4
-rw-r--r--drivers/base/Kconfig7
-rw-r--r--drivers/base/class.c9
-rw-r--r--drivers/base/core.c130
-rw-r--r--drivers/base/cpu.c2
-rw-r--r--drivers/base/dd.c4
-rw-r--r--drivers/base/devtmpfs.c5
-rw-r--r--drivers/base/firmware_class.c206
-rw-r--r--drivers/base/module.c4
-rw-r--r--drivers/base/platform.c8
-rw-r--r--drivers/block/loop.c10
-rw-r--r--drivers/bluetooth/btmrvl_drv.h8
-rw-r--r--drivers/bluetooth/btmrvl_main.c92
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c7
-rw-r--r--drivers/bluetooth/hci_h4.c2
-rw-r--r--drivers/bluetooth/hci_ll.c8
-rw-r--r--drivers/bluetooth/hci_vhci.c2
-rw-r--r--drivers/char/Kconfig10
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/isicom.c8
-rw-r--r--drivers/char/n_gsm.c2763
-rw-r--r--drivers/char/serial167.c223
-rw-r--r--drivers/char/tty_buffer.c2
-rw-r--r--drivers/firmware/dcdbas.c4
-rw-r--r--drivers/firmware/dell_rbu.c10
-rw-r--r--drivers/firmware/efivars.c4
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpu/drm/drm_sysfs.c5
-rw-r--r--drivers/hid/Kconfig151
-rw-r--r--drivers/hid/Makefile6
-rw-r--r--drivers/hid/hid-3m-pct.c31
-rw-r--r--drivers/hid/hid-cando.c272
-rw-r--r--drivers/hid/hid-core.c53
-rw-r--r--drivers/hid/hid-egalax.c281
-rw-r--r--drivers/hid/hid-ids.h25
-rw-r--r--drivers/hid/hid-lg.c9
-rw-r--r--drivers/hid/hid-magicmouse.c5
-rw-r--r--drivers/hid/hid-ntrig.c526
-rw-r--r--drivers/hid/hid-picolcd.c2631
-rw-r--r--drivers/hid/hid-prodikeys.c910
-rw-r--r--drivers/hid/hid-roccat-kone.c994
-rw-r--r--drivers/hid/hid-roccat-kone.h224
-rw-r--r--drivers/hid/hid-samsung.c95
-rw-r--r--drivers/hid/hid-topseed.c38
-rw-r--r--drivers/hid/hid-wacom.c229
-rw-r--r--drivers/hid/hid-zydacron.c237
-rw-r--r--drivers/hid/hidraw.c50
-rw-r--r--drivers/hid/usbhid/hid-core.c93
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/usbhid/hiddev.c19
-rw-r--r--drivers/hid/usbhid/usbhid.h1
-rw-r--r--drivers/hid/usbhid/usbkbd.c17
-rw-r--r--drivers/hid/usbhid/usbmouse.c6
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c36
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c2
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c2
-rw-r--r--drivers/i2c/busses/i2c-amd756.c2
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c2
-rw-r--r--drivers/i2c/busses/i2c-at91.c3
-rw-r--r--drivers/i2c/busses/i2c-elektor.c2
-rw-r--r--drivers/i2c/busses/i2c-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-hydra.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c59
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c2
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c3
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c3
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c12
-rw-r--r--drivers/i2c/busses/i2c-ocores.c2
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c2
-rw-r--r--drivers/i2c/busses/i2c-parport.c2
-rw-r--r--drivers/i2c/busses/i2c-pasemi.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c2
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/i2c/busses/i2c-s6000.c2
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c2
-rw-r--r--drivers/i2c/busses/i2c-sibyte.c2
-rw-r--r--drivers/i2c/busses/i2c-simtec.c3
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c2
-rw-r--r--drivers/i2c/busses/i2c-sis630.c2
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c2
-rw-r--r--drivers/i2c/busses/i2c-stub.c9
-rw-r--r--drivers/i2c/busses/i2c-versatile.c3
-rw-r--r--drivers/i2c/busses/i2c-via.c2
-rw-r--r--drivers/i2c/busses/i2c-viapro.c2
-rw-r--r--drivers/i2c/busses/scx200_acb.c4
-rw-r--r--drivers/i2c/busses/scx200_i2c.c2
-rw-r--r--drivers/i2c/i2c-core.c90
-rw-r--r--drivers/i2c/i2c-dev.c36
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c15
-rw-r--r--drivers/input/joystick/xpad.c16
-rw-r--r--drivers/input/misc/ati_remote.c12
-rw-r--r--drivers/input/misc/ati_remote2.c4
-rw-r--r--drivers/input/misc/cm109.c28
-rw-r--r--drivers/input/misc/keyspan_remote.c6
-rw-r--r--drivers/input/misc/powermate.c17
-rw-r--r--drivers/input/misc/yealink.c25
-rw-r--r--drivers/input/mouse/appletouch.c12
-rw-r--r--drivers/input/mouse/bcm5974.c24
-rw-r--r--drivers/input/serio/Kconfig16
-rw-r--r--drivers/input/serio/Makefile1
-rw-r--r--drivers/input/serio/ams_delta_serio.c177
-rw-r--r--drivers/input/tablet/acecad.c6
-rw-r--r--drivers/input/tablet/aiptek.c14
-rw-r--r--drivers/input/tablet/gtco.c12
-rw-r--r--drivers/input/tablet/kbtab.c6
-rw-r--r--drivers/input/tablet/wacom_sys.c8
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c8
-rw-r--r--drivers/isdn/i4l/isdn_x25iface.c17
-rw-r--r--drivers/md/bitmap.c4
-rw-r--r--drivers/md/md.c6
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c14
-rw-r--r--drivers/media/dvb/dvb-usb/usb-urb.c7
-rw-r--r--drivers/media/dvb/ttusb-dec/ttusb_dec.c6
-rw-r--r--drivers/media/video/au0828/au0828-video.c4
-rw-r--r--drivers/media/video/cx231xx/cx231xx-core.c14
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c4
-rw-r--r--drivers/media/video/gspca/benq.c4
-rw-r--r--drivers/media/video/gspca/gspca.c30
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c8
-rw-r--r--drivers/media/video/tlg2300/pd-main.c2
-rw-r--r--drivers/media/video/tlg2300/pd-video.c14
-rw-r--r--drivers/media/video/usbvision/usbvision-core.c16
-rw-r--r--drivers/media/video/uvc/uvc_video.c4
-rw-r--r--drivers/message/fusion/mptbase.c177
-rw-r--r--drivers/message/fusion/mptbase.h5
-rw-r--r--drivers/message/fusion/mptctl.c181
-rw-r--r--drivers/message/fusion/mptfc.c22
-rw-r--r--drivers/message/fusion/mptsas.c55
-rw-r--r--drivers/message/fusion/mptsas.h2
-rw-r--r--drivers/message/fusion/mptscsih.c27
-rw-r--r--drivers/message/fusion/mptspi.c10
-rw-r--r--drivers/misc/c2port/core.c4
-rw-r--r--drivers/misc/ds1682.c6
-rw-r--r--drivers/misc/eeprom/at24.c65
-rw-r--r--drivers/misc/eeprom/at25.c6
-rw-r--r--drivers/misc/eeprom/eeprom.c3
-rw-r--r--drivers/misc/eeprom/max6875.c2
-rw-r--r--drivers/mtd/Kconfig13
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c137
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c344
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c136
-rw-r--r--drivers/mtd/chips/cfi_probe.c56
-rw-r--r--drivers/mtd/chips/cfi_util.c3
-rw-r--r--drivers/mtd/chips/fwh_lock.h6
-rw-r--r--drivers/mtd/chips/gen_probe.c15
-rw-r--r--drivers/mtd/chips/jedec_probe.c288
-rw-r--r--drivers/mtd/devices/Makefile2
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/pmc551.c4
-rw-r--r--drivers/mtd/devices/sst25l.c68
-rw-r--r--drivers/mtd/ftl.c1
-rw-r--r--drivers/mtd/inftlcore.c1
-rw-r--r--drivers/mtd/inftlmount.c7
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c79
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c7
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c16
-rw-r--r--drivers/mtd/maps/ceiva.c2
-rw-r--r--drivers/mtd/maps/ixp2000.c3
-rw-r--r--drivers/mtd/maps/ixp4xx.c7
-rw-r--r--drivers/mtd/maps/pcmciamtd.c88
-rw-r--r--drivers/mtd/maps/physmap.c7
-rw-r--r--drivers/mtd/maps/physmap_of.c55
-rw-r--r--drivers/mtd/maps/pismo.c8
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c3
-rw-r--r--drivers/mtd/mtd_blkdevs.c335
-rw-r--r--drivers/mtd/mtdblock.c72
-rw-r--r--drivers/mtd/mtdblock_ro.c4
-rw-r--r--drivers/mtd/mtdchar.c105
-rw-r--r--drivers/mtd/mtdconcat.c3
-rw-r--r--drivers/mtd/mtdcore.c285
-rw-r--r--drivers/mtd/mtdcore.h7
-rw-r--r--drivers/mtd/mtdoops.c5
-rw-r--r--drivers/mtd/mtdsuper.c18
-rw-r--r--drivers/mtd/nand/Kconfig69
-rw-r--r--drivers/mtd/nand/Makefile10
-rw-r--r--drivers/mtd/nand/alauda.c2
-rw-r--r--drivers/mtd/nand/atmel_nand.c2
-rw-r--r--drivers/mtd/nand/au1550nd.c12
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c3
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c29
-rw-r--r--drivers/mtd/nand/cafe_nand.c4
-rw-r--r--drivers/mtd/nand/davinci_nand.c6
-rw-r--r--drivers/mtd/nand/denali.c2134
-rw-r--r--drivers/mtd/nand/denali.h816
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c4
-rw-r--r--drivers/mtd/nand/fsl_upm.c9
-rw-r--r--drivers/mtd/nand/gpio.c12
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c917
-rw-r--r--drivers/mtd/nand/mxc_nand.c146
-rw-r--r--drivers/mtd/nand/nand_base.c387
-rw-r--r--drivers/mtd/nand/nand_bbt.c29
-rw-r--r--drivers/mtd/nand/nand_bcm_umi.h71
-rw-r--r--drivers/mtd/nand/nand_ids.c1
-rw-r--r--drivers/mtd/nand/nandsim.c17
-rw-r--r--drivers/mtd/nand/nomadik_nand.c6
-rw-r--r--drivers/mtd/nand/nuc900_nand.c (renamed from drivers/mtd/nand/w90p910_nand.c)144
-rw-r--r--drivers/mtd/nand/omap2.c16
-rw-r--r--drivers/mtd/nand/orion_nand.c13
-rw-r--r--drivers/mtd/nand/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c11
-rw-r--r--drivers/mtd/nand/r852.c1140
-rw-r--r--drivers/mtd/nand/r852.h163
-rw-r--r--drivers/mtd/nand/s3c2410.c12
-rw-r--r--drivers/mtd/nand/sh_flctl.c2
-rw-r--r--drivers/mtd/nand/sm_common.c148
-rw-r--r--drivers/mtd/nand/sm_common.h61
-rw-r--r--drivers/mtd/nand/socrates_nand.c4
-rw-r--r--drivers/mtd/nand/tmio_nand.c14
-rw-r--r--drivers/mtd/nand/ts7250.c207
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c2
-rw-r--r--drivers/mtd/nftlcore.c1
-rw-r--r--drivers/mtd/onenand/Kconfig7
-rw-r--r--drivers/mtd/onenand/Makefile1
-rw-r--r--drivers/mtd/onenand/omap2.c12
-rw-r--r--drivers/mtd/onenand/onenand_base.c63
-rw-r--r--drivers/mtd/onenand/samsung.c1071
-rw-r--r--drivers/mtd/rfd_ftl.c1
-rw-r--r--drivers/mtd/sm_ftl.c1284
-rw-r--r--drivers/mtd/sm_ftl.h94
-rw-r--r--drivers/mtd/ssfdc.c1
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c3
-rw-r--r--drivers/mtd/tests/mtd_readtest.c3
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c3
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c3
-rw-r--r--drivers/mtd/tests/mtd_subpagetest.c3
-rw-r--r--drivers/mtd/ubi/Kconfig2
-rw-r--r--drivers/mtd/ubi/build.c60
-rw-r--r--drivers/mtd/ubi/io.c6
-rw-r--r--drivers/mtd/ubi/kapi.c6
-rw-r--r--drivers/mtd/ubi/scan.c4
-rw-r--r--drivers/mtd/ubi/ubi.h2
-rw-r--r--drivers/mtd/ubi/vtbl.c4
-rw-r--r--drivers/mtd/ubi/wl.c2
-rw-r--r--drivers/net/3c501.c2
-rw-r--r--drivers/net/3c503.c44
-rw-r--r--drivers/net/3c505.c14
-rw-r--r--drivers/net/3c507.c5
-rw-r--r--drivers/net/3c509.c4
-rw-r--r--drivers/net/3c515.c6
-rw-r--r--drivers/net/3c523.c11
-rw-r--r--drivers/net/3c527.c6
-rw-r--r--drivers/net/3c59x.c11
-rw-r--r--drivers/net/7990.c12
-rw-r--r--drivers/net/8139cp.c9
-rw-r--r--drivers/net/8139too.c8
-rw-r--r--drivers/net/82596.c9
-rw-r--r--drivers/net/Kconfig46
-rw-r--r--drivers/net/Makefile3
-rw-r--r--drivers/net/a2065.c10
-rw-r--r--drivers/net/ac3200.c2
-rw-r--r--drivers/net/acenic.c44
-rw-r--r--drivers/net/acenic.h6
-rw-r--r--drivers/net/amd8111e.c8
-rw-r--r--drivers/net/apne.c1
-rw-r--r--drivers/net/appletalk/cops.c9
-rw-r--r--drivers/net/appletalk/ltpc.c1
-rw-r--r--drivers/net/arcnet/arcnet.c1
-rw-r--r--drivers/net/arcnet/com20020-pci.c4
-rw-r--r--drivers/net/ariadne.c2
-rw-r--r--drivers/net/arm/am79c961a.c7
-rw-r--r--drivers/net/arm/at91_ether.c7
-rw-r--r--drivers/net/arm/ep93xx_eth.c2
-rw-r--r--drivers/net/arm/ether1.c1
-rw-r--r--drivers/net/arm/ether3.c1
-rw-r--r--drivers/net/arm/ixp4xx_eth.c9
-rw-r--r--drivers/net/arm/ks8695net.c13
-rw-r--r--drivers/net/arm/w90p910_ether.c7
-rw-r--r--drivers/net/at1700.c11
-rw-r--r--drivers/net/atarilance.c5
-rw-r--r--drivers/net/atl1c/atl1c_ethtool.c2
-rw-r--r--drivers/net/atl1c/atl1c_main.c9
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c2
-rw-r--r--drivers/net/atl1e/atl1e_main.c16
-rw-r--r--drivers/net/atlx/atl1.c7
-rw-r--r--drivers/net/atlx/atl2.c8
-rw-r--r--drivers/net/atlx/atlx.c6
-rw-r--r--drivers/net/atp.c10
-rw-r--r--drivers/net/au1000_eth.c262
-rw-r--r--drivers/net/au1000_eth.h4
-rw-r--r--drivers/net/ax88796.c1
-rw-r--r--drivers/net/b44.c8
-rw-r--r--drivers/net/bcm63xx_enet.c14
-rw-r--r--drivers/net/benet/be.h11
-rw-r--r--drivers/net/benet/be_cmds.c14
-rw-r--r--drivers/net/benet/be_cmds.h2
-rw-r--r--drivers/net/benet/be_ethtool.c5
-rw-r--r--drivers/net/benet/be_hw.h3
-rw-r--r--drivers/net/benet/be_main.c315
-rw-r--r--drivers/net/bfin_mac.c559
-rw-r--r--drivers/net/bfin_mac.h18
-rw-r--r--drivers/net/bmac.c15
-rw-r--r--drivers/net/bnx2.c102
-rw-r--r--drivers/net/bnx2.h9
-rw-r--r--drivers/net/bnx2x.h66
-rw-r--r--drivers/net/bnx2x_link.c12
-rw-r--r--drivers/net/bnx2x_main.c1878
-rw-r--r--drivers/net/bnx2x_reg.h27
-rw-r--r--drivers/net/bonding/bond_ipv6.c9
-rw-r--r--drivers/net/bonding/bond_main.c275
-rw-r--r--drivers/net/bonding/bonding.h2
-rw-r--r--drivers/net/caif/Kconfig17
-rw-r--r--drivers/net/caif/Makefile12
-rw-r--r--drivers/net/caif/caif_serial.c449
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/bfin_can.c3
-rw-r--r--drivers/net/can/mcp251x.c16
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c1
-rw-r--r--drivers/net/can/mscan/mscan.c1
-rw-r--r--drivers/net/can/sja1000/Kconfig4
-rw-r--r--drivers/net/can/sja1000/ems_pci.c1
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c1
-rw-r--r--drivers/net/can/sja1000/plx_pci.c154
-rw-r--r--drivers/net/can/sja1000/sja1000.c23
-rw-r--r--drivers/net/can/sja1000/sja1000.h1
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c48
-rw-r--r--drivers/net/can/ti_hecc.c1
-rw-r--r--drivers/net/can/usb/ems_usb.c22
-rw-r--r--drivers/net/cassini.c15
-rw-r--r--drivers/net/chelsio/pm3393.c7
-rw-r--r--drivers/net/chelsio/sge.c58
-rw-r--r--drivers/net/cnic.c80
-rw-r--r--drivers/net/cnic.h10
-rw-r--r--drivers/net/cpmac.c17
-rw-r--r--drivers/net/cris/eth_v10.c8
-rw-r--r--drivers/net/cs89x0.c3
-rw-r--r--drivers/net/cxgb3/l2t.c1
-rw-r--r--drivers/net/cxgb3/sge.c20
-rw-r--r--drivers/net/cxgb3/xgmac.c8
-rw-r--r--drivers/net/cxgb4/cxgb4.h9
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c102
-rw-r--r--drivers/net/cxgb4/sge.c11
-rw-r--r--drivers/net/cxgb4/t4_hw.c117
-rw-r--r--drivers/net/cxgb4/t4_msg.h1
-rw-r--r--drivers/net/cxgb4/t4fw_api.h4
-rw-r--r--drivers/net/davinci_emac.c8
-rw-r--r--drivers/net/de600.c4
-rw-r--r--drivers/net/de620.c1
-rw-r--r--drivers/net/declance.c10
-rw-r--r--drivers/net/defxx.c6
-rw-r--r--drivers/net/depca.c15
-rw-r--r--drivers/net/dl2k.c8
-rw-r--r--drivers/net/dm9000.c50
-rw-r--r--drivers/net/dnet.c4
-rw-r--r--drivers/net/e100.c190
-rw-r--r--drivers/net/e1000/e1000.h37
-rw-r--r--drivers/net/e1000/e1000_ethtool.c89
-rw-r--r--drivers/net/e1000/e1000_hw.c356
-rw-r--r--drivers/net/e1000/e1000_hw.h1
-rw-r--r--drivers/net/e1000/e1000_main.c429
-rw-r--r--drivers/net/e1000/e1000_osdep.h14
-rw-r--r--drivers/net/e1000/e1000_param.c112
-rw-r--r--drivers/net/e1000e/82571.c29
-rw-r--r--drivers/net/e1000e/defines.h9
-rw-r--r--drivers/net/e1000e/e1000.h26
-rw-r--r--drivers/net/e1000e/es2lan.c11
-rw-r--r--drivers/net/e1000e/ethtool.c48
-rw-r--r--drivers/net/e1000e/hw.h5
-rw-r--r--drivers/net/e1000e/ich8lan.c391
-rw-r--r--drivers/net/e1000e/lib.c60
-rw-r--r--drivers/net/e1000e/netdev.c844
-rw-r--r--drivers/net/e1000e/param.c25
-rw-r--r--drivers/net/e1000e/phy.c21
-rw-r--r--drivers/net/e2100.c1
-rw-r--r--drivers/net/eepro.c13
-rw-r--r--drivers/net/eexpress.c11
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c78
-rw-r--r--drivers/net/ehea/ehea_qmr.c43
-rw-r--r--drivers/net/ehea/ehea_qmr.h14
-rw-r--r--drivers/net/enc28j60.c2
-rw-r--r--drivers/net/enic/Makefile2
-rw-r--r--drivers/net/enic/cq_enet_desc.h12
-rw-r--r--drivers/net/enic/enic.h12
-rw-r--r--drivers/net/enic/enic_main.c341
-rw-r--r--drivers/net/enic/enic_res.c5
-rw-r--r--drivers/net/enic/enic_res.h1
-rw-r--r--drivers/net/enic/vnic_dev.c110
-rw-r--r--drivers/net/enic/vnic_dev.h10
-rw-r--r--drivers/net/enic/vnic_rq.c4
-rw-r--r--drivers/net/enic/vnic_vic.c73
-rw-r--r--drivers/net/enic/vnic_vic.h59
-rw-r--r--drivers/net/enic/vnic_wq.c4
-rw-r--r--drivers/net/epic100.c13
-rw-r--r--drivers/net/eql.c2
-rw-r--r--drivers/net/es3210.c2
-rw-r--r--drivers/net/eth16i.c5
-rw-r--r--drivers/net/ethoc.c8
-rw-r--r--drivers/net/ewrk3.c14
-rw-r--r--drivers/net/fealnx.c9
-rw-r--r--drivers/net/fec.c1140
-rw-r--r--drivers/net/fec_mpc52xx.c8
-rw-r--r--drivers/net/forcedeth.c251
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/fs_enet/mac-fcc.c6
-rw-r--r--drivers/net/fs_enet/mac-fec.c6
-rw-r--r--drivers/net/fs_enet/mac-scc.c6
-rw-r--r--drivers/net/fsl_pq_mdio.c6
-rw-r--r--drivers/net/gianfar.c213
-rw-r--r--drivers/net/gianfar.h8
-rw-r--r--drivers/net/greth.c7
-rw-r--r--drivers/net/hamachi.c11
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/scc.c1
-rw-r--r--drivers/net/hp-plus.c4
-rw-r--r--drivers/net/hp.c3
-rw-r--r--drivers/net/hp100.c16
-rw-r--r--drivers/net/ibm_newemac/core.c12
-rw-r--r--drivers/net/ibmlana.c8
-rw-r--r--drivers/net/ibmveth.c24
-rw-r--r--drivers/net/ifb.c1
-rw-r--r--drivers/net/igb/e1000_82575.c35
-rw-r--r--drivers/net/igb/e1000_82575.h9
-rw-r--r--drivers/net/igb/e1000_defines.h5
-rw-r--r--drivers/net/igb/e1000_hw.h17
-rw-r--r--drivers/net/igb/e1000_mac.c27
-rw-r--r--drivers/net/igb/igb.h9
-rw-r--r--drivers/net/igb/igb_ethtool.c58
-rw-r--r--drivers/net/igb/igb_main.c613
-rw-r--r--drivers/net/igbvf/ethtool.c2
-rw-r--r--drivers/net/igbvf/netdev.c82
-rw-r--r--drivers/net/ioc3-eth.c7
-rw-r--r--drivers/net/ipg.c11
-rw-r--r--drivers/net/ipg.h109
-rw-r--r--drivers/net/irda/Kconfig6
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/ali-ircc.c32
-rw-r--r--drivers/net/irda/au1k_ir.c1
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/irda-usb.c2
-rw-r--r--drivers/net/irda/mcs7780.c4
-rw-r--r--drivers/net/irda/pxaficp_ir.c1
-rw-r--r--drivers/net/irda/sa1100_ir.c2
-rw-r--r--drivers/net/irda/sh_irda.c865
-rw-r--r--drivers/net/irda/sh_sir.c12
-rw-r--r--drivers/net/irda/sir_dev.c1
-rw-r--r--drivers/net/irda/smsc-ircc2.c3
-rw-r--r--drivers/net/irda/via-ircc.h2
-rw-r--r--drivers/net/irda/vlsi_ir.c5
-rw-r--r--drivers/net/irda/w83977af_ir.c2
-rw-r--r--drivers/net/iseries_veth.c6
-rw-r--r--drivers/net/ixgb/ixgb.h8
-rw-r--r--drivers/net/ixgb/ixgb_ee.c24
-rw-r--r--drivers/net/ixgb/ixgb_hw.c164
-rw-r--r--drivers/net/ixgb/ixgb_hw.h12
-rw-r--r--drivers/net/ixgb/ixgb_main.c159
-rw-r--r--drivers/net/ixgb/ixgb_osdep.h16
-rw-r--r--drivers/net/ixgb/ixgb_param.c31
-rw-r--r--drivers/net/ixgbe/ixgbe.h3
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c8
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c418
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c539
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h22
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c146
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c17
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c656
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c42
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h3
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c137
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h8
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h50
-rw-r--r--drivers/net/ixgbevf/defines.h12
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c125
-rw-r--r--drivers/net/ixgbevf/vf.c27
-rw-r--r--drivers/net/ixgbevf/vf.h4
-rw-r--r--drivers/net/ixp2000/ixpdev.c2
-rw-r--r--drivers/net/jme.c12
-rw-r--r--drivers/net/korina.c12
-rw-r--r--drivers/net/ks8842.c61
-rw-r--r--drivers/net/ks8851.c448
-rw-r--r--drivers/net/ks8851.h14
-rw-r--r--drivers/net/ks8851_mll.c63
-rw-r--r--drivers/net/ksz884x.c86
-rw-r--r--drivers/net/lance.c4
-rw-r--r--drivers/net/lib82596.c11
-rw-r--r--drivers/net/lib8390.c22
-rw-r--r--drivers/net/ll_temac.h14
-rw-r--r--drivers/net/ll_temac_main.c159
-rw-r--r--drivers/net/lne390.c2
-rw-r--r--drivers/net/lp486e.c8
-rw-r--r--drivers/net/mac8390.c2
-rw-r--r--drivers/net/mac89x0.c1
-rw-r--r--drivers/net/macb.c9
-rw-r--r--drivers/net/mace.c6
-rw-r--r--drivers/net/macmace.c7
-rw-r--r--drivers/net/macvlan.c23
-rw-r--r--drivers/net/macvtap.c46
-rw-r--r--drivers/net/meth.c4
-rw-r--r--drivers/net/mlx4/en_ethtool.c2
-rw-r--r--drivers/net/mlx4/en_netdev.c53
-rw-r--r--drivers/net/mlx4/eq.c2
-rw-r--r--drivers/net/mlx4/mlx4.h1
-rw-r--r--drivers/net/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/mv643xx_eth.c10
-rw-r--r--drivers/net/myri10ge/myri10ge.c54
-rw-r--r--drivers/net/myri_sbus.c2
-rw-r--r--drivers/net/natsemi.c10
-rw-r--r--drivers/net/ne-h8300.c1
-rw-r--r--drivers/net/ne.c1
-rw-r--r--drivers/net/ne2.c1
-rw-r--r--drivers/net/ne2k-pci.c1
-rw-r--r--drivers/net/ne3210.c2
-rw-r--r--drivers/net/netconsole.c15
-rw-r--r--drivers/net/netx-eth.c1
-rw-r--r--drivers/net/netxen/netxen_nic.h6
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c9
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h8
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c131
-rw-r--r--drivers/net/netxen/netxen_nic_init.c169
-rw-r--r--drivers/net/netxen/netxen_nic_main.c85
-rw-r--r--drivers/net/ni5010.c5
-rw-r--r--drivers/net/ni52.c13
-rw-r--r--drivers/net/ni65.c5
-rw-r--r--drivers/net/niu.c57
-rw-r--r--drivers/net/niu.h7
-rw-r--r--drivers/net/octeon/octeon_mgmt.c66
-rw-r--r--drivers/net/pasemi_mac.c2
-rw-r--r--drivers/net/pci-skeleton.c7
-rw-r--r--drivers/net/pcmcia/3c574_cs.c6
-rw-r--r--drivers/net/pcmcia/3c589_cs.c287
-rw-r--r--drivers/net/pcmcia/axnet_cs.c10
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c9
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c2
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c12
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c14
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c9
-rw-r--r--drivers/net/pcnet32.c15
-rw-r--r--drivers/net/phy/bcm63xx.c8
-rw-r--r--drivers/net/phy/broadcom.c16
-rw-r--r--drivers/net/phy/cicada.c8
-rw-r--r--drivers/net/phy/davicom.c9
-rw-r--r--drivers/net/phy/et1011c.c7
-rw-r--r--drivers/net/phy/icplus.c7
-rw-r--r--drivers/net/phy/lxt.c8
-rw-r--r--drivers/net/phy/marvell.c13
-rw-r--r--drivers/net/phy/mdio-bitbang.c60
-rw-r--r--drivers/net/phy/mdio_bus.c4
-rw-r--r--drivers/net/phy/micrel.c9
-rw-r--r--drivers/net/phy/national.c10
-rw-r--r--drivers/net/phy/phy_device.c12
-rw-r--r--drivers/net/phy/qsemi.c7
-rw-r--r--drivers/net/phy/realtek.c7
-rw-r--r--drivers/net/phy/smsc.c11
-rw-r--r--drivers/net/phy/ste10Xp.c8
-rw-r--r--drivers/net/phy/vitesse.c8
-rw-r--r--drivers/net/plip.c4
-rw-r--r--drivers/net/ppp_generic.c19
-rw-r--r--drivers/net/pppoe.c11
-rw-r--r--drivers/net/pppol2tp.c2680
-rw-r--r--drivers/net/ps3_gelic_net.c13
-rw-r--r--drivers/net/ps3_gelic_wireless.c74
-rw-r--r--drivers/net/qla3xxx.c72
-rw-r--r--drivers/net/qla3xxx.h8
-rw-r--r--drivers/net/qlcnic/qlcnic.h40
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c3
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c34
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h60
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c136
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c101
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c447
-rw-r--r--drivers/net/qlge/qlge.h8
-rw-r--r--drivers/net/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/qlge/qlge_ethtool.c2
-rw-r--r--drivers/net/qlge/qlge_main.c64
-rw-r--r--drivers/net/r6040.c46
-rw-r--r--drivers/net/r8169.c158
-rw-r--r--drivers/net/rrunner.c1
-rw-r--r--drivers/net/s2io.c15
-rw-r--r--drivers/net/s6gmac.c3
-rw-r--r--drivers/net/sb1000.c5
-rw-r--r--drivers/net/sb1250-mac.c275
-rw-r--r--drivers/net/sc92031.c8
-rw-r--r--drivers/net/seeq8005.c4
-rw-r--r--drivers/net/sfc/efx.c137
-rw-r--r--drivers/net/sfc/efx.h4
-rw-r--r--drivers/net/sfc/ethtool.c6
-rw-r--r--drivers/net/sfc/falcon.c16
-rw-r--r--drivers/net/sfc/falcon_xmac.c22
-rw-r--r--drivers/net/sfc/mcdi.c32
-rw-r--r--drivers/net/sfc/mcdi_mac.c25
-rw-r--r--drivers/net/sfc/mcdi_pcol.h71
-rw-r--r--drivers/net/sfc/mcdi_phy.c152
-rw-r--r--drivers/net/sfc/net_driver.h76
-rw-r--r--drivers/net/sfc/nic.c114
-rw-r--r--drivers/net/sfc/nic.h3
-rw-r--r--drivers/net/sfc/selftest.c8
-rw-r--r--drivers/net/sfc/selftest.h4
-rw-r--r--drivers/net/sfc/siena.c19
-rw-r--r--drivers/net/sfc/tx.c61
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/sgiseeq.c6
-rw-r--r--drivers/net/sh_eth.c2
-rw-r--r--drivers/net/sis190.c6
-rw-r--r--drivers/net/sis900.c25
-rw-r--r--drivers/net/skfp/fplustm.c2
-rw-r--r--drivers/net/skfp/pcmplc.c4
-rw-r--r--drivers/net/skfp/skfddi.c15
-rw-r--r--drivers/net/skfp/smt.c2
-rw-r--r--drivers/net/skfp/srf.c2
-rw-r--r--drivers/net/skge.c46
-rw-r--r--drivers/net/skge.h4
-rw-r--r--drivers/net/sky2.c215
-rw-r--r--drivers/net/sky2.h41
-rw-r--r--drivers/net/slhc.c1
-rw-r--r--drivers/net/slip.c4
-rw-r--r--drivers/net/smc-mca.c1
-rw-r--r--drivers/net/smc-ultra.c1
-rw-r--r--drivers/net/smc-ultra32.c1
-rw-r--r--drivers/net/smc911x.c21
-rw-r--r--drivers/net/smc9194.c61
-rw-r--r--drivers/net/smc91x.c12
-rw-r--r--drivers/net/smsc911x.c7
-rw-r--r--drivers/net/smsc9420.c8
-rw-r--r--drivers/net/sonic.c10
-rw-r--r--drivers/net/spider_net.c8
-rw-r--r--drivers/net/starfire.c16
-rw-r--r--drivers/net/stmmac/Makefile2
-rw-r--r--drivers/net/stmmac/common.h21
-rw-r--r--drivers/net/stmmac/dwmac100.c538
-rw-r--r--drivers/net/stmmac/dwmac100.h5
-rw-r--r--drivers/net/stmmac/dwmac1000.h12
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c41
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c338
-rw-r--r--drivers/net/stmmac/dwmac100_core.c196
-rw-r--r--drivers/net/stmmac/dwmac100_dma.c134
-rw-r--r--drivers/net/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/stmmac/dwmac_lib.c19
-rw-r--r--drivers/net/stmmac/enh_desc.c337
-rw-r--r--drivers/net/stmmac/norm_desc.c236
-rw-r--r--drivers/net/stmmac/stmmac.h10
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c7
-rw-r--r--drivers/net/stmmac/stmmac_main.c32
-rw-r--r--drivers/net/stmmac/stmmac_timer.c6
-rw-r--r--drivers/net/stnic.c1
-rw-r--r--drivers/net/sun3_82586.c13
-rw-r--r--drivers/net/sun3lance.c8
-rw-r--r--drivers/net/sunbmac.c12
-rw-r--r--drivers/net/sundance.c14
-rw-r--r--drivers/net/sungem.c9
-rw-r--r--drivers/net/sunhme.c22
-rw-r--r--drivers/net/sunlance.c11
-rw-r--r--drivers/net/sunqe.c7
-rw-r--r--drivers/net/sunvnet.c9
-rw-r--r--drivers/net/tc35815.c8
-rw-r--r--drivers/net/tehuti.c10
-rw-r--r--drivers/net/tg3.c839
-rw-r--r--drivers/net/tg3.h17
-rw-r--r--drivers/net/tlan.c13
-rw-r--r--drivers/net/tokenring/3c359.c112
-rw-r--r--drivers/net/tokenring/ibmtr.c13
-rw-r--r--drivers/net/tokenring/lanstreamer.c58
-rw-r--r--drivers/net/tokenring/madgemc.c12
-rw-r--r--drivers/net/tokenring/olympic.c74
-rw-r--r--drivers/net/tokenring/smctr.c4
-rw-r--r--drivers/net/tokenring/tms380tr.c65
-rw-r--r--drivers/net/tsi108_eth.c16
-rw-r--r--drivers/net/tulip/de2104x.c13
-rw-r--r--drivers/net/tulip/de4x5.c87
-rw-r--r--drivers/net/tulip/dmfe.c17
-rw-r--r--drivers/net/tulip/media.c2
-rw-r--r--drivers/net/tulip/pnic.c2
-rw-r--r--drivers/net/tulip/tulip_core.c31
-rw-r--r--drivers/net/tulip/uli526x.c10
-rw-r--r--drivers/net/tulip/winbond-840.c18
-rw-r--r--drivers/net/tulip/xircom_cb.c6
-rw-r--r--drivers/net/tun.c55
-rw-r--r--drivers/net/typhoon.c8
-rw-r--r--drivers/net/ucc_geth.c12
-rw-r--r--drivers/net/usb/asix.c53
-rw-r--r--drivers/net/usb/catc.c6
-rw-r--r--drivers/net/usb/cdc_ether.c113
-rw-r--r--drivers/net/usb/dm9601.c9
-rw-r--r--drivers/net/usb/hso.c4
-rw-r--r--drivers/net/usb/ipheth.c24
-rw-r--r--drivers/net/usb/kaweth.c13
-rw-r--r--drivers/net/usb/mcs7830.c10
-rw-r--r--drivers/net/usb/pegasus.c9
-rw-r--r--drivers/net/usb/pegasus.h2
-rw-r--r--drivers/net/usb/rndis_host.c18
-rw-r--r--drivers/net/usb/smsc75xx.c6
-rw-r--r--drivers/net/usb/smsc95xx.c6
-rw-r--r--drivers/net/usb/usbnet.c15
-rw-r--r--drivers/net/via-rhine.c10
-rw-r--r--drivers/net/via-velocity.c121
-rw-r--r--drivers/net/via-velocity.h77
-rw-r--r--drivers/net/virtio_net.c57
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c11
-rw-r--r--drivers/net/vxge/vxge-config.c41
-rw-r--r--drivers/net/vxge/vxge-config.h34
-rw-r--r--drivers/net/vxge/vxge-ethtool.c5
-rw-r--r--drivers/net/vxge/vxge-main.c245
-rw-r--r--drivers/net/vxge/vxge-main.h6
-rw-r--r--drivers/net/vxge/vxge-traffic.c79
-rw-r--r--drivers/net/vxge/vxge-traffic.h50
-rw-r--r--drivers/net/vxge/vxge-version.h4
-rw-r--r--drivers/net/wan/cycx_x25.c13
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/hd64570.c1
-rw-r--r--drivers/net/wan/hd64572.c1
-rw-r--r--drivers/net/wan/hdlc_x25.c12
-rw-r--r--drivers/net/wan/ixp4xx_hss.c1
-rw-r--r--drivers/net/wan/lapbether.c12
-rw-r--r--drivers/net/wan/lmc/lmc_main.c6
-rw-r--r--drivers/net/wan/pc300_drv.c5
-rw-r--r--drivers/net/wan/pc300_tty.c2
-rw-r--r--drivers/net/wan/sdla.c2
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wan/x25_asy.c12
-rw-r--r--drivers/net/wd.c1
-rw-r--r--drivers/net/wimax/i2400m/control.c27
-rw-r--r--drivers/net/wimax/i2400m/driver.c167
-rw-r--r--drivers/net/wimax/i2400m/i2400m-sdio.h5
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h82
-rw-r--r--drivers/net/wimax/i2400m/netdev.c14
-rw-r--r--drivers/net/wimax/i2400m/rx.c116
-rw-r--r--drivers/net/wimax/i2400m/sdio-rx.c2
-rw-r--r--drivers/net/wimax/i2400m/sdio-tx.c35
-rw-r--r--drivers/net/wimax/i2400m/sdio.c7
-rw-r--r--drivers/net/wimax/i2400m/tx.c155
-rw-r--r--drivers/net/wimax/i2400m/usb-notif.c1
-rw-r--r--drivers/net/wimax/i2400m/usb.c14
-rw-r--r--drivers/net/wireless/Kconfig92
-rw-r--r--drivers/net/wireless/adm8211.c12
-rw-r--r--drivers/net/wireless/airo.c37
-rw-r--r--drivers/net/wireless/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/ath/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h52
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.h2
-rw-r--r--drivers/net/wireless/ath/ar9170/eeprom.h4
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h1
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c587
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c25
-rw-r--r--drivers/net/wireless/ath/ath.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c744
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.h104
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h313
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c329
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h39
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c9
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c382
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c19
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h35
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h88
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c379
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c77
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c17
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h42
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c41
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig21
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile26
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c217
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_initvals.h742
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c1374
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9001_initvals.h1254
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c1000
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c598
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h (renamed from drivers/net/wireless/ath/ath9k/initvals.h)2292
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c480
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c535
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.h572
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c802
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c1838
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h323
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c205
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_initvals.h1784
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c614
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h120
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c1134
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h847
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h28
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c1024
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c392
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h17
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c297
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h21
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h25
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c1008
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h104
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h464
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c255
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c834
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c1775
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c707
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c480
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h245
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h280
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1913
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h275
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c88
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c571
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h93
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c158
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.c978
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h596
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c533
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h183
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c336
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h139
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c591
-rw-r--r--drivers/net/wireless/ath/debug.h1
-rw-r--r--drivers/net/wireless/ath/hw.c4
-rw-r--r--drivers/net/wireless/ath/regd.c4
-rw-r--r--drivers/net/wireless/atmel.c1
-rw-r--r--drivers/net/wireless/b43/b43.h1
-rw-r--r--drivers/net/wireless/b43/main.c26
-rw-r--r--drivers/net/wireless/b43/phy_n.c479
-rw-r--r--drivers/net/wireless/b43/phy_n.h21
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c22
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h37
-rw-r--r--drivers/net/wireless/b43/xmit.c1
-rw-r--r--drivers/net/wireless/b43legacy/main.c21
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_download.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c49
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c190
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h14
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c13
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c1
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c102
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c500
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c93
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c361
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c198
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h33
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1493
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c331
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c850
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h56
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c276
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h118
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ict.c307
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c1530
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c208
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c1340
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c425
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1021
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h181
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h116
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c1021
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h136
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c912
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h288
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h53
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h94
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c826
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c536
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c901
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h76
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c1074
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c406
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Makefile5
-rw-r--r--drivers/net/wireless/iwmc3200wifi/bus.h2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c17
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c14
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debug.h7
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c123
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.c15
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.h5
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c79
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c19
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.h283
-rw-r--r--drivers/net/wireless/iwmc3200wifi/tx.c12
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h2
-rw-r--r--drivers/net/wireless/libertas/assoc.c22
-rw-r--r--drivers/net/wireless/libertas/cfg.c1
-rw-r--r--drivers/net/wireless/libertas/debugfs.c5
-rw-r--r--drivers/net/wireless/libertas/dev.h1
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c127
-rw-r--r--drivers/net/wireless/libertas/if_usb.c4
-rw-r--r--drivers/net/wireless/libertas/main.c15
-rw-r--r--drivers/net/wireless/libertas/rx.c51
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/wext.c4
-rw-r--r--drivers/net/wireless/libertas_tf/cmd.c203
-rw-r--r--drivers/net/wireless/libertas_tf/deb_defs.h104
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c252
-rw-r--r--drivers/net/wireless/libertas_tf/libertas_tf.h2
-rw-r--r--drivers/net/wireless/libertas_tf/main.c106
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c96
-rw-r--r--drivers/net/wireless/mwl8k.c30
-rw-r--r--drivers/net/wireless/orinoco/Kconfig20
-rw-r--r--drivers/net/wireless/orinoco/Makefile4
-rw-r--r--drivers/net/wireless/orinoco/airport.c8
-rw-r--r--drivers/net/wireless/orinoco/cfg.c91
-rw-r--r--drivers/net/wireless/orinoco/fw.c10
-rw-r--r--drivers/net/wireless/orinoco/hermes.c286
-rw-r--r--drivers/net/wireless/orinoco/hermes.h62
-rw-r--r--drivers/net/wireless/orinoco/hermes_dld.c243
-rw-r--r--drivers/net/wireless/orinoco/hw.c102
-rw-r--r--drivers/net/wireless/orinoco/hw.h1
-rw-r--r--drivers/net/wireless/orinoco/main.c307
-rw-r--r--drivers/net/wireless/orinoco/main.h12
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h38
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c85
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c1795
-rw-r--r--drivers/net/wireless/orinoco/scan.c4
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c7
-rw-r--r--drivers/net/wireless/orinoco/wext.c273
-rw-r--r--drivers/net/wireless/p54/main.c2
-rw-r--r--drivers/net/wireless/p54/p54pci.c10
-rw-r--r--drivers/net/wireless/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/p54/txrx.c3
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c18
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c10
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c8
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c2
-rw-r--r--drivers/net/wireless/ray_cs.c242
-rw-r--r--drivers/net/wireless/rndis_wlan.c374
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig4
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c56
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c54
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c130
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h119
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c676
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c318
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c297
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h40
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h35
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c23
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dump.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c17
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c47
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c95
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c128
-rw-r--r--drivers/net/wireless/rtl818x/Kconfig88
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180.h11
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c115
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c14
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig24
-rw-r--r--drivers/net/wireless/wl12xx/Makefile6
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_boot.c3
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_io.h20
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c73
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_reg.h7
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_sdio.c144
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_spi.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h63
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c179
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h157
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c46
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.h10
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c337
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h27
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h488
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_debugfs.c12
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c69
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h8
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c57
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.c87
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.h139
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c1272
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c7
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.c94
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_sdio.c291
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c315
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.h96
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_testmode.c1
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c133
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.h9
-rw-r--r--drivers/net/wireless/wl3501_cs.c57
-rw-r--r--drivers/net/wireless/zd1201.c12
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c13
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c10
-rw-r--r--drivers/net/xilinx_emaclite.c10
-rw-r--r--drivers/net/yellowfin.c13
-rw-r--r--drivers/net/znet.c2
-rw-r--r--drivers/net/zorro8390.c1
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c5
-rw-r--r--drivers/pci/pci-sysfs.c45
-rw-r--r--drivers/pcmcia/cistpl.c4
-rw-r--r--drivers/power/olpc_battery.c2
-rw-r--r--drivers/rapidio/rio-sysfs.c6
-rw-r--r--drivers/rtc/rtc-cmos.c6
-rw-r--r--drivers/rtc/rtc-ds1305.c6
-rw-r--r--drivers/rtc/rtc-ds1307.c6
-rw-r--r--drivers/rtc/rtc-ds1511.c10
-rw-r--r--drivers/rtc/rtc-ds1553.c4
-rw-r--r--drivers/rtc/rtc-ds1742.c4
-rw-r--r--drivers/rtc/rtc-m48t59.c4
-rw-r--r--drivers/rtc/rtc-stk17ta8.c4
-rw-r--r--drivers/rtc/rtc-tx4939.c4
-rw-r--r--drivers/s390/cio/chp.c5
-rw-r--r--drivers/s390/net/ctcm_main.c6
-rw-r--r--drivers/s390/net/ctcm_mpc.c6
-rw-r--r--drivers/s390/net/lcs.c3
-rw-r--r--drivers/s390/net/qeth_core.h32
-rw-r--r--drivers/s390/net/qeth_core_main.c237
-rw-r--r--drivers/s390/net/qeth_core_mpc.h10
-rw-r--r--drivers/s390/net/qeth_core_sys.c151
-rw-r--r--drivers/s390/net/qeth_l2_main.c47
-rw-r--r--drivers/s390/net/qeth_l3_main.c108
-rw-r--r--drivers/s390/net/qeth_l3_sys.c244
-rw-r--r--drivers/s390/scsi/zfcp_aux.c7
-rw-r--r--drivers/s390/scsi/zfcp_def.h19
-rw-r--r--drivers/s390/scsi/zfcp_erp.c2
-rw-r--r--drivers/s390/scsi/zfcp_ext.h6
-rw-r--r--drivers/s390/scsi/zfcp_fc.c4
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c246
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h11
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c108
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h104
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c23
-rw-r--r--drivers/scsi/3w-9xxx.c24
-rw-r--r--drivers/scsi/3w-9xxx.h9
-rw-r--r--drivers/scsi/3w-sas.c4
-rw-r--r--drivers/scsi/3w-xxxx.c23
-rw-r--r--drivers/scsi/3w-xxxx.h8
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/a2091.c309
-rw-r--r--drivers/scsi/a2091.h42
-rw-r--r--drivers/scsi/a3000.c285
-rw-r--r--drivers/scsi/a3000.h46
-rw-r--r--drivers/scsi/aacraid/aachba.c67
-rw-r--r--drivers/scsi/aacraid/aacraid.h4
-rw-r--r--drivers/scsi/aacraid/commsup.c18
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c9
-rw-r--r--drivers/scsi/bfa/bfa_cb_ioim_macros.h29
-rw-r--r--drivers/scsi/bfa/bfa_ioim.c22
-rw-r--r--drivers/scsi/bfa/bfa_os_inc.h23
-rw-r--r--drivers/scsi/bfa/bfad.c21
-rw-r--r--drivers/scsi/bfa/bfad_attr.c201
-rw-r--r--drivers/scsi/bfa/bfad_drv.h4
-rw-r--r--drivers/scsi/bfa/bfad_im.c67
-rw-r--r--drivers/scsi/bfa/bfad_im.h6
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c11
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_init.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c212
-rw-r--r--drivers/scsi/fcoe/libfcoe.c111
-rw-r--r--drivers/scsi/fnic/fnic.h4
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c2
-rw-r--r--drivers/scsi/fnic/fnic_main.c2
-rw-r--r--drivers/scsi/gdth.c2
-rw-r--r--drivers/scsi/gvp11.c565
-rw-r--r--drivers/scsi/gvp11.h36
-rw-r--r--drivers/scsi/hpsa.c8
-rw-r--r--drivers/scsi/hpsa_cmd.h15
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c11
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h1
-rw-r--r--drivers/scsi/ipr.c9
-rw-r--r--drivers/scsi/iscsi_tcp.c10
-rw-r--r--drivers/scsi/iscsi_tcp.h1
-rw-r--r--drivers/scsi/libfc/fc_disc.c8
-rw-r--r--drivers/scsi/libfc/fc_elsct.c2
-rw-r--r--drivers/scsi/libfc/fc_exch.c51
-rw-r--r--drivers/scsi/libfc/fc_fcp.c103
-rw-r--r--drivers/scsi/libfc/fc_libfc.h8
-rw-r--r--drivers/scsi/libfc/fc_lport.c58
-rw-r--r--drivers/scsi/libfc/fc_npiv.c7
-rw-r--r--drivers/scsi/libfc/fc_rport.c195
-rw-r--r--drivers/scsi/libiscsi_tcp.c2
-rw-r--r--drivers/scsi/libsas/sas_ata.c5
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c16
-rw-r--r--drivers/scsi/lpfc/lpfc.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c40
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c498
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c79
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h190
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h60
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c213
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c69
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c99
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c149
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c269
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c4
-rw-r--r--drivers/scsi/mpt2sas/Kconfig2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_history.txt2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_init.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c90
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h36
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_config.c8
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c46
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_debug.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c1055
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c89
-rw-r--r--drivers/scsi/mvme147.c169
-rw-r--r--drivers/scsi/mvme147.h4
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c25
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c10
-rw-r--r--drivers/scsi/mvsas/mv_init.c19
-rw-r--r--drivers/scsi/mvsas/mv_sas.c201
-rw-r--r--drivers/scsi/mvsas/mv_sas.h11
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c1
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c4
-rw-r--r--drivers/scsi/pmcraid.c6
-rw-r--r--drivers/scsi/qla2xxx/Makefile3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c804
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c1212
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h135
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c61
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h10
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h343
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h106
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h126
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c724
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h21
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c879
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c540
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c266
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c3636
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h889
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c543
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c149
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h48
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h46
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h8
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c374
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c23
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c337
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c135
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h3
-rw-r--r--drivers/scsi/scsi.c6
-rw-r--r--drivers/scsi/scsi_debug.c89
-rw-r--r--drivers/scsi/scsi_error.c19
-rw-r--r--drivers/scsi/scsi_scan.c3
-rw-r--r--drivers/scsi/scsi_sysfs.c7
-rw-r--r--drivers/scsi/scsi_trace.c284
-rw-r--r--drivers/scsi/scsi_transport_fc.c30
-rw-r--r--drivers/scsi/sd.c25
-rw-r--r--drivers/scsi/wd33c93.c6
-rw-r--r--drivers/scsi/wd33c93.h1
-rw-r--r--drivers/serial/Kconfig82
-rw-r--r--drivers/serial/Makefile2
-rw-r--r--drivers/serial/altera_jtaguart.c504
-rw-r--r--drivers/serial/altera_uart.c570
-rw-r--r--drivers/serial/bfin_sport_uart.c209
-rw-r--r--drivers/serial/bfin_sport_uart.h27
-rw-r--r--drivers/serial/timbuart.c25
-rw-r--r--drivers/serial/uartlite.c32
-rw-r--r--drivers/spi/Kconfig6
-rw-r--r--drivers/ssb/driver_chipcommon.c3
-rw-r--r--drivers/ssb/main.c3
-rw-r--r--drivers/ssb/pci.c14
-rw-r--r--drivers/ssb/sprom.c14
-rw-r--r--drivers/staging/arlan/arlan-main.c9
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c1
-rw-r--r--drivers/staging/et131x/et131x_netdev.c6
-rw-r--r--drivers/staging/rt2860/iface/rtmp_usb.h4
-rw-r--r--drivers/staging/slicoss/slicoss.c6
-rw-r--r--drivers/staging/udlfb/udlfb.c11
-rw-r--r--drivers/staging/usbip/stub_rx.c2
-rw-r--r--drivers/staging/usbip/usbip_common.c2
-rw-r--r--drivers/staging/usbip/vhci.h2
-rw-r--r--drivers/staging/vt6655/device_main.c6
-rw-r--r--drivers/staging/vt6656/main_usb.c6
-rw-r--r--drivers/staging/wavelan/wavelan.c10
-rw-r--r--drivers/staging/wavelan/wavelan_cs.c12
-rw-r--r--drivers/staging/winbond/wbusb.c6
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c12
-rw-r--r--drivers/usb/atm/ueagle-atm.c347
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.h2
-rw-r--r--drivers/usb/class/cdc-acm.c22
-rw-r--r--drivers/usb/class/cdc-acm.h4
-rw-r--r--drivers/usb/class/cdc-wdm.c38
-rw-r--r--drivers/usb/class/usblp.c2
-rw-r--r--drivers/usb/core/buffer.c2
-rw-r--r--drivers/usb/core/config.c214
-rw-r--r--drivers/usb/core/devices.c19
-rw-r--r--drivers/usb/core/devio.c3
-rw-r--r--drivers/usb/core/driver.c60
-rw-r--r--drivers/usb/core/generic.c2
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hcd.c246
-rw-r--r--drivers/usb/core/hcd.h578
-rw-r--r--drivers/usb/core/hub.c30
-rw-r--r--drivers/usb/core/hub.h205
-rw-r--r--drivers/usb/core/inode.c2
-rw-r--r--drivers/usb/core/message.c133
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/core/sysfs.c25
-rw-r--r--drivers/usb/core/urb.c18
-rw-r--r--drivers/usb/core/usb.c96
-rw-r--r--drivers/usb/gadget/Kconfig58
-rw-r--r--drivers/usb/gadget/Makefile8
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c3
-rw-r--r--drivers/usb/gadget/composite.c60
-rw-r--r--drivers/usb/gadget/config.c4
-rw-r--r--drivers/usb/gadget/dummy_hcd.c4
-rw-r--r--drivers/usb/gadget/epautoconf.c12
-rw-r--r--drivers/usb/gadget/f_acm.c32
-rw-r--r--drivers/usb/gadget/f_ecm.c33
-rw-r--r--drivers/usb/gadget/f_fs.c2442
-rw-r--r--drivers/usb/gadget/f_hid.c673
-rw-r--r--drivers/usb/gadget/f_mass_storage.c138
-rw-r--r--drivers/usb/gadget/f_rndis.c33
-rw-r--r--drivers/usb/gadget/f_uvc.c661
-rw-r--r--drivers/usb/gadget/f_uvc.h376
-rw-r--r--drivers/usb/gadget/fsl_mxc_udc.c (renamed from drivers/usb/gadget/fsl_mx3_udc.c)14
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c2
-rw-r--r--drivers/usb/gadget/g_ffs.c426
-rw-r--r--drivers/usb/gadget/hid.c298
-rw-r--r--drivers/usb/gadget/pxa27x_udc.h2
-rw-r--r--drivers/usb/gadget/u_ether.c4
-rw-r--r--drivers/usb/gadget/uvc.h241
-rw-r--r--drivers/usb/gadget/uvc_queue.c583
-rw-r--r--drivers/usb/gadget/uvc_queue.h89
-rw-r--r--drivers/usb/gadget/uvc_v4l2.c374
-rw-r--r--drivers/usb/gadget/uvc_video.c386
-rw-r--r--drivers/usb/gadget/webcam.c399
-rw-r--r--drivers/usb/host/Kconfig15
-rw-r--r--drivers/usb/host/ehci-au1xxx.c27
-rw-r--r--drivers/usb/host/ehci-fsl.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c3
-rw-r--r--drivers/usb/host/ehci-hub.c182
-rw-r--r--drivers/usb/host/ehci-omap.c21
-rw-r--r--drivers/usb/host/ehci-pci.c18
-rw-r--r--drivers/usb/host/ehci-q.c2
-rw-r--r--drivers/usb/host/ehci.h18
-rw-r--r--drivers/usb/host/fhci-dbg.c2
-rw-r--r--drivers/usb/host/fhci-hcd.c2
-rw-r--r--drivers/usb/host/fhci-hub.c2
-rw-r--r--drivers/usb/host/fhci-mem.c2
-rw-r--r--drivers/usb/host/fhci-q.c2
-rw-r--r--drivers/usb/host/fhci-sched.c2
-rw-r--r--drivers/usb/host/fhci-tds.c2
-rw-r--r--drivers/usb/host/fhci.h11
-rw-r--r--drivers/usb/host/imx21-hcd.c2
-rw-r--r--drivers/usb/host/isp116x-hcd.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c6
-rw-r--r--drivers/usb/host/isp1760-hcd.c29
-rw-r--r--drivers/usb/host/isp1760-if.c13
-rw-r--r--drivers/usb/host/ohci-hcd.c33
-rw-r--r--drivers/usb/host/ohci-omap3.c735
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c31
-rw-r--r--drivers/usb/host/r8a66597-hcd.c39
-rw-r--r--drivers/usb/host/sl811-hcd.c60
-rw-r--r--drivers/usb/host/u132-hcd.c6
-rw-r--r--drivers/usb/host/uhci-hcd.c2
-rw-r--r--drivers/usb/host/whci/debug.c2
-rw-r--r--drivers/usb/host/whci/qset.c6
-rw-r--r--drivers/usb/host/xhci-dbg.c24
-rw-r--r--drivers/usb/host/xhci-hub.c39
-rw-r--r--drivers/usb/host/xhci-mem.c489
-rw-r--r--drivers/usb/host/xhci-pci.c8
-rw-r--r--drivers/usb/host/xhci-ring.c329
-rw-r--r--drivers/usb/host/xhci.c416
-rw-r--r--drivers/usb/host/xhci.h112
-rw-r--r--drivers/usb/misc/appledisplay.c6
-rw-r--r--drivers/usb/misc/ftdi-elan.c20
-rw-r--r--drivers/usb/misc/iowarrior.c12
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c13
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c8
-rw-r--r--drivers/usb/misc/usblcd.c8
-rw-r--r--drivers/usb/misc/usbtest.c17
-rw-r--r--drivers/usb/mon/mon_bin.c4
-rw-r--r--drivers/usb/mon/mon_main.c3
-rw-r--r--drivers/usb/mon/mon_text.c6
-rw-r--r--drivers/usb/musb/Kconfig6
-rw-r--r--drivers/usb/musb/Makefile14
-rw-r--r--drivers/usb/musb/blackfin.c96
-rw-r--r--drivers/usb/musb/davinci.c2
-rw-r--r--drivers/usb/musb/musb_core.c147
-rw-r--r--drivers/usb/musb/musb_core.h10
-rw-r--r--drivers/usb/musb/musb_debug.h13
-rw-r--r--drivers/usb/musb/musb_debugfs.c294
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c25
-rw-r--r--drivers/usb/musb/musb_regs.h10
-rw-r--r--drivers/usb/musb/musb_virthub.c4
-rw-r--r--drivers/usb/musb/musbhsdma.h16
-rw-r--r--drivers/usb/musb/omap2430.c29
-rw-r--r--drivers/usb/musb/tusb6010.c2
-rw-r--r--drivers/usb/otg/isp1301_omap.c2
-rw-r--r--drivers/usb/otg/twl4030-usb.c108
-rw-r--r--drivers/usb/otg/ulpi.c50
-rw-r--r--drivers/usb/serial/Kconfig23
-rw-r--r--drivers/usb/serial/Makefile2
-rw-r--r--drivers/usb/serial/aircable.c499
-rw-r--r--drivers/usb/serial/ark3116.c111
-rw-r--r--drivers/usb/serial/belkin_sa.c130
-rw-r--r--drivers/usb/serial/belkin_sa.h10
-rw-r--r--drivers/usb/serial/ch341.c5
-rw-r--r--drivers/usb/serial/console.c27
-rw-r--r--drivers/usb/serial/cp210x.c63
-rw-r--r--drivers/usb/serial/cypress_m8.c242
-rw-r--r--drivers/usb/serial/cypress_m8.h53
-rw-r--r--drivers/usb/serial/digi_acceleport.c4
-rw-r--r--drivers/usb/serial/empeg.c401
-rw-r--r--drivers/usb/serial/ftdi_sio.c457
-rw-r--r--drivers/usb/serial/ftdi_sio.h126
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h42
-rw-r--r--drivers/usb/serial/generic.c330
-rw-r--r--drivers/usb/serial/io_edgeport.c3
-rw-r--r--drivers/usb/serial/io_edgeport.h16
-rw-r--r--drivers/usb/serial/io_ionsp.h95
-rw-r--r--drivers/usb/serial/io_ti.c228
-rw-r--r--drivers/usb/serial/io_ti.h92
-rw-r--r--drivers/usb/serial/io_usbvend.h87
-rw-r--r--drivers/usb/serial/ipaq.c357
-rw-r--r--drivers/usb/serial/ipaq.h54
-rw-r--r--drivers/usb/serial/ipw.c184
-rw-r--r--drivers/usb/serial/ir-usb.c272
-rw-r--r--drivers/usb/serial/iuu_phoenix.c30
-rw-r--r--drivers/usb/serial/kl5kusb105.c436
-rw-r--r--drivers/usb/serial/kl5kusb105.h47
-rw-r--r--drivers/usb/serial/kobil_sct.c3
-rw-r--r--drivers/usb/serial/kobil_sct.h75
-rw-r--r--drivers/usb/serial/mct_u232.c7
-rw-r--r--drivers/usb/serial/mct_u232.h254
-rw-r--r--drivers/usb/serial/mos7720.c1130
-rw-r--r--drivers/usb/serial/mos7840.c1
-rw-r--r--drivers/usb/serial/option.c841
-rw-r--r--drivers/usb/serial/oti6858.c254
-rw-r--r--drivers/usb/serial/pl2303.c430
-rw-r--r--drivers/usb/serial/pl2303.h6
-rw-r--r--drivers/usb/serial/qcaux.c5
-rw-r--r--drivers/usb/serial/qcserial.c64
-rw-r--r--drivers/usb/serial/safe_serial.c231
-rw-r--r--drivers/usb/serial/spcp8x5.c407
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c179
-rw-r--r--drivers/usb/serial/usb-serial.c47
-rw-r--r--drivers/usb/serial/usb-wwan.h67
-rw-r--r--drivers/usb/serial/usb_debug.c12
-rw-r--r--drivers/usb/serial/usb_wwan.c665
-rw-r--r--drivers/usb/serial/visor.c344
-rw-r--r--drivers/usb/serial/visor.h9
-rw-r--r--drivers/usb/serial/zio.c64
-rw-r--r--drivers/usb/storage/isd200.c4
-rw-r--r--drivers/usb/storage/onetouch.c12
-rw-r--r--drivers/usb/storage/transport.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h24
-rw-r--r--drivers/usb/storage/usb.c87
-rw-r--r--drivers/usb/storage/usb.h3
-rw-r--r--drivers/usb/usb-skeleton.c10
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c2
-rw-r--r--drivers/usb/wusbcore/wusbhc.h4
-rw-r--r--drivers/vhost/net.c4
-rw-r--r--drivers/vhost/vhost.c11
-rw-r--r--drivers/video/Kconfig16
-rw-r--r--drivers/video/aty/radeon_base.c4
-rw-r--r--drivers/video/via/Makefile4
-rw-r--r--drivers/video/via/accel.c137
-rw-r--r--drivers/video/via/accel.h40
-rw-r--r--drivers/video/via/chip.h8
-rw-r--r--drivers/video/via/dvi.c37
-rw-r--r--drivers/video/via/global.h1
-rw-r--r--drivers/video/via/hw.c307
-rw-r--r--drivers/video/via/hw.h20
-rw-r--r--drivers/video/via/ioctl.h2
-rw-r--r--drivers/video/via/lcd.c31
-rw-r--r--drivers/video/via/lcd.h2
-rw-r--r--drivers/video/via/share.h20
-rw-r--r--drivers/video/via/via-core.c668
-rw-r--r--drivers/video/via/via-gpio.c285
-rw-r--r--drivers/video/via/via_i2c.c232
-rw-r--r--drivers/video/via/via_modesetting.c126
-rw-r--r--drivers/video/via/via_modesetting.h (renamed from drivers/video/via/via_i2c.h)42
-rw-r--r--drivers/video/via/via_utility.c1
-rw-r--r--drivers/video/via/viafbdev.c181
-rw-r--r--drivers/video/via/viafbdev.h14
-rw-r--r--drivers/video/via/viamode.c15
-rw-r--r--drivers/video/via/vt1636.c34
-rw-r--r--drivers/video/via/vt1636.h2
-rw-r--r--drivers/w1/slaves/w1_ds2431.c4
-rw-r--r--drivers/w1/slaves/w1_ds2433.c4
-rw-r--r--drivers/w1/slaves/w1_ds2760.c2
-rw-r--r--drivers/w1/w1.c4
-rw-r--r--drivers/watchdog/pcwd_usb.c6
-rw-r--r--drivers/zorro/zorro-sysfs.c2
1446 files changed, 112982 insertions, 48925 deletions
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index e35525b..c79e789 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -71,7 +71,7 @@ struct acpi_table_attr {
struct list_head node;
};
-static ssize_t acpi_table_show(struct kobject *kobj,
+static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 191b85e..f1a0a00 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -394,6 +394,7 @@ config ATM_HE_USE_SUNI
config ATM_SOLOS
tristate "Solos ADSL2+ PCI Multiport card driver"
depends on PCI
+ select FW_LOADER
help
Support for the Solos multiport ADSL2+ card.
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index b867121..b910181 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -68,7 +68,7 @@ static int atmtcp_send_control(struct atm_vcc *vcc,int type,
*(struct atm_vcc **) &new_msg->vcc = vcc;
old_test = test_bit(flag,&vcc->flags);
out_vcc->push(out_vcc,skb);
- add_wait_queue(sk_atm(vcc)->sk_sleep, &wait);
+ add_wait_queue(sk_sleep(sk_atm(vcc)), &wait);
while (test_bit(flag,&vcc->flags) == old_test) {
mb();
out_vcc = PRIV(vcc->dev) ? PRIV(vcc->dev)->vcc : NULL;
@@ -80,7 +80,7 @@ static int atmtcp_send_control(struct atm_vcc *vcc,int type,
schedule();
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk_atm(vcc)->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk_atm(vcc)), &wait);
return error;
}
@@ -105,7 +105,7 @@ static int atmtcp_recv_control(const struct atmtcp_control *msg)
msg->type);
return -EINVAL;
}
- wake_up(sk_atm(vcc)->sk_sleep);
+ wake_up(sk_sleep(sk_atm(vcc)));
return 0;
}
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 719ec5a..90a5a7c 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1131,7 +1131,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
if (i == -1)
put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
skb->data,
- skb->len - skb->data_len);
+ skb_headlen(skb));
else
put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
skb_shinfo(skb)->frags[i].page + skb_shinfo(skb)->frags[i].page_offset,
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index c213e0d..56c2e99 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2664,8 +2664,8 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
#ifdef USE_SCATTERGATHER
tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
- skb->len - skb->data_len, PCI_DMA_TODEVICE);
- tpd->iovec[slot].len = skb->len - skb->data_len;
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ tpd->iovec[slot].len = skb_headlen(skb);
++slot;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fd52c48..ef38aff 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -18,9 +18,9 @@ config UEVENT_HELPER_PATH
config DEVTMPFS
bool "Maintain a devtmpfs filesystem to mount at /dev"
- depends on HOTPLUG && SHMEM && TMPFS
+ depends on HOTPLUG
help
- This creates a tmpfs filesystem instance early at bootup.
+ This creates a tmpfs/ramfs filesystem instance early at bootup.
In this filesystem, the kernel driver core maintains device
nodes with their default names and permissions for all
registered devices with an assigned major/minor number.
@@ -33,6 +33,9 @@ config DEVTMPFS
functional /dev without any further help. It also allows simple
rescue systems, and reliably handles dynamic major/minor numbers.
+ Notice: if CONFIG_TMPFS isn't enabled, the simpler ramfs
+ file system will be used instead.
+
config DEVTMPFS_MOUNT
bool "Automount devtmpfs at /dev, after the kernel mounted the rootfs"
depends on DEVTMPFS
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 9c6a0d6..8e231d0 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -63,6 +63,14 @@ static void class_release(struct kobject *kobj)
kfree(cp);
}
+static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject *kobj)
+{
+ struct class_private *cp = to_class(kobj);
+ struct class *class = cp->class;
+
+ return class->ns_type;
+}
+
static const struct sysfs_ops class_sysfs_ops = {
.show = class_attr_show,
.store = class_attr_store,
@@ -71,6 +79,7 @@ static const struct sysfs_ops class_sysfs_ops = {
static struct kobj_type class_ktype = {
.sysfs_ops = &class_sysfs_ops,
.release = class_release,
+ .child_ns_type = class_child_ns_type,
};
/* Hotplug events for classes go to the class class_subsys */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index b56a0ba..9630fbd 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -20,7 +20,6 @@
#include <linux/notifier.h>
#include <linux/genhd.h>
#include <linux/kallsyms.h>
-#include <linux/semaphore.h>
#include <linux/mutex.h>
#include <linux/async.h>
@@ -132,9 +131,21 @@ static void device_release(struct kobject *kobj)
kfree(p);
}
+static const void *device_namespace(struct kobject *kobj)
+{
+ struct device *dev = to_dev(kobj);
+ const void *ns = NULL;
+
+ if (dev->class && dev->class->ns_type)
+ ns = dev->class->namespace(dev);
+
+ return ns;
+}
+
static struct kobj_type device_ktype = {
.release = device_release,
.sysfs_ops = &dev_sysfs_ops,
+ .namespace = device_namespace,
};
@@ -559,10 +570,10 @@ void device_initialize(struct device *dev)
dev->kobj.kset = devices_kset;
kobject_init(&dev->kobj, &device_ktype);
INIT_LIST_HEAD(&dev->dma_pools);
- init_MUTEX(&dev->sem);
+ mutex_init(&dev->mutex);
+ lockdep_set_novalidate_class(&dev->mutex);
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
- device_init_wakeup(dev, 0);
device_pm_init(dev);
set_dev_node(dev, -1);
}
@@ -596,11 +607,59 @@ static struct kobject *virtual_device_parent(struct device *dev)
return virtual_dir;
}
-static struct kobject *get_device_parent(struct device *dev,
- struct device *parent)
+struct class_dir {
+ struct kobject kobj;
+ struct class *class;
+};
+
+#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
+
+static void class_dir_release(struct kobject *kobj)
+{
+ struct class_dir *dir = to_class_dir(kobj);
+ kfree(dir);
+}
+
+static const
+struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
{
+ struct class_dir *dir = to_class_dir(kobj);
+ return dir->class->ns_type;
+}
+
+static struct kobj_type class_dir_ktype = {
+ .release = class_dir_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .child_ns_type = class_dir_child_ns_type
+};
+
+static struct kobject *
+class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+{
+ struct class_dir *dir;
int retval;
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+ return NULL;
+
+ dir->class = class;
+ kobject_init(&dir->kobj, &class_dir_ktype);
+
+ dir->kobj.kset = &class->p->class_dirs;
+
+ retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
+ if (retval < 0) {
+ kobject_put(&dir->kobj);
+ return NULL;
+ }
+ return &dir->kobj;
+}
+
+
+static struct kobject *get_device_parent(struct device *dev,
+ struct device *parent)
+{
if (dev->class) {
static DEFINE_MUTEX(gdp_mutex);
struct kobject *kobj = NULL;
@@ -635,18 +694,7 @@ static struct kobject *get_device_parent(struct device *dev,
}
/* or create a new class-directory at the parent device */
- k = kobject_create();
- if (!k) {
- mutex_unlock(&gdp_mutex);
- return NULL;
- }
- k->kset = &dev->class->p->class_dirs;
- retval = kobject_add(k, parent_kobj, "%s", dev->class->name);
- if (retval < 0) {
- mutex_unlock(&gdp_mutex);
- kobject_put(k);
- return NULL;
- }
+ k = class_dir_create_and_add(dev->class, parent_kobj);
/* do not emit an uevent for this simple "glue" directory */
mutex_unlock(&gdp_mutex);
return k;
@@ -738,7 +786,7 @@ out_device:
out_busid:
if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
device_is_not_partition(dev))
- sysfs_remove_link(&dev->class->p->class_subsys.kobj,
+ sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj,
dev_name(dev));
#else
/* link in the class directory pointing to the device */
@@ -756,7 +804,7 @@ out_busid:
return 0;
out_busid:
- sysfs_remove_link(&dev->class->p->class_subsys.kobj, dev_name(dev));
+ sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, dev_name(dev));
#endif
out_subsys:
@@ -784,13 +832,13 @@ static void device_remove_class_symlinks(struct device *dev)
if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
device_is_not_partition(dev))
- sysfs_remove_link(&dev->class->p->class_subsys.kobj,
+ sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj,
dev_name(dev));
#else
if (dev->parent && device_is_not_partition(dev))
sysfs_remove_link(&dev->kobj, "device");
- sysfs_remove_link(&dev->class->p->class_subsys.kobj, dev_name(dev));
+ sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, dev_name(dev));
#endif
sysfs_remove_link(&dev->kobj, "subsystem");
@@ -1372,7 +1420,7 @@ struct device *__root_device_register(const char *name, struct module *owner)
return ERR_PTR(err);
}
-#ifdef CONFIG_MODULE /* gotta find a "cleaner" way to do this */
+#ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */
if (owner) {
struct module_kobject *mk = &owner->mkobj;
@@ -1576,6 +1624,14 @@ int device_rename(struct device *dev, char *new_name)
goto out;
}
+#ifndef CONFIG_SYSFS_DEPRECATED
+ if (dev->class) {
+ error = sysfs_rename_link(&dev->class->p->class_subsys.kobj,
+ &dev->kobj, old_device_name, new_name);
+ if (error)
+ goto out;
+ }
+#endif
error = kobject_rename(&dev->kobj, new_name);
if (error)
goto out;
@@ -1590,11 +1646,6 @@ int device_rename(struct device *dev, char *new_name)
new_class_name);
}
}
-#else
- if (dev->class) {
- error = sysfs_rename_link(&dev->class->p->class_subsys.kobj,
- &dev->kobj, old_device_name, new_name);
- }
#endif
out:
@@ -1735,10 +1786,25 @@ EXPORT_SYMBOL_GPL(device_move);
*/
void device_shutdown(void)
{
- struct device *dev, *devn;
+ struct device *dev;
+
+ spin_lock(&devices_kset->list_lock);
+ /*
+ * Walk the devices list backward, shutting down each in turn.
+ * Beware that device unplug events may also start pulling
+ * devices offline, even as the system is shutting down.
+ */
+ while (!list_empty(&devices_kset->list)) {
+ dev = list_entry(devices_kset->list.prev, struct device,
+ kobj.entry);
+ get_device(dev);
+ /*
+ * Make sure the device is off the kset list, in the
+ * event that dev->*->shutdown() doesn't remove it.
+ */
+ list_del_init(&dev->kobj.entry);
+ spin_unlock(&devices_kset->list_lock);
- list_for_each_entry_safe_reverse(dev, devn, &devices_kset->list,
- kobj.entry) {
if (dev->bus && dev->bus->shutdown) {
dev_dbg(dev, "shutdown\n");
dev->bus->shutdown(dev);
@@ -1746,6 +1812,10 @@ void device_shutdown(void)
dev_dbg(dev, "shutdown\n");
dev->driver->shutdown(dev);
}
+ put_device(dev);
+
+ spin_lock(&devices_kset->list_lock);
}
+ spin_unlock(&devices_kset->list_lock);
async_synchronize_full();
}
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index f35719a..251acea 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -186,7 +186,7 @@ static ssize_t print_cpus_offline(struct sysdev_class *class,
/* display offline cpus < nr_cpu_ids */
if (!alloc_cpumask_var(&offline, GFP_KERNEL))
return -ENOMEM;
- cpumask_complement(offline, cpu_online_mask);
+ cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
n = cpulist_scnprintf(buf, len, offline);
free_cpumask_var(offline);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index c89291f..503c262 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -40,11 +40,11 @@ static void driver_bound(struct device *dev)
pr_debug("driver: '%s': %s: bound to device '%s'\n", dev_name(dev),
__func__, dev->driver->name);
+ klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_BOUND_DRIVER, dev);
-
- klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
}
static int driver_sysfs_add(struct device *dev)
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 057cf11..af06001 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -20,6 +20,7 @@
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
+#include <linux/ramfs.h>
#include <linux/cred.h>
#include <linux/sched.h>
#include <linux/init_task.h>
@@ -45,7 +46,11 @@ __setup("devtmpfs.mount=", mount_param);
static int dev_get_sb(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data, struct vfsmount *mnt)
{
+#ifdef CONFIG_TMPFS
return get_sb_single(fs_type, flags, data, shmem_fill_super, mnt);
+#else
+ return get_sb_single(fs_type, flags, data, ramfs_fill_super, mnt);
+#endif
}
static struct file_system_type dev_fs_type = {
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 985da11..3f093b0 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -27,6 +27,52 @@ MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");
+/* Builtin firmware support */
+
+#ifdef CONFIG_FW_LOADER
+
+extern struct builtin_fw __start_builtin_fw[];
+extern struct builtin_fw __end_builtin_fw[];
+
+static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
+{
+ struct builtin_fw *b_fw;
+
+ for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
+ if (strcmp(name, b_fw->name) == 0) {
+ fw->size = b_fw->size;
+ fw->data = b_fw->data;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool fw_is_builtin_firmware(const struct firmware *fw)
+{
+ struct builtin_fw *b_fw;
+
+ for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
+ if (fw->data == b_fw->data)
+ return true;
+
+ return false;
+}
+
+#else /* Module case - no builtin firmware support */
+
+static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
+{
+ return false;
+}
+
+static inline bool fw_is_builtin_firmware(const struct firmware *fw)
+{
+ return false;
+}
+#endif
+
enum {
FW_STATUS_LOADING,
FW_STATUS_DONE,
@@ -40,7 +86,6 @@ static int loading_timeout = 60; /* In seconds */
static DEFINE_MUTEX(fw_lock);
struct firmware_priv {
- char *fw_id;
struct completion completion;
struct bin_attribute attr_data;
struct firmware *fw;
@@ -48,18 +93,11 @@ struct firmware_priv {
struct page **pages;
int nr_pages;
int page_array_size;
- const char *vdata;
struct timer_list timeout;
+ bool nowait;
+ char fw_id[];
};
-#ifdef CONFIG_FW_LOADER
-extern struct builtin_fw __start_builtin_fw[];
-extern struct builtin_fw __end_builtin_fw[];
-#else /* Module case. Avoid ifdefs later; it'll all optimise out */
-static struct builtin_fw *__start_builtin_fw;
-static struct builtin_fw *__end_builtin_fw;
-#endif
-
static void
fw_load_abort(struct firmware_priv *fw_priv)
{
@@ -100,9 +138,25 @@ firmware_timeout_store(struct class *class,
return count;
}
-static CLASS_ATTR(timeout, 0644, firmware_timeout_show, firmware_timeout_store);
+static struct class_attribute firmware_class_attrs[] = {
+ __ATTR(timeout, S_IWUSR | S_IRUGO,
+ firmware_timeout_show, firmware_timeout_store),
+ __ATTR_NULL
+};
+
+static void fw_dev_release(struct device *dev)
+{
+ struct firmware_priv *fw_priv = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < fw_priv->nr_pages; i++)
+ __free_page(fw_priv->pages[i]);
+ kfree(fw_priv->pages);
+ kfree(fw_priv);
+ kfree(dev);
-static void fw_dev_release(struct device *dev);
+ module_put(THIS_MODULE);
+}
static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
@@ -112,12 +166,15 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
return -ENOMEM;
if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
return -ENOMEM;
+ if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
+ return -ENOMEM;
return 0;
}
static struct class firmware_class = {
.name = "firmware",
+ .class_attrs = firmware_class_attrs,
.dev_uevent = firmware_uevent,
.dev_release = fw_dev_release,
};
@@ -130,6 +187,17 @@ static ssize_t firmware_loading_show(struct device *dev,
return sprintf(buf, "%d\n", loading);
}
+static void firmware_free_data(const struct firmware *fw)
+{
+ int i;
+ vunmap(fw->data);
+ if (fw->pages) {
+ for (i = 0; i < PFN_UP(fw->size); i++)
+ __free_page(fw->pages[i]);
+ kfree(fw->pages);
+ }
+}
+
/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
@@ -162,21 +230,21 @@ static ssize_t firmware_loading_store(struct device *dev,
mutex_unlock(&fw_lock);
break;
}
- vfree(fw_priv->fw->data);
- fw_priv->fw->data = NULL;
+ firmware_free_data(fw_priv->fw);
+ memset(fw_priv->fw, 0, sizeof(struct firmware));
+ /* If the pages are not owned by 'struct firmware' */
for (i = 0; i < fw_priv->nr_pages; i++)
__free_page(fw_priv->pages[i]);
kfree(fw_priv->pages);
fw_priv->pages = NULL;
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
- fw_priv->fw->size = 0;
set_bit(FW_STATUS_LOADING, &fw_priv->status);
mutex_unlock(&fw_lock);
break;
case 0:
if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
- vfree(fw_priv->fw->data);
+ vunmap(fw_priv->fw->data);
fw_priv->fw->data = vmap(fw_priv->pages,
fw_priv->nr_pages,
0, PAGE_KERNEL_RO);
@@ -184,7 +252,10 @@ static ssize_t firmware_loading_store(struct device *dev,
dev_err(dev, "%s: vmap() failed\n", __func__);
goto err;
}
- /* Pages will be freed by vfree() */
+ /* Pages are now owned by 'struct firmware' */
+ fw_priv->fw->pages = fw_priv->pages;
+ fw_priv->pages = NULL;
+
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
complete(&fw_priv->completion);
@@ -207,8 +278,9 @@ static ssize_t firmware_loading_store(struct device *dev,
static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
static ssize_t
-firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
+firmware_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buffer, loff_t offset,
+ size_t count)
{
struct device *dev = to_dev(kobj);
struct firmware_priv *fw_priv = dev_get_drvdata(dev);
@@ -291,6 +363,7 @@ fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
/**
* firmware_data_write - write method for firmware
+ * @filp: open sysfs file
* @kobj: kobject for the device
* @bin_attr: bin_attr structure
* @buffer: buffer being written
@@ -301,8 +374,9 @@ fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
* the driver as a firmware image.
**/
static ssize_t
-firmware_data_write(struct kobject *kobj, struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
+firmware_data_write(struct file* filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buffer,
+ loff_t offset, size_t count)
{
struct device *dev = to_dev(kobj);
struct firmware_priv *fw_priv = dev_get_drvdata(dev);
@@ -353,21 +427,6 @@ static struct bin_attribute firmware_attr_data_tmpl = {
.write = firmware_data_write,
};
-static void fw_dev_release(struct device *dev)
-{
- struct firmware_priv *fw_priv = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < fw_priv->nr_pages; i++)
- __free_page(fw_priv->pages[i]);
- kfree(fw_priv->pages);
- kfree(fw_priv->fw_id);
- kfree(fw_priv);
- kfree(dev);
-
- module_put(THIS_MODULE);
-}
-
static void
firmware_class_timeout(u_long data)
{
@@ -379,8 +438,8 @@ static int fw_register_device(struct device **dev_p, const char *fw_name,
struct device *device)
{
int retval;
- struct firmware_priv *fw_priv = kzalloc(sizeof(*fw_priv),
- GFP_KERNEL);
+ struct firmware_priv *fw_priv =
+ kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1 , GFP_KERNEL);
struct device *f_dev = kzalloc(sizeof(*f_dev), GFP_KERNEL);
*dev_p = NULL;
@@ -391,16 +450,9 @@ static int fw_register_device(struct device **dev_p, const char *fw_name,
goto error_kfree;
}
+ strcpy(fw_priv->fw_id, fw_name);
init_completion(&fw_priv->completion);
fw_priv->attr_data = firmware_attr_data_tmpl;
- fw_priv->fw_id = kstrdup(fw_name, GFP_KERNEL);
- if (!fw_priv->fw_id) {
- dev_err(device, "%s: Firmware name allocation failed\n",
- __func__);
- retval = -ENOMEM;
- goto error_kfree;
- }
-
fw_priv->timeout.function = firmware_class_timeout;
fw_priv->timeout.data = (u_long) fw_priv;
init_timer(&fw_priv->timeout);
@@ -427,7 +479,7 @@ error_kfree:
static int fw_setup_device(struct firmware *fw, struct device **dev_p,
const char *fw_name, struct device *device,
- int uevent)
+ int uevent, bool nowait)
{
struct device *f_dev;
struct firmware_priv *fw_priv;
@@ -443,6 +495,8 @@ static int fw_setup_device(struct firmware *fw, struct device **dev_p,
fw_priv = dev_get_drvdata(f_dev);
+ fw_priv->nowait = nowait;
+
fw_priv->fw = fw;
sysfs_bin_attr_init(&fw_priv->attr_data);
retval = sysfs_create_bin_file(&f_dev->kobj, &fw_priv->attr_data);
@@ -470,12 +524,11 @@ out:
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
- struct device *device, int uevent)
+ struct device *device, int uevent, bool nowait)
{
struct device *f_dev;
struct firmware_priv *fw_priv;
struct firmware *firmware;
- struct builtin_fw *builtin;
int retval;
if (!firmware_p)
@@ -489,21 +542,16 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
- for (builtin = __start_builtin_fw; builtin != __end_builtin_fw;
- builtin++) {
- if (strcmp(name, builtin->name))
- continue;
- dev_info(device, "firmware: using built-in firmware %s\n",
- name);
- firmware->size = builtin->size;
- firmware->data = builtin->data;
+ if (fw_get_builtin_firmware(firmware, name)) {
+ dev_dbg(device, "firmware: using built-in firmware %s\n", name);
return 0;
}
if (uevent)
- dev_info(device, "firmware: requesting %s\n", name);
+ dev_dbg(device, "firmware: requesting %s\n", name);
- retval = fw_setup_device(firmware, &f_dev, name, device, uevent);
+ retval = fw_setup_device(firmware, &f_dev, name, device,
+ uevent, nowait);
if (retval)
goto error_kfree_fw;
@@ -560,26 +608,18 @@ request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device)
{
int uevent = 1;
- return _request_firmware(firmware_p, name, device, uevent);
+ return _request_firmware(firmware_p, name, device, uevent, false);
}
/**
* release_firmware: - release the resource associated with a firmware image
* @fw: firmware resource to release
**/
-void
-release_firmware(const struct firmware *fw)
+void release_firmware(const struct firmware *fw)
{
- struct builtin_fw *builtin;
-
if (fw) {
- for (builtin = __start_builtin_fw; builtin != __end_builtin_fw;
- builtin++) {
- if (fw->data == builtin->data)
- goto free_fw;
- }
- vfree(fw->data);
- free_fw:
+ if (!fw_is_builtin_firmware(fw))
+ firmware_free_data(fw);
kfree(fw);
}
}
@@ -606,7 +646,7 @@ request_firmware_work_func(void *arg)
return 0;
}
ret = _request_firmware(&fw, fw_work->name, fw_work->device,
- fw_work->uevent);
+ fw_work->uevent, true);
fw_work->cont(fw, fw_work->context);
@@ -670,26 +710,12 @@ request_firmware_nowait(
return 0;
}
-static int __init
-firmware_class_init(void)
+static int __init firmware_class_init(void)
{
- int error;
- error = class_register(&firmware_class);
- if (error) {
- printk(KERN_ERR "%s: class_register failed\n", __func__);
- return error;
- }
- error = class_create_file(&firmware_class, &class_attr_timeout);
- if (error) {
- printk(KERN_ERR "%s: class_create_file failed\n",
- __func__);
- class_unregister(&firmware_class);
- }
- return error;
-
+ return class_register(&firmware_class);
}
-static void __exit
-firmware_class_exit(void)
+
+static void __exit firmware_class_exit(void)
{
class_unregister(&firmware_class);
}
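For context, a minimal driver-side sketch (not part of this patch; demo_load_fw() and the firmware name are made up) of the request_firmware()/release_firmware() contract the reworked loader above serves: built-in images are now satisfied directly by fw_get_builtin_firmware(), everything else goes through the sysfs loading/data interface until the timeout expires.

    #include <linux/firmware.h>
    #include <linux/device.h>

    /* Hypothetical helper: shows only the calling convention. */
    static int demo_load_fw(struct device *dev)
    {
    	const struct firmware *fw;
    	int err;

    	err = request_firmware(&fw, "demo/image.bin", dev);
    	if (err)
    		return err;		/* missing image, abort or timeout */

    	/* fw->data and fw->size are read-only for the caller */
    	/* ... hand fw->data to the hardware here ... */

    	release_firmware(fw);		/* skips freeing data for built-in images */
    	return 0;
    }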
diff --git a/drivers/base/module.c b/drivers/base/module.c
index f32f2f9..db930d3 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -15,12 +15,10 @@ static char *make_driver_name(struct device_driver *drv)
{
char *driver_name;
- driver_name = kmalloc(strlen(drv->name) + strlen(drv->bus->name) + 2,
- GFP_KERNEL);
+ driver_name = kasprintf(GFP_KERNEL, "%s:%s", drv->bus->name, drv->name);
if (!driver_name)
return NULL;
- sprintf(driver_name, "%s:%s", drv->bus->name, drv->name);
return driver_name;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index ada6397..4d99c8b 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -735,7 +735,7 @@ static void platform_pm_complete(struct device *dev)
#ifdef CONFIG_SUSPEND
-static int platform_pm_suspend(struct device *dev)
+int __weak platform_pm_suspend(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -753,7 +753,7 @@ static int platform_pm_suspend(struct device *dev)
return ret;
}
-static int platform_pm_suspend_noirq(struct device *dev)
+int __weak platform_pm_suspend_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -769,7 +769,7 @@ static int platform_pm_suspend_noirq(struct device *dev)
return ret;
}
-static int platform_pm_resume(struct device *dev)
+int __weak platform_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -787,7 +787,7 @@ static int platform_pm_resume(struct device *dev)
return ret;
}
-static int platform_pm_resume_noirq(struct device *dev)
+int __weak platform_pm_resume_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 8546d12..a90e83c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -835,6 +835,8 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
set_capacity(lo->lo_disk, size);
bd_set_size(bdev, size << 9);
+ /* let user-space know about the new size */
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
set_blocksize(bdev, lo_blocksize);
@@ -858,6 +860,7 @@ out_clr:
set_capacity(lo->lo_disk, 0);
invalidate_bdev(bdev);
bd_set_size(bdev, 0);
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask);
lo->lo_state = Lo_unbound;
out_putf:
@@ -944,8 +947,11 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
if (bdev)
invalidate_bdev(bdev);
set_capacity(lo->lo_disk, 0);
- if (bdev)
+ if (bdev) {
bd_set_size(bdev, 0);
+ /* let user-space know about this change */
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
+ }
mapping_set_gfp_mask(filp->f_mapping, gfp);
lo->lo_state = Lo_unbound;
/* This is safe: open() is still holding a reference. */
@@ -1189,6 +1195,8 @@ static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
sz <<= 9;
mutex_lock(&bdev->bd_mutex);
bd_set_size(bdev, sz);
+ /* let user-space know about the new size */
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
mutex_unlock(&bdev->bd_mutex);
out:
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 2047275..bed0ba6 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -42,6 +42,8 @@ struct btmrvl_device {
void *card;
struct hci_dev *hcidev;
+ u8 dev_type;
+
u8 tx_dnld_rdy;
u8 psmode;
@@ -88,8 +90,11 @@ struct btmrvl_private {
#define BT_CMD_HOST_SLEEP_ENABLE 0x5A
#define BT_CMD_MODULE_CFG_REQ 0x5B
-/* Sub-commands: Module Bringup/Shutdown Request */
+/* Sub-commands: Module Bringup/Shutdown Request/Response */
#define MODULE_BRINGUP_REQ 0xF1
+#define MODULE_BROUGHT_UP 0x00
+#define MODULE_ALREADY_UP 0x0C
+
#define MODULE_SHUTDOWN_REQ 0xF2
#define BT_EVENT_POWER_STATE 0x20
@@ -123,6 +128,7 @@ struct btmrvl_event {
/* Prototype of global function */
+int btmrvl_register_hdev(struct btmrvl_private *priv);
struct btmrvl_private *btmrvl_add_card(void *card);
int btmrvl_remove_card(struct btmrvl_private *priv);
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 53a43ad..ee37ef0 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -66,7 +66,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
{
struct btmrvl_adapter *adapter = priv->adapter;
struct btmrvl_event *event;
- u8 ret = 0;
+ int ret = 0;
event = (struct btmrvl_event *) skb->data;
if (event->ec != 0xff) {
@@ -112,8 +112,17 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
case BT_CMD_MODULE_CFG_REQ:
if (priv->btmrvl_dev.sendcmdflag &&
event->data[1] == MODULE_BRINGUP_REQ) {
- BT_DBG("EVENT:%s", (event->data[2]) ?
- "Bring-up failed" : "Bring-up succeed");
+ BT_DBG("EVENT:%s",
+ ((event->data[2] == MODULE_BROUGHT_UP) ||
+ (event->data[2] == MODULE_ALREADY_UP)) ?
+ "Bring-up succeed" : "Bring-up failed");
+
+ if (event->length > 3)
+ priv->btmrvl_dev.dev_type = event->data[3];
+ else
+ priv->btmrvl_dev.dev_type = HCI_BREDR;
+
+ BT_DBG("dev_type: %d", priv->btmrvl_dev.dev_type);
} else if (priv->btmrvl_dev.sendcmdflag &&
event->data[1] == MODULE_SHUTDOWN_REQ) {
BT_DBG("EVENT:%s", (event->data[2]) ?
@@ -522,47 +531,20 @@ static int btmrvl_service_main_thread(void *data)
return 0;
}
-struct btmrvl_private *btmrvl_add_card(void *card)
+int btmrvl_register_hdev(struct btmrvl_private *priv)
{
struct hci_dev *hdev = NULL;
- struct btmrvl_private *priv;
int ret;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- BT_ERR("Can not allocate priv");
- goto err_priv;
- }
-
- priv->adapter = kzalloc(sizeof(*priv->adapter), GFP_KERNEL);
- if (!priv->adapter) {
- BT_ERR("Allocate buffer for btmrvl_adapter failed!");
- goto err_adapter;
- }
-
- btmrvl_init_adapter(priv);
-
hdev = hci_alloc_dev();
if (!hdev) {
BT_ERR("Can not allocate HCI device");
goto err_hdev;
}
- BT_DBG("Starting kthread...");
- priv->main_thread.priv = priv;
- spin_lock_init(&priv->driver_lock);
-
- init_waitqueue_head(&priv->main_thread.wait_q);
- priv->main_thread.task = kthread_run(btmrvl_service_main_thread,
- &priv->main_thread, "btmrvl_main_service");
-
priv->btmrvl_dev.hcidev = hdev;
- priv->btmrvl_dev.card = card;
-
hdev->driver_data = priv;
- priv->btmrvl_dev.tx_dnld_rdy = true;
-
hdev->bus = HCI_SDIO;
hdev->open = btmrvl_open;
hdev->close = btmrvl_close;
@@ -572,6 +554,10 @@ struct btmrvl_private *btmrvl_add_card(void *card)
hdev->ioctl = btmrvl_ioctl;
hdev->owner = THIS_MODULE;
+ btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+
+ hdev->dev_type = priv->btmrvl_dev.dev_type;
+
ret = hci_register_dev(hdev);
if (ret < 0) {
BT_ERR("Can not register HCI device");
@@ -582,16 +568,52 @@ struct btmrvl_private *btmrvl_add_card(void *card)
btmrvl_debugfs_init(hdev);
#endif
- return priv;
+ return 0;
err_hci_register_dev:
- /* Stop the thread servicing the interrupts */
- kthread_stop(priv->main_thread.task);
-
hci_free_dev(hdev);
err_hdev:
+ /* Stop the thread servicing the interrupts */
+ kthread_stop(priv->main_thread.task);
+
btmrvl_free_adapter(priv);
+ kfree(priv);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(btmrvl_register_hdev);
+
+struct btmrvl_private *btmrvl_add_card(void *card)
+{
+ struct btmrvl_private *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ BT_ERR("Can not allocate priv");
+ goto err_priv;
+ }
+
+ priv->adapter = kzalloc(sizeof(*priv->adapter), GFP_KERNEL);
+ if (!priv->adapter) {
+ BT_ERR("Allocate buffer for btmrvl_adapter failed!");
+ goto err_adapter;
+ }
+
+ btmrvl_init_adapter(priv);
+
+ BT_DBG("Starting kthread...");
+ priv->main_thread.priv = priv;
+ spin_lock_init(&priv->driver_lock);
+
+ init_waitqueue_head(&priv->main_thread.wait_q);
+ priv->main_thread.task = kthread_run(btmrvl_service_main_thread,
+ &priv->main_thread, "btmrvl_main_service");
+
+ priv->btmrvl_dev.card = card;
+ priv->btmrvl_dev.tx_dnld_rdy = true;
+
+ return priv;
err_adapter:
kfree(priv);
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 0dba76a..df0773e 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -931,7 +931,12 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
priv->hw_host_to_card = btmrvl_sdio_host_to_card;
priv->hw_wakeup_firmware = btmrvl_sdio_wakeup_fw;
- btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+ if (btmrvl_register_hdev(priv)) {
+ BT_ERR("Register hdev failed!");
+ ret = -ENODEV;
+ goto disable_host_int;
+ }
+
priv->btmrvl_dev.psmode = 1;
btmrvl_enable_ps(priv);
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index c0ce813..3f038f5 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -246,7 +246,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
BT_ERR("Can't allocate mem for new packet");
h4->rx_state = H4_W4_PACKET_TYPE;
h4->rx_count = 0;
- return 0;
+ return -ENOMEM;
}
h4->rx_skb->dev = (void *) hu->hdev;
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 5c65014..fb8445c 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -402,7 +402,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
continue;
case HCILL_W4_EVENT_HDR:
- eh = (struct hci_event_hdr *) ll->rx_skb->data;
+ eh = hci_event_hdr(ll->rx_skb);
BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
@@ -410,7 +410,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
continue;
case HCILL_W4_ACL_HDR:
- ah = (struct hci_acl_hdr *) ll->rx_skb->data;
+ ah = hci_acl_hdr(ll->rx_skb);
dlen = __le16_to_cpu(ah->dlen);
BT_DBG("ACL header: dlen %d", dlen);
@@ -419,7 +419,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
continue;
case HCILL_W4_SCO_HDR:
- sh = (struct hci_sco_hdr *) ll->rx_skb->data;
+ sh = hci_sco_hdr(ll->rx_skb);
BT_DBG("SCO header: dlen %d", sh->dlen);
@@ -491,7 +491,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
BT_ERR("Can't allocate mem for new packet");
ll->rx_state = HCILL_W4_PACKET_TYPE;
ll->rx_count = 0;
- return 0;
+ return -ENOMEM;
}
ll->rx_skb->dev = (void *) hu->hdev;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index bb0aefd..3aa7b2a 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -157,7 +157,7 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
break;
case HCI_SCODATA_PKT:
- data->hdev->stat.cmd_tx++;
+ data->hdev->stat.sco_tx++;
break;
};
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3141dd3..e21175b 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -276,11 +276,19 @@ config N_HDLC
Allows synchronous HDLC communications with tty device drivers that
support synchronous HDLC such as the Microgate SyncLink adapter.
- This driver can only be built as a module ( = code which can be
+ This driver can be built as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
The module will be called n_hdlc. If you want to do that, say M
here.
+config N_GSM
+ tristate "GSM MUX line discipline support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on NET
+ help
+ This line discipline provides support for the GSM MUX protocol and
+ presents the mux as a set of 61 individual tty devices.
+
config RISCOM8
tristate "SDL RISCom/8 card support"
depends on SERIAL_NONSTANDARD
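As a rough user-space sketch (not part of this patch; it assumes the N_GSM0710 line-discipline number that this series exports through <linux/tty.h>), the new mux is attached to a modem tty with the standard TIOCSETD ioctl, after which the individual DLCIs appear as their own tty devices:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/tty.h>		/* assumed to provide N_GSM0710 */

    int attach_gsm_mux(const char *port)
    {
    	int ldisc = N_GSM0710;	/* GSM 07.10 mux line discipline */
    	int fd = open(port, O_RDWR | O_NOCTTY);

    	if (fd < 0)
    		return -1;
    	if (ioctl(fd, TIOCSETD, &ldisc) < 0) {
    		close(fd);
    		return -1;
    	}
    	return fd;	/* keep it open: closing detaches the ldisc */
    }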
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index f957edf..d39be4c 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_SYNCLINK) += synclink.o
obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_N_HDLC) += n_hdlc.o
+obj-$(CONFIG_N_GSM) += n_gsm.o
obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
obj-$(CONFIG_SX) += sx.o generic_serial.o
obj-$(CONFIG_RIO) += rio/ generic_serial.o
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index c1ab303..98310e1 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -1573,11 +1573,16 @@ static int __devinit isicom_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device);
/* allot the first empty slot in the array */
- for (index = 0; index < BOARD_COUNT; index++)
+ for (index = 0; index < BOARD_COUNT; index++) {
if (isi_card[index].base == 0) {
board = &isi_card[index];
break;
}
+ }
+ if (index == BOARD_COUNT) {
+ retval = -ENODEV;
+ goto err_disable;
+ }
board->index = index;
board->base = pci_resource_start(pdev, 3);
@@ -1624,6 +1629,7 @@ errunrr:
errdec:
board->base = 0;
card_count--;
+err_disable:
pci_disable_device(pdev);
err:
return retval;
diff --git a/drivers/char/n_gsm.c b/drivers/char/n_gsm.c
new file mode 100644
index 0000000..c4161d5
--- /dev/null
+++ b/drivers/char/n_gsm.c
@@ -0,0 +1,2763 @@
+/*
+ * n_gsm.c GSM 0710 tty multiplexor
+ * Copyright (c) 2009/10 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * * THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE *
+ *
+ * TO DO:
+ * Mostly done: ioctls for setting modes/timing
+ * Partly done: hooks so you can pull off frames to non tty devs
+ * Restart DLCI 0 when it closes ?
+ * Test basic encoding
+ * Improve the tx engine
+ * Resolve tx side locking by adding a queue_head and routing
+ * all control traffic via it
+ * General tidy/document
+ * Review the locking/move to refcounts more (mux now moved to an
+ * alloc/free model ready)
+ * Use newest tty open/close port helpers and install hooks
+ * What to do about power functions ?
+ * Termios setting and negotiation
+ * Do we need a 'which mux are you' ioctl to correlate mux and tty sets
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/bitops.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/tty_flip.h>
+#include <linux/tty_driver.h>
+#include <linux/serial.h>
+#include <linux/kfifo.h>
+#include <linux/skbuff.h>
+#include <linux/gsmmux.h>
+
+static int debug;
+module_param(debug, int, 0600);
+
+#define T1 (HZ/10)
+#define T2 (HZ/3)
+#define N2 3
+
+/* Use long timers for testing at low speed with debug on */
+#ifdef DEBUG_TIMING
+#define T1 HZ
+#define T2 (2 * HZ)
+#endif
+
+/* Semi-arbitrary buffer size limits. 0710 is normally run with 32-64 byte
+ limits so this is plenty */
+#define MAX_MRU 512
+#define MAX_MTU 512
+
+/*
+ * Each block of data we have queued to go out is in the form of
+ * a gsm_msg which holds everything we need in a link layer independent
+ * format
+ */
+
+struct gsm_msg {
+ struct gsm_msg *next;
+ u8 addr; /* DLCI address + flags */
+ u8 ctrl; /* Control byte + flags */
+ unsigned int len; /* Length of data block (can be zero) */
+ unsigned char *data; /* Points into buffer but not at the start */
+ unsigned char buffer[0];
+};
+
+/*
+ * Each active data link has a gsm_dlci structure associated which ties
+ * the link layer to an optional tty (if the tty side is open). To avoid
+ * complexity right now these are only ever freed up when the mux is
+ * shut down.
+ *
+ * At the moment we don't free DLCI objects until the mux is torn down;
+ * this avoids object lifetime issues but might be worth reviewing later.
+ */
+
+struct gsm_dlci {
+ struct gsm_mux *gsm;
+ int addr;
+ int state;
+#define DLCI_CLOSED 0
+#define DLCI_OPENING 1 /* Sending SABM not seen UA */
+#define DLCI_OPEN 2 /* SABM/UA complete */
+#define DLCI_CLOSING 3 /* Sending DISC not seen UA/DM */
+
+ /* Link layer */
+ spinlock_t lock; /* Protects the internal state */
+ struct timer_list t1; /* Retransmit timer for SABM and UA */
+ int retries;
+ /* Uplink tty if active */
+ struct tty_port port; /* The tty bound to this DLCI if there is one */
+ struct kfifo *fifo; /* Queue fifo for the DLCI */
+ struct kfifo _fifo; /* For new fifo API porting only */
+ int adaption; /* Adaption layer in use */
+ u32 modem_rx; /* Our incoming virtual modem lines */
+ u32 modem_tx; /* Our outgoing modem lines */
+ int dead; /* Refuse re-open */
+ /* Flow control */
+ int throttled; /* Private copy of throttle state */
+ int constipated; /* Throttle status for outgoing */
+ /* Packetised I/O */
+ struct sk_buff *skb; /* Frame being sent */
+ struct sk_buff_head skb_list; /* Queued frames */
+ /* Data handling callback */
+ void (*data)(struct gsm_dlci *dlci, u8 *data, int len);
+};
+
+/* DLCI 0, 62/63 are special or reserved; see gsmtty_open */
+
+#define NUM_DLCI 64
+
+/*
+ * DLCI 0 is used to pass control blocks out of band of the data
+ * flow (and with a higher link priority). One command can be outstanding
+ * at a time and we use this structure to manage them. They are created
+ * and destroyed by the user context, and updated by the receive paths
+ * and timers
+ */
+
+struct gsm_control {
+ u8 cmd; /* Command we are issuing */
+ u8 *data; /* Data for the command in case we retransmit */
+ int len; /* Length of block for retransmission */
+ int done; /* Done flag */
+ int error; /* Error if any */
+};
+
+/*
+ * Each GSM mux we have is represented by this structure. If we are
+ * operating as an ldisc then we use this structure as our ldisc
+ * state. We need to sort out lifetimes and locking with respect
+ * to the gsm mux array. For now we don't free DLCI objects that
+ * have been instantiated until the mux itself is terminated.
+ *
+ * To consider further: tty open versus mux shutdown.
+ */
+
+struct gsm_mux {
+ struct tty_struct *tty; /* The tty our ldisc is bound to */
+ spinlock_t lock;
+
+ /* Events on the GSM channel */
+ wait_queue_head_t event;
+
+ /* Bits for GSM mode decoding */
+
+ /* Framing Layer */
+ unsigned char *buf;
+ int state;
+#define GSM_SEARCH 0
+#define GSM_START 1
+#define GSM_ADDRESS 2
+#define GSM_CONTROL 3
+#define GSM_LEN 4
+#define GSM_DATA 5
+#define GSM_FCS 6
+#define GSM_OVERRUN 7
+ unsigned int len;
+ unsigned int address;
+ unsigned int count;
+ int escape;
+ int encoding;
+ u8 control;
+ u8 fcs;
+ u8 *txframe; /* TX framing buffer */
+
+ /* Methods for the receiver side */
+ void (*receive)(struct gsm_mux *gsm, u8 ch);
+ void (*error)(struct gsm_mux *gsm, u8 ch, u8 flag);
+ /* And transmit side */
+ int (*output)(struct gsm_mux *mux, u8 *data, int len);
+
+ /* Link Layer */
+ unsigned int mru;
+ unsigned int mtu;
+ int initiator; /* Did we initiate connection */
+ int dead; /* Has the mux been shut down */
+ struct gsm_dlci *dlci[NUM_DLCI];
+ int constipated; /* Asked by remote to shut up */
+
+ spinlock_t tx_lock;
+ unsigned int tx_bytes; /* TX data outstanding */
+#define TX_THRESH_HI 8192
+#define TX_THRESH_LO 2048
+ struct gsm_msg *tx_head; /* Pending data packets */
+ struct gsm_msg *tx_tail;
+
+ /* Control messages */
+ struct timer_list t2_timer; /* Retransmit timer for commands */
+ int cretries; /* Command retry counter */
+ struct gsm_control *pending_cmd;/* Our current pending command */
+ spinlock_t control_lock; /* Protects the pending command */
+
+ /* Configuration */
+ int adaption; /* 1 or 2 supported */
+ u8 ftype; /* UI or UIH */
+ int t1, t2; /* Timers in 1/100th of a sec */
+ int n2; /* Retry count */
+
+ /* Statistics (not currently exposed) */
+ unsigned long bad_fcs;
+ unsigned long malformed;
+ unsigned long io_error;
+ unsigned long bad_size;
+ unsigned long unsupported;
+};
+
+
+/*
+ * Mux objects - needed so that we can translate a tty index into the
+ * relevant mux and DLCI.
+ */
+
+#define MAX_MUX 4 /* 256 minors */
+static struct gsm_mux *gsm_mux[MAX_MUX]; /* GSM muxes */
+static spinlock_t gsm_mux_lock;
+
+/*
+ * This section of the driver logic implements the GSM encodings
+ * both the basic and the 'advanced'. Reliable transport is not
+ * supported.
+ */
+
+#define CR 0x02
+#define EA 0x01
+#define PF 0x10
+
+/* I is special: the rest are ..*/
+#define RR 0x01
+#define UI 0x03
+#define RNR 0x05
+#define REJ 0x09
+#define DM 0x0F
+#define SABM 0x2F
+#define DISC 0x43
+#define UA 0x63
+#define UIH 0xEF
+
+/* Channel commands */
+#define CMD_NSC 0x09
+#define CMD_TEST 0x11
+#define CMD_PSC 0x21
+#define CMD_RLS 0x29
+#define CMD_FCOFF 0x31
+#define CMD_PN 0x41
+#define CMD_RPN 0x49
+#define CMD_FCON 0x51
+#define CMD_CLD 0x61
+#define CMD_SNC 0x69
+#define CMD_MSC 0x71
+
+/* Virtual modem bits */
+#define MDM_FC 0x01
+#define MDM_RTC 0x02
+#define MDM_RTR 0x04
+#define MDM_IC 0x20
+#define MDM_DV 0x40
+
+#define GSM0_SOF 0xF9
+#define GSM1_SOF 0x7E
+#define GSM1_ESCAPE 0x7D
+#define GSM1_ESCAPE_BITS 0x20
+#define XON 0x11
+#define XOFF 0x13
+
+static const struct tty_port_operations gsm_port_ops;
+
+/*
+ * CRC table for GSM 0710
+ */
+
+static const u8 gsm_fcs8[256] = {
+ 0x00, 0x91, 0xE3, 0x72, 0x07, 0x96, 0xE4, 0x75,
+ 0x0E, 0x9F, 0xED, 0x7C, 0x09, 0x98, 0xEA, 0x7B,
+ 0x1C, 0x8D, 0xFF, 0x6E, 0x1B, 0x8A, 0xF8, 0x69,
+ 0x12, 0x83, 0xF1, 0x60, 0x15, 0x84, 0xF6, 0x67,
+ 0x38, 0xA9, 0xDB, 0x4A, 0x3F, 0xAE, 0xDC, 0x4D,
+ 0x36, 0xA7, 0xD5, 0x44, 0x31, 0xA0, 0xD2, 0x43,
+ 0x24, 0xB5, 0xC7, 0x56, 0x23, 0xB2, 0xC0, 0x51,
+ 0x2A, 0xBB, 0xC9, 0x58, 0x2D, 0xBC, 0xCE, 0x5F,
+ 0x70, 0xE1, 0x93, 0x02, 0x77, 0xE6, 0x94, 0x05,
+ 0x7E, 0xEF, 0x9D, 0x0C, 0x79, 0xE8, 0x9A, 0x0B,
+ 0x6C, 0xFD, 0x8F, 0x1E, 0x6B, 0xFA, 0x88, 0x19,
+ 0x62, 0xF3, 0x81, 0x10, 0x65, 0xF4, 0x86, 0x17,
+ 0x48, 0xD9, 0xAB, 0x3A, 0x4F, 0xDE, 0xAC, 0x3D,
+ 0x46, 0xD7, 0xA5, 0x34, 0x41, 0xD0, 0xA2, 0x33,
+ 0x54, 0xC5, 0xB7, 0x26, 0x53, 0xC2, 0xB0, 0x21,
+ 0x5A, 0xCB, 0xB9, 0x28, 0x5D, 0xCC, 0xBE, 0x2F,
+ 0xE0, 0x71, 0x03, 0x92, 0xE7, 0x76, 0x04, 0x95,
+ 0xEE, 0x7F, 0x0D, 0x9C, 0xE9, 0x78, 0x0A, 0x9B,
+ 0xFC, 0x6D, 0x1F, 0x8E, 0xFB, 0x6A, 0x18, 0x89,
+ 0xF2, 0x63, 0x11, 0x80, 0xF5, 0x64, 0x16, 0x87,
+ 0xD8, 0x49, 0x3B, 0xAA, 0xDF, 0x4E, 0x3C, 0xAD,
+ 0xD6, 0x47, 0x35, 0xA4, 0xD1, 0x40, 0x32, 0xA3,
+ 0xC4, 0x55, 0x27, 0xB6, 0xC3, 0x52, 0x20, 0xB1,
+ 0xCA, 0x5B, 0x29, 0xB8, 0xCD, 0x5C, 0x2E, 0xBF,
+ 0x90, 0x01, 0x73, 0xE2, 0x97, 0x06, 0x74, 0xE5,
+ 0x9E, 0x0F, 0x7D, 0xEC, 0x99, 0x08, 0x7A, 0xEB,
+ 0x8C, 0x1D, 0x6F, 0xFE, 0x8B, 0x1A, 0x68, 0xF9,
+ 0x82, 0x13, 0x61, 0xF0, 0x85, 0x14, 0x66, 0xF7,
+ 0xA8, 0x39, 0x4B, 0xDA, 0xAF, 0x3E, 0x4C, 0xDD,
+ 0xA6, 0x37, 0x45, 0xD4, 0xA1, 0x30, 0x42, 0xD3,
+ 0xB4, 0x25, 0x57, 0xC6, 0xB3, 0x22, 0x50, 0xC1,
+ 0xBA, 0x2B, 0x59, 0xC8, 0xBD, 0x2C, 0x5E, 0xCF
+};
+
+#define INIT_FCS 0xFF
+#define GOOD_FCS 0xCF
+
+/**
+ * gsm_fcs_add - update FCS
+ * @fcs: Current FCS
+ * @c: Next data
+ *
+ * Update the FCS to include c. Uses the algorithm in the specification
+ * notes.
+ */
+
+static inline u8 gsm_fcs_add(u8 fcs, u8 c)
+{
+ return gsm_fcs8[fcs ^ c];
+}
+
+/**
+ * gsm_fcs_add_block - update FCS for a block
+ * @fcs: Current FCS
+ * @c: buffer of data
+ * @len: length of buffer
+ *
+ * Update the FCS to include c. Uses the algorithm in the specification
+ * notes.
+ */
+
+static inline u8 gsm_fcs_add_block(u8 fcs, u8 *c, int len)
+{
+ while (len--)
+ fcs = gsm_fcs8[fcs ^ *c++];
+ return fcs;
+}
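/*
 * Illustration only, not part of this patch: because the sender transmits
 * 0xFF - fcs (the ones complement of the running FCS), folding the received
 * FCS octet back through the table always leaves the fixed residue GOOD_FCS.
 * A receiver can therefore check whatever octets the FCS protects like this;
 * demo_fcs_ok() is a hypothetical helper, not a function from this driver.
 */
static bool demo_fcs_ok(u8 *protected_octets, int len, u8 received_fcs)
{
	u8 fcs = gsm_fcs_add_block(INIT_FCS, protected_octets, len);

	return gsm_fcs_add(fcs, received_fcs) == GOOD_FCS;
}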
+
+/**
+ * gsm_read_ea - read a byte into an EA
+ * @val: variable holding value
+ * @c: byte going into the EA
+ *
+ * Processes one byte of an EA. Updates the passed variable
+ * and returns 1 if the EA is now completely read
+ */
+
+static int gsm_read_ea(unsigned int *val, u8 c)
+{
+ /* Add the next 7 bits into the value */
+ *val <<= 7;
+ *val |= c >> 1;
+ /* Was this the last byte of the EA 1 = yes*/
+ return c & EA;
+}
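/*
 * Illustration only, not part of this patch: a multi-octet field such as
 * the basic-mode length is accumulated by calling gsm_read_ea() on each
 * octet until it reports that the EA bit closed the field.
 * demo_read_length() is a hypothetical helper shown only for illustration.
 */
static int demo_read_length(const u8 *p, unsigned int *len)
{
	int n = 0;

	*len = 0;
	while (!gsm_read_ea(len, p[n++]))
		;		/* 7 value bits folded in per octet */
	return n;		/* octets consumed by the field */
}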
+
+/**
+ * gsm_encode_modem - encode modem data bits
+ * @dlci: DLCI to encode from
+ *
+ * Returns the correct GSM encoded modem status bits (6 bit field) for
+ * the current status of the DLCI and attached tty object
+ */
+
+static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
+{
+ u8 modembits = 0;
+ /* FC is true flow control not modem bits */
+ if (dlci->throttled)
+ modembits |= MDM_FC;
+ if (dlci->modem_tx & TIOCM_DTR)
+ modembits |= MDM_RTC;
+ if (dlci->modem_tx & TIOCM_RTS)
+ modembits |= MDM_RTR;
+ if (dlci->modem_tx & TIOCM_RI)
+ modembits |= MDM_IC;
+ if (dlci->modem_tx & TIOCM_CD)
+ modembits |= MDM_DV;
+ return modembits;
+}
+
+/**
+ * gsm_print_packet - display a frame for debug
+ * @hdr: header to print before decode
+ * @addr: address EA from the frame
+ * @cr: C/R bit from the frame
+ * @control: control including PF bit
+ * @data: following data bytes
+ * @dlen: length of data
+ *
+ * Displays a packet in human readable format for debugging purposes. The
+ * style is based on amateur radio LAP-B dump display.
+ */
+
+static void gsm_print_packet(const char *hdr, int addr, int cr,
+ u8 control, const u8 *data, int dlen)
+{
+ if (!(debug & 1))
+ return;
+
+ printk(KERN_INFO "%s %d) %c: ", hdr, addr, "RC"[cr]);
+
+ switch (control & ~PF) {
+ case SABM:
+ printk(KERN_CONT "SABM");
+ break;
+ case UA:
+ printk(KERN_CONT "UA");
+ break;
+ case DISC:
+ printk(KERN_CONT "DISC");
+ break;
+ case DM:
+ printk(KERN_CONT "DM");
+ break;
+ case UI:
+ printk(KERN_CONT "UI");
+ break;
+ case UIH:
+ printk(KERN_CONT "UIH");
+ break;
+ default:
+ if (!(control & 0x01)) {
+ printk(KERN_CONT "I N(S)%d N(R)%d",
+ (control & 0x0E) >> 1, (control & 0xE0) >> 5);
+ } else switch (control & 0x0F) {
+ case RR:
+ printk("RR(%d)", (control & 0xE0) >> 5);
+ break;
+ case RNR:
+ printk("RNR(%d)", (control & 0xE0) >> 5);
+ break;
+ case REJ:
+ printk("REJ(%d)", (control & 0xE0) >> 5);
+ break;
+ default:
+ printk(KERN_CONT "[%02X]", control);
+ }
+ }
+
+ if (control & PF)
+ printk(KERN_CONT "(P)");
+ else
+ printk(KERN_CONT "(F)");
+
+ if (dlen) {
+ int ct = 0;
+ while (dlen--) {
+ if (ct % 8 == 0)
+ printk(KERN_CONT "\n ");
+ printk(KERN_CONT "%02X ", *data++);
+ ct++;
+ }
+ }
+ printk(KERN_CONT "\n");
+}
+
+
+/*
+ * Link level transmission side
+ */
+
+/**
+ * gsm_stuff_frame - bytestuff a packet
+ * @ibuf: input
+ * @obuf: output
+ * @len: length of input
+ *
+ * Expand a buffer by bytestuffing it. The worst case size change
+ * is doubling and the caller is responsible for handing out
+ * suitably sized buffers.
+ */
+
+static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
+{
+ int olen = 0;
+ while (len--) {
+ if (*input == GSM1_SOF || *input == GSM1_ESCAPE
+ || *input == XON || *input == XOFF) {
+ *output++ = GSM1_ESCAPE;
+ *output++ = *input++ ^ GSM1_ESCAPE_BITS;
+ olen++;
+ } else
+ *output++ = *input++;
+ olen++;
+ }
+ return olen;
+}
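/*
 * Illustration only, not part of this patch: demo_stuff() shows the escaping
 * gsm_stuff_frame() performs for the advanced option. The input
 * { GSM1_SOF, 0x41 } becomes { GSM1_ESCAPE, GSM1_SOF ^ GSM1_ESCAPE_BITS,
 * 0x41 }, i.e. three octets, which is why callers size the output buffer
 * for the 2x worst case. demo_stuff() is a hypothetical helper.
 */
static int demo_stuff(void)
{
	u8 in[2] = { GSM1_SOF, 0x41 };
	u8 out[4];		/* worst case doubles the input */

	return gsm_stuff_frame(in, out, 2);	/* returns 3 here */
}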
+
+static void hex_packet(const unsigned char *p, int len)
+{
+ int i;
+ for (i = 0; i < len; i++) {
+ if (i && (i % 16) == 0)
+ printk("\n");
+ printk("%02X ", *p++);
+ }
+ printk("\n");
+}
+
+/**
+ * gsm_send - send a control frame
+ * @gsm: our GSM mux
+ * @addr: address for control frame
+ * @cr: command/response bit
+ * @control: control byte including PF bit
+ *
+ * Format up and transmit a control frame. These do not go via the
+ * queueing logic as they should be transmitted ahead of data when
+ * they are needed.
+ *
+ * FIXME: Lock versus data TX path
+ */
+
+static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
+{
+ int len;
+ u8 cbuf[10];
+ u8 ibuf[3];
+
+ switch (gsm->encoding) {
+ case 0:
+ cbuf[0] = GSM0_SOF;
+ cbuf[1] = (addr << 2) | (cr << 1) | EA;
+ cbuf[2] = control;
+ cbuf[3] = EA; /* Length of data = 0 */
+ cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3);
+ cbuf[5] = GSM0_SOF;
+ len = 6;
+ break;
+ case 1:
+ case 2:
+ /* Control frame + packing (but not frame stuffing) in mode 1 */
+ ibuf[0] = (addr << 2) | (cr << 1) | EA;
+ ibuf[1] = control;
+ ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2);
+ /* Stuffing may double the size worst case */
+ len = gsm_stuff_frame(ibuf, cbuf + 1, 3);
+ /* Now add the SOF markers */
+ cbuf[0] = GSM1_SOF;
+ cbuf[len + 1] = GSM1_SOF;
+ /* FIXME: we can omit the lead one in many cases */
+ len += 2;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ gsm->output(gsm, cbuf, len);
+ gsm_print_packet("-->", addr, cr, control, NULL, 0);
+}
+
+/**
+ * gsm_response - send a control response
+ * @gsm: our GSM mux
+ * @addr: address for control frame
+ * @control: control byte including PF bit
+ *
+ * Format up and transmit a link level response frame.
+ */
+
+static inline void gsm_response(struct gsm_mux *gsm, int addr, int control)
+{
+ gsm_send(gsm, addr, 0, control);
+}
+
+/**
+ * gsm_command - send a control command
+ * @gsm: our GSM mux
+ * @addr: address for control frame
+ * @control: control byte including PF bit
+ *
+ * Format up and transmit a link level command frame.
+ */
+
+static inline void gsm_command(struct gsm_mux *gsm, int addr, int control)
+{
+ gsm_send(gsm, addr, 1, control);
+}
+
+/* Data transmission */
+
+#define HDR_LEN 6 /* ADDR CTRL [LEN.2] DATA FCS */
+
+/**
+ * gsm_data_alloc - allocate data frame
+ * @gsm: GSM mux
+ * @addr: DLCI address
+ * @len: length excluding header and FCS
+ * @ctrl: control byte
+ *
+ * Allocate a new data buffer for sending frames with data. Space is left
+ * at the front for header bytes but that is treated as an implementation
+ * detail and not for the high level code to use
+ */
+
+static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
+ u8 ctrl)
+{
+ struct gsm_msg *m = kmalloc(sizeof(struct gsm_msg) + len + HDR_LEN,
+ GFP_ATOMIC);
+ if (m == NULL)
+ return NULL;
+ m->data = m->buffer + HDR_LEN - 1; /* Allow for FCS */
+ m->len = len;
+ m->addr = addr;
+ m->ctrl = ctrl;
+ m->next = NULL;
+ return m;
+}
+
+/**
+ * gsm_data_kick - poke the queue
+ * @gsm: GSM Mux
+ *
+ * The tty device has called us to indicate that room has appeared in
+ * the transmit queue. Ram more data into the pipe if we have any
+ *
+ * FIXME: lock against link layer control transmissions
+ */
+
+static void gsm_data_kick(struct gsm_mux *gsm)
+{
+ struct gsm_msg *msg = gsm->tx_head;
+ int len;
+ int skip_sof = 0;
+
+ /* FIXME: We need to apply this solely to data messages */
+ if (gsm->constipated)
+ return;
+
+ while (gsm->tx_head != NULL) {
+ msg = gsm->tx_head;
+ if (gsm->encoding != 0) {
+ gsm->txframe[0] = GSM1_SOF;
+ len = gsm_stuff_frame(msg->data,
+ gsm->txframe + 1, msg->len);
+ gsm->txframe[len + 1] = GSM1_SOF;
+ len += 2;
+ } else {
+ gsm->txframe[0] = GSM0_SOF;
+ memcpy(gsm->txframe + 1 , msg->data, msg->len);
+ gsm->txframe[msg->len + 1] = GSM0_SOF;
+ len = msg->len + 2;
+ }
+
+ if (debug & 4) {
+ printk("gsm_data_kick: \n");
+ hex_packet(gsm->txframe, len);
+ }
+
+ if (gsm->output(gsm, gsm->txframe + skip_sof,
+ len - skip_sof) < 0)
+ break;
+ /* FIXME: Can eliminate one SOF in many more cases */
+ gsm->tx_head = msg->next;
+ if (gsm->tx_head == NULL)
+ gsm->tx_tail = NULL;
+ gsm->tx_bytes -= msg->len;
+ kfree(msg);
+ /* For a burst of frames skip the extra SOF within the
+ burst */
+ skip_sof = 1;
+ }
+}
+
+/**
+ * __gsm_data_queue - queue a UI or UIH frame
+ * @dlci: DLCI sending the data
+ * @msg: message queued
+ *
+ * Add data to the transmit queue and try and get stuff moving
+ * out of the mux tty if not already doing so. The Caller must hold
+ * the gsm tx lock.
+ */
+
+static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
+{
+ struct gsm_mux *gsm = dlci->gsm;
+ u8 *dp = msg->data;
+ u8 *fcs = dp + msg->len;
+
+ /* Fill in the header */
+ if (gsm->encoding == 0) {
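+ /* Basic option carries an explicit length field: one EA octet for
+ lengths under 128 bytes, two octets otherwise */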
+ if (msg->len < 128)
+ *--dp = (msg->len << 1) | EA;
+ else {
+ *--dp = (msg->len >> 7); /* Bits 8-15 of the length */
+ *--dp = (msg->len & 127) << 1; /* Bits 1-7, EA = 0 so a second octet follows */
+ }
+ }
+
+ *--dp = msg->ctrl;
+ if (gsm->initiator)
+ *--dp = (msg->addr << 2) | 2 | EA;
+ else
+ *--dp = (msg->addr << 2) | EA;
+ *fcs = gsm_fcs_add_block(INIT_FCS, dp , msg->data - dp);
+ /* Ugly protocol layering violation */
+ if (msg->ctrl == UI || msg->ctrl == (UI|PF))
+ *fcs = gsm_fcs_add_block(*fcs, msg->data, msg->len);
+ *fcs = 0xFF - *fcs;
+
+ gsm_print_packet("Q> ", msg->addr, gsm->initiator, msg->ctrl,
+ msg->data, msg->len);
+
+ /* Move the header back and adjust the length, also allow for the FCS
+ now tacked on the end */
+ msg->len += (msg->data - dp) + 1;
+ msg->data = dp;
+
+ /* Add to the actual output queue */
+ if (gsm->tx_tail)
+ gsm->tx_tail->next = msg;
+ else
+ gsm->tx_head = msg;
+ gsm->tx_tail = msg;
+ gsm->tx_bytes += msg->len;
+ gsm_data_kick(gsm);
+}
+
+/**
+ * gsm_data_queue - queue a UI or UIH frame
+ * @dlci: DLCI sending the data
+ * @msg: message queued
+ *
+ * Add data to the transmit queue and try and get stuff moving
+ * out of the mux tty if not already doing so. Take the
+ * gsm tx lock and dlci lock.
+ */
+
+static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ __gsm_data_queue(dlci, msg);
+ spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+}
+
+/**
+ * gsm_dlci_data_output - try and push data out of a DLCI
+ * @gsm: mux
+ * @dlci: the DLCI to pull data from
+ *
+ * Pull data from a DLCI and send it into the transmit queue if there
+ * is data. Keep to the MTU of the mux. This path handles the usual tty
+ * interface which is a byte stream with optional modem data.
+ *
+ * Caller must hold the tx_lock of the mux.
+ */
+
+static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
+{
+ struct gsm_msg *msg;
+ u8 *dp;
+ int len, size;
+ int h = dlci->adaption - 1;
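+ /* Adaption 1 adds no convergence layer header, adaption 2 adds one
+ modem status octet */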
+
+ len = kfifo_len(dlci->fifo);
+ if (len == 0)
+ return 0;
+
+ /* MTU/MRU count only the data bits */
+ if (len > gsm->mtu)
+ len = gsm->mtu;
+
+ size = len + h;
+
+ msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+ /* FIXME: need a timer or something to kick this so it can't
+ get stuck with no work outstanding and no buffer free */
+ if (msg == NULL)
+ return -ENOMEM;
+ dp = msg->data;
+ switch (dlci->adaption) {
+ case 1: /* Unstructured */
+ break;
+ case 2: /* Unstructured with modem bits. Always one byte as we never
+ send inline break data */
+ *dp++ = (gsm_encode_modem(dlci) << 1) | EA;
+ len--;
+ break;
+ }
+ WARN_ON(kfifo_out_locked(dlci->fifo, dp , len, &dlci->lock) != len);
+ __gsm_data_queue(dlci, msg);
+ /* Bytes of data we used up */
+ return size;
+}
+
+/**
+ * gsm_dlci_data_output_framed - try and push data out of a DLCI
+ * @gsm: mux
+ * @dlci: the DLCI to pull data from
+ *
+ * Pull data from a DLCI and send it into the transmit queue if there
+ * is data. Keep to the MTU of the mux. This path handles framed data
+ * queued as skbuffs to the DLCI.
+ *
+ * Caller must hold the tx_lock of the mux.
+ */
+
+static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+ struct gsm_dlci *dlci)
+{
+ struct gsm_msg *msg;
+ u8 *dp;
+ int len, size;
+ int last = 0, first = 0;
+ int overhead = 0;
+
+ /* One byte per frame is used for B/F flags */
+ if (dlci->adaption == 4)
+ overhead = 1;
+
+ /* dlci->skb is locked by tx_lock */
+ if (dlci->skb == NULL) {
+ dlci->skb = skb_dequeue(&dlci->skb_list);
+ if (dlci->skb == NULL)
+ return 0;
+ first = 1;
+ }
+ len = dlci->skb->len + overhead;
+
+ /* MTU/MRU count only the data bits */
+ if (len > gsm->mtu) {
+ if (dlci->adaption == 3) {
+ /* Over long frame, bin it */
+ kfree_skb(dlci->skb);
+ dlci->skb = NULL;
+ return 0;
+ }
+ len = gsm->mtu;
+ } else
+ last = 1;
+
+ size = len + overhead;
+ msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+
+ /* FIXME: need a timer or something to kick this so it can't
+ get stuck with no work outstanding and no buffer free */
+ if (msg == NULL)
+ return -ENOMEM;
+ dp = msg->data;
+
+ if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */
+ /* Flag byte to carry the start/end info */
+ *dp++ = last << 7 | first << 6 | 1; /* EA */
+ len--;
+ }
+ memcpy(dp, skb_pull(dlci->skb, len), len);
+ __gsm_data_queue(dlci, msg);
+ if (last)
+ dlci->skb = NULL;
+ return size;
+}
+
+/**
+ * gsm_dlci_data_sweep - look for data to send
+ * @gsm: the GSM mux
+ *
+ * Sweep the GSM mux channels in priority order looking for ones with
+ * data to send. We could do with optimising this scan a bit. We aim
+ * to fill the queue totally or up to TX_THRESH_HI bytes. Once we hit
+ * TX_THRESH_LO we get called again
+ *
+ * FIXME: We should round robin between groups and in theory you can
+ * renegotiate DLCI priorities with optional stuff. Needs optimising.
+ */
+
+static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
+{
+ int len;
+ /* Priority ordering: We should do priority with RR of the groups */
+ int i = 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ while (i < NUM_DLCI) {
+ struct gsm_dlci *dlci;
+
+ if (gsm->tx_bytes > TX_THRESH_HI)
+ break;
+ dlci = gsm->dlci[i];
+ if (dlci == NULL || dlci->constipated) {
+ i++;
+ continue;
+ }
+ if (dlci->adaption < 3)
+ len = gsm_dlci_data_output(gsm, dlci);
+ else
+ len = gsm_dlci_data_output_framed(gsm, dlci);
+ if (len < 0)
+ return;
+ /* DLCI empty - try the next */
+ if (len == 0)
+ i++;
+ }
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+}
+
+/**
+ * gsm_dlci_data_kick - transmit if possible
+ * @dlci: DLCI to kick
+ *
+ * Transmit data from this DLCI if the queue is empty. We can't rely on
+ * a tty wakeup except when we filled the pipe so we need to fire off
+ * new data ourselves in other cases.
+ */
+
+static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ /* If we have nothing running then we need to fire up */
+ if (dlci->gsm->tx_bytes == 0)
+ gsm_dlci_data_output(dlci->gsm, dlci);
+ else if (dlci->gsm->tx_bytes < TX_THRESH_LO)
+ gsm_dlci_data_sweep(dlci->gsm);
+ spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+}
+
+/*
+ * Control message processing
+ */
+
+
+/**
+ * gsm_control_reply - send a response frame to a control
+ * @gsm: gsm channel
+ * @cmd: the command to use
+ * @data: data to follow encoded info
+ * @dlen: length of data
+ *
+ * Encode up and queue a UI/UIH frame containing our response.
+ */
+
+static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data,
+ int dlen)
+{
+ struct gsm_msg *msg;
+ msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype);
+ if (msg == NULL)
+ return;
+ msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */
+ msg->data[1] = (dlen << 1) | EA;
+ memcpy(msg->data + 2, data, dlen);
+ gsm_data_queue(gsm->dlci[0], msg);
+}
+
+/**
+ * gsm_process_modem - process received modem status
+ * @tty: virtual tty bound to the DLCI
+ * @dlci: DLCI to affect
+ * @modem: modem bits (full EA)
+ *
+ * Used when a modem control message or line state inline in adaption
+ * layer 2 is processed. Sort out the local modem state and throttles
+ */
+
+static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
+ u32 modem)
+{
+ int mlines = 0;
+ u8 brk = modem >> 6;
+
+ /* Flow control/ready to communicate */
+ if (modem & MDM_FC) {
+ /* Need to throttle our output on this device */
+ dlci->constipated = 1;
+ }
+ if (modem & MDM_RTC) {
+ mlines |= TIOCM_DSR | TIOCM_DTR;
+ dlci->constipated = 0;
+ gsm_dlci_data_kick(dlci);
+ }
+ /* Map modem bits */
+ if (modem & MDM_RTR)
+ mlines |= TIOCM_RTS | TIOCM_CTS;
+ if (modem & MDM_IC)
+ mlines |= TIOCM_RI;
+ if (modem & MDM_DV)
+ mlines |= TIOCM_CD;
+
+ /* Carrier drop -> hangup */
+ if (tty) {
+ if ((mlines & TIOCM_CD) == 0 && (dlci->modem_rx & TIOCM_CD))
+ if (!(tty->termios->c_cflag & CLOCAL))
+ tty_hangup(tty);
+ if (brk & 0x01)
+ tty_insert_flip_char(tty, 0, TTY_BREAK);
+ }
+ dlci->modem_rx = mlines;
+}
+
+/**
+ * gsm_control_modem - modem status received
+ * @gsm: GSM channel
+ * @data: data following command
+ * @clen: command length
+ *
+ * We have received a modem status control message. This is used by
+ * the GSM mux protocol to pass virtual modem line status and optionally
+ * to indicate break signals. Unpack it, convert to Linux representation
+ * and if need be stuff a break message down the tty.
+ */
+
+static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
+{
+ unsigned int addr = 0;
+ unsigned int modem = 0;
+ struct gsm_dlci *dlci;
+ int len = clen;
+ u8 *dp = data;
+ struct tty_struct *tty;
+
+ while (gsm_read_ea(&addr, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ /* Must be at least one byte following the EA */
+ len--;
+ if (len <= 0)
+ return;
+
+ addr >>= 1;
+ /* Closed port, or invalid ? */
+ if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL)
+ return;
+ dlci = gsm->dlci[addr];
+
+ while (gsm_read_ea(&modem, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ tty = tty_port_tty_get(&dlci->port);
+ gsm_process_modem(tty, dlci, modem);
+ if (tty) {
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+ }
+ gsm_control_reply(gsm, CMD_MSC, data, clen);
+}
+
+/**
+ * gsm_control_rls - remote line status
+ * @gsm: GSM channel
+ * @data: data bytes
+ * @clen: data length
+ *
+ * The modem sends us a two byte message on the control channel whenever
+ * it wishes to send us an error state from the virtual link. Stuff
+ * this into the uplink tty if present
+ */
+
+static void gsm_control_rls(struct gsm_mux *gsm, u8 *data, int clen)
+{
+ struct tty_struct *tty;
+ unsigned int addr = 0;
+ u8 bits;
+ int len = clen;
+ u8 *dp = data;
+
+ while (gsm_read_ea(&addr, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ /* Must be at least one byte following ea */
+ len--;
+ if (len <= 0)
+ return;
+ addr >>= 1;
+ /* Closed port, or invalid ? */
+ if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL)
+ return;
+ /* No error ? */
+ bits = *dp;
+ if ((bits & 1) == 0)
+ return;
+ /* See if we have an uplink tty */
+ tty = tty_port_tty_get(&gsm->dlci[addr]->port);
+
+ if (tty) {
+ if (bits & 2)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ if (bits & 4)
+ tty_insert_flip_char(tty, 0, TTY_PARITY);
+ if (bits & 8)
+ tty_insert_flip_char(tty, 0, TTY_FRAME);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+ }
+ gsm_control_reply(gsm, CMD_RLS, data, clen);
+}
+
+static void gsm_dlci_begin_close(struct gsm_dlci *dlci);
+
+/**
+ * gsm_control_message - DLCI 0 control processing
+ * @gsm: our GSM mux
+ * @command: the command EA
+ * @data: data beyond the command/length EAs
+ * @clen: length
+ *
+ * Input processor for control messages from the other end of the link.
+ * Processes the incoming request and queues a response frame or an
+ * NSC response if not supported
+ */
+
+static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
+ u8 *data, int clen)
+{
+ u8 buf[1];
+ switch (command) {
+ case CMD_CLD: {
+ struct gsm_dlci *dlci = gsm->dlci[0];
+ /* Modem wishes to close down */
+ if (dlci) {
+ dlci->dead = 1;
+ gsm->dead = 1;
+ gsm_dlci_begin_close(dlci);
+ }
+ }
+ break;
+ case CMD_TEST:
+ /* Modem wishes to test, reply with the data */
+ gsm_control_reply(gsm, CMD_TEST, data, clen);
+ break;
+ case CMD_FCON:
+ /* Modem wants us to STFU */
+ gsm->constipated = 1;
+ gsm_control_reply(gsm, CMD_FCON, NULL, 0);
+ break;
+ case CMD_FCOFF:
+ /* Modem can accept data again */
+ gsm->constipated = 0;
+ gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
+ /* Kick the link in case it is idling */
+ gsm_data_kick(gsm);
+ break;
+ case CMD_MSC:
+ /* Out of band modem line change indicator for a DLCI */
+ gsm_control_modem(gsm, data, clen);
+ break;
+ case CMD_RLS:
+ /* Out of band error reception for a DLCI */
+ gsm_control_rls(gsm, data, clen);
+ break;
+ case CMD_PSC:
+ /* Modem wishes to enter power saving state */
+ gsm_control_reply(gsm, CMD_PSC, NULL, 0);
+ break;
+ /* Optional unsupported commands */
+ case CMD_PN: /* Parameter negotiation */
+ case CMD_RPN: /* Remote port negotiation */
+ case CMD_SNC: /* Service negotiation command */
+ default:
+ /* Reply to bad commands with an NSC */
+ buf[0] = command;
+ gsm_control_reply(gsm, CMD_NSC, buf, 1);
+ break;
+ }
+}
+
+/**
+ * gsm_control_response - process a response to our control
+ * @gsm: our GSM mux
+ * @command: the command (response) EA
+ * @data: data beyond the command/length EA
+ * @clen: length
+ *
+ * Process a response to an outstanding command. We only allow a single
+ * control message in flight so this is fairly easy. All the clean up
+ * is done by the caller, we just update the fields, flag it as done
+ * and return
+ */
+
+static void gsm_control_response(struct gsm_mux *gsm, unsigned int command,
+ u8 *data, int clen)
+{
+ struct gsm_control *ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsm->control_lock, flags);
+
+ ctrl = gsm->pending_cmd;
+ /* Does the reply match our command */
+ command |= 1;
+ if (ctrl != NULL && (command == ctrl->cmd || command == CMD_NSC)) {
+ /* Our command was replied to, kill the retry timer */
+ del_timer(&gsm->t2_timer);
+ gsm->pending_cmd = NULL;
+ /* Rejected by the other end */
+ if (command == CMD_NSC)
+ ctrl->error = -EOPNOTSUPP;
+ ctrl->done = 1;
+ wake_up(&gsm->event);
+ }
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+}
+
+/**
+ * gsm_control_transmit - send control packet
+ * @gsm: gsm mux
+ * @ctrl: frame to send
+ *
+ * Send out a pending control command (called under control lock)
+ */
+
+static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
+{
+ struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 1,
+ gsm->ftype|PF);
+ if (msg == NULL)
+ return;
+ msg->data[0] = (ctrl->cmd << 1) | 2 | EA; /* command */
+ memcpy(msg->data + 1, ctrl->data, ctrl->len);
+ gsm_data_queue(gsm->dlci[0], msg);
+}
+
+/**
+ * gsm_control_retransmit - retransmit a control frame
+ * @data: pointer to our gsm object
+ *
+ * Called off the T2 timer expiry in order to retransmit control frames
+ * that have been lost in the system somewhere. The control_lock protects
+ * us from colliding with another sender or a receive completion event.
+ * In that situation the timer may still occur in a small window but
+ * gsm->pending_cmd will be NULL and we just let the timer expire.
+ */
+
+static void gsm_control_retransmit(unsigned long data)
+{
+ struct gsm_mux *gsm = (struct gsm_mux *)data;
+ struct gsm_control *ctrl;
+ unsigned long flags;
+ spin_lock_irqsave(&gsm->control_lock, flags);
+ ctrl = gsm->pending_cmd;
+ if (ctrl) {
+ gsm->cretries--;
+ if (gsm->cretries == 0) {
+ gsm->pending_cmd = NULL;
+ ctrl->error = -ETIMEDOUT;
+ ctrl->done = 1;
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+ wake_up(&gsm->event);
+ return;
+ }
+ gsm_control_transmit(gsm, ctrl);
+ mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
+ }
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+}
+
+/**
+ * gsm_control_send - send a control frame on DLCI 0
+ * @gsm: the GSM channel
+ * @command: command to send including CR bit
+ * @data: bytes of data (must be kmalloced)
+ * @len: length of the block to send
+ *
+ * Queue and dispatch a control command. Only one command can be
+ * active at a time. In theory more can be outstanding but the matching
+ * gets really complicated so for now stick to one outstanding.
+ */
+
+static struct gsm_control *gsm_control_send(struct gsm_mux *gsm,
+ unsigned int command, u8 *data, int clen)
+{
+ struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control),
+ GFP_KERNEL);
+ unsigned long flags;
+ if (ctrl == NULL)
+ return NULL;
+retry:
+ wait_event(gsm->event, gsm->pending_cmd == NULL);
+ spin_lock_irqsave(&gsm->control_lock, flags);
+ if (gsm->pending_cmd != NULL) {
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+ goto retry;
+ }
+ ctrl->cmd = command;
+ ctrl->data = data;
+ ctrl->len = clen;
+ gsm->pending_cmd = ctrl;
+ gsm->cretries = gsm->n2;
+ mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
+ gsm_control_transmit(gsm, ctrl);
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+ return ctrl;
+}
+
+/**
+ * gsm_control_wait - wait for a control to finish
+ * @gsm: GSM mux
+ * @control: control we are waiting on
+ *
+ * Waits for the control to complete or time out. Frees any used
+ * resources and returns 0 for success, or an error if the remote
+ * rejected or ignored the request.
+ */
+
+static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
+{
+ int err;
+ wait_event(gsm->event, control->done == 1);
+ err = control->error;
+ kfree(control);
+ return err;
+}
+
+
+/*
+ * DLCI level handling: Needs krefs
+ */
+
+/*
+ * State transitions and timers
+ */
+
+/**
+ * gsm_dlci_close - a DLCI has closed
+ * @dlci: DLCI that closed
+ *
+ * Perform processing when moving a DLCI into closed state. If there
+ * is an attached tty this is hung up
+ */
+
+static void gsm_dlci_close(struct gsm_dlci *dlci)
+{
+ del_timer(&dlci->t1);
+ if (debug & 8)
+ printk("DLCI %d goes closed.\n", dlci->addr);
+ dlci->state = DLCI_CLOSED;
+ if (dlci->addr != 0) {
+ struct tty_struct *tty = tty_port_tty_get(&dlci->port);
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
+ kfifo_reset(dlci->fifo);
+ } else
+ dlci->gsm->dead = 1;
+ wake_up(&dlci->gsm->event);
+ /* A DLCI 0 close is a MUX termination so we need to kick that
+ back to userspace somehow */
+}
+
+/**
+ * gsm_dlci_open - a DLCI has opened
+ * @dlci: DLCI that opened
+ *
+ * Perform processing when moving a DLCI into open state.
+ */
+
+static void gsm_dlci_open(struct gsm_dlci *dlci)
+{
+ /* Note that SABM UA .. SABM UA first UA lost can mean that we go
+ open -> open */
+ del_timer(&dlci->t1);
+ /* This will let a tty open continue */
+ dlci->state = DLCI_OPEN;
+ if (debug & 8)
+ printk("DLCI %d goes open.\n", dlci->addr);
+ wake_up(&dlci->gsm->event);
+}
+
+/**
+ * gsm_dlci_t1 - T1 timer expiry
+ * @dlci: DLCI that opened
+ *
+ * The T1 timer handles retransmits of control frames (essentially of
+ * SABM and DISC). We resend the command until the retry count runs out
+ * in which case an opening port goes back to closed and a closing port
+ * is simply put into closed state (any further frames from the other
+ * end will get a DM response)
+ */
+
+static void gsm_dlci_t1(unsigned long data)
+{
+ struct gsm_dlci *dlci = (struct gsm_dlci *)data;
+ struct gsm_mux *gsm = dlci->gsm;
+
+ switch (dlci->state) {
+ case DLCI_OPENING:
+ dlci->retries--;
+ if (dlci->retries) {
+ gsm_command(dlci->gsm, dlci->addr, SABM|PF);
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+ } else
+ gsm_dlci_close(dlci);
+ break;
+ case DLCI_CLOSING:
+ dlci->retries--;
+ if (dlci->retries) {
+ gsm_command(dlci->gsm, dlci->addr, DISC|PF);
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+ } else
+ gsm_dlci_close(dlci);
+ break;
+ }
+}
+
+/**
+ * gsm_dlci_begin_open - start channel open procedure
+ * @dlci: DLCI to open
+ *
+ * Commence opening a DLCI from the Linux side. We issue SABM messages
+ * to the modem which should then reply with a UA, at which point we
+ * will move into open state. Opening is done asynchronously with retry
+ * running off timers and the responses.
+ */
+
+static void gsm_dlci_begin_open(struct gsm_dlci *dlci)
+{
+ struct gsm_mux *gsm = dlci->gsm;
+ if (dlci->state == DLCI_OPEN || dlci->state == DLCI_OPENING)
+ return;
+ dlci->retries = gsm->n2;
+ dlci->state = DLCI_OPENING;
+ gsm_command(dlci->gsm, dlci->addr, SABM|PF);
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+}
+
+/**
+ * gsm_dlci_begin_close - start channel close procedure
+ * @dlci: DLCI to close
+ *
+ * Commence closing a DLCI from the Linux side. We issue DISC messages
+ * to the modem which should then reply with a UA, at which point we
+ * will move into closed state. Closing is done asynchronously with retry
+ * off timers. We may also receive a DM reply from the other end which
+ * indicates the channel was already closed.
+ */
+
+static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
+{
+ struct gsm_mux *gsm = dlci->gsm;
+ if (dlci->state == DLCI_CLOSED || dlci->state == DLCI_CLOSING)
+ return;
+ dlci->retries = gsm->n2;
+ dlci->state = DLCI_CLOSING;
+ gsm_command(dlci->gsm, dlci->addr, DISC|PF);
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+}
+
+/**
+ * gsm_dlci_data - data arrived
+ * @dlci: channel
+ * @data: block of bytes received
+ * @len: length of received block
+ *
+ * A UI or UIH frame has arrived which contains data for a channel
+ * other than the control channel. If the relevant virtual tty is
+ * open we shovel the bits down it, if not we drop them.
+ */
+
+static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len)
+{
+ /* krefs .. */
+ struct tty_port *port = &dlci->port;
+ struct tty_struct *tty = tty_port_tty_get(port);
+ unsigned int modem = 0;
+
+ if (debug & 16)
+ printk("%d bytes for tty %p\n", len, tty);
+ if (tty) {
+ switch (dlci->adaption) {
+ /* Unsupported types */
+ /* Packetised interruptible data */
+ case 4:
+ break;
+ /* Packetised uninterruptible voice/data */
+ case 3:
+ break;
+ /* Asynchronous serial with line state in each frame */
+ case 2:
+ while (gsm_read_ea(&modem, *data++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ gsm_process_modem(tty, dlci, modem);
+ /* Line state will go via DLCI 0 controls only */
+ case 1:
+ default:
+ tty_insert_flip_string(tty, data, len);
+ tty_flip_buffer_push(tty);
+ }
+ tty_kref_put(tty);
+ }
+}
+
+/**
+ * gsm_dlci_command - data arrived on control channel
+ * @dlci: channel
+ * @data: block of bytes received
+ * @len: length of received block
+ *
+ * A UI or UIH frame has arrived which contains data for DLCI 0 the
+ * control channel. This should contain a command EA followed by
+ * control data bytes. The command EA contains a command/response bit
+ * and we divide up the work accordingly.
+ */
+
+static void gsm_dlci_command(struct gsm_dlci *dlci, u8 *data, int len)
+{
+ /* See what command is involved */
+ unsigned int command = 0;
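+ /* A control message is a command EA, a length octet and then the value bytes */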
+ while (len-- > 0) {
+ if (gsm_read_ea(&command, *data++) == 1) {
+ int clen = *data++;
+ len--;
+ /* FIXME: this is properly an EA */
+ clen >>= 1;
+ /* Malformed command ? */
+ if (clen > len)
+ return;
+ if (command & 1)
+ gsm_control_message(dlci->gsm, command,
+ data, clen);
+ else
+ gsm_control_response(dlci->gsm, command,
+ data, clen);
+ return;
+ }
+ }
+}
+
+/*
+ * Allocate/Free DLCI channels
+ */
+
+/**
+ * gsm_dlci_alloc - allocate a DLCI
+ * @gsm: GSM mux
+ * @addr: address of the DLCI
+ *
+ * Allocate and install a new DLCI object into the GSM mux.
+ *
+ * FIXME: review locking races
+ */
+
+static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
+{
+ struct gsm_dlci *dlci = kzalloc(sizeof(struct gsm_dlci), GFP_ATOMIC);
+ if (dlci == NULL)
+ return NULL;
+ spin_lock_init(&dlci->lock);
+ dlci->fifo = &dlci->_fifo;
+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
+ kfree(dlci);
+ return NULL;
+ }
+
+ skb_queue_head_init(&dlci->skb_list);
+ init_timer(&dlci->t1);
+ dlci->t1.function = gsm_dlci_t1;
+ dlci->t1.data = (unsigned long)dlci;
+ tty_port_init(&dlci->port);
+ dlci->port.ops = &gsm_port_ops;
+ dlci->gsm = gsm;
+ dlci->addr = addr;
+ dlci->adaption = gsm->adaption;
+ dlci->state = DLCI_CLOSED;
+ if (addr)
+ dlci->data = gsm_dlci_data;
+ else
+ dlci->data = gsm_dlci_command;
+ gsm->dlci[addr] = dlci;
+ return dlci;
+}
+
+/**
+ * gsm_dlci_free - release DLCI
+ * @dlci: DLCI to destroy
+ *
+ * Free up a DLCI. Currently to keep the lifetime rules sane we only
+ * clean up DLCI objects when the MUX closes rather than as the port
+ * is closed down on both the tty and mux levels.
+ *
+ * Can sleep.
+ */
+static void gsm_dlci_free(struct gsm_dlci *dlci)
+{
+ struct tty_struct *tty = tty_port_tty_get(&dlci->port);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
+ del_timer_sync(&dlci->t1);
+ dlci->gsm->dlci[dlci->addr] = NULL;
+ kfifo_free(dlci->fifo);
+ kfree(dlci);
+}
+
+
+/*
+ * LAPBish link layer logic
+ */
+
+/**
+ * gsm_queue - a GSM frame is ready to process
+ * @gsm: pointer to our gsm mux
+ *
+ * At this point in time a frame has arrived and been demangled from
+ * the line encoding. All the differences between the encodings have
+ * been handled below us and the frame is unpacked into the structures.
+ * The fcs holds the header FCS but any data FCS must be added here.
+ */
+
+static void gsm_queue(struct gsm_mux *gsm)
+{
+ struct gsm_dlci *dlci;
+ u8 cr;
+ int address;
+ /* We have to sneak a look at the packet body to do the FCS.
+ A somewhat layering violation in the spec */
+
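+ /* UIH frames protect only the header with the FCS; UI frames also
+ cover the data, so fold the payload in for those */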
+ if ((gsm->control & ~PF) == UI)
+ gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
+ if (gsm->fcs != GOOD_FCS) {
+ gsm->bad_fcs++;
+ if (debug & 4)
+ printk("BAD FCS %02x\n", gsm->fcs);
+ return;
+ }
+ address = gsm->address >> 1;
+ if (address >= NUM_DLCI)
+ goto invalid;
+
+ cr = gsm->address & 1; /* C/R bit */
+
+ gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len);
+
+ cr ^= 1 - gsm->initiator; /* Flip so 1 always means command */
+ dlci = gsm->dlci[address];
+
+ switch (gsm->control) {
+ case SABM|PF:
+ if (cr == 0)
+ goto invalid;
+ if (dlci == NULL)
+ dlci = gsm_dlci_alloc(gsm, address);
+ if (dlci == NULL)
+ return;
+ if (dlci->dead)
+ gsm_response(gsm, address, DM);
+ else {
+ gsm_response(gsm, address, UA);
+ gsm_dlci_open(dlci);
+ }
+ break;
+ case DISC|PF:
+ if (cr == 0)
+ goto invalid;
+ if (dlci == NULL || dlci->state == DLCI_CLOSED) {
+ gsm_response(gsm, address, DM);
+ return;
+ }
+ /* Real close complete */
+ gsm_response(gsm, address, UA);
+ gsm_dlci_close(dlci);
+ break;
+ case UA:
+ case UA|PF:
+ if (cr == 0 || dlci == NULL)
+ break;
+ switch (dlci->state) {
+ case DLCI_CLOSING:
+ gsm_dlci_close(dlci);
+ break;
+ case DLCI_OPENING:
+ gsm_dlci_open(dlci);
+ break;
+ }
+ break;
+ case DM: /* DM can be valid unsolicited */
+ case DM|PF:
+ if (cr)
+ goto invalid;
+ if (dlci == NULL)
+ return;
+ gsm_dlci_close(dlci);
+ break;
+ case UI:
+ case UI|PF:
+ case UIH:
+ case UIH|PF:
+#if 0
+ if (cr)
+ goto invalid;
+#endif
+ if (dlci == NULL || dlci->state != DLCI_OPEN) {
+ gsm_command(gsm, address, DM|PF);
+ return;
+ }
+ dlci->data(dlci, gsm->buf, gsm->len);
+ break;
+ default:
+ goto invalid;
+ }
+ return;
+invalid:
+ gsm->malformed++;
+ return;
+}
+
+
+/**
+ * gsm0_receive - perform processing for non-transparency
+ * @gsm: gsm data for this ldisc instance
+ * @c: character
+ *
+ * Receive bytes in gsm mode 0
+ */
+
+static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+{
+ switch (gsm->state) {
+ case GSM_SEARCH: /* SOF marker */
+ if (c == GSM0_SOF) {
+ gsm->state = GSM_ADDRESS;
+ gsm->address = 0;
+ gsm->len = 0;
+ gsm->fcs = INIT_FCS;
+ }
+ break; /* Address EA */
+ case GSM_ADDRESS:
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ if (gsm_read_ea(&gsm->address, c))
+ gsm->state = GSM_CONTROL;
+ break;
+ case GSM_CONTROL: /* Control Byte */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ gsm->control = c;
+ gsm->state = GSM_LEN;
+ break;
+ case GSM_LEN: /* Length EA */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ if (gsm_read_ea(&gsm->len, c)) {
+ if (gsm->len > gsm->mru) {
+ gsm->bad_size++;
+ gsm->state = GSM_SEARCH;
+ break;
+ }
+ gsm->count = 0;
+ gsm->state = GSM_DATA;
+ }
+ break;
+ case GSM_DATA: /* Data */
+ gsm->buf[gsm->count++] = c;
+ if (gsm->count == gsm->len)
+ gsm->state = GSM_FCS;
+ break;
+ case GSM_FCS: /* FCS follows the packet */
+ gsm->fcs = c;
+ gsm_queue(gsm);
+ /* And then back for the next frame */
+ gsm->state = GSM_SEARCH;
+ break;
+ }
+}
+
+/**
+ * gsm1_receive - perform processing for the advanced option
+ * @gsm: gsm data for this ldisc instance
+ * @c: character
+ *
+ * Receive bytes in mode 1 (Advanced option)
+ */
+
+static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
+{
+ if (c == GSM1_SOF) {
+ /* EOF is only valid in frame if we have got to the data state
+ and received at least one byte (the FCS) */
+ if (gsm->state == GSM_DATA && gsm->count) {
+ /* Extract the FCS */
+ gsm->count--;
+ gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]);
+ gsm->len = gsm->count;
+ gsm_queue(gsm);
+ gsm->state = GSM_START;
+ return;
+ }
+ /* Any partial frame was a runt so go back to start */
+ if (gsm->state != GSM_START) {
+ gsm->malformed++;
+ gsm->state = GSM_START;
+ }
+ /* A SOF in GSM_START means we are still reading idling or
+ framing bytes */
+ return;
+ }
+
+ if (c == GSM1_ESCAPE) {
+ gsm->escape = 1;
+ return;
+ }
+
+ /* Only an unescaped SOF gets us out of GSM search */
+ if (gsm->state == GSM_SEARCH)
+ return;
+
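+ /* The transmitter escaped this byte; undo the GSM1_ESCAPE_BITS toggle */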
+ if (gsm->escape) {
+ c ^= GSM1_ESCAPE_BITS;
+ gsm->escape = 0;
+ }
+ switch (gsm->state) {
+ case GSM_START: /* First byte after SOF */
+ gsm->address = 0;
+ gsm->state = GSM_ADDRESS;
+ gsm->fcs = INIT_FCS;
+ /* Drop through */
+ case GSM_ADDRESS: /* Address continuation */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ if (gsm_read_ea(&gsm->address, c))
+ gsm->state = GSM_CONTROL;
+ break;
+ case GSM_CONTROL: /* Control Byte */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ gsm->control = c;
+ gsm->count = 0;
+ gsm->state = GSM_DATA;
+ break;
+ case GSM_DATA: /* Data */
+ if (gsm->count > gsm->mru ) { /* Allow one for the FCS */
+ gsm->state = GSM_OVERRUN;
+ gsm->bad_size++;
+ } else
+ gsm->buf[gsm->count++] = c;
+ break;
+ case GSM_OVERRUN: /* Over-long - eg a dropped SOF */
+ break;
+ }
+}
+
+/**
+ * gsm_error - handle tty error
+ * @gsm: ldisc data
+ * @data: byte received (may be invalid)
+ * @flag: error received
+ *
+ * Handle an error in the receipt of data for a frame. Currently we just
+ * go back to hunting for a SOF.
+ *
+ * FIXME: better diagnostics ?
+ */
+
+static void gsm_error(struct gsm_mux *gsm,
+ unsigned char data, unsigned char flag)
+{
+ gsm->state = GSM_SEARCH;
+ gsm->io_error++;
+}
+
+/**
+ * gsm_cleanup_mux - generic GSM protocol cleanup
+ * @gsm: our mux
+ *
+ * Clean up the bits of the mux which are the same for all framing
+ * protocols. Remove the mux from the mux table, stop all the timers
+ * and then shut down each device hanging up the channels as we go.
+ */
+
+void gsm_cleanup_mux(struct gsm_mux *gsm)
+{
+ int i;
+ struct gsm_dlci *dlci = gsm->dlci[0];
+ struct gsm_msg *txq;
+
+ gsm->dead = 1;
+
+ spin_lock(&gsm_mux_lock);
+ for (i = 0; i < MAX_MUX; i++) {
+ if (gsm_mux[i] == gsm) {
+ gsm_mux[i] = NULL;
+ break;
+ }
+ }
+ spin_unlock(&gsm_mux_lock);
+ WARN_ON(i == MAX_MUX);
+
+ del_timer_sync(&gsm->t2_timer);
+ /* Now we are sure T2 has stopped */
+ if (dlci) {
+ dlci->dead = 1;
+ gsm_dlci_begin_close(dlci);
+ wait_event_interruptible(gsm->event,
+ dlci->state == DLCI_CLOSED);
+ }
+ /* Free up any link layer users */
+ for (i = 0; i < NUM_DLCI; i++)
+ if (gsm->dlci[i])
+ gsm_dlci_free(gsm->dlci[i]);
+ /* Now wipe the queues */
+ for (txq = gsm->tx_head; txq != NULL; txq = gsm->tx_head) {
+ gsm->tx_head = txq->next;
+ kfree(txq);
+ }
+ gsm->tx_tail = NULL;
+}
+EXPORT_SYMBOL_GPL(gsm_cleanup_mux);
+
+/**
+ * gsm_activate_mux - generic GSM setup
+ * @gsm: our mux
+ *
+ * Set up the bits of the mux which are the same for all framing
+ * protocols. Add the mux to the mux table so it can be opened and
+ * finally kick off connecting to DLCI 0 on the modem.
+ */
+
+int gsm_activate_mux(struct gsm_mux *gsm)
+{
+ struct gsm_dlci *dlci;
+ int i = 0;
+
+ init_timer(&gsm->t2_timer);
+ gsm->t2_timer.function = gsm_control_retransmit;
+ gsm->t2_timer.data = (unsigned long)gsm;
+ init_waitqueue_head(&gsm->event);
+ spin_lock_init(&gsm->control_lock);
+ spin_lock_init(&gsm->tx_lock);
+
+ if (gsm->encoding == 0)
+ gsm->receive = gsm0_receive;
+ else
+ gsm->receive = gsm1_receive;
+ gsm->error = gsm_error;
+
+ spin_lock(&gsm_mux_lock);
+ for (i = 0; i < MAX_MUX; i++) {
+ if (gsm_mux[i] == NULL) {
+ gsm_mux[i] = gsm;
+ break;
+ }
+ }
+ spin_unlock(&gsm_mux_lock);
+ if (i == MAX_MUX)
+ return -EBUSY;
+
+ dlci = gsm_dlci_alloc(gsm, 0);
+ if (dlci == NULL)
+ return -ENOMEM;
+ gsm->dead = 0; /* Tty opens are now permissible */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gsm_activate_mux);
+
+/**
+ * gsm_free_mux - free up a mux
+ * @mux: mux to free
+ *
+ * Dispose of allocated resources for a dead mux. No refcounting
+ * at present so the mux must be truly dead.
+ */
+void gsm_free_mux(struct gsm_mux *gsm)
+{
+ kfree(gsm->txframe);
+ kfree(gsm->buf);
+ kfree(gsm);
+}
+EXPORT_SYMBOL_GPL(gsm_free_mux);
+
+/**
+ * gsm_alloc_mux - allocate a mux
+ *
+ * Creates a new mux ready for activation.
+ */
+
+struct gsm_mux *gsm_alloc_mux(void)
+{
+ struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL);
+ if (gsm == NULL)
+ return NULL;
+ gsm->buf = kmalloc(MAX_MRU + 1, GFP_KERNEL);
+ if (gsm->buf == NULL) {
+ kfree(gsm);
+ return NULL;
+ }
+ gsm->txframe = kmalloc(2 * MAX_MRU + 2, GFP_KERNEL);
+ if (gsm->txframe == NULL) {
+ kfree(gsm->buf);
+ kfree(gsm);
+ return NULL;
+ }
+ spin_lock_init(&gsm->lock);
+
+ gsm->t1 = T1;
+ gsm->t2 = T2;
+ gsm->n2 = N2;
+ gsm->ftype = UIH;
+ gsm->initiator = 0;
+ gsm->adaption = 1;
+ gsm->encoding = 1;
+ gsm->mru = 64; /* Default to encoding 1 so these should be 64 */
+ gsm->mtu = 64;
+ gsm->dead = 1; /* Avoid early tty opens */
+
+ return gsm;
+}
+EXPORT_SYMBOL_GPL(gsm_alloc_mux);
+
+
+
+
+/**
+ * gsmld_output - write to link
+ * @gsm: our mux
+ * @data: bytes to output
+ * @len: size
+ *
+ * Write a block of data from the GSM mux to the data channel. This
+ * will eventually be serialized from above but at the moment isn't.
+ */
+
+static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
+{
+ if (tty_write_room(gsm->tty) < len) {
+ set_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags);
+ return -ENOSPC;
+ }
+ if (debug & 4) {
+ printk("-->%d bytes out\n", len);
+ hex_packet(data, len);
+ }
+ gsm->tty->ops->write(gsm->tty, data, len);
+ return len;
+}
+
+/**
+ * gsmld_attach_gsm - mode set up
+ * @tty: our tty structure
+ * @gsm: our mux
+ *
+ * Set up the MUX for basic mode and commence connecting to the
+ * modem. Currently called from the line discipline set up but
+ * will need moving to an ioctl path.
+ */
+
+static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
+{
+ int ret;
+
+ gsm->tty = tty_kref_get(tty);
+ gsm->output = gsmld_output;
+ ret = gsm_activate_mux(gsm);
+ if (ret != 0)
+ tty_kref_put(gsm->tty);
+ return ret;
+}
+
+
+/**
+ * gsmld_detach_gsm - stop doing 0710 mux
+ * @tty: tty attached to the mux
+ * @gsm: mux
+ *
+ * Shutdown and then clean up the resources used by the line discipline
+ */
+
+static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
+{
+ WARN_ON(tty != gsm->tty);
+ gsm_cleanup_mux(gsm);
+ tty_kref_put(gsm->tty);
+ gsm->tty = NULL;
+}
+
+static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ struct gsm_mux *gsm = tty->disc_data;
+ const unsigned char *dp;
+ char *f;
+ int i;
+ char buf[64];
+ char flags;
+
+ if (debug & 4) {
+ printk("Inbytes %d\n", count);
+ hex_packet(cp, count);
+ }
+
+ for (i = count, dp = cp, f = fp; i; i--, dp++) {
+ flags = *f++;
+ switch (flags) {
+ case TTY_NORMAL:
+ gsm->receive(gsm, *dp);
+ break;
+ case TTY_OVERRUN:
+ case TTY_BREAK:
+ case TTY_PARITY:
+ case TTY_FRAME:
+ gsm->error(gsm, *dp, flags);
+ break;
+ default:
+ printk(KERN_ERR "%s: unknown flag %d\n",
+ tty_name(tty, buf), flags);
+ break;
+ }
+ }
+ /* FASYNC if needed ? */
+ /* If clogged call tty_throttle(tty); */
+}
+
+/**
+ * gsmld_chars_in_buffer - report available bytes
+ * @tty: tty device
+ *
+ * Report the number of characters buffered to be delivered to user
+ * at this instant in time.
+ *
+ * Locking: gsm lock
+ */
+
+static ssize_t gsmld_chars_in_buffer(struct tty_struct *tty)
+{
+ return 0;
+}
+
+/**
+ * gsmld_flush_buffer - clean input queue
+ * @tty: terminal device
+ *
+ * Flush the input buffer. Called when the line discipline is
+ * being closed, when the tty layer wants the buffer flushed (eg
+ * at hangup).
+ */
+
+static void gsmld_flush_buffer(struct tty_struct *tty)
+{
+}
+
+/**
+ * gsmld_close - close the ldisc for this tty
+ * @tty: device
+ *
+ * Called from the terminal layer when this line discipline is
+ * being shut down, either because of a close or because of a
+ * discipline change. The function will not be called while other
+ * ldisc methods are in progress.
+ */
+
+static void gsmld_close(struct tty_struct *tty)
+{
+ struct gsm_mux *gsm = tty->disc_data;
+
+ gsmld_detach_gsm(tty, gsm);
+
+ gsmld_flush_buffer(tty);
+ /* Do other clean up here */
+ gsm_free_mux(gsm);
+}
+
+/**
+ * gsmld_open - open an ldisc
+ * @tty: terminal to open
+ *
+ * Called when this line discipline is being attached to the
+ * terminal device. Can sleep. Called serialized so that no
+ * other events will occur in parallel. No further open will occur
+ * until a close.
+ */
+
+static int gsmld_open(struct tty_struct *tty)
+{
+ struct gsm_mux *gsm;
+
+ if (tty->ops->write == NULL)
+ return -EINVAL;
+
+ /* Attach our ldisc data */
+ gsm = gsm_alloc_mux();
+ if (gsm == NULL)
+ return -ENOMEM;
+
+ tty->disc_data = gsm;
+ tty->receive_room = 65536;
+
+ /* Attach the initial passive connection */
+ gsm->encoding = 1;
+ return gsmld_attach_gsm(tty, gsm);
+}
+
+/**
+ * gsmld_write_wakeup - asynchronous I/O notifier
+ * @tty: tty device
+ *
+ * Required for the ptys, serial driver etc. since processes
+ * that attach themselves to the master and rely on ASYNC
+ * IO must be woken up
+ */
+
+static void gsmld_write_wakeup(struct tty_struct *tty)
+{
+ struct gsm_mux *gsm = tty->disc_data;
+
+ /* Queue poll */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ gsm_data_kick(gsm);
+ if (gsm->tx_bytes < TX_THRESH_LO)
+ gsm_dlci_data_sweep(gsm);
+}
+
+/**
+ * gsmld_read - read function for tty
+ * @tty: tty device
+ * @file: file object
+ * @buf: userspace buffer pointer
+ * @nr: size of I/O
+ *
+ * Perform reads for the line discipline. We are guaranteed that the
+ * line discipline will not be closed under us but we may get multiple
+ * parallel readers and must handle this ourselves. We may also get
+ * a hangup. Always called in user context, may sleep.
+ *
+ * This code must be sure never to sleep through a hangup.
+ */
+
+static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
+ unsigned char __user *buf, size_t nr)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * gsmld_write - write function for tty
+ * @tty: tty device
+ * @file: file object
+ * @buf: userspace buffer pointer
+ * @nr: size of I/O
+ *
+ * Called when the owner of the device wants to send a frame
+ * itself (or some other control data). The data is transferred
+ * as-is and must be properly framed and checksummed as appropriate
+ * by userspace. Frames are either sent whole or not at all as this
+ * avoids pain on the user side.
+ */
+
+static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t nr)
+{
+ int space = tty_write_room(tty);
+ if (space >= nr)
+ return tty->ops->write(tty, buf, nr);
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ return -ENOBUFS;
+}
+
+/**
+ * gsmld_poll - poll method for N_GSM0710
+ * @tty: terminal device
+ * @file: file accessing it
+ * @wait: poll table
+ *
+ * Called when the line discipline is asked to poll() for data or
+ * for special events. This code is not serialized with respect to
+ * other events save open/close.
+ *
+ * This code must be sure never to sleep through a hangup.
+ * Called without the kernel lock held - fine
+ */
+
+static unsigned int gsmld_poll(struct tty_struct *tty, struct file *file,
+ poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct gsm_mux *gsm = tty->disc_data;
+
+ poll_wait(file, &tty->read_wait, wait);
+ poll_wait(file, &tty->write_wait, wait);
+ if (tty_hung_up_p(file))
+ mask |= POLLHUP;
+ if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0)
+ mask |= POLLOUT | POLLWRNORM;
+ if (gsm->dead)
+ mask |= POLLHUP;
+ return mask;
+}
+
+static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
+ struct gsm_config *c)
+{
+ int need_close = 0;
+ int need_restart = 0;
+
+ /* Stuff we don't support yet - UI or I frame transport, windowing */
+ if ((c->adaption != 1 && c->adaption != 2) || c->k)
+ return -EOPNOTSUPP;
+ /* Check the MRU/MTU range looks sane */
+ if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8)
+ return -EINVAL;
+ if (c->n2 < 3)
+ return -EINVAL;
+ if (c->encapsulation > 1) /* Basic, advanced, no I */
+ return -EINVAL;
+ if (c->initiator > 1)
+ return -EINVAL;
+ if (c->i == 0 || c->i > 2) /* UIH and UI only */
+ return -EINVAL;
+ /*
+ * See what is needed for reconfiguration
+ */
+
+ /* Timing fields */
+ if (c->t1 != 0 && c->t1 != gsm->t1)
+ need_restart = 1;
+ if (c->t2 != 0 && c->t2 != gsm->t2)
+ need_restart = 1;
+ if (c->encapsulation != gsm->encoding)
+ need_restart = 1;
+ if (c->adaption != gsm->adaption)
+ need_restart = 1;
+ /* Requires care */
+ if (c->initiator != gsm->initiator)
+ need_close = 1;
+ if (c->mru != gsm->mru)
+ need_restart = 1;
+ if (c->mtu != gsm->mtu)
+ need_restart = 1;
+
+ /*
+ * Close down what is needed, restart and initiate the new
+ * configuration
+ */
+
+ if (need_close || need_restart) {
+ gsm_dlci_begin_close(gsm->dlci[0]);
+ /* This will timeout if the link is down due to N2 expiring */
+ wait_event_interruptible(gsm->event,
+ gsm->dlci[0]->state == DLCI_CLOSED);
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ if (need_restart)
+ gsm_cleanup_mux(gsm);
+
+ gsm->initiator = c->initiator;
+ gsm->mru = c->mru;
+ gsm->encoding = c->encapsulation;
+ gsm->adaption = c->adaption;
+
+ if (c->i == 1)
+ gsm->ftype = UIH;
+ else if (c->i == 2)
+ gsm->ftype = UI;
+
+ if (c->t1)
+ gsm->t1 = c->t1;
+ if (c->t2)
+ gsm->t2 = c->t2;
+
+ /* FIXME: We need to separate activation/deactivation from adding
+ and removing from the mux array */
+ if (need_restart)
+ gsm_activate_mux(gsm);
+ if (gsm->initiator && need_close)
+ gsm_dlci_begin_open(gsm->dlci[0]);
+ return 0;
+}
+
+static int gsmld_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct gsm_config c;
+ struct gsm_mux *gsm = tty->disc_data;
+
+ switch (cmd) {
+ case GSMIOC_GETCONF:
+ memset(&c, 0, sizeof(c));
+ c.adaption = gsm->adaption;
+ c.encapsulation = gsm->encoding;
+ c.initiator = gsm->initiator;
+ c.t1 = gsm->t1;
+ c.t2 = gsm->t2;
+ c.t3 = 0; /* Not supported */
+ c.n2 = gsm->n2;
+ if (gsm->ftype == UIH)
+ c.i = 1;
+ else
+ c.i = 2;
+ printk("Ftype %d i %d\n", gsm->ftype, c.i);
+ c.mru = gsm->mru;
+ c.mtu = gsm->mtu;
+ c.k = 0;
+ if (copy_to_user((void __user *)arg, &c, sizeof(c)))
+ return -EFAULT;
+ return 0;
+ case GSMIOC_SETCONF:
+ if (copy_from_user(&c, (void __user *)arg, sizeof(c)))
+ return -EFAULT;
+ return gsmld_config(tty, gsm, &c);
+ default:
+ return n_tty_ioctl_helper(tty, file, cmd, arg);
+ }
+}
+
+
+/* Line discipline for real tty */
+struct tty_ldisc_ops tty_ldisc_packet = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "n_gsm",
+ .open = gsmld_open,
+ .close = gsmld_close,
+ .flush_buffer = gsmld_flush_buffer,
+ .chars_in_buffer = gsmld_chars_in_buffer,
+ .read = gsmld_read,
+ .write = gsmld_write,
+ .ioctl = gsmld_ioctl,
+ .poll = gsmld_poll,
+ .receive_buf = gsmld_receive_buf,
+ .write_wakeup = gsmld_write_wakeup
+};
+
+/*
+ * Virtual tty side
+ */
+
+#define TX_SIZE 512
+
+static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
+{
+ u8 modembits[5];
+ struct gsm_control *ctrl;
+ int len = 2;
+
+ if (brk)
+ len++;
+
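+ /* MSC value octets: length, DLCI address, signal octet and an
+ optional break octet */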
+ modembits[0] = len << 1 | EA; /* Data bytes */
+ modembits[1] = dlci->addr << 2 | 3; /* DLCI, EA, 1 */
+ modembits[2] = gsm_encode_modem(dlci) << 1 | EA;
+ if (brk)
+ modembits[3] = brk << 4 | 2 | EA; /* Valid, EA */
+ ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len + 1);
+ if (ctrl == NULL)
+ return -ENOMEM;
+ return gsm_control_wait(dlci->gsm, ctrl);
+}
+
+static int gsm_carrier_raised(struct tty_port *port)
+{
+ struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+ /* Not yet open so no carrier info */
+ if (dlci->state != DLCI_OPEN)
+ return 0;
+ if (debug & 2)
+ return 1;
+ return dlci->modem_rx & TIOCM_CD;
+}
+
+static void gsm_dtr_rts(struct tty_port *port, int onoff)
+{
+ struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+ unsigned int modem_tx = dlci->modem_tx;
+ if (onoff)
+ modem_tx |= TIOCM_DTR | TIOCM_RTS;
+ else
+ modem_tx &= ~(TIOCM_DTR | TIOCM_RTS);
+ if (modem_tx != dlci->modem_tx) {
+ dlci->modem_tx = modem_tx;
+ gsmtty_modem_update(dlci, 0);
+ }
+}
+
+static const struct tty_port_operations gsm_port_ops = {
+ .carrier_raised = gsm_carrier_raised,
+ .dtr_rts = gsm_dtr_rts,
+};
+
+
+static int gsmtty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct gsm_mux *gsm;
+ struct gsm_dlci *dlci;
+ struct tty_port *port;
+ unsigned int line = tty->index;
+ unsigned int mux = line >> 6;
+
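+ /* The tty minor selects both the mux (upper bits) and the DLCI
+ (low 6 bits) */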
+ line = line & 0x3F;
+
+ if (mux >= MAX_MUX)
+ return -ENXIO;
+ /* FIXME: we need to lock gsm_mux for lifetimes of ttys eventually */
+ if (gsm_mux[mux] == NULL)
+ return -EUNATCH;
+ if (line == 0 || line > 61) /* 62/63 reserved */
+ return -ECHRNG;
+ gsm = gsm_mux[mux];
+ if (gsm->dead)
+ return -EL2HLT;
+ dlci = gsm->dlci[line];
+ if (dlci == NULL)
+ dlci = gsm_dlci_alloc(gsm, line);
+ if (dlci == NULL)
+ return -ENOMEM;
+ port = &dlci->port;
+ port->count++;
+ tty->driver_data = dlci;
+ tty_port_tty_set(port, tty);
+
+ dlci->modem_rx = 0;
+ /* We could in theory open and close before we wait - eg if we get
+ a DM straight back. This is ok as that will have caused a hangup */
+ set_bit(ASYNCB_INITIALIZED, &port->flags);
+ /* Start sending off SABM messages */
+ gsm_dlci_begin_open(dlci);
+ /* And wait for virtual carrier */
+ return tty_port_block_til_ready(port, tty, filp);
+}
+
+static void gsmtty_close(struct tty_struct *tty, struct file *filp)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci == NULL)
+ return;
+ if (tty_port_close_start(&dlci->port, tty, filp) == 0)
+ return;
+ gsm_dlci_begin_close(dlci);
+ tty_port_close_end(&dlci->port, tty);
+ tty_port_tty_set(&dlci->port, NULL);
+}
+
+static void gsmtty_hangup(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ tty_port_hangup(&dlci->port);
+ gsm_dlci_begin_close(dlci);
+}
+
+static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
+ int len)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ /* Stuff the bytes into the fifo queue */
+ int sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
+ /* Need to kick the channel */
+ gsm_dlci_data_kick(dlci);
+ return sent;
+}
+
+static int gsmtty_write_room(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ return TX_SIZE - kfifo_len(dlci->fifo);
+}
+
+static int gsmtty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ return kfifo_len(dlci->fifo);
+}
+
+static void gsmtty_flush_buffer(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ /* Caution needed: If we implement reliable transport classes
+ then the data being transmitted can't simply be junked once
+ it has first hit the stack. Until then we can just blow it
+ away */
+ kfifo_reset(dlci->fifo);
+ /* Need to unhook this DLCI from the transmit queue logic */
+}
+
+static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ /* The FIFO handles the queue so the kernel will do the right
+ thing waiting on chars_in_buffer before calling us. No work
+ to do here */
+}
+
+static int gsmtty_tiocmget(struct tty_struct *tty, struct file *filp)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ return dlci->modem_rx;
+}
+
+static int gsmtty_tiocmset(struct tty_struct *tty, struct file *filp,
+ unsigned int set, unsigned int clear)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ unsigned int modem_tx = dlci->modem_tx;
+
+ modem_tx &= ~clear;
+ modem_tx |= set;
+
+ if (modem_tx != dlci->modem_tx) {
+ dlci->modem_tx = modem_tx;
+ return gsmtty_modem_update(dlci, 0);
+ }
+ return 0;
+}
+
+
+static int gsmtty_ioctl(struct tty_struct *tty, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ return -ENOIOCTLCMD;
+}
+
+static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
+{
+ /* For the moment it's fixed. In actual fact the speed information
+ for the virtual channel can be propagated in both directions by
+ the RPN control message. This however rapidly gets nasty as we
+ then have to remap modem signals each way according to whether
+ our virtual cable is null modem etc .. */
+ tty_termios_copy_hw(tty->termios, old);
+}
+
+static void gsmtty_throttle(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ if (tty->termios->c_cflag & CRTSCTS)
+ dlci->modem_tx &= ~TIOCM_RTS;
+ dlci->throttled = 1;
+ /* Send an MSC with RTS cleared */
+ gsmtty_modem_update(dlci, 0);
+}
+
+static void gsmtty_unthrottle(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ if (tty->termios->c_cflag & CRTSCTS)
+ dlci->modem_tx |= TIOCM_RTS;
+ dlci->throttled = 0;
+ /* Send an MSC with RTS set */
+ gsmtty_modem_update(dlci, 0);
+}
+
+static int gsmtty_break_ctl(struct tty_struct *tty, int state)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ int encode = 0; /* Off */
+
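+ /* The break field encodes the break length in 200mS units, capped at 0x0F */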
+ if (state == -1) /* "On indefinitely" - we can't encode this
+ properly */
+ encode = 0x0F;
+ else if (state > 0) {
+ encode = state / 200; /* mS to encoding */
+ if (encode > 0x0F)
+ encode = 0x0F; /* Best effort */
+ }
+ return gsmtty_modem_update(dlci, encode);
+}
+
+static struct tty_driver *gsm_tty_driver;
+
+/* Virtual ttys for the demux */
+static const struct tty_operations gsmtty_ops = {
+ .open = gsmtty_open,
+ .close = gsmtty_close,
+ .write = gsmtty_write,
+ .write_room = gsmtty_write_room,
+ .chars_in_buffer = gsmtty_chars_in_buffer,
+ .flush_buffer = gsmtty_flush_buffer,
+ .ioctl = gsmtty_ioctl,
+ .throttle = gsmtty_throttle,
+ .unthrottle = gsmtty_unthrottle,
+ .set_termios = gsmtty_set_termios,
+ .hangup = gsmtty_hangup,
+ .wait_until_sent = gsmtty_wait_until_sent,
+ .tiocmget = gsmtty_tiocmget,
+ .tiocmset = gsmtty_tiocmset,
+ .break_ctl = gsmtty_break_ctl,
+};
+
+
+
+static int __init gsm_init(void)
+{
+ /* Fill in our line protocol discipline, and register it */
+ int status = tty_register_ldisc(N_GSM0710, &tty_ldisc_packet);
+ if (status != 0) {
+ printk(KERN_ERR "n_gsm: can't register line discipline (err = %d)\n", status);
+ return status;
+ }
+
+ gsm_tty_driver = alloc_tty_driver(256);
+ if (!gsm_tty_driver) {
+ tty_unregister_ldisc(N_GSM0710);
+ printk(KERN_ERR "gsm_init: tty allocation failed.\n");
+ return -EINVAL;
+ }
+ gsm_tty_driver->owner = THIS_MODULE;
+ gsm_tty_driver->driver_name = "gsmtty";
+ gsm_tty_driver->name = "gsmtty";
+ gsm_tty_driver->major = 0; /* Dynamic */
+ gsm_tty_driver->minor_start = 0;
+ gsm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ gsm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ gsm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
+ | TTY_DRIVER_HARDWARE_BREAK;
+ gsm_tty_driver->init_termios = tty_std_termios;
+ /* Fixme */
+ gsm_tty_driver->init_termios.c_lflag &= ~ECHO;
+ tty_set_operations(gsm_tty_driver, &gsmtty_ops);
+
+ spin_lock_init(&gsm_mux_lock);
+
+ if (tty_register_driver(gsm_tty_driver)) {
+ put_tty_driver(gsm_tty_driver);
+ tty_unregister_ldisc(N_GSM0710);
+ printk(KERN_ERR "gsm_init: tty registration failed.\n");
+ return -EBUSY;
+ }
+ printk(KERN_INFO "gsm_init: loaded as %d,%d.\n", gsm_tty_driver->major, gsm_tty_driver->minor_start);
+ return 0;
+}
+
+static void __exit gsm_exit(void)
+{
+ int status = tty_unregister_ldisc(N_GSM0710);
+ if (status != 0)
+ printk(KERN_ERR "n_gsm: can't unregister line discipline (err = %d)\n", status);
+ tty_unregister_driver(gsm_tty_driver);
+ put_tty_driver(gsm_tty_driver);
+ printk(KERN_INFO "gsm_init: unloaded.\n");
+}
+
+module_init(gsm_init);
+module_exit(gsm_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_GSM0710);
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 78a62eb..ecbe479 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -176,23 +176,6 @@ static void config_setup(struct cyclades_port *);
static void show_status(int);
#endif
-#ifdef CONFIG_REMOTE_DEBUG
-static void debug_setup(void);
-void queueDebugChar(int c);
-int getDebugChar(void);
-
-#define DEBUG_PORT 1
-#define DEBUG_LEN 256
-
-typedef struct {
- int in;
- int out;
- unsigned char buf[DEBUG_LEN];
-} debugq;
-
-debugq debugiq;
-#endif
-
/*
* I have my own version of udelay(), as it is needed when initialising
* the chip, before the delay loop has been calibrated. Should probably
@@ -515,11 +498,6 @@ static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id)
/* determine the channel and change to that context */
channel = (u_short) (base_addr[CyLICR] >> 2);
-#ifdef CONFIG_REMOTE_DEBUG
- if (channel == DEBUG_PORT) {
- panic("TxInt on debug port!!!");
- }
-#endif
/* validate the port number (as configured and open) */
if ((channel < 0) || (NR_PORTS <= channel)) {
base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
@@ -634,14 +612,6 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id)
info->last_active = jiffies;
save_cnt = char_count = base_addr[CyRFOC];
-#ifdef CONFIG_REMOTE_DEBUG
- if (channel == DEBUG_PORT) {
- while (char_count--) {
- data = base_addr[CyRDR];
- queueDebugChar(data);
- }
- } else
-#endif
/* if there is nowhere to put the data, discard it */
if (info->tty == 0) {
while (char_count--) {
@@ -2195,9 +2165,7 @@ static int __init serial167_init(void)
port_num++;
info++;
}
-#ifdef CONFIG_REMOTE_DEBUG
- debug_setup();
-#endif
+
ret = request_irq(MVME167_IRQ_SER_ERR, cd2401_rxerr_interrupt, 0,
"cd2401_errors", cd2401_rxerr_interrupt);
if (ret) {
@@ -2518,193 +2486,4 @@ static int __init serial167_console_init(void)
console_initcall(serial167_console_init);
-#ifdef CONFIG_REMOTE_DEBUG
-void putDebugChar(int c)
-{
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- unsigned long flags;
- volatile u_char sink;
- u_char ier;
- int port;
-
- local_irq_save(flags);
-
- /* Ensure transmitter is enabled! */
-
- port = DEBUG_PORT;
- base_addr[CyCAR] = (u_char) port;
- while (base_addr[CyCCR])
- ;
- base_addr[CyCCR] = CyENB_XMTR;
-
- ier = base_addr[CyIER];
- base_addr[CyIER] = CyTxMpty;
-
- while (1) {
- if (pcc2chip[PccSCCTICR] & 0x20) {
- /* We have a Tx int. Acknowledge it */
- sink = pcc2chip[PccTPIACKR];
- if ((base_addr[CyLICR] >> 2) == port) {
- base_addr[CyTDR] = c;
- base_addr[CyTEOIR] = 0;
- break;
- } else
- base_addr[CyTEOIR] = CyNOTRANS;
- }
- }
-
- base_addr[CyIER] = ier;
-
- local_irq_restore(flags);
-}
-
-int getDebugChar()
-{
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- unsigned long flags;
- volatile u_char sink;
- u_char ier;
- int port;
- int i, c;
-
- i = debugiq.out;
- if (i != debugiq.in) {
- c = debugiq.buf[i];
- if (++i == DEBUG_LEN)
- i = 0;
- debugiq.out = i;
- return c;
- }
- /* OK, nothing in queue, wait in poll loop */
-
- local_irq_save(flags);
-
- /* Ensure receiver is enabled! */
-
- port = DEBUG_PORT;
- base_addr[CyCAR] = (u_char) port;
-#if 0
- while (base_addr[CyCCR])
- ;
- base_addr[CyCCR] = CyENB_RCVR;
-#endif
- ier = base_addr[CyIER];
- base_addr[CyIER] = CyRxData;
-
- while (1) {
- if (pcc2chip[PccSCCRICR] & 0x20) {
- /* We have a Rx int. Acknowledge it */
- sink = pcc2chip[PccRPIACKR];
- if ((base_addr[CyLICR] >> 2) == port) {
- int cnt = base_addr[CyRFOC];
- while (cnt-- > 0) {
- c = base_addr[CyRDR];
- if (c == 0)
- printk
- ("!! debug char is null (cnt=%d) !!",
- cnt);
- else
- queueDebugChar(c);
- }
- base_addr[CyREOIR] = 0;
- i = debugiq.out;
- if (i == debugiq.in)
- panic("Debug input queue empty!");
- c = debugiq.buf[i];
- if (++i == DEBUG_LEN)
- i = 0;
- debugiq.out = i;
- break;
- } else
- base_addr[CyREOIR] = CyNOTRANS;
- }
- }
-
- base_addr[CyIER] = ier;
-
- local_irq_restore(flags);
-
- return (c);
-}
-
-void queueDebugChar(int c)
-{
- int i;
-
- i = debugiq.in;
- debugiq.buf[i] = c;
- if (++i == DEBUG_LEN)
- i = 0;
- if (i != debugiq.out)
- debugiq.in = i;
-}
-
-static void debug_setup()
-{
- unsigned long flags;
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- int i, cflag;
-
- cflag = B19200;
-
- local_irq_save(flags);
-
- for (i = 0; i < 4; i++) {
- base_addr[CyCAR] = i;
- base_addr[CyLICR] = i << 2;
- }
-
- debugiq.in = debugiq.out = 0;
-
- base_addr[CyCAR] = DEBUG_PORT;
-
- /* baud rate */
- i = cflag & CBAUD;
-
- base_addr[CyIER] = 0;
-
- base_addr[CyCMR] = CyASYNC;
- base_addr[CyLICR] = DEBUG_PORT << 2;
- base_addr[CyLIVR] = 0x5c;
-
- /* tx and rx baud rate */
-
- base_addr[CyTCOR] = baud_co[i];
- base_addr[CyTBPR] = baud_bpr[i];
- base_addr[CyRCOR] = baud_co[i] >> 5;
- base_addr[CyRBPR] = baud_bpr[i];
-
- /* set line characteristics according configuration */
-
- base_addr[CySCHR1] = 0;
- base_addr[CySCHR2] = 0;
- base_addr[CySCRL] = 0;
- base_addr[CySCRH] = 0;
- base_addr[CyCOR1] = Cy_8_BITS | CyPARITY_NONE;
- base_addr[CyCOR2] = 0;
- base_addr[CyCOR3] = Cy_1_STOP;
- base_addr[CyCOR4] = baud_cor4[i];
- base_addr[CyCOR5] = 0;
- base_addr[CyCOR6] = 0;
- base_addr[CyCOR7] = 0;
-
- write_cy_cmd(base_addr, CyINIT_CHAN);
- write_cy_cmd(base_addr, CyENB_RCVR);
-
- base_addr[CyCAR] = DEBUG_PORT; /* !!! Is this needed? */
-
- base_addr[CyRTPRL] = 2;
- base_addr[CyRTPRH] = 0;
-
- base_addr[CyMSVR1] = CyRTS;
- base_addr[CyMSVR2] = CyDTR;
-
- base_addr[CyIER] = CyRxData;
-
- local_irq_restore(flags);
-
-} /* debug_setup */
-
-#endif
-
MODULE_LICENSE("GPL");
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
index 7ee5216..cc1e985 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/char/tty_buffer.c
@@ -238,7 +238,7 @@ EXPORT_SYMBOL_GPL(tty_buffer_request_room);
* @size: size
*
* Queue a series of bytes to the tty buffering. All the characters
- * passed are marked as without error. Returns the number added.
+ * passed are marked with the supplied flag. Returns the number added.
*
* Locking: Called functions may take tty->buf.lock
*/
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index fb09bb3..aa9bc9e 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -149,7 +149,7 @@ static ssize_t smi_data_buf_size_store(struct device *dev,
return count;
}
-static ssize_t smi_data_read(struct kobject *kobj,
+static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
@@ -162,7 +162,7 @@ static ssize_t smi_data_read(struct kobject *kobj,
return ret;
}
-static ssize_t smi_data_write(struct kobject *kobj,
+static ssize_t smi_data_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index 3a44602..2f452f1 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -522,7 +522,7 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
rbu_data.image_update_buffer, rbu_data.bios_image_size);
}
-static ssize_t read_rbu_data(struct kobject *kobj,
+static ssize_t read_rbu_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
@@ -576,7 +576,7 @@ static void callbackfn_rbu(const struct firmware *fw, void *context)
release_firmware(fw);
}
-static ssize_t read_rbu_image_type(struct kobject *kobj,
+static ssize_t read_rbu_image_type(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
@@ -586,7 +586,7 @@ static ssize_t read_rbu_image_type(struct kobject *kobj,
return size;
}
-static ssize_t write_rbu_image_type(struct kobject *kobj,
+static ssize_t write_rbu_image_type(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
@@ -647,7 +647,7 @@ static ssize_t write_rbu_image_type(struct kobject *kobj,
return rc;
}
-static ssize_t read_rbu_packet_size(struct kobject *kobj,
+static ssize_t read_rbu_packet_size(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
@@ -660,7 +660,7 @@ static ssize_t read_rbu_packet_size(struct kobject *kobj,
return size;
}
-static ssize_t write_rbu_packet_size(struct kobject *kobj,
+static ssize_t write_rbu_packet_size(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 81b70bd..2a62ec6 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -402,7 +402,7 @@ efivar_unregister(struct efivar_entry *var)
}
-static ssize_t efivar_create(struct kobject *kobj,
+static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
@@ -461,7 +461,7 @@ static ssize_t efivar_create(struct kobject *kobj,
return count;
}
-static ssize_t efivar_delete(struct kobject *kobj,
+static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index eb0c3fe..cae1b8c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -399,7 +399,7 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
goto free_id;
}
- pdesc->value_sd = sysfs_get_dirent(dev->kobj.sd, "value");
+ pdesc->value_sd = sysfs_get_dirent(dev->kobj.sd, NULL, "value");
if (!pdesc->value_sd) {
ret = -ENODEV;
goto free_id;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 25bbd30..387166d 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -193,8 +193,9 @@ static ssize_t enabled_show(struct device *device,
"disabled");
}
-static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static ssize_t edid_show(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
{
struct device *connector_dev = container_of(kobj, struct device, kobj);
struct drm_connector *connector = to_drm_connector(connector_dev);
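The dcdbas, dell_rbu, efivars and drm_sysfs hunks above are one mechanical
change: sysfs binary-attribute callbacks now receive the opening struct file
as their first argument. A sketch of the resulting callback shape, using
hypothetical names; as in the hunks, the body simply ignores the new
parameter.

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t example_bin_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr,
				char *buf, loff_t pos, size_t count)
{
	return 0;	/* filp is available but unused here */
}

static struct bin_attribute example_attr = {
	.attr	= { .name = "example", .mode = 0444 },
	.read	= example_bin_read,
};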
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 71d4c07..76ba59b 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -86,6 +86,12 @@ config HID_BELKIN
---help---
Support for Belkin Flip KVM and Wireless keyboard.
+config HID_CANDO
+ tristate "Cando dual touch panel"
+ depends on USB_HID
+ ---help---
+ Support for Cando dual touch panel.
+
config HID_CHERRY
tristate "Cherry" if EMBEDDED
depends on USB_HID
@@ -100,6 +106,21 @@ config HID_CHICONY
---help---
Support for Chicony Tactical pad.
+config HID_PRODIKEYS
+ tristate "Prodikeys PC-MIDI Keyboard support"
+ depends on USB_HID && SND
+ select SND_RAWMIDI
+ ---help---
+	  Support for the Prodikeys PC-MIDI keyboard.
+ Say Y here to enable support for this device.
+ - Prodikeys PC-MIDI keyboard.
+ The Prodikeys PC-MIDI acts as a USB Audio device, with one MIDI
+ input and one MIDI output. These MIDI jacks appear as
+ a sound "card" in the ALSA sound system.
+ Note: if you say N here, this device will still function as a basic
+ multimedia keyboard, but will lack support for the musical keyboard
+ and some additional multimedia keys.
+
config HID_CYPRESS
tristate "Cypress" if EMBEDDED
depends on USB_HID
@@ -108,9 +129,8 @@ config HID_CYPRESS
Support for cypress mouse and barcode readers.
config HID_DRAGONRISE
- tristate "DragonRise Inc. support" if EMBEDDED
+ tristate "DragonRise Inc. support"
depends on USB_HID
- default !EMBEDDED
---help---
	  Say Y here if you have DragonRise Inc. game controllers.
@@ -122,6 +142,12 @@ config DRAGONRISE_FF
Say Y here if you want to enable force feedback support for DragonRise Inc.
game controllers.
+config HID_EGALAX
+ tristate "eGalax multi-touch panel"
+ depends on USB_HID
+ ---help---
+ Support for the eGalax dual-touch panel.
+
config HID_EZKEY
tristate "Ezkey" if EMBEDDED
depends on USB_HID
@@ -137,16 +163,14 @@ config HID_KYE
Support for Kye/Genius Ergo Mouse.
config HID_GYRATION
- tristate "Gyration" if EMBEDDED
+ tristate "Gyration"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Gyration remote control.
config HID_TWINHAN
- tristate "Twinhan" if EMBEDDED
+ tristate "Twinhan"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Twinhan IR remote control.
@@ -233,16 +257,14 @@ config HID_NTRIG
Support for N-Trig touch screen.
config HID_ORTEK
- tristate "Ortek" if EMBEDDED
+ tristate "Ortek"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Ortek WKB-2000 wireless keyboard + mouse trackpad.
config HID_PANTHERLORD
- tristate "Pantherlord support" if EMBEDDED
+ tristate "Pantherlord support"
depends on USB_HID
- default !EMBEDDED
---help---
Say Y here if you have a PantherLord/GreenAsia based game controller
or adapter.
@@ -256,29 +278,90 @@ config PANTHERLORD_FF
or adapter and want to enable force feedback support for it.
config HID_PETALYNX
- tristate "Petalynx" if EMBEDDED
+ tristate "Petalynx"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Petalynx Maxter remote control.
+config HID_PICOLCD
+ tristate "PicoLCD (graphic version)"
+ depends on USB_HID
+ ---help---
+	  This provides support for Minibox PicoLCD devices; currently
+ only the graphical ones are supported.
+
+ This includes support for the following device features:
+ - Keypad
+ - Switching between Firmware and Flash mode
+ - EEProm / Flash access (via debugfs)
+ Features selectively enabled:
+ - Framebuffer for monochrome 256x64 display
+ - Backlight control
+ - Contrast control
+ - General purpose outputs
+ Features that are not (yet) supported:
+ - IR
+
+config HID_PICOLCD_FB
+ bool "Framebuffer support" if EMBEDDED
+ default !EMBEDDED
+ depends on HID_PICOLCD
+ depends on HID_PICOLCD=FB || FB=y
+ select FB_DEFERRED_IO
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ ---help---
+ Provide access to PicoLCD's 256x64 monochrome display via a
+	  framebuffer device.
+
+config HID_PICOLCD_BACKLIGHT
+ bool "Backlight control" if EMBEDDED
+ default !EMBEDDED
+ depends on HID_PICOLCD
+ depends on HID_PICOLCD=BACKLIGHT_CLASS_DEVICE || BACKLIGHT_CLASS_DEVICE=y
+ ---help---
+ Provide access to PicoLCD's backlight control via backlight
+ class.
+
+config HID_PICOLCD_LCD
+ bool "Contrast control" if EMBEDDED
+ default !EMBEDDED
+ depends on HID_PICOLCD
+ depends on HID_PICOLCD=LCD_CLASS_DEVICE || LCD_CLASS_DEVICE=y
+ ---help---
+ Provide access to PicoLCD's LCD contrast via lcd class.
+
+config HID_PICOLCD_LEDS
+ bool "GPO via leds class" if EMBEDDED
+ default !EMBEDDED
+ depends on HID_PICOLCD
+ depends on HID_PICOLCD=LEDS_CLASS || LEDS_CLASS=y
+ ---help---
+ Provide access to PicoLCD's GPO pins via leds class.
+
config HID_QUANTA
tristate "Quanta Optical Touch"
depends on USB_HID
---help---
Support for Quanta Optical Touch dual-touch panels.
+config HID_ROCCAT_KONE
+ tristate "Roccat Kone Mouse support"
+ depends on USB_HID
+ ---help---
+ Support for Roccat Kone mouse.
+
config HID_SAMSUNG
- tristate "Samsung" if EMBEDDED
+ tristate "Samsung"
depends on USB_HID
- default !EMBEDDED
---help---
- Support for Samsung InfraRed remote control.
+ Support for Samsung InfraRed remote control or keyboards.
config HID_SONY
- tristate "Sony" if EMBEDDED
+ tristate "Sony"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Sony PS3 controller.
@@ -289,16 +372,14 @@ config HID_STANTUM
Support for Stantum multitouch panel.
config HID_SUNPLUS
- tristate "Sunplus" if EMBEDDED
+ tristate "Sunplus"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Sunplus wireless desktop.
config HID_GREENASIA
- tristate "GreenAsia (Product ID 0x12) support" if EMBEDDED
+ tristate "GreenAsia (Product ID 0x12) support"
depends on USB_HID
- default !EMBEDDED
---help---
Say Y here if you have a GreenAsia (Product ID 0x12) based game
controller or adapter.
@@ -313,9 +394,8 @@ config GREENASIA_FF
and want to enable force feedback support for it.
config HID_SMARTJOYPLUS
- tristate "SmartJoy PLUS PS2/USB adapter support" if EMBEDDED
+ tristate "SmartJoy PLUS PS2/USB adapter support"
depends on USB_HID
- default !EMBEDDED
---help---
Support for SmartJoy PLUS PS2/USB adapter.
@@ -328,16 +408,14 @@ config SMARTJOYPLUS_FF
enable force feedback support for it.
config HID_TOPSEED
- tristate "TopSeed Cyberlink remote control support" if EMBEDDED
+ tristate "TopSeed Cyberlink remote control support"
depends on USB_HID
- default !EMBEDDED
---help---
- Say Y if you have a TopSeed Cyberlink remote control.
+ Say Y if you have a TopSeed Cyberlink or BTC Emprex remote control.
config HID_THRUSTMASTER
- tristate "ThrustMaster devices support" if EMBEDDED
+ tristate "ThrustMaster devices support"
depends on USB_HID
- default !EMBEDDED
---help---
Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or
a THRUSTMASTER Ferrari GT Rumble Wheel.
@@ -357,10 +435,17 @@ config HID_WACOM
---help---
Support for Wacom Graphire Bluetooth tablet.
+config HID_WACOM_POWER_SUPPLY
+ bool "Wacom Bluetooth devices power supply status support"
+ depends on HID_WACOM
+ select POWER_SUPPLY
+ ---help---
+ Say Y here if you want to enable power supply status monitoring for
+ Wacom Bluetooth devices.
+
config HID_ZEROPLUS
- tristate "Zeroplus based game controller support" if EMBEDDED
+ tristate "Zeroplus based game controller support"
depends on USB_HID
- default !EMBEDDED
---help---
Say Y here if you have a Zeroplus based game controller.
@@ -372,6 +457,12 @@ config ZEROPLUS_FF
Say Y here if you have a Zeroplus based game controller and want
to have force feedback support for it.
+config HID_ZYDACRON
+ tristate "Zydacron remote control support"
+ depends on USB_HID
+ ---help---
+ Support for Zydacron remote control.
+
endmenu
endif # HID_SUPPORT
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 0b2618f..22e47eae 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -26,10 +26,12 @@ obj-$(CONFIG_HID_3M_PCT) += hid-3m-pct.o
obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o
obj-$(CONFIG_HID_APPLE) += hid-apple.o
obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
+obj-$(CONFIG_HID_CANDO) += hid-cando.o
obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o
obj-$(CONFIG_HID_DRAGONRISE) += hid-drff.o
+obj-$(CONFIG_HID_EGALAX) += hid-egalax.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
@@ -41,9 +43,12 @@ obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
obj-$(CONFIG_HID_MOSART) += hid-mosart.o
obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o
obj-$(CONFIG_HID_ORTEK) += hid-ortek.o
+obj-$(CONFIG_HID_PRODIKEYS) += hid-prodikeys.o
obj-$(CONFIG_HID_QUANTA) += hid-quanta.o
obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
+obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
+obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
obj-$(CONFIG_HID_SONY) += hid-sony.o
@@ -54,6 +59,7 @@ obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
+obj-$(CONFIG_HID_ZYDACRON) += hid-zydacron.o
obj-$(CONFIG_HID_WACOM) += hid-wacom.o
obj-$(CONFIG_USB_HID) += usbhid/
diff --git a/drivers/hid/hid-3m-pct.c b/drivers/hid/hid-3m-pct.c
index c31e0be..2a0d56b 100644
--- a/drivers/hid/hid-3m-pct.c
+++ b/drivers/hid/hid-3m-pct.c
@@ -1,7 +1,7 @@
/*
* HID driver for 3M PCT multitouch panels
*
- * Copyright (c) 2009 Stephane Chatty <chatty@enac.fr>
+ * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr>
*
*/
@@ -25,7 +25,7 @@ MODULE_LICENSE("GPL");
#include "hid-ids.h"
struct mmm_finger {
- __s32 x, y;
+ __s32 x, y, w, h;
__u8 rank;
bool touch, valid;
};
@@ -82,7 +82,18 @@ static int mmm_input_mapping(struct hid_device *hdev, struct hid_input *hi,
/* touchscreen emulation */
hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
return 1;
+ case HID_DG_WIDTH:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOUCH_MAJOR);
+ return 1;
+ case HID_DG_HEIGHT:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOUCH_MINOR);
+ input_set_abs_params(hi->input, ABS_MT_ORIENTATION,
+ 1, 1, 0, 0);
+ return 1;
case HID_DG_CONTACTID:
+ field->logical_maximum = 59;
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_TRACKING_ID);
return 1;
@@ -128,9 +139,15 @@ static void mmm_filter_event(struct mmm_data *md, struct input_dev *input)
/* this finger is just placeholder data, ignore */
} else if (f->touch) {
/* this finger is on the screen */
+ int wide = (f->w > f->h);
input_event(input, EV_ABS, ABS_MT_TRACKING_ID, i);
input_event(input, EV_ABS, ABS_MT_POSITION_X, f->x);
input_event(input, EV_ABS, ABS_MT_POSITION_Y, f->y);
+ input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR,
+ wide ? f->w : f->h);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR,
+ wide ? f->h : f->w);
input_mt_sync(input);
/*
* touchscreen emulation: maintain the age rank
@@ -197,6 +214,14 @@ static int mmm_event(struct hid_device *hid, struct hid_field *field,
case HID_DG_CONFIDENCE:
md->valid = value;
break;
+ case HID_DG_WIDTH:
+ if (md->valid)
+ md->f[md->curid].w = value;
+ break;
+ case HID_DG_HEIGHT:
+ if (md->valid)
+ md->f[md->curid].h = value;
+ break;
case HID_DG_CONTACTID:
if (md->valid) {
md->curid = value;
@@ -255,6 +280,7 @@ static void mmm_remove(struct hid_device *hdev)
static const struct hid_device_id mmm_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) },
{ }
};
MODULE_DEVICE_TABLE(hid, mmm_devices);
@@ -287,5 +313,4 @@ static void __exit mmm_exit(void)
module_init(mmm_init);
module_exit(mmm_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c
new file mode 100644
index 0000000..4267a6f
--- /dev/null
+++ b/drivers/hid/hid-cando.c
@@ -0,0 +1,272 @@
+/*
+ * HID driver for Cando dual-touch panels
+ *
+ * Copyright (c) 2010 Stephane Chatty <chatty@enac.fr>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("Cando dual-touch panel");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+struct cando_data {
+ __u16 x, y;
+ __u8 id;
+ __s8 oldest; /* id of the oldest finger in previous frame */
+ bool valid; /* valid finger data, or just placeholder? */
+ bool first; /* is this the first finger in this frame? */
+ __s8 firstid; /* id of the first finger in the frame */
+ __u16 firstx, firsty; /* (x, y) of the first finger in the frame */
+};
+
+static int cando_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ switch (usage->hid & HID_USAGE_PAGE) {
+
+ case HID_UP_GENDESK:
+ switch (usage->hid) {
+ case HID_GD_X:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_X);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_X,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ case HID_GD_Y:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_Y);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_Y,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ }
+ return 0;
+
+ case HID_UP_DIGITIZER:
+ switch (usage->hid) {
+ case HID_DG_TIPSWITCH:
+ case HID_DG_CONTACTMAX:
+ return -1;
+ case HID_DG_INRANGE:
+ /* touchscreen emulation */
+ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+ return 1;
+ case HID_DG_CONTACTID:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TRACKING_ID);
+ return 1;
+ }
+ return 0;
+ }
+
+ return 0;
+}
+
+static int cando_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (usage->type == EV_KEY || usage->type == EV_ABS)
+ clear_bit(usage->code, *bit);
+
+ return 0;
+}
+
+/*
+ * this function is called when a whole finger has been parsed,
+ * so that it can decide what to send to the input layer.
+ */
+static void cando_filter_event(struct cando_data *td, struct input_dev *input)
+{
+ td->first = !td->first; /* touchscreen emulation */
+
+ if (!td->valid) {
+ /*
+ * touchscreen emulation: if this is the second finger and
+ * the first was valid, the first was the oldest; if the
+ * first was not valid and there was a valid finger in the
+ * previous frame, this is a release.
+ */
+ if (td->first) {
+ td->firstid = -1;
+ } else if (td->firstid >= 0) {
+ input_event(input, EV_ABS, ABS_X, td->firstx);
+ input_event(input, EV_ABS, ABS_Y, td->firsty);
+ td->oldest = td->firstid;
+ } else if (td->oldest >= 0) {
+ input_event(input, EV_KEY, BTN_TOUCH, 0);
+ td->oldest = -1;
+ }
+
+ return;
+ }
+
+ input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
+
+ input_mt_sync(input);
+
+ /*
+ * touchscreen emulation: if there was no touching finger previously,
+ * emit touch event
+ */
+ if (td->oldest < 0) {
+ input_event(input, EV_KEY, BTN_TOUCH, 1);
+ td->oldest = td->id;
+ }
+
+ /*
+ * touchscreen emulation: if this is the first finger, wait for the
+ * second; the oldest is then the second if it was the oldest already
+ * or if there was no first, the first otherwise.
+ */
+ if (td->first) {
+ td->firstx = td->x;
+ td->firsty = td->y;
+ td->firstid = td->id;
+ } else {
+ int x, y, oldest;
+ if (td->id == td->oldest || td->firstid < 0) {
+ x = td->x;
+ y = td->y;
+ oldest = td->id;
+ } else {
+ x = td->firstx;
+ y = td->firsty;
+ oldest = td->firstid;
+ }
+ input_event(input, EV_ABS, ABS_X, x);
+ input_event(input, EV_ABS, ABS_Y, y);
+ td->oldest = oldest;
+ }
+}
+
+
+static int cando_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct cando_data *td = hid_get_drvdata(hid);
+
+ if (hid->claimed & HID_CLAIMED_INPUT) {
+ struct input_dev *input = field->hidinput->input;
+
+ switch (usage->hid) {
+ case HID_DG_INRANGE:
+ td->valid = value;
+ break;
+ case HID_DG_CONTACTID:
+ td->id = value;
+ break;
+ case HID_GD_X:
+ td->x = value;
+ break;
+ case HID_GD_Y:
+ td->y = value;
+ cando_filter_event(td, input);
+ break;
+ case HID_DG_TIPSWITCH:
+ /* avoid interference from generic hidinput handling */
+ break;
+
+ default:
+ /* fallback to the generic hidinput handling */
+ return 0;
+ }
+ }
+
+ /* we have handled the hidinput part, now remains hiddev */
+ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+ hid->hiddev_hid_event(hid, field, usage, value);
+
+ return 1;
+}
+
+static int cando_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct cando_data *td;
+
+ td = kmalloc(sizeof(struct cando_data), GFP_KERNEL);
+ if (!td) {
+ dev_err(&hdev->dev, "cannot allocate Cando Touch data\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, td);
+ td->first = false;
+ td->oldest = -1;
+ td->valid = false;
+
+ ret = hid_parse(hdev);
+ if (!ret)
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+ if (ret)
+ kfree(td);
+
+ return ret;
+}
+
+static void cando_remove(struct hid_device *hdev)
+{
+ hid_hw_stop(hdev);
+ kfree(hid_get_drvdata(hdev));
+ hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id cando_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+ USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+ USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, cando_devices);
+
+static const struct hid_usage_id cando_grabbed_usages[] = {
+ { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver cando_driver = {
+ .name = "cando-touch",
+ .id_table = cando_devices,
+ .probe = cando_probe,
+ .remove = cando_remove,
+ .input_mapping = cando_input_mapping,
+ .input_mapped = cando_input_mapped,
+ .usage_table = cando_grabbed_usages,
+ .event = cando_event,
+};
+
+static int __init cando_init(void)
+{
+ return hid_register_driver(&cando_driver);
+}
+
+static void __exit cando_exit(void)
+{
+ hid_unregister_driver(&cando_driver);
+}
+
+module_init(cando_init);
+module_exit(cando_exit);
+
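Both new panel drivers (this file and hid-egalax.c below) report contacts with
the same anonymous multitouch pattern: per-contact ABS_MT_* events closed by
input_mt_sync(), plus single-touch emulation through ABS_X/ABS_Y and
BTN_TOUCH. A condensed sketch of the per-contact part, not taken verbatim from
either driver:

#include <linux/input.h>

static void report_contact_example(struct input_dev *input,
				   int id, int x, int y)
{
	input_event(input, EV_ABS, ABS_MT_TRACKING_ID, id);
	input_event(input, EV_ABS, ABS_MT_POSITION_X, x);
	input_event(input, EV_ABS, ABS_MT_POSITION_Y, y);
	input_mt_sync(input);		/* terminates this contact's packet */
}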
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 143e788..e10e314 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -653,10 +653,9 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
if (device->driver->report_fixup)
device->driver->report_fixup(device, start, size);
- device->rdesc = kmalloc(size, GFP_KERNEL);
+ device->rdesc = kmemdup(start, size, GFP_KERNEL);
if (device->rdesc == NULL)
return -ENOMEM;
- memcpy(device->rdesc, start, size);
device->rsize = size;
parser = vmalloc(sizeof(struct hid_parser));
@@ -940,13 +939,8 @@ static void hid_output_field(struct hid_field *field, __u8 *data)
unsigned count = field->report_count;
unsigned offset = field->report_offset;
unsigned size = field->report_size;
- unsigned bitsused = offset + count * size;
unsigned n;
- /* make sure the unused bits in the last byte are zeros */
- if (count > 0 && size > 0 && (bitsused % 8) != 0)
- data[(bitsused-1)/8] &= (1 << (bitsused % 8)) - 1;
-
for (n = 0; n < count; n++) {
if (field->logical_minimum < 0) /* signed values */
implement(data, offset + n * size, size, s32ton(field->value[n], size));
@@ -966,6 +960,7 @@ void hid_output_report(struct hid_report *report, __u8 *data)
if (report->id > 0)
*data++ = report->id;
+ memset(data, 0, ((report->size - 1) >> 3) + 1);
for (n = 0; n < report->maxfield; n++)
hid_output_field(report->field[n], data);
}
@@ -1086,35 +1081,28 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
buf = kmalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
- if (!buf) {
- report = hid_get_report(report_enum, data);
+ if (!buf)
goto nomem;
- }
-
- snprintf(buf, HID_DEBUG_BUFSIZE - 1,
- "\nreport (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un");
- hid_debug_event(hid, buf);
-
- report = hid_get_report(report_enum, data);
- if (!report) {
- kfree(buf);
- return -1;
- }
/* dump the report */
snprintf(buf, HID_DEBUG_BUFSIZE - 1,
- "report %d (size %u) = ", report->id, size);
+ "\nreport (size %u) (%snumbered) = ", size, report_enum->numbered ? "" : "un");
hid_debug_event(hid, buf);
+
for (i = 0; i < size; i++) {
snprintf(buf, HID_DEBUG_BUFSIZE - 1,
" %02x", data[i]);
hid_debug_event(hid, buf);
}
hid_debug_event(hid, "\n");
-
kfree(buf);
nomem:
+ report = hid_get_report(report_enum, data);
+
+ if (!report)
+ return -1;
+
if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
ret = hdrv->raw_event(hid, report, data, size);
if (ret != 0)
@@ -1167,6 +1155,8 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
unsigned int i;
int len;
+ if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
+ connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
if (hdev->bus != BUS_USB)
connect_mask &= ~HID_CONNECT_HIDDEV;
if (hid_hiddev(hdev))
@@ -1246,6 +1236,7 @@ EXPORT_SYMBOL_GPL(hid_disconnect);
/* a list of devices for which there is a specialized driver on HID bus */
static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) },
@@ -1290,14 +1281,19 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
@@ -1331,6 +1327,8 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
@@ -1342,7 +1340,9 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
@@ -1359,8 +1359,10 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
{ }
@@ -1757,7 +1759,7 @@ int hid_add_device(struct hid_device *hdev)
/* we need to kill them here, otherwise they will stay allocated to
* wait for coming driver */
- if (hid_ignore(hdev))
+ if (!(hdev->quirks & HID_QUIRK_NO_IGNORE) && hid_ignore(hdev))
return -ENODEV;
/* XXX hack, any other cleaner solution after the driver core
@@ -1765,11 +1767,12 @@ int hid_add_device(struct hid_device *hdev)
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
hdev->vendor, hdev->product, atomic_inc_return(&id));
+ hid_debug_register(hdev, dev_name(&hdev->dev));
ret = device_add(&hdev->dev);
if (!ret)
hdev->status |= HID_STAT_ADDED;
-
- hid_debug_register(hdev, dev_name(&hdev->dev));
+ else
+ hid_debug_unregister(hdev);
return ret;
}
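In the hid_output_report hunk above, the output buffer is now zeroed once for
the whole report, so hid_output_field no longer has to mask the unused bits of
the last byte. The memset length converts the report size in bits into whole
bytes, as in this small worked example:

/* report->size is in bits; round up to the number of bytes it occupies. */
static unsigned int report_bytes(unsigned int size_bits)
{
	return ((size_bits - 1) >> 3) + 1;
}

For instance, report_bytes(8) is 1 and report_bytes(9) is 2, which matches the
number of bytes the memset must clear before the field values are written.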
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
new file mode 100644
index 0000000..f44bdc0
--- /dev/null
+++ b/drivers/hid/hid-egalax.c
@@ -0,0 +1,281 @@
+/*
+ * HID driver for eGalax dual-touch panels
+ *
+ * Copyright (c) 2010 Stephane Chatty <chatty@enac.fr>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include "usbhid/usbhid.h"
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("eGalax dual-touch panel");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+struct egalax_data {
+ __u16 x, y, z;
+ __u8 id;
+ bool first; /* is this the first finger in the frame? */
+ bool valid; /* valid finger data, or just placeholder? */
+ bool activity; /* at least one active finger previously? */
+ __u16 lastx, lasty; /* latest valid (x, y) in the frame */
+};
+
+static int egalax_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ switch (usage->hid & HID_USAGE_PAGE) {
+
+ case HID_UP_GENDESK:
+ switch (usage->hid) {
+ case HID_GD_X:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_X);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_X,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ case HID_GD_Y:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_Y);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_Y,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ }
+ return 0;
+
+ case HID_UP_DIGITIZER:
+ switch (usage->hid) {
+ case HID_DG_TIPSWITCH:
+ /* touchscreen emulation */
+ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+ return 1;
+ case HID_DG_INRANGE:
+ case HID_DG_CONFIDENCE:
+ case HID_DG_CONTACTCOUNT:
+ case HID_DG_CONTACTMAX:
+ return -1;
+ case HID_DG_CONTACTID:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TRACKING_ID);
+ return 1;
+ case HID_DG_TIPPRESSURE:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_PRESSURE);
+ return 1;
+ }
+ return 0;
+ }
+
+ /* ignore others (from other reports we won't get anyway) */
+ return -1;
+}
+
+static int egalax_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (usage->type == EV_KEY || usage->type == EV_ABS)
+ clear_bit(usage->code, *bit);
+
+ return 0;
+}
+
+/*
+ * this function is called when a whole finger has been parsed,
+ * so that it can decide what to send to the input layer.
+ */
+static void egalax_filter_event(struct egalax_data *td, struct input_dev *input)
+{
+ td->first = !td->first; /* touchscreen emulation */
+
+ if (td->valid) {
+ /* emit multitouch events */
+ input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
+ input_event(input, EV_ABS, ABS_MT_PRESSURE, td->z);
+
+ input_mt_sync(input);
+
+ /*
+ * touchscreen emulation: store (x, y) as
+ * the last valid values in this frame
+ */
+ td->lastx = td->x;
+ td->lasty = td->y;
+ }
+
+ /*
+ * touchscreen emulation: if this is the second finger and at least
+ * one in this frame is valid, the latest valid in the frame is
+ * the oldest on the panel, the one we want for single touch
+ */
+ if (!td->first && td->activity) {
+ input_event(input, EV_ABS, ABS_X, td->lastx);
+ input_event(input, EV_ABS, ABS_Y, td->lasty);
+ }
+
+ if (!td->valid) {
+ /*
+ * touchscreen emulation: if the first finger is invalid
+ * and there previously was finger activity, this is a release
+ */
+ if (td->first && td->activity) {
+ input_event(input, EV_KEY, BTN_TOUCH, 0);
+ td->activity = false;
+ }
+ return;
+ }
+
+
+ /* touchscreen emulation: if no previous activity, emit touch event */
+ if (!td->activity) {
+ input_event(input, EV_KEY, BTN_TOUCH, 1);
+ td->activity = true;
+ }
+}
+
+
+static int egalax_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct egalax_data *td = hid_get_drvdata(hid);
+
+ if (hid->claimed & HID_CLAIMED_INPUT) {
+ struct input_dev *input = field->hidinput->input;
+
+ switch (usage->hid) {
+ case HID_DG_INRANGE:
+ case HID_DG_CONFIDENCE:
+ /* avoid interference from generic hidinput handling */
+ break;
+ case HID_DG_TIPSWITCH:
+ td->valid = value;
+ break;
+ case HID_DG_TIPPRESSURE:
+ td->z = value;
+ break;
+ case HID_DG_CONTACTID:
+ td->id = value;
+ break;
+ case HID_GD_X:
+ td->x = value;
+ break;
+ case HID_GD_Y:
+ td->y = value;
+ /* this is the last field in a finger */
+ egalax_filter_event(td, input);
+ break;
+ case HID_DG_CONTACTCOUNT:
+ /* touch emulation: this is the last field in a frame */
+ td->first = false;
+ break;
+
+ default:
+ /* fallback to the generic hidinput handling */
+ return 0;
+ }
+ }
+
+ /* we have handled the hidinput part, now remains hiddev */
+ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+ hid->hiddev_hid_event(hid, field, usage, value);
+
+ return 1;
+}
+
+static int egalax_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct egalax_data *td;
+ struct hid_report *report;
+
+ td = kmalloc(sizeof(struct egalax_data), GFP_KERNEL);
+ if (!td) {
+ dev_err(&hdev->dev, "cannot allocate eGalax data\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, td);
+
+ ret = hid_parse(hdev);
+ if (ret)
+ goto end;
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret)
+ goto end;
+
+ report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[5];
+ if (report) {
+ report->field[0]->value[0] = 2;
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+ }
+
+end:
+ if (ret)
+ kfree(td);
+
+ return ret;
+}
+
+static void egalax_remove(struct hid_device *hdev)
+{
+ hid_hw_stop(hdev);
+ kfree(hid_get_drvdata(hdev));
+ hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id egalax_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, egalax_devices);
+
+static const struct hid_usage_id egalax_grabbed_usages[] = {
+ { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver egalax_driver = {
+ .name = "egalax-touch",
+ .id_table = egalax_devices,
+ .probe = egalax_probe,
+ .remove = egalax_remove,
+ .input_mapping = egalax_input_mapping,
+ .input_mapped = egalax_input_mapped,
+ .usage_table = egalax_grabbed_usages,
+ .event = egalax_event,
+};
+
+static int __init egalax_init(void)
+{
+ return hid_register_driver(&egalax_driver);
+}
+
+static void __exit egalax_exit(void)
+{
+ hid_unregister_driver(&egalax_driver);
+}
+
+module_init(egalax_init);
+module_exit(egalax_exit);
+
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 09d2764..9776896 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -20,6 +20,7 @@
#define USB_VENDOR_ID_3M 0x0596
#define USB_DEVICE_ID_3M1968 0x0500
+#define USB_DEVICE_ID_3M2256 0x0502
#define USB_VENDOR_ID_A4TECH 0x09da
#define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
@@ -123,6 +124,13 @@
#define USB_VENDOR_ID_BERKSHIRE 0x0c98
#define USB_DEVICE_ID_BERKSHIRE_PCWD 0x1140
+#define USB_VENDOR_ID_BTC 0x046e
+#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
+
+#define USB_VENDOR_ID_CANDO 0x2087
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03
+
#define USB_VENDOR_ID_CH 0x068e
#define USB_DEVICE_ID_CH_PRO_PEDALS 0x00f2
#define USB_DEVICE_ID_CH_COMBATSTICK 0x00f4
@@ -148,6 +156,9 @@
#define USB_DEVICE_ID_CODEMERCS_IOW_FIRST 0x1500
#define USB_DEVICE_ID_CODEMERCS_IOW_LAST 0x15ff
+#define USB_VENDOR_ID_CREATIVELABS 0x041e
+#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
+
#define USB_VENDOR_ID_CYGNAL 0x10c4
#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a
@@ -171,6 +182,10 @@
#define USB_VENDOR_ID_DRAGONRISE 0x0079
+#define USB_VENDOR_ID_DWAV 0x0eef
+#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
+
#define USB_VENDOR_ID_ELO 0x04E7
#define USB_DEVICE_ID_ELO_TS2700 0x0020
@@ -342,6 +357,8 @@
#define USB_VENDOR_ID_MICROCHIP 0x04d8
#define USB_DEVICE_ID_PICKIT1 0x0032
#define USB_DEVICE_ID_PICKIT2 0x0033
+#define USB_DEVICE_ID_PICOLCD 0xc002
+#define USB_DEVICE_ID_PICOLCD_BOOTLOADER 0xf002
#define USB_VENDOR_ID_MICROSOFT 0x045e
#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
@@ -400,6 +417,9 @@
#define USB_VENDOR_ID_PRODIGE 0x05af
#define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062
+#define USB_VENDOR_ID_ROCCAT 0x1e7d
+#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
+
#define USB_VENDOR_ID_SAITEK 0x06a3
#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
@@ -409,6 +429,7 @@
#define USB_VENDOR_ID_SAMSUNG 0x0419
#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
+#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600
#define USB_VENDOR_ID_SONY 0x054c
#define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b
@@ -457,6 +478,7 @@
#define USB_VENDOR_ID_WACOM 0x056a
#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81
+#define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH 0xbd
#define USB_VENDOR_ID_WISEGROUP 0x0925
#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005
@@ -475,6 +497,9 @@
#define USB_VENDOR_ID_ZEROPLUS 0x0c12
+#define USB_VENDOR_ID_ZYDACRON 0x13EC
+#define USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL 0x0006
+
#define USB_VENDOR_ID_KYE 0x0458
#define USB_DEVICE_ID_KYE_ERGO_525V 0x0087
#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 3677c90..f6433d8 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -126,6 +126,9 @@ static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage,
case 0x1004: lg_map_key_clear(KEY_VIDEO); break;
case 0x1005: lg_map_key_clear(KEY_AUDIO); break;
case 0x100a: lg_map_key_clear(KEY_DOCUMENTS); break;
+ /* The following two entries are Playlist 1 and 2 on the MX3200 */
+ case 0x100f: lg_map_key_clear(KEY_FN_1); break;
+ case 0x1010: lg_map_key_clear(KEY_FN_2); break;
case 0x1011: lg_map_key_clear(KEY_PREVIOUSSONG); break;
case 0x1012: lg_map_key_clear(KEY_NEXTSONG); break;
case 0x1013: lg_map_key_clear(KEY_CAMERA); break;
@@ -137,6 +140,7 @@ static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage,
case 0x1019: lg_map_key_clear(KEY_PROG1); break;
case 0x101a: lg_map_key_clear(KEY_PROG2); break;
case 0x101b: lg_map_key_clear(KEY_PROG3); break;
+ case 0x101c: lg_map_key_clear(KEY_CYCLEWINDOWS); break;
case 0x101f: lg_map_key_clear(KEY_ZOOMIN); break;
case 0x1020: lg_map_key_clear(KEY_ZOOMOUT); break;
case 0x1021: lg_map_key_clear(KEY_ZOOMRESET); break;
@@ -147,6 +151,11 @@ static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage,
case 0x1029: lg_map_key_clear(KEY_SHUFFLE); break;
case 0x102a: lg_map_key_clear(KEY_BACK); break;
case 0x102b: lg_map_key_clear(KEY_CYCLEWINDOWS); break;
+ case 0x102d: lg_map_key_clear(KEY_WWW); break;
+ /* The following two are 'Start/answer call' and 'End/reject call'
+ on the MX3200 */
+ case 0x1031: lg_map_key_clear(KEY_OK); break;
+ case 0x1032: lg_map_key_clear(KEY_CANCEL); break;
case 0x1041: lg_map_key_clear(KEY_BATTERY); break;
case 0x1042: lg_map_key_clear(KEY_WORDPROCESSOR); break;
case 0x1043: lg_map_key_clear(KEY_SPREADSHEET); break;
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 0d471fc2..f10d56a 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -354,12 +354,15 @@ static int magicmouse_probe(struct hid_device *hdev,
goto err_free;
}
- ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDINPUT);
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
dev_err(&hdev->dev, "magicmouse hw start failed\n");
goto err_free;
}
+ /* we are handling the input ourselves */
+ hidinput_disconnect(hdev);
+
report = hid_register_report(hdev, HID_INPUT_REPORT, TOUCH_REPORT_ID);
if (!report) {
dev_err(&hdev->dev, "unable to register touch report\n");
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 4777bbf..b6b0cae 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -24,6 +24,34 @@
#define NTRIG_DUPLICATE_USAGES 0x001
+static unsigned int min_width;
+module_param(min_width, uint, 0644);
+MODULE_PARM_DESC(min_width, "Minimum touch contact width to accept.");
+
+static unsigned int min_height;
+module_param(min_height, uint, 0644);
+MODULE_PARM_DESC(min_height, "Minimum touch contact height to accept.");
+
+static unsigned int activate_slack = 1;
+module_param(activate_slack, uint, 0644);
+MODULE_PARM_DESC(activate_slack, "Number of touch frames to ignore at "
+ "the start of touch input.");
+
+static unsigned int deactivate_slack = 4;
+module_param(deactivate_slack, uint, 0644);
+MODULE_PARM_DESC(deactivate_slack, "Number of empty frames to ignore before "
+ "deactivating touch.");
+
+static unsigned int activation_width = 64;
+module_param(activation_width, uint, 0644);
+MODULE_PARM_DESC(activation_width, "Width threshold to immediately start "
+ "processing touch events.");
+
+static unsigned int activation_height = 32;
+module_param(activation_height, uint, 0644);
+MODULE_PARM_DESC(activation_height, "Height threshold to immediately start "
+ "processing touch events.");
+
struct ntrig_data {
/* Incoming raw values for a single contact */
__u16 x, y, w, h;
@@ -37,6 +65,309 @@ struct ntrig_data {
__u8 mt_footer[4];
__u8 mt_foot_count;
+
+ /* The current activation state. */
+ __s8 act_state;
+
+ /* Empty frames to ignore before recognizing the end of activity */
+ __s8 deactivate_slack;
+
+ /* Frames to ignore before acknowledging the start of activity */
+ __s8 activate_slack;
+
+ /* Minimum size contact to accept */
+ __u16 min_width;
+ __u16 min_height;
+
+ /* Threshold to override activation slack */
+ __u16 activation_width;
+ __u16 activation_height;
+
+ __u16 sensor_logical_width;
+ __u16 sensor_logical_height;
+ __u16 sensor_physical_width;
+ __u16 sensor_physical_height;
+};
+
+
+static ssize_t show_phys_width(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->sensor_physical_width);
+}
+
+static DEVICE_ATTR(sensor_physical_width, S_IRUGO, show_phys_width, NULL);
+
+static ssize_t show_phys_height(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->sensor_physical_height);
+}
+
+static DEVICE_ATTR(sensor_physical_height, S_IRUGO, show_phys_height, NULL);
+
+static ssize_t show_log_width(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->sensor_logical_width);
+}
+
+static DEVICE_ATTR(sensor_logical_width, S_IRUGO, show_log_width, NULL);
+
+static ssize_t show_log_height(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->sensor_logical_height);
+}
+
+static DEVICE_ATTR(sensor_logical_height, S_IRUGO, show_log_height, NULL);
+
+static ssize_t show_min_width(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->min_width *
+ nd->sensor_physical_width /
+ nd->sensor_logical_width);
+}
+
+static ssize_t set_min_width(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > nd->sensor_physical_width)
+ return -EINVAL;
+
+ nd->min_width = val * nd->sensor_logical_width /
+ nd->sensor_physical_width;
+
+ return count;
+}
+
+static DEVICE_ATTR(min_width, S_IWUSR | S_IRUGO, show_min_width, set_min_width);
+
+static ssize_t show_min_height(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->min_height *
+ nd->sensor_physical_height /
+ nd->sensor_logical_height);
+}
+
+static ssize_t set_min_height(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > nd->sensor_physical_height)
+ return -EINVAL;
+
+ nd->min_height = val * nd->sensor_logical_height /
+ nd->sensor_physical_height;
+
+ return count;
+}
+
+static DEVICE_ATTR(min_height, S_IWUSR | S_IRUGO, show_min_height,
+ set_min_height);
+
+static ssize_t show_activate_slack(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->activate_slack);
+}
+
+static ssize_t set_activate_slack(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > 0x7f)
+ return -EINVAL;
+
+ nd->activate_slack = val;
+
+ return count;
+}
+
+static DEVICE_ATTR(activate_slack, S_IWUSR | S_IRUGO, show_activate_slack,
+ set_activate_slack);
+
+static ssize_t show_activation_width(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->activation_width *
+ nd->sensor_physical_width /
+ nd->sensor_logical_width);
+}
+
+static ssize_t set_activation_width(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > nd->sensor_physical_width)
+ return -EINVAL;
+
+ nd->activation_width = val * nd->sensor_logical_width /
+ nd->sensor_physical_width;
+
+ return count;
+}
+
+static DEVICE_ATTR(activation_width, S_IWUSR | S_IRUGO, show_activation_width,
+ set_activation_width);
+
+static ssize_t show_activation_height(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->activation_height *
+ nd->sensor_physical_height /
+ nd->sensor_logical_height);
+}
+
+static ssize_t set_activation_height(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > nd->sensor_physical_height)
+ return -EINVAL;
+
+ nd->activation_height = val * nd->sensor_logical_height /
+ nd->sensor_physical_height;
+
+ return count;
+}
+
+static DEVICE_ATTR(activation_height, S_IWUSR | S_IRUGO,
+ show_activation_height, set_activation_height);
+
+static ssize_t show_deactivate_slack(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", -nd->deactivate_slack);
+}
+
+static ssize_t set_deactivate_slack(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ /*
+ * No more than 8 terminal frames have been observed so far
+ * and higher slack is highly likely to leave the single
+ * touch emulation stuck down.
+ */
+ if (val > 7)
+ return -EINVAL;
+
+ nd->deactivate_slack = -val;
+
+ return count;
+}
+
+static DEVICE_ATTR(deactivate_slack, S_IWUSR | S_IRUGO, show_deactivate_slack,
+ set_deactivate_slack);
+
+static struct attribute *sysfs_attrs[] = {
+ &dev_attr_sensor_physical_width.attr,
+ &dev_attr_sensor_physical_height.attr,
+ &dev_attr_sensor_logical_width.attr,
+ &dev_attr_sensor_logical_height.attr,
+ &dev_attr_min_height.attr,
+ &dev_attr_min_width.attr,
+ &dev_attr_activate_slack.attr,
+ &dev_attr_activation_width.attr,
+ &dev_attr_activation_height.attr,
+ &dev_attr_deactivate_slack.attr,
+ NULL
+};
+
+static struct attribute_group ntrig_attribute_group = {
+ .attrs = sysfs_attrs
};
/*
@@ -49,6 +380,8 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
/* No special mappings needed for the pen and single touch */
if (field->physical)
return 0;
@@ -62,6 +395,21 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
input_set_abs_params(hi->input, ABS_X,
field->logical_minimum,
field->logical_maximum, 0, 0);
+
+ if (!nd->sensor_logical_width) {
+ nd->sensor_logical_width =
+ field->logical_maximum -
+ field->logical_minimum;
+ nd->sensor_physical_width =
+ field->physical_maximum -
+ field->physical_minimum;
+ nd->activation_width = activation_width *
+ nd->sensor_logical_width /
+ nd->sensor_physical_width;
+ nd->min_width = min_width *
+ nd->sensor_logical_width /
+ nd->sensor_physical_width;
+ }
return 1;
case HID_GD_Y:
hid_map_usage(hi, usage, bit, max,
@@ -69,6 +417,21 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
input_set_abs_params(hi->input, ABS_Y,
field->logical_minimum,
field->logical_maximum, 0, 0);
+
+ if (!nd->sensor_logical_height) {
+ nd->sensor_logical_height =
+ field->logical_maximum -
+ field->logical_minimum;
+ nd->sensor_physical_height =
+ field->physical_maximum -
+ field->physical_minimum;
+ nd->activation_height = activation_height *
+ nd->sensor_logical_height /
+ nd->sensor_physical_height;
+ nd->min_height = min_height *
+ nd->sensor_logical_height /
+ nd->sensor_physical_height;
+ }
return 1;
}
return 0;
@@ -201,20 +564,68 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
if (nd->mt_foot_count != 4)
break;
- /* Pen activity signal, trigger end of touch. */
+ /* Pen activity signal. */
if (nd->mt_footer[2]) {
+ /*
+ * When the pen deactivates touch, we see a bogus frame
+ * with ContactCount > 0. We can save a bit of work by
+ * ensuring act_state < 0 even if deactivation slack is
+ * turned off.
+ */
+ nd->act_state = deactivate_slack - 1;
nd->confidence = 0;
break;
}
- /* If the contact was invalid */
- if (!(nd->confidence && nd->mt_footer[0])
- || nd->w <= 250
- || nd->h <= 190) {
- nd->confidence = 0;
+ /*
+ * The first footer value indicates the presence of a
+ * finger.
+ */
+ if (nd->mt_footer[0]) {
+ /*
+ * We do not want to process contacts under
+ * the size threshold, but do not want to
+ * ignore them for activation state
+ */
+ if (nd->w < nd->min_width ||
+ nd->h < nd->min_height)
+ nd->confidence = 0;
+ } else
break;
+
+ if (nd->act_state > 0) {
+ /*
+ * Contact meets the activation size threshold
+ */
+ if (nd->w >= nd->activation_width &&
+ nd->h >= nd->activation_height) {
+ if (nd->id)
+ /*
+ * first contact, activate now
+ */
+ nd->act_state = 0;
+ else {
+ /*
+ * avoid corrupting this frame
+ * but ensure next frame will
+ * be active
+ */
+ nd->act_state = 1;
+ break;
+ }
+ } else
+ /*
+ * Defer adjusting the activation state
+ * until the end of the frame.
+ */
+ break;
}
+ /* Discarding this contact */
+ if (!nd->confidence)
+ break;
+
/* emit a normal (X, Y) for the first point only */
if (nd->id == 0) {
/*
@@ -227,8 +638,15 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
input_event(input, EV_ABS, ABS_X, nd->x);
input_event(input, EV_ABS, ABS_Y, nd->y);
}
+
+ /* Emit MT events */
input_event(input, EV_ABS, ABS_MT_POSITION_X, nd->x);
input_event(input, EV_ABS, ABS_MT_POSITION_Y, nd->y);
+
+ /*
+ * Translate from height and width to size
+ * and orientation.
+ */
if (nd->w > nd->h) {
input_event(input, EV_ABS,
ABS_MT_ORIENTATION, 1);
@@ -248,12 +666,88 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
break;
case HID_DG_CONTACTCOUNT: /* End of a multitouch group */
- if (!nd->reading_mt)
+ if (!nd->reading_mt) /* Just to be sure */
break;
nd->reading_mt = 0;
- if (nd->first_contact_touch) {
+
+ /*
+ * Activation state machine logic:
+ *
+ * Fundamental states:
+ * state > 0: Inactive
+ * state <= 0: Active
+ * state < -deactivate_slack:
+ * Pen termination of touch
+ *
+ * Specific values of interest
+ * state == activate_slack
+ * no valid input since the last reset
+ *
+ * state == 0
+ * general operational state
+ *
+ * state == -deactivate_slack
+ * read sufficient empty frames to accept
+ * the end of input and reset
+ */
+
+ if (nd->act_state > 0) { /* Currently inactive */
+ if (value)
+ /*
+ * Consider each live contact as
+ * evidence of intentional activity.
+ */
+ nd->act_state = (nd->act_state > value)
+ ? nd->act_state - value
+ : 0;
+ else
+ /*
+ * Empty frame before we hit the
+ * activity threshold, reset.
+ */
+ nd->act_state = nd->activate_slack;
+
+ /*
+ * Entered this block inactive and no
+ * coordinates sent this frame, so hold off
+ * on button state.
+ */
+ break;
+ } else { /* Currently active */
+ if (value && nd->act_state >=
+ nd->deactivate_slack)
+ /*
+ * Live point: clear accumulated
+ * deactivation count.
+ */
+ nd->act_state = 0;
+ else if (nd->act_state <= nd->deactivate_slack)
+ /*
+ * We've consumed the deactivation
+ * slack, time to deactivate and reset.
+ */
+ nd->act_state =
+ nd->activate_slack;
+ else { /* Move towards deactivation */
+ nd->act_state--;
+ break;
+ }
+ }
+
+ if (nd->first_contact_touch && nd->act_state <= 0) {
+ /*
+ * Check to see if we're ready to start
+ * emitting touch events.
+ *
+ * Note: activation slack will decrease over
+ * the course of the frame, and it will be
+ * inconsistent from the start to the end of
+ * the frame. However if the frame starts
+ * with slack, first_contact_touch will still
+ * be 0 and we will not get to this point.
+ */
input_report_key(input, BTN_TOOL_DOUBLETAP, 1);
input_report_key(input, BTN_TOUCH, 1);
} else {
@@ -263,7 +757,7 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
break;
default:
- /* fallback to the generic hidinput handling */
+ /* fall-back to the generic hidinput handling */
return 0;
}
}
@@ -293,6 +787,16 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
nd->reading_mt = 0;
+ nd->min_width = 0;
+ nd->min_height = 0;
+ nd->activate_slack = activate_slack;
+ nd->act_state = activate_slack;
+ nd->deactivate_slack = -deactivate_slack;
+ nd->sensor_logical_width = 0;
+ nd->sensor_logical_height = 0;
+ nd->sensor_physical_width = 0;
+ nd->sensor_physical_height = 0;
+
hid_set_drvdata(hdev, nd);
ret = hid_parse(hdev);
@@ -344,6 +848,8 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (report)
usbhid_submit_report(hdev, report, USB_DIR_OUT);
+ ret = sysfs_create_group(&hdev->dev.kobj,
+ &ntrig_attribute_group);
return 0;
err_free:
@@ -353,6 +859,8 @@ err_free:
static void ntrig_remove(struct hid_device *hdev)
{
+ sysfs_remove_group(&hdev->dev.kobj,
+ &ntrig_attribute_group);
hid_hw_stop(hdev);
kfree(hid_get_drvdata(hdev));
}
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
new file mode 100644
index 0000000..7aabf65
--- /dev/null
+++ b/drivers/hid/hid-picolcd.c
@@ -0,0 +1,2631 @@
+/***************************************************************************
+ * Copyright (C) 2010 by Bruno Prémont <bonbons@linux-vserver.org> *
+ * *
+ * Based on Logitech G13 driver (v0.4) *
+ * Copyright (C) 2009 by Rick L. Vinyard, Jr. <rvinyard@cs.nmsu.edu> *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, version 2 of the License. *
+ * *
+ * This driver is distributed in the hope that it will be useful, but *
+ * WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
+ * General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this software. If not see <http://www.gnu.org/licenses/>. *
+ ***************************************************************************/
+
+#include <linux/hid.h>
+#include <linux/hid-debug.h>
+#include <linux/input.h>
+#include "hid-ids.h"
+#include "usbhid/usbhid.h"
+#include <linux/usb.h>
+
+#include <linux/fb.h>
+#include <linux/vmalloc.h>
+#include <linux/backlight.h>
+#include <linux/lcd.h>
+
+#include <linux/leds.h>
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#include <linux/completion.h>
+#include <linux/uaccess.h>
+
+#define PICOLCD_NAME "PicoLCD (graphic)"
+
+/* Report numbers */
+#define REPORT_ERROR_CODE 0x10 /* LCD: IN[16] */
+#define ERR_SUCCESS 0x00
+#define ERR_PARAMETER_MISSING 0x01
+#define ERR_DATA_MISSING 0x02
+#define ERR_BLOCK_READ_ONLY 0x03
+#define ERR_BLOCK_NOT_ERASABLE 0x04
+#define ERR_BLOCK_TOO_BIG 0x05
+#define ERR_SECTION_OVERFLOW 0x06
+#define ERR_INVALID_CMD_LEN 0x07
+#define ERR_INVALID_DATA_LEN 0x08
+#define REPORT_KEY_STATE 0x11 /* LCD: IN[2] */
+#define REPORT_IR_DATA 0x21 /* LCD: IN[63] */
+#define REPORT_EE_DATA 0x32 /* LCD: IN[63] */
+#define REPORT_MEMORY 0x41 /* LCD: IN[63] */
+#define REPORT_LED_STATE 0x81 /* LCD: OUT[1] */
+#define REPORT_BRIGHTNESS 0x91 /* LCD: OUT[1] */
+#define REPORT_CONTRAST 0x92 /* LCD: OUT[1] */
+#define REPORT_RESET 0x93 /* LCD: OUT[2] */
+#define REPORT_LCD_CMD 0x94 /* LCD: OUT[63] */
+#define REPORT_LCD_DATA 0x95 /* LCD: OUT[63] */
+#define REPORT_LCD_CMD_DATA 0x96 /* LCD: OUT[63] */
+#define REPORT_EE_READ 0xa3 /* LCD: OUT[63] */
+#define REPORT_EE_WRITE 0xa4 /* LCD: OUT[63] */
+#define REPORT_ERASE_MEMORY 0xb2 /* LCD: OUT[2] */
+#define REPORT_READ_MEMORY 0xb3 /* LCD: OUT[3] */
+#define REPORT_WRITE_MEMORY 0xb4 /* LCD: OUT[63] */
+#define REPORT_SPLASH_RESTART 0xc1 /* LCD: OUT[1] */
+#define REPORT_EXIT_KEYBOARD 0xef /* LCD: OUT[2] */
+#define REPORT_VERSION 0xf1 /* LCD: IN[2],OUT[1] Bootloader: IN[2],OUT[1] */
+#define REPORT_BL_ERASE_MEMORY 0xf2 /* Bootloader: IN[36],OUT[4] */
+#define REPORT_BL_READ_MEMORY 0xf3 /* Bootloader: IN[36],OUT[4] */
+#define REPORT_BL_WRITE_MEMORY 0xf4 /* Bootloader: IN[36],OUT[36] */
+#define REPORT_DEVID 0xf5 /* LCD: IN[5], OUT[1] Bootloader: IN[5],OUT[1] */
+#define REPORT_SPLASH_SIZE 0xf6 /* LCD: IN[4], OUT[1] */
+#define REPORT_HOOK_VERSION 0xf7 /* LCD: IN[2], OUT[1] */
+#define REPORT_EXIT_FLASHER 0xff /* Bootloader: OUT[2] */
+
+#ifdef CONFIG_HID_PICOLCD_FB
+/* Framebuffer
+ *
+ * The PicoLCD uses a Topway LCD module of 256x64 pixels.
+ * This display area is tiled over 4 controllers with 8 tiles
+ * each. Each tile has 8x64 pixels, each data byte representing
+ * a 1-pixel-wide vertical line of the tile.
+ *
+ * The display can be updated at a tile granularity.
+ *
+ * Chip 1 Chip 2 Chip 3 Chip 4
+ * +----------------+----------------+----------------+----------------+
+ * | Tile 1 | Tile 1 | Tile 1 | Tile 1 |
+ * +----------------+----------------+----------------+----------------+
+ * | Tile 2 | Tile 2 | Tile 2 | Tile 2 |
+ * +----------------+----------------+----------------+----------------+
+ * ...
+ * +----------------+----------------+----------------+----------------+
+ * | Tile 8 | Tile 8 | Tile 8 | Tile 8 |
+ * +----------------+----------------+----------------+----------------+
+ */
+#define PICOLCDFB_NAME "picolcdfb"
+#define PICOLCDFB_WIDTH (256)
+#define PICOLCDFB_HEIGHT (64)
+#define PICOLCDFB_SIZE (PICOLCDFB_WIDTH * PICOLCDFB_HEIGHT / 8)
+
+#define PICOLCDFB_UPDATE_RATE_LIMIT 10
+#define PICOLCDFB_UPDATE_RATE_DEFAULT 2
+
+/* Framebuffer visual structures */
+static const struct fb_fix_screeninfo picolcdfb_fix = {
+ .id = PICOLCDFB_NAME,
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_MONO01,
+ .xpanstep = 0,
+ .ypanstep = 0,
+ .ywrapstep = 0,
+ .line_length = PICOLCDFB_WIDTH / 8,
+ .accel = FB_ACCEL_NONE,
+};
+
+static const struct fb_var_screeninfo picolcdfb_var = {
+ .xres = PICOLCDFB_WIDTH,
+ .yres = PICOLCDFB_HEIGHT,
+ .xres_virtual = PICOLCDFB_WIDTH,
+ .yres_virtual = PICOLCDFB_HEIGHT,
+ .width = 103,
+ .height = 26,
+ .bits_per_pixel = 1,
+ .grayscale = 1,
+};
+#endif /* CONFIG_HID_PICOLCD_FB */
+
+/* Input device
+ *
+ * The PicoLCD has an IR receiver header, a built-in keypad with 5 keys
+ * and a header for a 4x4 key matrix. The built-in keys are part of the matrix.
+ */
+static const unsigned short def_keymap[] = {
+ KEY_RESERVED, /* none */
+ KEY_BACK, /* col 4 + row 1 */
+ KEY_HOMEPAGE, /* col 3 + row 1 */
+ KEY_RESERVED, /* col 2 + row 1 */
+ KEY_RESERVED, /* col 1 + row 1 */
+ KEY_SCROLLUP, /* col 4 + row 2 */
+ KEY_OK, /* col 3 + row 2 */
+ KEY_SCROLLDOWN, /* col 2 + row 2 */
+ KEY_RESERVED, /* col 1 + row 2 */
+ KEY_RESERVED, /* col 4 + row 3 */
+ KEY_RESERVED, /* col 3 + row 3 */
+ KEY_RESERVED, /* col 2 + row 3 */
+ KEY_RESERVED, /* col 1 + row 3 */
+ KEY_RESERVED, /* col 4 + row 4 */
+ KEY_RESERVED, /* col 3 + row 4 */
+ KEY_RESERVED, /* col 2 + row 4 */
+ KEY_RESERVED, /* col 1 + row 4 */
+};
+#define PICOLCD_KEYS ARRAY_SIZE(def_keymap)
+
+/* Description of an in-progress IO operation, used for operations
+ * that trigger a response from the device */
+struct picolcd_pending {
+ struct hid_report *out_report;
+ struct hid_report *in_report;
+ struct completion ready;
+ int raw_size;
+ u8 raw_data[64];
+};
+
+/* Per device data structure */
+struct picolcd_data {
+ struct hid_device *hdev;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debug_reset;
+ struct dentry *debug_eeprom;
+ struct dentry *debug_flash;
+ struct mutex mutex_flash;
+ int addr_sz;
+#endif
+ u8 version[2];
+ unsigned short opmode_delay;
+ /* input stuff */
+ u8 pressed_keys[2];
+ struct input_dev *input_keys;
+ struct input_dev *input_cir;
+ unsigned short keycode[PICOLCD_KEYS];
+
+#ifdef CONFIG_HID_PICOLCD_FB
+ /* Framebuffer stuff */
+ u8 fb_update_rate;
+ u8 fb_bpp;
+ u8 *fb_vbitmap; /* local copy of what was sent to PicoLCD */
+ u8 *fb_bitmap; /* framebuffer */
+ struct fb_info *fb_info;
+ struct fb_deferred_io fb_defio;
+#endif /* CONFIG_HID_PICOLCD_FB */
+#ifdef CONFIG_HID_PICOLCD_LCD
+ struct lcd_device *lcd;
+ u8 lcd_contrast;
+#endif /* CONFIG_HID_PICOLCD_LCD */
+#ifdef CONFIG_HID_PICOLCD_BACKLIGHT
+ struct backlight_device *backlight;
+ u8 lcd_brightness;
+ u8 lcd_power;
+#endif /* CONFIG_HID_PICOLCD_BACKLIGHT */
+#ifdef CONFIG_HID_PICOLCD_LEDS
+ /* LED stuff */
+ u8 led_state;
+ struct led_classdev *led[8];
+#endif /* CONFIG_HID_PICOLCD_LEDS */
+
+ /* Housekeeping stuff */
+ spinlock_t lock;
+ struct mutex mutex;
+ struct picolcd_pending *pending;
+ int status;
+#define PICOLCD_BOOTLOADER 1
+#define PICOLCD_FAILED 2
+#define PICOLCD_READY_FB 4
+};
+
+
+/* Find a given report */
+#define picolcd_in_report(id, dev) picolcd_report(id, dev, HID_INPUT_REPORT)
+#define picolcd_out_report(id, dev) picolcd_report(id, dev, HID_OUTPUT_REPORT)
+
+static struct hid_report *picolcd_report(int id, struct hid_device *hdev, int dir)
+{
+ struct list_head *feature_report_list = &hdev->report_enum[dir].report_list;
+ struct hid_report *report = NULL;
+
+ list_for_each_entry(report, feature_report_list, list) {
+ if (report->id == id)
+ return report;
+ }
+ dev_warn(&hdev->dev, "No report with id 0x%x found\n", id);
+ return NULL;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void picolcd_debug_out_report(struct picolcd_data *data,
+ struct hid_device *hdev, struct hid_report *report);
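+/*
+ * Wrap usbhid_submit_report() so every outgoing report is also traced
+ * via picolcd_debug_out_report(); the inner call in the macro body is
+ * not re-expanded by the preprocessor, so it invokes the real function.
+ */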
+#define usbhid_submit_report(a, b, c) \
+ do { \
+ picolcd_debug_out_report(hid_get_drvdata(a), a, b); \
+ usbhid_submit_report(a, b, c); \
+ } while (0)
+#endif
+
+/* Submit a report and wait for a reply from the device - if the device
+ * fades away or does not respond in time, return NULL */
+static struct picolcd_pending *picolcd_send_and_wait(struct hid_device *hdev,
+ int report_id, const u8 *raw_data, int size)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ struct picolcd_pending *work;
+ struct hid_report *report = picolcd_out_report(report_id, hdev);
+ unsigned long flags;
+ int i, j, k;
+
+ if (!report || !data)
+ return NULL;
+ if (data->status & PICOLCD_FAILED)
+ return NULL;
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return NULL;
+
+ init_completion(&work->ready);
+ work->out_report = report;
+ work->in_report = NULL;
+ work->raw_size = 0;
+
+ mutex_lock(&data->mutex);
+ spin_lock_irqsave(&data->lock, flags);
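+ /*
+ * Copy the raw payload into the output report fields,
+ * zero-padding up to the full report size.
+ */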
+ for (i = k = 0; i < report->maxfield; i++)
+ for (j = 0; j < report->field[i]->report_count; j++) {
+ hid_set_field(report->field[i], j, k < size ? raw_data[k] : 0);
+ k++;
+ }
+ data->pending = work;
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ wait_for_completion_interruptible_timeout(&work->ready, HZ*2);
+ spin_lock_irqsave(&data->lock, flags);
+ data->pending = NULL;
+ spin_unlock_irqrestore(&data->lock, flags);
+ mutex_unlock(&data->mutex);
+ return work;
+}
+
+#ifdef CONFIG_HID_PICOLCD_FB
+/* Send a given tile to PicoLCD */
+static int picolcd_fb_send_tile(struct hid_device *hdev, int chip, int tile)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ struct hid_report *report1 = picolcd_out_report(REPORT_LCD_CMD_DATA, hdev);
+ struct hid_report *report2 = picolcd_out_report(REPORT_LCD_DATA, hdev);
+ unsigned long flags;
+ u8 *tdata;
+ int i;
+
+ if (!report1 || report1->maxfield != 1 || !report2 || report2->maxfield != 1)
+ return -ENODEV;
+
+ spin_lock_irqsave(&data->lock, flags);
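+ /*
+ * report1 carries the chip/tile addressing command plus the first
+ * 32 data bytes of the tile; report2 carries the remaining 32 bytes.
+ */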
+ hid_set_field(report1->field[0], 0, chip << 2);
+ hid_set_field(report1->field[0], 1, 0x02);
+ hid_set_field(report1->field[0], 2, 0x00);
+ hid_set_field(report1->field[0], 3, 0x00);
+ hid_set_field(report1->field[0], 4, 0xb8 | tile);
+ hid_set_field(report1->field[0], 5, 0x00);
+ hid_set_field(report1->field[0], 6, 0x00);
+ hid_set_field(report1->field[0], 7, 0x40);
+ hid_set_field(report1->field[0], 8, 0x00);
+ hid_set_field(report1->field[0], 9, 0x00);
+ hid_set_field(report1->field[0], 10, 32);
+
+ hid_set_field(report2->field[0], 0, (chip << 2) | 0x01);
+ hid_set_field(report2->field[0], 1, 0x00);
+ hid_set_field(report2->field[0], 2, 0x00);
+ hid_set_field(report2->field[0], 3, 32);
+
+ tdata = data->fb_vbitmap + (tile * 4 + chip) * 64;
+ for (i = 0; i < 64; i++)
+ if (i < 32)
+ hid_set_field(report1->field[0], 11 + i, tdata[i]);
+ else
+ hid_set_field(report2->field[0], 4 + i - 32, tdata[i]);
+
+ usbhid_submit_report(data->hdev, report1, USB_DIR_OUT);
+ usbhid_submit_report(data->hdev, report2, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return 0;
+}
+
+/* Translate a single tile */
+static int picolcd_fb_update_tile(u8 *vbitmap, const u8 *bitmap, int bpp,
+ int chip, int tile)
+{
+ int i, b, changed = 0;
+ u8 tdata[64];
+ u8 *vdata = vbitmap + (tile * 4 + chip) * 64;
+
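+ /*
+ * Repack horizontal framebuffer rows into the vertical byte layout
+ * used by the display: each tdata byte holds one 8-pixel column of
+ * the 64x8 tile, bit 0 taken from row 0 and bit 7 from row 7.
+ */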
+ if (bpp == 1) {
+ for (b = 7; b >= 0; b--) {
+ const u8 *bdata = bitmap + tile * 256 + chip * 8 + b * 32;
+ for (i = 0; i < 64; i++) {
+ tdata[i] <<= 1;
+ tdata[i] |= (bdata[i/8] >> (7 - i % 8)) & 0x01;
+ }
+ }
+ } else if (bpp == 8) {
+ for (b = 7; b >= 0; b--) {
+ const u8 *bdata = bitmap + (tile * 256 + chip * 8 + b * 32) * 8;
+ for (i = 0; i < 64; i++) {
+ tdata[i] <<= 1;
+ tdata[i] |= (bdata[i] & 0x80) ? 0x01 : 0x00;
+ }
+ }
+ } else {
+ /* Oops, we should never get here! */
+ WARN_ON(1);
+ return 0;
+ }
+
+ for (i = 0; i < 64; i++)
+ if (tdata[i] != vdata[i]) {
+ changed = 1;
+ vdata[i] = tdata[i];
+ }
+ return changed;
+}
+
+/* Reconfigure LCD display */
+static int picolcd_fb_reset(struct picolcd_data *data, int clear)
+{
+ struct hid_report *report = picolcd_out_report(REPORT_LCD_CMD, data->hdev);
+ int i, j;
+ unsigned long flags;
+ static const u8 mapcmd[8] = { 0x00, 0x02, 0x00, 0x64, 0x3f, 0x00, 0x64, 0xc0 };
+
+ if (!report || report->maxfield != 1)
+ return -ENODEV;
+
+ spin_lock_irqsave(&data->lock, flags);
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < report->field[0]->maxusage; j++)
+ if (j == 0)
+ hid_set_field(report->field[0], j, i << 2);
+ else if (j < sizeof(mapcmd))
+ hid_set_field(report->field[0], j, mapcmd[j]);
+ else
+ hid_set_field(report->field[0], j, 0);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ }
+
+ data->status |= PICOLCD_READY_FB;
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ if (data->fb_bitmap) {
+ if (clear) {
+ memset(data->fb_vbitmap, 0xff, PICOLCDFB_SIZE);
+ memset(data->fb_bitmap, 0, PICOLCDFB_SIZE*data->fb_bpp);
+ } else {
+ /* invert 1 byte in each tile to force resend */
+ for (i = 0; i < PICOLCDFB_SIZE; i += 64)
+ data->fb_vbitmap[i] = ~data->fb_vbitmap[i];
+ }
+ }
+
+ /* schedule first output of framebuffer */
+ if (data->fb_info)
+ schedule_delayed_work(&data->fb_info->deferred_work, 0);
+
+ return 0;
+}
+
+/* Update fb_vbitmap from the screen_base and send changed tiles to device */
+static void picolcd_fb_update(struct picolcd_data *data)
+{
+ int chip, tile, n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->lock, flags);
+ if (!(data->status & PICOLCD_READY_FB)) {
+ spin_unlock_irqrestore(&data->lock, flags);
+ picolcd_fb_reset(data, 0);
+ } else {
+ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ /*
+ * Translate the framebuffer into the format needed by the PicoLCD.
+ * See display layout above.
+ * Do this one tile after the other and push those tiles that changed.
+ *
+ * Wait for our IO to complete as otherwise we might flood the queue!
+ */
+ n = 0;
+ for (chip = 0; chip < 4; chip++)
+ for (tile = 0; tile < 8; tile++)
+ if (picolcd_fb_update_tile(data->fb_vbitmap,
+ data->fb_bitmap, data->fb_bpp, chip, tile)) {
+ n += 2;
+ if (n >= HID_OUTPUT_FIFO_SIZE / 2) {
+ usbhid_wait_io(data->hdev);
+ n = 0;
+ }
+ picolcd_fb_send_tile(data->hdev, chip, tile);
+ }
+ if (n)
+ usbhid_wait_io(data->hdev);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ if (!info->par)
+ return;
+ sys_fillrect(info, rect);
+
+ schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ if (!info->par)
+ return;
+ sys_copyarea(info, area);
+
+ schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ if (!info->par)
+ return;
+ sys_imageblit(info, image);
+
+ schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/*
+ * This is the slow path from userspace: the caller can seek and write to
+ * the fb, and it is inefficient to do anything less than a full screen draw.
+ */
+static ssize_t picolcd_fb_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t ret;
+ if (!info->par)
+ return -ENODEV;
+ ret = fb_sys_write(info, buf, count, ppos);
+ if (ret >= 0)
+ schedule_delayed_work(&info->deferred_work, 0);
+ return ret;
+}
+
+static int picolcd_fb_blank(int blank, struct fb_info *info)
+{
+ if (!info->par)
+ return -ENODEV;
+ /* We let fb notification do this for us via lcd/backlight device */
+ return 0;
+}
+
+static void picolcd_fb_destroy(struct fb_info *info)
+{
+ struct picolcd_data *data = info->par;
+ info->par = NULL;
+ if (data)
+ data->fb_info = NULL;
+ fb_deferred_io_cleanup(info);
+ framebuffer_release(info);
+}
+
+static int picolcd_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ __u32 bpp = var->bits_per_pixel;
+ __u32 activate = var->activate;
+
+ /* only allow 1/8 bit depth (8-bit is grayscale) */
+ *var = picolcdfb_var;
+ var->activate = activate;
+ if (bpp >= 8)
+ var->bits_per_pixel = 8;
+ else
+ var->bits_per_pixel = 1;
+ return 0;
+}
+
+static int picolcd_set_par(struct fb_info *info)
+{
+ struct picolcd_data *data = info->par;
+ u8 *o_fb, *n_fb;
+ if (info->var.bits_per_pixel == data->fb_bpp)
+ return 0;
+ /* switch between 1/8 bit depths */
+ if (info->var.bits_per_pixel != 1 && info->var.bits_per_pixel != 8)
+ return -EINVAL;
+
+ o_fb = data->fb_bitmap;
+ n_fb = vmalloc(PICOLCDFB_SIZE*info->var.bits_per_pixel);
+ if (!n_fb)
+ return -ENOMEM;
+
+ fb_deferred_io_cleanup(info);
+ /* translate FB content to new bits-per-pixel */
+ if (info->var.bits_per_pixel == 1) {
+ int i, b;
+ for (i = 0; i < PICOLCDFB_SIZE; i++) {
+ u8 p = 0;
+ for (b = 0; b < 8; b++) {
+ p <<= 1;
+ p |= o_fb[i*8+b] ? 0x01 : 0x00;
+ }
+ n_fb[i] = p; /* store the packed monochrome byte */
+ }
+ info->fix.visual = FB_VISUAL_MONO01;
+ info->fix.line_length = PICOLCDFB_WIDTH / 8;
+ } else {
+ int i;
+ for (i = 0; i < PICOLCDFB_SIZE * 8; i++)
+ n_fb[i] = o_fb[i/8] & (0x01 << (7 - i % 8)) ? 0xff : 0x00;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ info->fix.line_length = PICOLCDFB_WIDTH;
+ }
+
+ data->fb_bitmap = n_fb;
+ data->fb_bpp = info->var.bits_per_pixel;
+ info->screen_base = (char __force __iomem *)n_fb;
+ info->fix.smem_start = (unsigned long)n_fb;
+ info->fix.smem_len = PICOLCDFB_SIZE*data->fb_bpp;
+ fb_deferred_io_init(info);
+ vfree(o_fb);
+ return 0;
+}
+
+/* Note this can't be const because of struct fb_info definition */
+static struct fb_ops picolcdfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_destroy = picolcd_fb_destroy,
+ .fb_read = fb_sys_read,
+ .fb_write = picolcd_fb_write,
+ .fb_blank = picolcd_fb_blank,
+ .fb_fillrect = picolcd_fb_fillrect,
+ .fb_copyarea = picolcd_fb_copyarea,
+ .fb_imageblit = picolcd_fb_imageblit,
+ .fb_check_var = picolcd_fb_check_var,
+ .fb_set_par = picolcd_set_par,
+};
+
+
+/* Callback from deferred IO workqueue */
+static void picolcd_fb_deferred_io(struct fb_info *info, struct list_head *pagelist)
+{
+ picolcd_fb_update(info->par);
+}
+
+static const struct fb_deferred_io picolcd_fb_defio = {
+ .delay = HZ / PICOLCDFB_UPDATE_RATE_DEFAULT,
+ .deferred_io = picolcd_fb_deferred_io,
+};
+
+
+/*
+ * The "fb_update_rate" sysfs attribute
+ */
+static ssize_t picolcd_fb_update_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ unsigned i, fb_update_rate = data->fb_update_rate;
+ size_t ret = 0;
+
+ for (i = 1; i <= PICOLCDFB_UPDATE_RATE_LIMIT; i++)
+ if (ret >= PAGE_SIZE)
+ break;
+ else if (i == fb_update_rate)
+ ret += snprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i);
+ else
+ ret += snprintf(buf+ret, PAGE_SIZE-ret, "%u ", i);
+ if (ret > 0)
+ buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n';
+ return ret;
+}
+
+static ssize_t picolcd_fb_update_rate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ int i;
+ unsigned u;
+
+ if (count < 1 || count > 10)
+ return -EINVAL;
+
+ i = sscanf(buf, "%u", &u);
+ if (i != 1)
+ return -EINVAL;
+
+ if (u > PICOLCDFB_UPDATE_RATE_LIMIT)
+ return -ERANGE;
+ else if (u == 0)
+ u = PICOLCDFB_UPDATE_RATE_DEFAULT;
+
+ data->fb_update_rate = u;
+ data->fb_defio.delay = HZ / data->fb_update_rate;
+ return count;
+}
+
+static DEVICE_ATTR(fb_update_rate, 0666, picolcd_fb_update_rate_show,
+ picolcd_fb_update_rate_store);
+
+/* initialize Framebuffer device */
+static int picolcd_init_framebuffer(struct picolcd_data *data)
+{
+ struct device *dev = &data->hdev->dev;
+ struct fb_info *info = NULL;
+ int error = -ENOMEM;
+ u8 *fb_vbitmap = NULL;
+ u8 *fb_bitmap = NULL;
+
+ fb_bitmap = vmalloc(PICOLCDFB_SIZE*picolcdfb_var.bits_per_pixel);
+ if (fb_bitmap == NULL) {
+ dev_err(dev, "can't get a free page for framebuffer\n");
+ goto err_nomem;
+ }
+
+ fb_vbitmap = kmalloc(PICOLCDFB_SIZE, GFP_KERNEL);
+ if (fb_vbitmap == NULL) {
+ dev_err(dev, "can't alloc vbitmap image buffer\n");
+ goto err_nomem;
+ }
+
+ data->fb_update_rate = PICOLCDFB_UPDATE_RATE_DEFAULT;
+ data->fb_defio = picolcd_fb_defio;
+ info = framebuffer_alloc(0, dev);
+ if (info == NULL) {
+ dev_err(dev, "failed to allocate a framebuffer\n");
+ goto err_nomem;
+ }
+
+ info->fbdefio = &data->fb_defio;
+ info->screen_base = (char __force __iomem *)fb_bitmap;
+ info->fbops = &picolcdfb_ops;
+ info->var = picolcdfb_var;
+ info->fix = picolcdfb_fix;
+ info->fix.smem_len = PICOLCDFB_SIZE;
+ info->fix.smem_start = (unsigned long)fb_bitmap;
+ info->par = data;
+ info->flags = FBINFO_FLAG_DEFAULT;
+
+ data->fb_vbitmap = fb_vbitmap;
+ data->fb_bitmap = fb_bitmap;
+ data->fb_bpp = picolcdfb_var.bits_per_pixel;
+ error = picolcd_fb_reset(data, 1);
+ if (error) {
+ dev_err(dev, "failed to configure display\n");
+ goto err_cleanup;
+ }
+ error = device_create_file(dev, &dev_attr_fb_update_rate);
+ if (error) {
+ dev_err(dev, "failed to create sysfs attributes\n");
+ goto err_cleanup;
+ }
+ data->fb_info = info;
+ error = register_framebuffer(info);
+ if (error) {
+ dev_err(dev, "failed to register framebuffer\n");
+ goto err_sysfs;
+ }
+ fb_deferred_io_init(info);
+ /* schedule first output of framebuffer */
+ schedule_delayed_work(&info->deferred_work, 0);
+ return 0;
+
+err_sysfs:
+ device_remove_file(dev, &dev_attr_fb_update_rate);
+err_cleanup:
+ data->fb_vbitmap = NULL;
+ data->fb_bitmap = NULL;
+ data->fb_bpp = 0;
+ data->fb_info = NULL;
+
+err_nomem:
+ framebuffer_release(info);
+ vfree(fb_bitmap);
+ kfree(fb_vbitmap);
+ return error;
+}
+
+static void picolcd_exit_framebuffer(struct picolcd_data *data)
+{
+ struct fb_info *info = data->fb_info;
+ u8 *fb_vbitmap = data->fb_vbitmap;
+ u8 *fb_bitmap = data->fb_bitmap;
+
+ if (!info)
+ return;
+
+ data->fb_vbitmap = NULL;
+ data->fb_bitmap = NULL;
+ data->fb_bpp = 0;
+ data->fb_info = NULL;
+ device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate);
+ fb_deferred_io_cleanup(info);
+ unregister_framebuffer(info);
+ vfree(fb_bitmap);
+ kfree(fb_vbitmap);
+}
+
+#define picolcd_fbinfo(d) ((d)->fb_info)
+#else
+static inline int picolcd_fb_reset(struct picolcd_data *data, int clear)
+{
+ return 0;
+}
+static inline int picolcd_init_framebuffer(struct picolcd_data *data)
+{
+ return 0;
+}
+static inline void picolcd_exit_framebuffer(struct picolcd_data *data)
+{
+}
+#define picolcd_fbinfo(d) NULL
+#endif /* CONFIG_HID_PICOLCD_FB */
+
+#ifdef CONFIG_HID_PICOLCD_BACKLIGHT
+/*
+ * backlight class device
+ */
+static int picolcd_get_brightness(struct backlight_device *bdev)
+{
+ struct picolcd_data *data = bl_get_data(bdev);
+ return data->lcd_brightness;
+}
+
+static int picolcd_set_brightness(struct backlight_device *bdev)
+{
+ struct picolcd_data *data = bl_get_data(bdev);
+ struct hid_report *report = picolcd_out_report(REPORT_BRIGHTNESS, data->hdev);
+ unsigned long flags;
+
+ if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
+ return -ENODEV;
+
+ data->lcd_brightness = bdev->props.brightness & 0x0ff;
+ data->lcd_power = bdev->props.power;
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report->field[0], 0, data->lcd_power == FB_BLANK_UNBLANK ? data->lcd_brightness : 0);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return 0;
+}
+
+static int picolcd_check_bl_fb(struct backlight_device *bdev, struct fb_info *fb)
+{
+ return fb && fb == picolcd_fbinfo((struct picolcd_data *)bl_get_data(bdev));
+}
+
+static const struct backlight_ops picolcd_blops = {
+ .update_status = picolcd_set_brightness,
+ .get_brightness = picolcd_get_brightness,
+ .check_fb = picolcd_check_bl_fb,
+};
+
+static int picolcd_init_backlight(struct picolcd_data *data, struct hid_report *report)
+{
+ struct device *dev = &data->hdev->dev;
+ struct backlight_device *bdev;
+ struct backlight_properties props;
+ if (!report)
+ return -ENODEV;
+ if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
+ report->field[0]->report_size != 8) {
+ dev_err(dev, "unsupported BRIGHTNESS report");
+ return -EINVAL;
+ }
+
+ memset(&props, 0, sizeof(props));
+ props.max_brightness = 0xff;
+ bdev = backlight_device_register(dev_name(dev), dev, data,
+ &picolcd_blops, &props);
+ if (IS_ERR(bdev)) {
+ dev_err(dev, "failed to register backlight\n");
+ return PTR_ERR(bdev);
+ }
+ bdev->props.brightness = 0xff;
+ data->lcd_brightness = 0xff;
+ data->backlight = bdev;
+ picolcd_set_brightness(bdev);
+ return 0;
+}
+
+static void picolcd_exit_backlight(struct picolcd_data *data)
+{
+ struct backlight_device *bdev = data->backlight;
+
+ data->backlight = NULL;
+ if (bdev)
+ backlight_device_unregister(bdev);
+}
+
+static inline int picolcd_resume_backlight(struct picolcd_data *data)
+{
+ if (!data->backlight)
+ return 0;
+ return picolcd_set_brightness(data->backlight);
+}
+
+#ifdef CONFIG_PM
+static void picolcd_suspend_backlight(struct picolcd_data *data)
+{
+ int bl_power = data->lcd_power;
+ if (!data->backlight)
+ return;
+
+ data->backlight->props.power = FB_BLANK_POWERDOWN;
+ picolcd_set_brightness(data->backlight);
+ data->lcd_power = data->backlight->props.power = bl_power;
+}
+#endif /* CONFIG_PM */
+#else
+static inline int picolcd_init_backlight(struct picolcd_data *data,
+ struct hid_report *report)
+{
+ return 0;
+}
+static inline void picolcd_exit_backlight(struct picolcd_data *data)
+{
+}
+static inline int picolcd_resume_backlight(struct picolcd_data *data)
+{
+ return 0;
+}
+static inline void picolcd_suspend_backlight(struct picolcd_data *data)
+{
+}
+#endif /* CONFIG_HID_PICOLCD_BACKLIGHT */
+
+#ifdef CONFIG_HID_PICOLCD_LCD
+/*
+ * lcd class device
+ */
+static int picolcd_get_contrast(struct lcd_device *ldev)
+{
+ struct picolcd_data *data = lcd_get_data(ldev);
+ return data->lcd_contrast;
+}
+
+static int picolcd_set_contrast(struct lcd_device *ldev, int contrast)
+{
+ struct picolcd_data *data = lcd_get_data(ldev);
+ struct hid_report *report = picolcd_out_report(REPORT_CONTRAST, data->hdev);
+ unsigned long flags;
+
+ if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
+ return -ENODEV;
+
+ data->lcd_contrast = contrast & 0x0ff;
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report->field[0], 0, data->lcd_contrast);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return 0;
+}
+
+static int picolcd_check_lcd_fb(struct lcd_device *ldev, struct fb_info *fb)
+{
+ return fb && fb == picolcd_fbinfo((struct picolcd_data *)lcd_get_data(ldev));
+}
+
+static struct lcd_ops picolcd_lcdops = {
+ .get_contrast = picolcd_get_contrast,
+ .set_contrast = picolcd_set_contrast,
+ .check_fb = picolcd_check_lcd_fb,
+};
+
+static int picolcd_init_lcd(struct picolcd_data *data, struct hid_report *report)
+{
+ struct device *dev = &data->hdev->dev;
+ struct lcd_device *ldev;
+
+ if (!report)
+ return -ENODEV;
+ if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
+ report->field[0]->report_size != 8) {
+ dev_err(dev, "unsupported CONTRAST report");
+ return -EINVAL;
+ }
+
+ ldev = lcd_device_register(dev_name(dev), dev, data, &picolcd_lcdops);
+ if (IS_ERR(ldev)) {
+ dev_err(dev, "failed to register LCD\n");
+ return PTR_ERR(ldev);
+ }
+ ldev->props.max_contrast = 0x0ff;
+ data->lcd_contrast = 0xe5;
+ data->lcd = ldev;
+ picolcd_set_contrast(ldev, 0xe5);
+ return 0;
+}
+
+static void picolcd_exit_lcd(struct picolcd_data *data)
+{
+ struct lcd_device *ldev = data->lcd;
+
+ data->lcd = NULL;
+ if (ldev)
+ lcd_device_unregister(ldev);
+}
+
+static inline int picolcd_resume_lcd(struct picolcd_data *data)
+{
+ if (!data->lcd)
+ return 0;
+ return picolcd_set_contrast(data->lcd, data->lcd_contrast);
+}
+#else
+static inline int picolcd_init_lcd(struct picolcd_data *data,
+ struct hid_report *report)
+{
+ return 0;
+}
+static inline void picolcd_exit_lcd(struct picolcd_data *data)
+{
+}
+static inline int picolcd_resume_lcd(struct picolcd_data *data)
+{
+ return 0;
+}
+#endif /* CONFIG_HID_PICOLCD_LCD */
+
+#ifdef CONFIG_HID_PICOLCD_LEDS
+/*
+ * LED class device
+ */
+static void picolcd_leds_set(struct picolcd_data *data)
+{
+ struct hid_report *report;
+ unsigned long flags;
+
+ if (!data->led[0])
+ return;
+ report = picolcd_out_report(REPORT_LED_STATE, data->hdev);
+ if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
+ return;
+
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report->field[0], 0, data->led_state);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void picolcd_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct device *dev;
+ struct hid_device *hdev;
+ struct picolcd_data *data;
+ int i, state = 0;
+
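+ /* The LED classdev's parent is the HID device; use it to recover
+ * our per-device data. */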
+ dev = led_cdev->dev->parent;
+ hdev = container_of(dev, struct hid_device, dev);
+ data = hid_get_drvdata(hdev);
+ for (i = 0; i < 8; i++) {
+ if (led_cdev != data->led[i])
+ continue;
+ state = (data->led_state >> i) & 1;
+ if (value == LED_OFF && state) {
+ data->led_state &= ~(1 << i);
+ picolcd_leds_set(data);
+ } else if (value != LED_OFF && !state) {
+ data->led_state |= 1 << i;
+ picolcd_leds_set(data);
+ }
+ break;
+ }
+}
+
+static enum led_brightness picolcd_led_get_brightness(struct led_classdev *led_cdev)
+{
+ struct device *dev;
+ struct hid_device *hdev;
+ struct picolcd_data *data;
+ int i, value = 0;
+
+ dev = led_cdev->dev->parent;
+ hdev = container_of(dev, struct hid_device, dev);
+ data = hid_get_drvdata(hdev);
+ for (i = 0; i < 8; i++)
+ if (led_cdev == data->led[i]) {
+ value = (data->led_state >> i) & 1;
+ break;
+ }
+ return value ? LED_FULL : LED_OFF;
+}
+
+static int picolcd_init_leds(struct picolcd_data *data, struct hid_report *report)
+{
+ struct device *dev = &data->hdev->dev;
+ struct led_classdev *led;
+ size_t name_sz = strlen(dev_name(dev)) + 8;
+ char *name;
+ int i, ret = 0;
+
+ if (!report)
+ return -ENODEV;
+ if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
+ report->field[0]->report_size != 8) {
+ dev_err(dev, "unsupported LED_STATE report");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 8; i++) {
+ led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+ if (!led) {
+ dev_err(dev, "can't allocate memory for LED %d\n", i);
+ ret = -ENOMEM;
+ goto err;
+ }
+ name = (void *)(&led[1]);
+ snprintf(name, name_sz, "%s::GPO%d", dev_name(dev), i);
+ led->name = name;
+ led->brightness = 0;
+ led->max_brightness = 1;
+ led->brightness_get = picolcd_led_get_brightness;
+ led->brightness_set = picolcd_led_set_brightness;
+
+ data->led[i] = led;
+ ret = led_classdev_register(dev, data->led[i]);
+ if (ret) {
+ data->led[i] = NULL;
+ kfree(led);
+ dev_err(dev, "can't register LED %d\n", i);
+ goto err;
+ }
+ }
+ return 0;
+err:
+ for (i = 0; i < 8; i++)
+ if (data->led[i]) {
+ led = data->led[i];
+ data->led[i] = NULL;
+ led_classdev_unregister(led);
+ kfree(led);
+ }
+ return ret;
+}
+
+static void picolcd_exit_leds(struct picolcd_data *data)
+{
+ struct led_classdev *led;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ led = data->led[i];
+ data->led[i] = NULL;
+ if (!led)
+ continue;
+ led_classdev_unregister(led);
+ kfree(led);
+ }
+}
+
+#else
+static inline int picolcd_init_leds(struct picolcd_data *data,
+ struct hid_report *report)
+{
+ return 0;
+}
+static inline void picolcd_exit_leds(struct picolcd_data *data)
+{
+}
+static inline int picolcd_leds_set(struct picolcd_data *data)
+{
+ return 0;
+}
+#endif /* CONFIG_HID_PICOLCD_LEDS */
+
+/*
+ * input class device
+ */
+static int picolcd_raw_keypad(struct picolcd_data *data,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ /*
+ * Keypad event
+ * First and second data bytes list currently pressed keys,
+ * 0x00 means no key, and at most 2 keys may be pressed at the same time
+ */
+ int i, j;
+
+ /* determine newly pressed keys */
+ for (i = 0; i < size; i++) {
+ unsigned int key_code;
+ if (raw_data[i] == 0)
+ continue;
+ for (j = 0; j < sizeof(data->pressed_keys); j++)
+ if (data->pressed_keys[j] == raw_data[i])
+ goto key_already_down;
+ for (j = 0; j < sizeof(data->pressed_keys); j++)
+ if (data->pressed_keys[j] == 0) {
+ data->pressed_keys[j] = raw_data[i];
+ break;
+ }
+ input_event(data->input_keys, EV_MSC, MSC_SCAN, raw_data[i]);
+ if (raw_data[i] < PICOLCD_KEYS)
+ key_code = data->keycode[raw_data[i]];
+ else
+ key_code = KEY_UNKNOWN;
+ if (key_code != KEY_UNKNOWN) {
+ dbg_hid(PICOLCD_NAME " got key press for %u:%d",
+ raw_data[i], key_code);
+ input_report_key(data->input_keys, key_code, 1);
+ }
+ input_sync(data->input_keys);
+key_already_down:
+ continue;
+ }
+
+ /* determine newly released keys */
+ for (j = 0; j < sizeof(data->pressed_keys); j++) {
+ unsigned int key_code;
+ if (data->pressed_keys[j] == 0)
+ continue;
+ for (i = 0; i < size; i++)
+ if (data->pressed_keys[j] == raw_data[i])
+ goto key_still_down;
+ input_event(data->input_keys, EV_MSC, MSC_SCAN, data->pressed_keys[j]);
+ if (data->pressed_keys[j] < PICOLCD_KEYS)
+ key_code = data->keycode[data->pressed_keys[j]];
+ else
+ key_code = KEY_UNKNOWN;
+ if (key_code != KEY_UNKNOWN) {
+ dbg_hid(PICOLCD_NAME " got key release for %u:%d",
+ data->pressed_keys[j], key_code);
+ input_report_key(data->input_keys, key_code, 0);
+ }
+ input_sync(data->input_keys);
+ data->pressed_keys[j] = 0;
+key_still_down:
+ continue;
+ }
+ return 1;
+}
+
+static int picolcd_raw_cir(struct picolcd_data *data,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ /* Need understanding of CIR data format to implement ... */
+ return 1;
+}
+
+static int picolcd_check_version(struct hid_device *hdev)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ struct picolcd_pending *verinfo;
+ int ret = 0;
+
+ if (!data)
+ return -ENODEV;
+
+ verinfo = picolcd_send_and_wait(hdev, REPORT_VERSION, NULL, 0);
+ if (!verinfo) {
+ dev_err(&hdev->dev, "no version response from PicoLCD");
+ return -ENODEV;
+ }
+
+ if (verinfo->raw_size == 2) {
+ data->version[0] = verinfo->raw_data[1];
+ data->version[1] = verinfo->raw_data[0];
+ if (data->status & PICOLCD_BOOTLOADER) {
+ dev_info(&hdev->dev, "PicoLCD, bootloader version %d.%d\n",
+ verinfo->raw_data[1], verinfo->raw_data[0]);
+ } else {
+ dev_info(&hdev->dev, "PicoLCD, firmware version %d.%d\n",
+ verinfo->raw_data[1], verinfo->raw_data[0]);
+ }
+ } else {
+ dev_err(&hdev->dev, "confused, got unexpected version response from PicoLCD\n");
+ ret = -EINVAL;
+ }
+ kfree(verinfo);
+ return ret;
+}
+
+/*
+ * Reset our device and wait for answer to VERSION request
+ */
+static int picolcd_reset(struct hid_device *hdev)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ struct hid_report *report = picolcd_out_report(REPORT_RESET, hdev);
+ unsigned long flags;
+ int error;
+
+ if (!data || !report || report->maxfield != 1)
+ return -ENODEV;
+
+ spin_lock_irqsave(&data->lock, flags);
+ if (hdev->product == USB_DEVICE_ID_PICOLCD_BOOTLOADER)
+ data->status |= PICOLCD_BOOTLOADER;
+
+ /* perform the reset */
+ hid_set_field(report->field[0], 0, 1);
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ error = picolcd_check_version(hdev);
+ if (error)
+ return error;
+
+ picolcd_resume_lcd(data);
+ picolcd_resume_backlight(data);
+#ifdef CONFIG_HID_PICOLCD_FB
+ if (data->fb_info)
+ schedule_delayed_work(&data->fb_info->deferred_work, 0);
+#endif /* CONFIG_HID_PICOLCD_FB */
+
+ picolcd_leds_set(data);
+ return 0;
+}
+
+/*
+ * The "operation_mode" sysfs attribute
+ */
+static ssize_t picolcd_operation_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+
+ if (data->status & PICOLCD_BOOTLOADER)
+ return snprintf(buf, PAGE_SIZE, "[bootloader] lcd\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "bootloader [lcd]\n");
+}
+
+static ssize_t picolcd_operation_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ struct hid_report *report = NULL;
+ size_t cnt = count;
+ int timeout = data->opmode_delay;
+ unsigned long flags;
+
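+ /*
+ * Accept "lcd" or "bootloader" (optionally followed by a trailing
+ * newline) and only prepare a mode-switch report when the device is
+ * currently in the other mode; the delay is sent as two bytes, low
+ * byte first.
+ */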
+ if (cnt >= 3 && strncmp("lcd", buf, 3) == 0) {
+ if (data->status & PICOLCD_BOOTLOADER)
+ report = picolcd_out_report(REPORT_EXIT_FLASHER, data->hdev);
+ buf += 3;
+ cnt -= 3;
+ } else if (cnt >= 10 && strncmp("bootloader", buf, 10) == 0) {
+ if (!(data->status & PICOLCD_BOOTLOADER))
+ report = picolcd_out_report(REPORT_EXIT_KEYBOARD, data->hdev);
+ buf += 10;
+ cnt -= 10;
+ }
+ if (!report)
+ return -EINVAL;
+
+ while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
+ cnt--;
+ if (cnt != 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report->field[0], 0, timeout & 0xff);
+ hid_set_field(report->field[0], 1, (timeout >> 8) & 0xff);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return count;
+}
+
+static DEVICE_ATTR(operation_mode, 0644, picolcd_operation_mode_show,
+ picolcd_operation_mode_store);
+
+/*
+ * The "operation_mode_delay" sysfs attribute
+ */
+static ssize_t picolcd_operation_mode_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%hu\n", data->opmode_delay);
+}
+
+static ssize_t picolcd_operation_mode_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ unsigned u;
+ if (sscanf(buf, "%u", &u) != 1)
+ return -EINVAL;
+ if (u > 30000)
+ return -EINVAL;
+ else
+ data->opmode_delay = u;
+ return count;
+}
+
+static DEVICE_ATTR(operation_mode_delay, 0644, picolcd_operation_mode_delay_show,
+ picolcd_operation_mode_delay_store);
+
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * The "reset" file
+ */
+static int picolcd_debug_reset_show(struct seq_file *f, void *p)
+{
+ if (picolcd_fbinfo((struct picolcd_data *)f->private))
+ seq_printf(f, "all fb\n");
+ else
+ seq_printf(f, "all\n");
+ return 0;
+}
+
+static int picolcd_debug_reset_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, picolcd_debug_reset_show, inode->i_private);
+}
+
+static ssize_t picolcd_debug_reset_write(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct picolcd_data *data = ((struct seq_file *)f->private_data)->private;
+ char buf[32];
+ size_t cnt = min(count, sizeof(buf)-1);
+ if (copy_from_user(buf, user_buf, cnt))
+ return -EFAULT;
+
+ while (cnt > 0 && (buf[cnt-1] == ' ' || buf[cnt-1] == '\n'))
+ cnt--;
+ buf[cnt] = '\0';
+ if (strcmp(buf, "all") == 0) {
+ picolcd_reset(data->hdev);
+ picolcd_fb_reset(data, 1);
+ } else if (strcmp(buf, "fb") == 0) {
+ picolcd_fb_reset(data, 1);
+ } else {
+ return -EINVAL;
+ }
+ return count;
+}
+
+static const struct file_operations picolcd_debug_reset_fops = {
+ .owner = THIS_MODULE,
+ .open = picolcd_debug_reset_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = picolcd_debug_reset_write,
+ .release = single_release,
+};
+
+/*
+ * The "eeprom" file
+ */
+static int picolcd_debug_eeprom_open(struct inode *i, struct file *f)
+{
+ f->private_data = i->i_private;
+ return 0;
+}
+
+static ssize_t picolcd_debug_eeprom_read(struct file *f, char __user *u,
+ size_t s, loff_t *off)
+{
+ struct picolcd_data *data = f->private_data;
+ struct picolcd_pending *resp;
+ u8 raw_data[3];
+ ssize_t ret = -EIO;
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0x0ff)
+ return 0;
+
+ /* prepare buffer with info about what we want to read (addr & len) */
+ raw_data[0] = *off & 0xff;
+ raw_data[1] = (*off >> 8) & 0xff;
+ raw_data[2] = s < 20 ? s : 20;
+ if (*off + raw_data[2] > 0xff)
+ raw_data[2] = 0x100 - *off;
+ resp = picolcd_send_and_wait(data->hdev, REPORT_EE_READ, raw_data,
+ sizeof(raw_data));
+ if (!resp)
+ return -EIO;
+
+ if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) {
+ /* successful read :) */
+ ret = resp->raw_data[2];
+ if (ret > s)
+ ret = s;
+ if (copy_to_user(u, resp->raw_data+3, ret))
+ ret = -EFAULT;
+ else
+ *off += ret;
+ } /* anything else is some kind of IO error */
+
+ kfree(resp);
+ return ret;
+}
+
+static ssize_t picolcd_debug_eeprom_write(struct file *f, const char __user *u,
+ size_t s, loff_t *off)
+{
+ struct picolcd_data *data = f->private_data;
+ struct picolcd_pending *resp;
+ ssize_t ret = -EIO;
+ u8 raw_data[23];
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0x0ff)
+ return -ENOSPC;
+
+ memset(raw_data, 0, sizeof(raw_data));
+ raw_data[0] = *off & 0xff;
+ raw_data[1] = (*off >> 8) & 0xff;
+ raw_data[2] = s < 20 ? s : 20;
+ if (*off + raw_data[2] > 0xff)
+ raw_data[2] = 0x100 - *off;
+
+ if (copy_from_user(raw_data+3, u, raw_data[2]))
+ return -EFAULT;
+ resp = picolcd_send_and_wait(data->hdev, REPORT_EE_WRITE, raw_data,
+ sizeof(raw_data));
+
+ if (!resp)
+ return -EIO;
+
+ if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) {
+ /* check if written data matches */
+ if (memcmp(raw_data, resp->raw_data, 3+raw_data[2]) == 0) {
+ *off += raw_data[2];
+ ret = raw_data[2];
+ }
+ }
+ kfree(resp);
+ return ret;
+}
+
+/*
+ * Notes:
+ * - read/write happens in chunks of at most 20 bytes, it's up to userspace
+ * to loop in order to get more data.
+ * - on a write error for an otherwise correct write request, the bytes
+ * that should have been written are left in an undefined state.
+ */
+static const struct file_operations picolcd_debug_eeprom_fops = {
+ .owner = THIS_MODULE,
+ .open = picolcd_debug_eeprom_open,
+ .read = picolcd_debug_eeprom_read,
+ .write = picolcd_debug_eeprom_write,
+ .llseek = generic_file_llseek,
+};
+
+/*
+ * The "flash" file
+ */
+static int picolcd_debug_flash_open(struct inode *i, struct file *f)
+{
+ f->private_data = i->i_private;
+ return 0;
+}
+
+/* record a flash address to buf (bounds check to be done by caller) */
+static int _picolcd_flash_setaddr(struct picolcd_data *data, u8 *buf, long off)
+{
+ buf[0] = off & 0xff;
+ buf[1] = (off >> 8) & 0xff;
+ if (data->addr_sz == 3)
+ buf[2] = (off >> 16) & 0xff;
+ return data->addr_sz == 2 ? 2 : 3;
+}
+
+/* read a given size of data (bounds check to be done by caller) */
+static ssize_t _picolcd_flash_read(struct picolcd_data *data, int report_id,
+ char __user *u, size_t s, loff_t *off)
+{
+ struct picolcd_pending *resp;
+ u8 raw_data[4];
+ ssize_t ret = 0;
+ int len_off, err = -EIO;
+
+ while (s > 0) {
+ err = -EIO;
+ len_off = _picolcd_flash_setaddr(data, raw_data, *off);
+ raw_data[len_off] = s > 32 ? 32 : s;
+ resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off+1);
+ if (!resp || !resp->in_report)
+ goto skip;
+ if (resp->in_report->id == REPORT_MEMORY ||
+ resp->in_report->id == REPORT_BL_READ_MEMORY) {
+ if (memcmp(raw_data, resp->raw_data, len_off+1) != 0)
+ goto skip;
+ if (copy_to_user(u+ret, resp->raw_data+len_off+1, raw_data[len_off])) {
+ err = -EFAULT;
+ goto skip;
+ }
+ *off += raw_data[len_off];
+ s -= raw_data[len_off];
+ ret += raw_data[len_off];
+ err = 0;
+ }
+skip:
+ kfree(resp);
+ if (err)
+ return ret > 0 ? ret : err;
+ }
+ return ret;
+}
+
+static ssize_t picolcd_debug_flash_read(struct file *f, char __user *u,
+ size_t s, loff_t *off)
+{
+ struct picolcd_data *data = f->private_data;
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0x05fff)
+ return 0;
+ if (*off + s > 0x05fff)
+ s = 0x06000 - *off;
+
+ if (data->status & PICOLCD_BOOTLOADER)
+ return _picolcd_flash_read(data, REPORT_BL_READ_MEMORY, u, s, off);
+ else
+ return _picolcd_flash_read(data, REPORT_READ_MEMORY, u, s, off);
+}
+
+/* erase block aligned to 64bytes boundary */
+static ssize_t _picolcd_flash_erase64(struct picolcd_data *data, int report_id,
+ loff_t *off)
+{
+ struct picolcd_pending *resp;
+ u8 raw_data[3];
+ int len_off;
+ ssize_t ret = -EIO;
+
+ if (*off & 0x3f)
+ return -EINVAL;
+
+ len_off = _picolcd_flash_setaddr(data, raw_data, *off);
+ resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off);
+ if (!resp || !resp->in_report)
+ goto skip;
+ if (resp->in_report->id == REPORT_MEMORY ||
+ resp->in_report->id == REPORT_BL_ERASE_MEMORY) {
+ if (memcmp(raw_data, resp->raw_data, len_off) != 0)
+ goto skip;
+ ret = 0;
+ }
+skip:
+ kfree(resp);
+ return ret;
+}
+
+/* write a given size of data (bounds check to be done by caller) */
+static ssize_t _picolcd_flash_write(struct picolcd_data *data, int report_id,
+ const char __user *u, size_t s, loff_t *off)
+{
+ struct picolcd_pending *resp;
+ u8 raw_data[36];
+ ssize_t ret = 0;
+ int len_off, err = -EIO;
+
+ while (s > 0) {
+ err = -EIO;
+ len_off = _picolcd_flash_setaddr(data, raw_data, *off);
+ raw_data[len_off] = s > 32 ? 32 : s;
+ if (copy_from_user(raw_data+len_off+1, u, raw_data[len_off])) {
+ err = -EFAULT;
+ break;
+ }
+ resp = picolcd_send_and_wait(data->hdev, report_id, raw_data,
+ len_off+1+raw_data[len_off]);
+ if (!resp || !resp->in_report)
+ goto skip;
+ if (resp->in_report->id == REPORT_MEMORY ||
+ resp->in_report->id == REPORT_BL_WRITE_MEMORY) {
+ if (memcmp(raw_data, resp->raw_data, len_off+1+raw_data[len_off]) != 0)
+ goto skip;
+ *off += raw_data[len_off];
+ s -= raw_data[len_off];
+ ret += raw_data[len_off];
+ err = 0;
+ }
+skip:
+ kfree(resp);
+ if (err)
+ break;
+ }
+ return ret > 0 ? ret : err;
+}
+
+static ssize_t picolcd_debug_flash_write(struct file *f, const char __user *u,
+ size_t s, loff_t *off)
+{
+ struct picolcd_data *data = f->private_data;
+ ssize_t err, ret = 0;
+ int report_erase, report_write;
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0x5fff)
+ return -ENOSPC;
+ if (s & 0x3f)
+ return -EINVAL;
+ if (*off & 0x3f)
+ return -EINVAL;
+
+ if (data->status & PICOLCD_BOOTLOADER) {
+ report_erase = REPORT_BL_ERASE_MEMORY;
+ report_write = REPORT_BL_WRITE_MEMORY;
+ } else {
+ report_erase = REPORT_ERASE_MEMORY;
+ report_write = REPORT_WRITE_MEMORY;
+ }
+ mutex_lock(&data->mutex_flash);
+ while (s > 0) {
+ err = _picolcd_flash_erase64(data, report_erase, off);
+ if (err)
+ break;
+ err = _picolcd_flash_write(data, report_write, u, 64, off);
+ if (err < 0)
+ break;
+ ret += err;
+ *off += err;
+ s -= err;
+ if (err != 64)
+ break;
+ }
+ mutex_unlock(&data->mutex_flash);
+ return ret > 0 ? ret : err;
+}
+
+/*
+ * Notes:
+ * - concurrent writing is prevented by mutex and all writes must be
+ * n*64 bytes and 64-byte aligned, each write being preceded by an
+ * ERASE which erases a 64-byte block.
+ * If less than requested was written or an error is returned for an
+ * otherwise correct write request, the next 64-byte block which should
+ * have been written is left in an undefined state (mostly: original, erased,
+ * or (half-)written with a write error)
+ * - reading can happen without special restriction
+ */
+static const struct file_operations picolcd_debug_flash_fops = {
+ .owner = THIS_MODULE,
+ .open = picolcd_debug_flash_open,
+ .read = picolcd_debug_flash_read,
+ .write = picolcd_debug_flash_write,
+ .llseek = generic_file_llseek,
+};
+
+
+/*
+ * Helper code for HID report level dumping/debugging
+ */
+static const char *error_codes[] = {
+ "success", "parameter missing", "data_missing", "block readonly",
+ "block not erasable", "block too big", "section overflow",
+ "invalid command length", "invalid data length",
+};
+
+static void dump_buff_as_hex(char *dst, size_t dst_sz, const u8 *data,
+ const size_t data_len)
+{
+ int i, j;
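+ /*
+ * Emit the bytes as "xx " hex triplets while they fit in dst, then
+ * replace the trailing space with a newline and NUL-terminate.
+ */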
+ for (i = j = 0; i < data_len && j + 3 < dst_sz; i++) {
+ dst[j++] = hex_asc[(data[i] >> 4) & 0x0f];
+ dst[j++] = hex_asc[data[i] & 0x0f];
+ dst[j++] = ' ';
+ }
+ if (j < dst_sz) {
+ dst[j--] = '\0';
+ dst[j] = '\n';
+ } else
+ dst[j] = '\0';
+}
+
+static void picolcd_debug_out_report(struct picolcd_data *data,
+ struct hid_device *hdev, struct hid_report *report)
+{
+ u8 raw_data[70];
+ int raw_size = (report->size >> 3) + 1;
+ char *buff;
+#define BUFF_SZ 256
+
+ /* Avoid unnecessary overhead if debugfs is disabled */
+ if (!hdev->debug_events)
+ return;
+
+ buff = kmalloc(BUFF_SZ, GFP_ATOMIC);
+ if (!buff)
+ return;
+
+ snprintf(buff, BUFF_SZ, "\nout report %d (size %d) = ",
+ report->id, raw_size);
+ hid_debug_event(hdev, buff);
+ if (raw_size + 5 > sizeof(raw_data)) {
+ hid_debug_event(hdev, " TOO BIG\n");
+ kfree(buff);
+ return;
+ } else {
+ raw_data[0] = report->id;
+ hid_output_report(report, raw_data);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size);
+ hid_debug_event(hdev, buff);
+ }
+
+ switch (report->id) {
+ case REPORT_LED_STATE:
+ /* 1 data byte with GPO state */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_LED_STATE", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tGPO state: 0x%02x\n", raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_BRIGHTNESS:
+ /* 1 data byte with brightness */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_BRIGHTNESS", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tBrightness: 0x%02x\n", raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_CONTRAST:
+ /* 1 data byte with contrast */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_CONTRAST", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tContrast: 0x%02x\n", raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_RESET:
+ /* 2 data bytes with reset duration in ms */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_RESET", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tDuration: 0x%02x%02x (%dms)\n",
+ raw_data[2], raw_data[1], raw_data[2] << 8 | raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_LCD_CMD:
+ /* 63 data bytes with LCD commands */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_LCD_CMD", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ /* TODO: format decoding */
+ break;
+ case REPORT_LCD_DATA:
+ /* 63 data bytes with LCD data */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_LCD_DATA", report->id, raw_size-1);
+ /* TODO: format decoding */
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_LCD_CMD_DATA:
+ /* 63 data bytes with LCD commands and data */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_LCD_CMD_DATA", report->id, raw_size-1);
+ /* TODO: format decoding */
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_EE_READ:
+ /* 3 data bytes with read area description */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_EE_READ", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_EE_WRITE:
+ /* 3+1..20 data bytes with write area description */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_EE_WRITE", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[3] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[3] + 4 <= raw_size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_ERASE_MEMORY:
+ case REPORT_BL_ERASE_MEMORY:
+ /* 3 data bytes with pointer inside erase block */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_ERASE_MEMORY", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ switch (data->addr_sz) {
+ case 2:
+ snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ break;
+ case 3:
+ snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x%02x\n",
+ raw_data[3], raw_data[2], raw_data[1]);
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "\tNot supported\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_READ_MEMORY:
+ case REPORT_BL_READ_MEMORY:
+ /* 4 data bytes with read area description */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_READ_MEMORY", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ switch (data->addr_sz) {
+ case 2:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ break;
+ case 3:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
+ raw_data[3], raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "\tNot supported\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_WRITE_MEMORY:
+ case REPORT_BL_WRITE_MEMORY:
+ /* 4+1..32 data bytes with write area description */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_WRITE_MEMORY", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ switch (data->addr_sz) {
+ case 2:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[3] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[3] + 4 <= raw_size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ break;
+ case 3:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
+ raw_data[3], raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[4] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[4] + 5 <= raw_size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "\tNot supported\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_SPLASH_RESTART:
+ /* TODO */
+ break;
+ case REPORT_EXIT_KEYBOARD:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_EXIT_KEYBOARD", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n",
+ raw_data[1] | (raw_data[2] << 8),
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_VERSION:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_VERSION", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_DEVID:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_DEVID", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_SPLASH_SIZE:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_SPLASH_SIZE", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_HOOK_VERSION:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_HOOK_VERSION", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_EXIT_FLASHER:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_EXIT_FLASHER", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n",
+ raw_data[1] | (raw_data[2] << 8),
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "<unknown>", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ }
+ wake_up_interruptible(&hdev->debug_wait);
+ kfree(buff);
+}
+
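+/*
+ * Decode an incoming raw report the same way and feed it to the HID
+ * debugfs event queue.
+ */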
+static void picolcd_debug_raw_event(struct picolcd_data *data,
+ struct hid_device *hdev, struct hid_report *report,
+ u8 *raw_data, int size)
+{
+ char *buff;
+
+#define BUFF_SZ 256
+ /* Avoid unnecessary overhead if debugfs is disabled */
+ if (!hdev->debug_events)
+ return;
+
+ buff = kmalloc(BUFF_SZ, GFP_ATOMIC);
+ if (!buff)
+ return;
+
+ switch (report->id) {
+ case REPORT_ERROR_CODE:
+ /* 2 data bytes with affected report and error code */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_ERROR_CODE", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ if (raw_data[2] < ARRAY_SIZE(error_codes))
+ snprintf(buff, BUFF_SZ, "\tError code 0x%02x (%s) in reply to report 0x%02x\n",
+ raw_data[2], error_codes[raw_data[2]], raw_data[1]);
+ else
+ snprintf(buff, BUFF_SZ, "\tError code 0x%02x in reply to report 0x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_KEY_STATE:
+ /* 2 data bytes with key state */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_KEY_STATE", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ if (raw_data[1] == 0)
+ snprintf(buff, BUFF_SZ, "\tNo key pressed\n");
+ else if (raw_data[2] == 0)
+ snprintf(buff, BUFF_SZ, "\tOne key pressed: 0x%02x (%d)\n",
+ raw_data[1], raw_data[1]);
+ else
+ snprintf(buff, BUFF_SZ, "\tTwo keys pressed: 0x%02x (%d), 0x%02x (%d)\n",
+ raw_data[1], raw_data[1], raw_data[2], raw_data[2]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_IR_DATA:
+ /* Up to 20 bytes of IR scancode data */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_IR_DATA", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ if (raw_data[1] == 0) {
+ snprintf(buff, BUFF_SZ, "\tUnexpectedly 0 data length\n");
+ hid_debug_event(hdev, buff);
+ } else if (raw_data[1] + 1 <= size) {
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n\tIR Data: ",
+ raw_data[1]-1);
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+2, raw_data[1]-1);
+ hid_debug_event(hdev, buff);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tOverflowing data length: %d\n",
+ raw_data[1]-1);
+ hid_debug_event(hdev, buff);
+ }
+ break;
+ case REPORT_EE_DATA:
+ /* Data buffer in response to REPORT_EE_READ or REPORT_EE_WRITE */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_EE_DATA", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[3] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ hid_debug_event(hdev, buff);
+ } else if (raw_data[3] + 4 <= size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+ hid_debug_event(hdev, buff);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ hid_debug_event(hdev, buff);
+ }
+ break;
+ case REPORT_MEMORY:
+ /* Data buffer in response to REPORT_READ_MEMORY or REPORT_WRITE_MEMORY */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_MEMORY", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ switch (data->addr_sz) {
+ case 2:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[3] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[3] + 4 <= size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ break;
+ case 3:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
+ raw_data[3], raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[4] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[4] + 5 <= size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "\tNot supported\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_VERSION:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_VERSION", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_BL_ERASE_MEMORY:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_BL_ERASE_MEMORY", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ /* TODO */
+ break;
+ case REPORT_BL_READ_MEMORY:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_BL_READ_MEMORY", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ /* TODO */
+ break;
+ case REPORT_BL_WRITE_MEMORY:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_BL_WRITE_MEMORY", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ /* TODO */
+ break;
+ case REPORT_DEVID:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_DEVID", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tSerial: 0x%02x%02x%02x%02x\n",
+ raw_data[1], raw_data[2], raw_data[3], raw_data[4]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tType: 0x%02x\n",
+ raw_data[5]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_SPLASH_SIZE:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_SPLASH_SIZE", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tTotal splash space: %d\n",
+ (raw_data[2] << 8) | raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tUsed splash space: %d\n",
+ (raw_data[4] << 8) | raw_data[3]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_HOOK_VERSION:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_HOOK_VERSION", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n",
+ raw_data[1], raw_data[2]);
+ hid_debug_event(hdev, buff);
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "<unknown>", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ }
+ wake_up_interruptible(&hdev->debug_wait);
+ kfree(buff);
+}
+
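+/*
+ * Create the debugfs files (reset, eeprom, flash) below the device's HID
+ * debug directory; the flash address width (addr_sz) is derived from the
+ * report count of the flash read report.
+ */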
+static void picolcd_init_devfs(struct picolcd_data *data,
+ struct hid_report *eeprom_r, struct hid_report *eeprom_w,
+ struct hid_report *flash_r, struct hid_report *flash_w,
+ struct hid_report *reset)
+{
+ struct hid_device *hdev = data->hdev;
+
+ mutex_init(&data->mutex_flash);
+
+ /* reset */
+ if (reset)
+ data->debug_reset = debugfs_create_file("reset", 0600,
+ hdev->debug_dir, data, &picolcd_debug_reset_fops);
+
+ /* eeprom */
+ if (eeprom_r || eeprom_w)
+ data->debug_eeprom = debugfs_create_file("eeprom",
+ (eeprom_w ? S_IWUSR : 0) | (eeprom_r ? S_IRUSR : 0),
+ hdev->debug_dir, data, &picolcd_debug_eeprom_fops);
+
+ /* flash */
+ if (flash_r && flash_r->maxfield == 1 && flash_r->field[0]->report_size == 8)
+ data->addr_sz = flash_r->field[0]->report_count - 1;
+ else
+ data->addr_sz = -1;
+ if (data->addr_sz == 2 || data->addr_sz == 3) {
+ data->debug_flash = debugfs_create_file("flash",
+ (flash_w ? S_IWUSR : 0) | (flash_r ? S_IRUSR : 0),
+ hdev->debug_dir, data, &picolcd_debug_flash_fops);
+ } else if (flash_r || flash_w)
+ dev_warn(&hdev->dev, "Unexpected FLASH access reports, "
+ "please submit rdesc for review\n");
+}
+
+static void picolcd_exit_devfs(struct picolcd_data *data)
+{
+ struct dentry *dent;
+
+ dent = data->debug_reset;
+ data->debug_reset = NULL;
+ if (dent)
+ debugfs_remove(dent);
+ dent = data->debug_eeprom;
+ data->debug_eeprom = NULL;
+ if (dent)
+ debugfs_remove(dent);
+ dent = data->debug_flash;
+ data->debug_flash = NULL;
+ if (dent)
+ debugfs_remove(dent);
+ mutex_destroy(&data->mutex_flash);
+}
+#else
+static inline void picolcd_debug_raw_event(struct picolcd_data *data,
+ struct hid_device *hdev, struct hid_report *report,
+ u8 *raw_data, int size)
+{
+}
+static inline void picolcd_init_devfs(struct picolcd_data *data,
+ struct hid_report *eeprom_r, struct hid_report *eeprom_w,
+ struct hid_report *flash_r, struct hid_report *flash_w,
+ struct hid_report *reset)
+{
+}
+static inline void picolcd_exit_devfs(struct picolcd_data *data)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * Handle raw report as sent by device
+ */
+static int picolcd_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ unsigned long flags;
+ int ret = 0;
+
+ if (!data)
+ return 1;
+
+ if (report->id == REPORT_KEY_STATE) {
+ if (data->input_keys)
+ ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
+ } else if (report->id == REPORT_IR_DATA) {
+ if (data->input_cir)
+ ret = picolcd_raw_cir(data, report, raw_data+1, size-1);
+ } else {
+ spin_lock_irqsave(&data->lock, flags);
+ /*
+ * We let the caller of picolcd_send_and_wait() check if the
+ * report we got is one of the expected ones or not.
+ */
+ if (data->pending) {
+ memcpy(data->pending->raw_data, raw_data+1, size-1);
+ data->pending->raw_size = size-1;
+ data->pending->in_report = report;
+ complete(&data->pending->ready);
+ }
+ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ picolcd_debug_raw_event(data, hdev, report, raw_data, size);
+ return 1;
+}
+
+#ifdef CONFIG_PM
+static int picolcd_suspend(struct hid_device *hdev, pm_message_t message)
+{
+ if (message.event & PM_EVENT_AUTO)
+ return 0;
+
+ picolcd_suspend_backlight(hid_get_drvdata(hdev));
+ dbg_hid(PICOLCD_NAME " device ready for suspend\n");
+ return 0;
+}
+
+static int picolcd_resume(struct hid_device *hdev)
+{
+ int ret;
+ ret = picolcd_resume_backlight(hid_get_drvdata(hdev));
+ if (ret)
+ dbg_hid(PICOLCD_NAME " restoring backlight failed: %d\n", ret);
+ return 0;
+}
+
+static int picolcd_reset_resume(struct hid_device *hdev)
+{
+ int ret;
+ ret = picolcd_reset(hdev);
+ if (ret)
+ dbg_hid(PICOLCD_NAME " resetting our device failed: %d\n", ret);
+ ret = picolcd_fb_reset(hid_get_drvdata(hdev), 0);
+ if (ret)
+ dbg_hid(PICOLCD_NAME " restoring framebuffer content failed: %d\n", ret);
+ ret = picolcd_resume_lcd(hid_get_drvdata(hdev));
+ if (ret)
+ dbg_hid(PICOLCD_NAME " restoring lcd failed: %d\n", ret);
+ ret = picolcd_resume_backlight(hid_get_drvdata(hdev));
+ if (ret)
+ dbg_hid(PICOLCD_NAME " restoring backlight failed: %d\n", ret);
+ picolcd_leds_set(hid_get_drvdata(hdev));
+ return 0;
+}
+#endif
+
+/* initialize keypad input device */
+static int picolcd_init_keys(struct picolcd_data *data,
+ struct hid_report *report)
+{
+ struct hid_device *hdev = data->hdev;
+ struct input_dev *idev;
+ int error, i;
+
+ if (!report)
+ return -ENODEV;
+ if (report->maxfield != 1 || report->field[0]->report_count != 2 ||
+ report->field[0]->report_size != 8) {
+ dev_err(&hdev->dev, "unsupported KEY_STATE report");
+ return -EINVAL;
+ }
+
+ idev = input_allocate_device();
+ if (idev == NULL) {
+ dev_err(&hdev->dev, "failed to allocate input device");
+ return -ENOMEM;
+ }
+ input_set_drvdata(idev, hdev);
+ memcpy(data->keycode, def_keymap, sizeof(def_keymap));
+ idev->name = hdev->name;
+ idev->phys = hdev->phys;
+ idev->uniq = hdev->uniq;
+ idev->id.bustype = hdev->bus;
+ idev->id.vendor = hdev->vendor;
+ idev->id.product = hdev->product;
+ idev->id.version = hdev->version;
+ idev->dev.parent = hdev->dev.parent;
+ idev->keycode = &data->keycode;
+ idev->keycodemax = PICOLCD_KEYS;
+ idev->keycodesize = sizeof(data->keycode[0]);
+ input_set_capability(idev, EV_MSC, MSC_SCAN);
+ set_bit(EV_REP, idev->evbit);
+ for (i = 0; i < PICOLCD_KEYS; i++)
+ input_set_capability(idev, EV_KEY, data->keycode[i]);
+ error = input_register_device(idev);
+ if (error) {
+ dev_err(&hdev->dev, "error registering the input device");
+ input_free_device(idev);
+ return error;
+ }
+ data->input_keys = idev;
+ return 0;
+}
+
+static void picolcd_exit_keys(struct picolcd_data *data)
+{
+ struct input_dev *idev = data->input_keys;
+
+ data->input_keys = NULL;
+ if (idev)
+ input_unregister_device(idev);
+}
+
+/* initialize CIR input device */
+static inline int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report)
+{
+ /* support not implemented yet */
+ return 0;
+}
+
+static inline void picolcd_exit_cir(struct picolcd_data *data)
+{
+}
+
+static int picolcd_probe_lcd(struct hid_device *hdev, struct picolcd_data *data)
+{
+ int error;
+
+ error = picolcd_check_version(hdev);
+ if (error)
+ return error;
+
+ if (data->version[0] != 0 || data->version[1] != 3)
+ dev_info(&hdev->dev, "Device with untested firmware revision, "
+ "please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
+ dev_name(&hdev->dev));
+
+ /* Setup keypad input device */
+ error = picolcd_init_keys(data, picolcd_in_report(REPORT_KEY_STATE, hdev));
+ if (error)
+ goto err;
+
+ /* Setup CIR input device */
+ error = picolcd_init_cir(data, picolcd_in_report(REPORT_IR_DATA, hdev));
+ if (error)
+ goto err;
+
+ /* Set up the framebuffer device */
+ error = picolcd_init_framebuffer(data);
+ if (error)
+ goto err;
+
+ /* Setup lcd class device */
+ error = picolcd_init_lcd(data, picolcd_out_report(REPORT_CONTRAST, hdev));
+ if (error)
+ goto err;
+
+ /* Setup backlight class device */
+ error = picolcd_init_backlight(data, picolcd_out_report(REPORT_BRIGHTNESS, hdev));
+ if (error)
+ goto err;
+
+ /* Setup the LED class devices */
+ error = picolcd_init_leds(data, picolcd_out_report(REPORT_LED_STATE, hdev));
+ if (error)
+ goto err;
+
+ picolcd_init_devfs(data, picolcd_out_report(REPORT_EE_READ, hdev),
+ picolcd_out_report(REPORT_EE_WRITE, hdev),
+ picolcd_out_report(REPORT_READ_MEMORY, hdev),
+ picolcd_out_report(REPORT_WRITE_MEMORY, hdev),
+ picolcd_out_report(REPORT_RESET, hdev));
+ return 0;
+err:
+ picolcd_exit_leds(data);
+ picolcd_exit_backlight(data);
+ picolcd_exit_lcd(data);
+ picolcd_exit_framebuffer(data);
+ picolcd_exit_cir(data);
+ picolcd_exit_keys(data);
+ return error;
+}
+
+static int picolcd_probe_bootloader(struct hid_device *hdev, struct picolcd_data *data)
+{
+ int error;
+
+ error = picolcd_check_version(hdev);
+ if (error)
+ return error;
+
+ if (data->version[0] != 1 || data->version[1] != 0)
+ dev_info(&hdev->dev, "Device with untested bootloader revision, "
+ "please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
+ dev_name(&hdev->dev));
+
+ picolcd_init_devfs(data, NULL, NULL,
+ picolcd_out_report(REPORT_BL_READ_MEMORY, hdev),
+ picolcd_out_report(REPORT_BL_WRITE_MEMORY, hdev), NULL);
+ return 0;
+}
+
+static int picolcd_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct picolcd_data *data;
+ int error = -ENOMEM;
+
+ dbg_hid(PICOLCD_NAME " hardware probe...\n");
+
+ /*
+ * Let's allocate the picolcd data structure, set some reasonable
+ * defaults, and associate it with the device
+ */
+ data = kzalloc(sizeof(struct picolcd_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&hdev->dev, "can't allocate space for Minibox PicoLCD device data\n");
+ error = -ENOMEM;
+ goto err_no_cleanup;
+ }
+
+ spin_lock_init(&data->lock);
+ mutex_init(&data->mutex);
+ data->hdev = hdev;
+ data->opmode_delay = 5000;
+ if (hdev->product == USB_DEVICE_ID_PICOLCD_BOOTLOADER)
+ data->status |= PICOLCD_BOOTLOADER;
+ hid_set_drvdata(hdev, data);
+
+ /* Parse the device reports and start it up */
+ error = hid_parse(hdev);
+ if (error) {
+ dev_err(&hdev->dev, "device report parse failed\n");
+ goto err_cleanup_data;
+ }
+
+ /* We don't use hidinput but hid_hw_start() fails if nothing is
+ * claimed. So spoof claimed input. */
+ hdev->claimed = HID_CLAIMED_INPUT;
+ error = hid_hw_start(hdev, 0);
+ hdev->claimed = 0;
+ if (error) {
+ dev_err(&hdev->dev, "hardware start failed\n");
+ goto err_cleanup_data;
+ }
+
+ error = hdev->ll_driver->open(hdev);
+ if (error) {
+ dev_err(&hdev->dev, "failed to open input interrupt pipe for key and IR events\n");
+ goto err_cleanup_hid_hw;
+ }
+
+ error = device_create_file(&hdev->dev, &dev_attr_operation_mode_delay);
+ if (error) {
+ dev_err(&hdev->dev, "failed to create sysfs attributes\n");
+ goto err_cleanup_hid_ll;
+ }
+
+ error = device_create_file(&hdev->dev, &dev_attr_operation_mode);
+ if (error) {
+ dev_err(&hdev->dev, "failed to create sysfs attributes\n");
+ goto err_cleanup_sysfs1;
+ }
+
+ if (data->status & PICOLCD_BOOTLOADER)
+ error = picolcd_probe_bootloader(hdev, data);
+ else
+ error = picolcd_probe_lcd(hdev, data);
+ if (error)
+ goto err_cleanup_sysfs2;
+
+ dbg_hid(PICOLCD_NAME " activated and initialized\n");
+ return 0;
+
+err_cleanup_sysfs2:
+ device_remove_file(&hdev->dev, &dev_attr_operation_mode);
+err_cleanup_sysfs1:
+ device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay);
+err_cleanup_hid_ll:
+ hdev->ll_driver->close(hdev);
+err_cleanup_hid_hw:
+ hid_hw_stop(hdev);
+err_cleanup_data:
+ kfree(data);
+err_no_cleanup:
+ hid_set_drvdata(hdev, NULL);
+
+ return error;
+}
+
+static void picolcd_remove(struct hid_device *hdev)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ unsigned long flags;
+
+ dbg_hid(PICOLCD_NAME " hardware remove...\n");
+ spin_lock_irqsave(&data->lock, flags);
+ data->status |= PICOLCD_FAILED;
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ picolcd_exit_devfs(data);
+ device_remove_file(&hdev->dev, &dev_attr_operation_mode);
+ device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay);
+ hdev->ll_driver->close(hdev);
+ hid_hw_stop(hdev);
+ hid_set_drvdata(hdev, NULL);
+
+ /* Shortcut potential pending reply that will never arrive */
+ spin_lock_irqsave(&data->lock, flags);
+ if (data->pending)
+ complete(&data->pending->ready);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* Cleanup LED */
+ picolcd_exit_leds(data);
+ /* Clean up the framebuffer */
+ picolcd_exit_backlight(data);
+ picolcd_exit_lcd(data);
+ picolcd_exit_framebuffer(data);
+ /* Cleanup input */
+ picolcd_exit_cir(data);
+ picolcd_exit_keys(data);
+
+ mutex_destroy(&data->mutex);
+ /* Finally, clean up the picolcd data itself */
+ kfree(data);
+}
+
+static const struct hid_device_id picolcd_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, picolcd_devices);
+
+static struct hid_driver picolcd_driver = {
+ .name = "hid-picolcd",
+ .id_table = picolcd_devices,
+ .probe = picolcd_probe,
+ .remove = picolcd_remove,
+ .raw_event = picolcd_raw_event,
+#ifdef CONFIG_PM
+ .suspend = picolcd_suspend,
+ .resume = picolcd_resume,
+ .reset_resume = picolcd_reset_resume,
+#endif
+};
+
+static int __init picolcd_init(void)
+{
+ return hid_register_driver(&picolcd_driver);
+}
+
+static void __exit picolcd_exit(void)
+{
+ hid_unregister_driver(&picolcd_driver);
+}
+
+module_init(picolcd_init);
+module_exit(picolcd_exit);
+MODULE_DESCRIPTION("Minibox graphics PicoLCD Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
new file mode 100644
index 0000000..845f428
--- /dev/null
+++ b/drivers/hid/hid-prodikeys.c
@@ -0,0 +1,910 @@
+/*
+ * HID driver for the Prodikeys PC-MIDI Keyboard
+ * providing midi & extra multimedia keys functionality
+ *
+ * Copyright (c) 2009 Don Prince <dhprince.devel@yahoo.co.uk>
+ *
+ * Controls for Octave Shift Up/Down, Channel, and
+ * Sustain Duration available via sysfs.
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/mutex.h>
+#include <linux/hid.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/rawmidi.h>
+#include "usbhid/usbhid.h"
+#include "hid-ids.h"
+
+
+#define pk_debug(format, arg...) \
+ pr_debug("hid-prodikeys: " format "\n" , ## arg)
+#define pk_error(format, arg...) \
+ pr_err("hid-prodikeys: " format "\n" , ## arg)
+
+struct pcmidi_snd;
+
+struct pk_device {
+ unsigned long quirks;
+
+ struct hid_device *hdev;
+ struct pcmidi_snd *pm; /* pcmidi device context */
+};
+
+struct pcmidi_snd;
+
+struct pcmidi_sustain {
+ unsigned long in_use;
+ struct pcmidi_snd *pm;
+ struct timer_list timer;
+ unsigned char status;
+ unsigned char note;
+ unsigned char velocity;
+};
+
+#define PCMIDI_SUSTAINED_MAX 32
+struct pcmidi_snd {
+ struct pk_device *pk;
+ unsigned short ifnum;
+ struct hid_report *pcmidi_report6;
+ struct input_dev *input_ep82;
+ unsigned short midi_mode;
+ unsigned short midi_sustain_mode;
+ unsigned short midi_sustain;
+ unsigned short midi_channel;
+ short midi_octave;
+ struct pcmidi_sustain sustained_notes[PCMIDI_SUSTAINED_MAX];
+ unsigned short fn_state;
+ unsigned short last_key[24];
+ spinlock_t rawmidi_in_lock;
+ struct snd_card *card;
+ struct snd_rawmidi *rwmidi;
+ struct snd_rawmidi_substream *in_substream;
+ struct snd_rawmidi_substream *out_substream;
+ unsigned long in_triggered;
+ unsigned long out_active;
+};
+
+#define PK_QUIRK_NOGET 0x00010000
+#define PCMIDI_MIDDLE_C 60
+#define PCMIDI_CHANNEL_MIN 0
+#define PCMIDI_CHANNEL_MAX 15
+#define PCMIDI_OCTAVE_MIN (-2)
+#define PCMIDI_OCTAVE_MAX 2
+#define PCMIDI_SUSTAIN_MIN 0
+#define PCMIDI_SUSTAIN_MAX 5000
+
+static const char shortname[] = "PC-MIDI";
+static const char longname[] = "Prodikeys PC-MIDI Keyboard";
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
+static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
+
+module_param_array(index, int, NULL, 0444);
+module_param_array(id, charp, NULL, 0444);
+module_param_array(enable, bool, NULL, 0444);
+MODULE_PARM_DESC(index, "Index value for the PC-MIDI virtual audio driver");
+MODULE_PARM_DESC(id, "ID string for the PC-MIDI virtual audio driver");
+MODULE_PARM_DESC(enable, "Enable for the PC-MIDI virtual audio driver");
+
+
+/* Output routine for the sysfs channel file */
+static ssize_t show_channel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ dbg_hid("pcmidi sysfs read channel=%u\n", pk->pm->midi_channel);
+
+ return sprintf(buf, "%u (min:%u, max:%u)\n", pk->pm->midi_channel,
+ PCMIDI_CHANNEL_MIN, PCMIDI_CHANNEL_MAX);
+}
+
+/* Input routine for the sysfs channel file */
+static ssize_t store_channel(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ unsigned channel = 0;
+
+ if (sscanf(buf, "%u", &channel) > 0 && channel <= PCMIDI_CHANNEL_MAX) {
+ dbg_hid("pcmidi sysfs write channel=%u\n", channel);
+ pk->pm->midi_channel = channel;
+ return strlen(buf);
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(channel, S_IRUGO | S_IWUGO, show_channel,
+ store_channel);
+
+static struct device_attribute *sysfs_device_attr_channel = {
+ &dev_attr_channel,
+ };
+
+/* Output routine for the sysfs sustain file */
+static ssize_t show_sustain(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ dbg_hid("pcmidi sysfs read sustain=%u\n", pk->pm->midi_sustain);
+
+ return sprintf(buf, "%u (off:%u, max:%u (ms))\n", pk->pm->midi_sustain,
+ PCMIDI_SUSTAIN_MIN, PCMIDI_SUSTAIN_MAX);
+}
+
+/* Input routine for the sysfs sustain file */
+static ssize_t store_sustain(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ unsigned sustain = 0;
+
+ if (sscanf(buf, "%u", &sustain) > 0 && sustain <= PCMIDI_SUSTAIN_MAX) {
+ dbg_hid("pcmidi sysfs write sustain=%u\n", sustain);
+ pk->pm->midi_sustain = sustain;
+ pk->pm->midi_sustain_mode =
+ (0 == sustain || !pk->pm->midi_mode) ? 0 : 1;
+ return strlen(buf);
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(sustain, S_IRUGO | S_IWUGO, show_sustain,
+ store_sustain);
+
+static struct device_attribute *sysfs_device_attr_sustain = {
+ &dev_attr_sustain,
+ };
+
+/* Output routine for the sysfs octave file */
+static ssize_t show_octave(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ dbg_hid("pcmidi sysfs read octave=%d\n", pk->pm->midi_octave);
+
+ return sprintf(buf, "%d (min:%d, max:%d)\n", pk->pm->midi_octave,
+ PCMIDI_OCTAVE_MIN, PCMIDI_OCTAVE_MAX);
+}
+
+/* Input routine for the sysfs octave file */
+static ssize_t store_octave(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ int octave = 0;
+
+ if (sscanf(buf, "%d", &octave) > 0 &&
+ octave >= PCMIDI_OCTAVE_MIN && octave <= PCMIDI_OCTAVE_MAX) {
+ dbg_hid("pcmidi sysfs write octave=%d\n", octave);
+ pk->pm->midi_octave = octave;
+ return strlen(buf);
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(octave, S_IRUGO | S_IWUGO, show_octave,
+ store_octave);
+
+static struct device_attribute *sysfs_device_attr_octave = {
+ &dev_attr_octave,
+ };
+
+
+static void pcmidi_send_note(struct pcmidi_snd *pm,
+ unsigned char status, unsigned char note, unsigned char velocity)
+{
+ unsigned long flags;
+ unsigned char buffer[3];
+
+ buffer[0] = status;
+ buffer[1] = note;
+ buffer[2] = velocity;
+
+ spin_lock_irqsave(&pm->rawmidi_in_lock, flags);
+
+ if (!pm->in_substream)
+ goto drop_note;
+ if (!test_bit(pm->in_substream->number, &pm->in_triggered))
+ goto drop_note;
+
+ snd_rawmidi_receive(pm->in_substream, buffer, 3);
+
+drop_note:
+ spin_unlock_irqrestore(&pm->rawmidi_in_lock, flags);
+
+ return;
+}
+
+void pcmidi_sustained_note_release(unsigned long data)
+{
+ struct pcmidi_sustain *pms = (struct pcmidi_sustain *)data;
+
+ pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity);
+ pms->in_use = 0;
+}
+
+void init_sustain_timers(struct pcmidi_snd *pm)
+{
+ struct pcmidi_sustain *pms;
+ unsigned i;
+
+ for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
+ pms = &pm->sustained_notes[i];
+ pms->in_use = 0;
+ pms->pm = pm;
+ setup_timer(&pms->timer, pcmidi_sustained_note_release,
+ (unsigned long)pms);
+ }
+}
+
+void stop_sustain_timers(struct pcmidi_snd *pm)
+{
+ struct pcmidi_sustain *pms;
+ unsigned i;
+
+ for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
+ pms = &pm->sustained_notes[i];
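+ /* mark the slot busy so it is not reused while its timer is stopped */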
+ pms->in_use = 1;
+ del_timer_sync(&pms->timer);
+ }
+}
+
+static int pcmidi_get_output_report(struct pcmidi_snd *pm)
+{
+ struct hid_device *hdev = pm->pk->hdev;
+ struct hid_report *report;
+
+ list_for_each_entry(report,
+ &hdev->report_enum[HID_OUTPUT_REPORT].report_list, list) {
+ if (!(6 == report->id))
+ continue;
+
+ if (report->maxfield < 1) {
+ dev_err(&hdev->dev, "output report is empty\n");
+ break;
+ }
+ if (report->field[0]->report_count != 2) {
+ dev_err(&hdev->dev, "field count too low\n");
+ break;
+ }
+ pm->pcmidi_report6 = report;
+ return 0;
+ }
+ /* should never get here */
+ return -ENODEV;
+}
+
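+/* fill output report 6 with {0x01, state} and submit it to the device */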
+static void pcmidi_submit_output_report(struct pcmidi_snd *pm, int state)
+{
+ struct hid_device *hdev = pm->pk->hdev;
+ struct hid_report *report = pm->pcmidi_report6;
+ report->field[0]->value[0] = 0x01;
+ report->field[0]->value[1] = state;
+
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+}
+
+static int pcmidi_handle_report1(struct pcmidi_snd *pm, u8 *data)
+{
+ u32 bit_mask;
+
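+ /* assemble the 24-bit key bitmap, data[1] being the most significant byte */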
+ bit_mask = data[1];
+ bit_mask = (bit_mask << 8) | data[2];
+ bit_mask = (bit_mask << 8) | data[3];
+
+ dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
+
+ /*KEY_MAIL or octave down*/
+ if (pm->midi_mode && bit_mask == 0x004000) {
+ /* octave down */
+ pm->midi_octave--;
+ if (pm->midi_octave < -2)
+ pm->midi_octave = -2;
+ dbg_hid("pcmidi mode: %d octave: %d\n",
+ pm->midi_mode, pm->midi_octave);
+ return 1;
+ }
+ /*KEY_WWW or sustain*/
+ else if (pm->midi_mode && bit_mask == 0x000004) {
+ /* sustain on/off*/
+ pm->midi_sustain_mode ^= 0x1;
+ return 1;
+ }
+
+ return 0; /* continue key processing */
+}
+
+static int pcmidi_handle_report3(struct pcmidi_snd *pm, u8 *data, int size)
+{
+ struct pcmidi_sustain *pms;
+ unsigned i, j;
+ unsigned char status, note, velocity;
+
+ unsigned num_notes = (size-1)/2;
+ for (j = 0; j < num_notes; j++) {
+ note = data[j*2+1];
+ velocity = data[j*2+2];
+
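+ /*
+ * Raw codes below 0x81 are note-on events (relative to 0x54), the rest
+ * are note-off (relative to 0x94); both are mapped to MIDI note numbers
+ * around middle C and shifted by the current octave setting.
+ */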
+ if (note < 0x81) { /* note on */
+ status = 128 + 16 + pm->midi_channel; /* 1001nnnn */
+ note = note - 0x54 + PCMIDI_MIDDLE_C +
+ (pm->midi_octave * 12);
+ if (0 == velocity)
+ velocity = 1; /* force note on */
+ } else { /* note off */
+ status = 128 + pm->midi_channel; /* 1000nnnn */
+ note = note - 0x94 + PCMIDI_MIDDLE_C +
+ (pm->midi_octave*12);
+
+ if (pm->midi_sustain_mode) {
+ for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
+ pms = &pm->sustained_notes[i];
+ if (!pms->in_use) {
+ pms->status = status;
+ pms->note = note;
+ pms->velocity = velocity;
+ pms->in_use = 1;
+
+ mod_timer(&pms->timer,
+ jiffies +
+ msecs_to_jiffies(pm->midi_sustain));
+ return 1;
+ }
+ }
+ }
+ }
+ pcmidi_send_note(pm, status, note, velocity);
+ }
+
+ return 1;
+}
+
+static int pcmidi_handle_report4(struct pcmidi_snd *pm, u8 *data)
+{
+ unsigned key;
+ u32 bit_mask;
+ u32 bit_index;
+
+ bit_mask = data[1];
+ bit_mask = (bit_mask << 8) | data[2];
+ bit_mask = (bit_mask << 8) | data[3];
+
+ /* break keys */
+ for (bit_index = 0; bit_index < 24; bit_index++) {
+ key = pm->last_key[bit_index];
+ if (!((0x01 << bit_index) & bit_mask)) {
+ input_event(pm->input_ep82, EV_KEY,
+ pm->last_key[bit_index], 0);
+ pm->last_key[bit_index] = 0;
+ }
+ }
+
+ /* make keys */
+ for (bit_index = 0; bit_index < 24; bit_index++) {
+ key = 0;
+ switch ((0x01 << bit_index) & bit_mask) {
+ case 0x000010: /* Fn lock*/
+ pm->fn_state ^= 0x000010;
+ if (pm->fn_state)
+ pcmidi_submit_output_report(pm, 0xc5);
+ else
+ pcmidi_submit_output_report(pm, 0xc6);
+ continue;
+ case 0x020000: /* midi launcher..send a key (qwerty) or not? */
+ pcmidi_submit_output_report(pm, 0xc1);
+ pm->midi_mode ^= 0x01;
+
+ dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
+ continue;
+ case 0x100000: /* KEY_MESSENGER or octave up */
+ dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
+ if (pm->midi_mode) {
+ pm->midi_octave++;
+ if (pm->midi_octave > 2)
+ pm->midi_octave = 2;
+ dbg_hid("pcmidi mode: %d octave: %d\n",
+ pm->midi_mode, pm->midi_octave);
+ continue;
+ } else
+ key = KEY_MESSENGER;
+ break;
+ case 0x400000:
+ key = KEY_CALENDAR;
+ break;
+ case 0x080000:
+ key = KEY_ADDRESSBOOK;
+ break;
+ case 0x040000:
+ key = KEY_DOCUMENTS;
+ break;
+ case 0x800000:
+ key = KEY_WORDPROCESSOR;
+ break;
+ case 0x200000:
+ key = KEY_SPREADSHEET;
+ break;
+ case 0x010000:
+ key = KEY_COFFEE;
+ break;
+ case 0x000100:
+ key = KEY_HELP;
+ break;
+ case 0x000200:
+ key = KEY_SEND;
+ break;
+ case 0x000400:
+ key = KEY_REPLY;
+ break;
+ case 0x000800:
+ key = KEY_FORWARDMAIL;
+ break;
+ case 0x001000:
+ key = KEY_NEW;
+ break;
+ case 0x002000:
+ key = KEY_OPEN;
+ break;
+ case 0x004000:
+ key = KEY_CLOSE;
+ break;
+ case 0x008000:
+ key = KEY_SAVE;
+ break;
+ case 0x000001:
+ key = KEY_UNDO;
+ break;
+ case 0x000002:
+ key = KEY_REDO;
+ break;
+ case 0x000004:
+ key = KEY_SPELLCHECK;
+ break;
+ case 0x000008:
+ key = KEY_PRINT;
+ break;
+ }
+ if (key) {
+ input_event(pm->input_ep82, EV_KEY, key, 1);
+ pm->last_key[bit_index] = key;
+ }
+ }
+
+ return 1;
+}
+
+int pcmidi_handle_report(
+ struct pcmidi_snd *pm, unsigned report_id, u8 *data, int size)
+{
+ int ret = 0;
+
+ switch (report_id) {
+ case 0x01: /* midi keys (qwerty)*/
+ ret = pcmidi_handle_report1(pm, data);
+ break;
+ case 0x03: /* midi keyboard (musical)*/
+ ret = pcmidi_handle_report3(pm, data, size);
+ break;
+ case 0x04: /* multimedia/midi keys (qwerty)*/
+ ret = pcmidi_handle_report4(pm, data);
+ break;
+ }
+ return ret;
+}
+
+void pcmidi_setup_extra_keys(struct pcmidi_snd *pm, struct input_dev *input)
+{
+ /*
+ * reassigned functionality for N/A keys:
+ * MY PICTURES => KEY_WORDPROCESSOR
+ * MY MUSIC => KEY_SPREADSHEET
+ */
+ unsigned int keys[] = {
+ KEY_FN,
+ KEY_MESSENGER, KEY_CALENDAR,
+ KEY_ADDRESSBOOK, KEY_DOCUMENTS,
+ KEY_WORDPROCESSOR,
+ KEY_SPREADSHEET,
+ KEY_COFFEE,
+ KEY_HELP, KEY_SEND,
+ KEY_REPLY, KEY_FORWARDMAIL,
+ KEY_NEW, KEY_OPEN,
+ KEY_CLOSE, KEY_SAVE,
+ KEY_UNDO, KEY_REDO,
+ KEY_SPELLCHECK, KEY_PRINT,
+ 0
+ };
+
+ unsigned int *pkeys = &keys[0];
+ unsigned short i;
+
+ if (pm->ifnum != 1) /* only set up ONCE for interface 1 */
+ return;
+
+ pm->input_ep82 = input;
+
+ for (i = 0; i < 24; i++)
+ pm->last_key[i] = 0;
+
+ while (*pkeys != 0) {
+ set_bit(*pkeys, pm->input_ep82->keybit);
+ ++pkeys;
+ }
+}
+
+static int pcmidi_set_operational(struct pcmidi_snd *pm)
+{
+ if (pm->ifnum != 1)
+ return 0; /* only set up ONCE for interface 1 */
+
+ pcmidi_get_output_report(pm);
+ pcmidi_submit_output_report(pm, 0xc1);
+ return 0;
+}
+
+static int pcmidi_snd_free(struct snd_device *dev)
+{
+ return 0;
+}
+
+static int pcmidi_in_open(struct snd_rawmidi_substream *substream)
+{
+ struct pcmidi_snd *pm = substream->rmidi->private_data;
+
+ dbg_hid("pcmidi in open\n");
+ pm->in_substream = substream;
+ return 0;
+}
+
+static int pcmidi_in_close(struct snd_rawmidi_substream *substream)
+{
+ dbg_hid("pcmidi in close\n");
+ return 0;
+}
+
+static void pcmidi_in_trigger(struct snd_rawmidi_substream *substream, int up)
+{
+ struct pcmidi_snd *pm = substream->rmidi->private_data;
+
+ dbg_hid("pcmidi in trigger %d\n", up);
+
+ pm->in_triggered = up;
+}
+
+static struct snd_rawmidi_ops pcmidi_in_ops = {
+ .open = pcmidi_in_open,
+ .close = pcmidi_in_close,
+ .trigger = pcmidi_in_trigger
+};
+
+int pcmidi_snd_initialise(struct pcmidi_snd *pm)
+{
+ static int dev;
+ struct snd_card *card;
+ struct snd_rawmidi *rwmidi;
+ int err;
+
+ static struct snd_device_ops ops = {
+ .dev_free = pcmidi_snd_free,
+ };
+
+ if (pm->ifnum != 1)
+ return 0; /* only set up midi device ONCE for interface 1 */
+
+ if (dev >= SNDRV_CARDS)
+ return -ENODEV;
+
+ if (!enable[dev]) {
+ dev++;
+ return -ENOENT;
+ }
+
+ /* Setup sound card */
+
+ err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
+ if (err < 0) {
+ pk_error("failed to create pc-midi sound card\n");
+ err = -ENOMEM;
+ goto fail;
+ }
+ pm->card = card;
+
+ /* Setup sound device */
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, pm, &ops);
+ if (err < 0) {
+ pk_error("failed to create pc-midi sound device: error %d\n",
+ err);
+ goto fail;
+ }
+
+ strncpy(card->driver, shortname, sizeof(card->driver));
+ strncpy(card->shortname, shortname, sizeof(card->shortname));
+ strncpy(card->longname, longname, sizeof(card->longname));
+
+ /* Set up rawmidi */
+ err = snd_rawmidi_new(card, card->shortname, 0,
+ 0, 1, &rwmidi);
+ if (err < 0) {
+ pk_error("failed to create pc-midi rawmidi device: error %d\n",
+ err);
+ goto fail;
+ }
+ pm->rwmidi = rwmidi;
+ strncpy(rwmidi->name, card->shortname, sizeof(rwmidi->name));
+ rwmidi->info_flags = SNDRV_RAWMIDI_INFO_INPUT;
+ rwmidi->private_data = pm;
+
+ snd_rawmidi_set_ops(rwmidi, SNDRV_RAWMIDI_STREAM_INPUT,
+ &pcmidi_in_ops);
+
+ snd_card_set_dev(card, &pm->pk->hdev->dev);
+
+ /* create sysfs variables */
+ err = device_create_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_channel);
+ if (err < 0) {
+ pk_error("failed to create sysfs attribute channel: error %d\n",
+ err);
+ goto fail;
+ }
+
+ err = device_create_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_sustain);
+ if (err < 0) {
+ pk_error("failed to create sysfs attribute sustain: error %d\n",
+ err);
+ goto fail_attr_sustain;
+ }
+
+ err = device_create_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_octave);
+ if (err < 0) {
+ pk_error("failed to create sysfs attribute octave: error %d\n",
+ err);
+ goto fail_attr_octave;
+ }
+
+ spin_lock_init(&pm->rawmidi_in_lock);
+
+ init_sustain_timers(pm);
+ pcmidi_set_operational(pm);
+
+ /* register it */
+ err = snd_card_register(card);
+ if (err < 0) {
+ pk_error("failed to register pc-midi sound card: error %d\n",
+ err);
+ goto fail_register;
+ }
+
+ dbg_hid("pcmidi_snd_initialise finished ok\n");
+ return 0;
+
+fail_register:
+ stop_sustain_timers(pm);
+ device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_octave);
+fail_attr_octave:
+ device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_sustain);
+fail_attr_sustain:
+ device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_channel);
+fail:
+ if (pm->card) {
+ snd_card_free(pm->card);
+ pm->card = NULL;
+ }
+ return err;
+}
+
+int pcmidi_snd_terminate(struct pcmidi_snd *pm)
+{
+ if (pm->card) {
+ stop_sustain_timers(pm);
+
+ device_remove_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_channel);
+ device_remove_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_sustain);
+ device_remove_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_octave);
+
+ snd_card_disconnect(pm->card);
+ snd_card_free_when_closed(pm->card);
+ }
+
+ return 0;
+}
+
+/*
+ * The PC-MIDI report descriptor declares the wrong report count for report id 4.
+ */
+static void pk_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int rsize)
+{
+ if (rsize == 178 &&
+ rdesc[111] == 0x06 && rdesc[112] == 0x00 &&
+ rdesc[113] == 0xff) {
+ dev_info(&hdev->dev, "fixing up pc-midi keyboard report "
+ "descriptor\n");
+
+ rdesc[144] = 0x18; /* report 4: was 0x10 report count */
+ }
+}
+
+static int pk_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+ struct pcmidi_snd *pm;
+
+ pm = pk->pm;
+
+ if (HID_UP_MSVENDOR == (usage->hid & HID_USAGE_PAGE) &&
+ 1 == pm->ifnum) {
+ pcmidi_setup_extra_keys(pm, hi->input);
+ return 0;
+ }
+
+ return 0;
+}
+
+
+static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+ int ret = 0;
+
+ if (1 == pk->pm->ifnum) {
+ if (report->id == data[0])
+ switch (report->id) {
+ case 0x01: /* midi keys (qwerty)*/
+ case 0x03: /* midi keyboard (musical)*/
+ case 0x04: /* extra/midi keys (qwerty)*/
+ ret = pcmidi_handle_report(pk->pm,
+ report->id, data, size);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+ unsigned long quirks = id->driver_data;
+ struct pk_device *pk;
+ struct pcmidi_snd *pm = NULL;
+
+ pk = kzalloc(sizeof(*pk), GFP_KERNEL);
+ if (pk == NULL) {
+ dev_err(&hdev->dev, "prodikeys: can't alloc descriptor\n");
+ return -ENOMEM;
+ }
+
+ pk->hdev = hdev;
+
+ pm = kzalloc(sizeof(*pm), GFP_KERNEL);
+ if (pm == NULL) {
+ dev_err(&hdev->dev,
+ "prodikeys: can't alloc descriptor\n");
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ pm->pk = pk;
+ pk->pm = pm;
+ pm->ifnum = ifnum;
+
+ hid_set_drvdata(hdev, pk);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ dev_err(&hdev->dev, "prodikeys: hid parse failed\n");
+ goto err_free;
+ }
+
+ if (quirks & PK_QUIRK_NOGET) { /* hid_parse cleared all the quirks */
+ hdev->quirks |= HID_QUIRK_NOGET;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ dev_err(&hdev->dev, "prodikeys: hw start failed\n");
+ goto err_free;
+ }
+
+ ret = pcmidi_snd_initialise(pm);
+ if (ret < 0)
+ goto err_stop;
+
+ return 0;
+err_stop:
+ hid_hw_stop(hdev);
+err_free:
+ kfree(pm);
+
+ kfree(pk);
+ return ret;
+}
+
+static void pk_remove(struct hid_device *hdev)
+{
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+ struct pcmidi_snd *pm;
+
+ pm = pk->pm;
+ if (pm) {
+ pcmidi_snd_terminate(pm);
+ kfree(pm);
+ }
+
+ hid_hw_stop(hdev);
+
+ kfree(pk);
+}
+
+static const struct hid_device_id pk_devices[] = {
+ {HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS,
+ USB_DEVICE_ID_PRODIKEYS_PCMIDI),
+ .driver_data = PK_QUIRK_NOGET},
+ { }
+};
+MODULE_DEVICE_TABLE(hid, pk_devices);
+
+static struct hid_driver pk_driver = {
+ .name = "prodikeys",
+ .id_table = pk_devices,
+ .report_fixup = pk_report_fixup,
+ .input_mapping = pk_input_mapping,
+ .raw_event = pk_raw_event,
+ .probe = pk_probe,
+ .remove = pk_remove,
+};
+
+static int pk_init(void)
+{
+ int ret;
+
+ ret = hid_register_driver(&pk_driver);
+ if (ret)
+ printk(KERN_ERR "can't register prodikeys driver\n");
+
+ return ret;
+}
+
+static void pk_exit(void)
+{
+ hid_unregister_driver(&pk_driver);
+}
+
+module_init(pk_init);
+module_exit(pk_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
new file mode 100644
index 0000000..66e6940
--- /dev/null
+++ b/drivers/hid/hid-roccat-kone.c
@@ -0,0 +1,994 @@
+/*
+ * Roccat Kone driver for Linux
+ *
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Kone is a gamer mouse which consists of a mouse part and a keyboard
+ * part. The keyboard part enables the mouse to execute stored macros with mixed
+ * key- and button-events.
+ *
+ * TODO implement on-the-fly polling-rate change
+ * The windows driver has the ability to change the polling rate of the
+ * device on the press of a mousebutton.
+ * Is it possible to remove and reinstall the urb in raw-event- or any
+ * other handler, or to defer this action to be executed somewhere else?
+ *
+ * TODO implement notification mechanism for overlong macro execution
+ * If user wants to execute an overlong macro only the names of macroset
+ * and macro are given. Should userland tap hidraw or is there an
+ * additional streaming mechanism?
+ *
+ * TODO is it possible to overwrite group for sysfs attributes via udev?
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/usb.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "hid-ids.h"
+#include "hid-roccat-kone.h"
+
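+/*
+ * Compute a simple byte-wise sum over the settings structure, excluding the
+ * trailing 2-byte checksum field itself, and store it little-endian.
+ */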
+static void kone_set_settings_checksum(struct kone_settings *settings)
+{
+ uint16_t checksum = 0;
+ unsigned char *address = (unsigned char *)settings;
+ int i;
+
+ for (i = 0; i < sizeof(struct kone_settings) - 2; ++i, ++address)
+ checksum += *address;
+ settings->checksum = cpu_to_le16(checksum);
+}
+
+/*
+ * Checks success after writing data to mouse
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_check_write(struct usb_device *usb_dev)
+{
+ int len;
+ unsigned char *data;
+
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ do {
+ /*
+ * Mouse needs 50 msecs until it says ok, but there are
+ * 30 more msecs needed for next write to work.
+ */
+ msleep(80);
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE |
+ USB_DIR_IN,
+ kone_command_confirm_write, 0, data, 1,
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len != 1) {
+ kfree(data);
+ return -EIO;
+ }
+
+ /*
+ * value of 3 seems to mean something like
+ * "not finished yet, but it looks good"
+ * So check again after a moment.
+ */
+ } while (*data == 3);
+
+ if (*data == 1) { /* everything alright */
+ kfree(data);
+ return 0;
+ } else { /* unknown answer */
+ dev_err(&usb_dev->dev, "got retval %d when checking write\n",
+ *data);
+ kfree(data);
+ return -EIO;
+ }
+}
+
+/*
+ * Reads settings from mouse and stores it in @buf
+ * @buf has to be alloced with GFP_KERNEL
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_settings(struct usb_device *usb_dev,
+ struct kone_settings *buf)
+{
+ int len;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ kone_command_settings, 0, buf,
+ sizeof(struct kone_settings), USB_CTRL_SET_TIMEOUT);
+
+ if (len != sizeof(struct kone_settings))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Writes settings from @buf to mouse
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_set_settings(struct usb_device *usb_dev,
+ struct kone_settings const *settings)
+{
+ int len;
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_CONFIGURATION,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ kone_command_settings, 0, (char *)settings,
+ sizeof(struct kone_settings),
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len != sizeof(struct kone_settings))
+ return -EIO;
+
+ if (kone_check_write(usb_dev))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Reads profile data from mouse and stores it in @buf
+ * @number: profile number to read
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_profile(struct usb_device *usb_dev,
+ struct kone_profile *buf, int number)
+{
+ int len;
+
+ if (number < 1 || number > 5)
+ return -EINVAL;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ kone_command_profile, number, buf,
+ sizeof(struct kone_profile), USB_CTRL_SET_TIMEOUT);
+
+ if (len != sizeof(struct kone_profile))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Writes profile data to mouse.
+ * @number: profile number to write
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_set_profile(struct usb_device *usb_dev,
+ struct kone_profile const *profile, int number)
+{
+ int len;
+
+ if (number < 1 || number > 5)
+ return -EINVAL;
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_CONFIGURATION,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ kone_command_profile, number, (char *)profile,
+ sizeof(struct kone_profile),
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len != sizeof(struct kone_profile))
+ return -EIO;
+
+ if (kone_check_write(usb_dev))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Reads value of "fast-clip-weight" and stores it in @result
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_weight(struct usb_device *usb_dev, int *result)
+{
+ int len;
+ uint8_t *data;
+
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ kone_command_weight, 0, data, 1, USB_CTRL_SET_TIMEOUT);
+
+ if (len != 1) {
+ kfree(data);
+ return -EIO;
+ }
+ *result = (int)*data;
+ kfree(data);
+ return 0;
+}
+
+/*
+ * Reads firmware_version of mouse and stores it in @result
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
+{
+ int len;
+ unsigned char *data;
+
+ data = kmalloc(2, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ kone_command_firmware_version, 0, data, 2,
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len != 2) {
+ kfree(data);
+ return -EIO;
+ }
+ *result = le16_to_cpu(*(__le16 *)data);
+ kfree(data);
+ return 0;
+}
+
+static ssize_t kone_sysfs_read_settings(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+
+ if (off >= sizeof(struct kone_settings))
+ return 0;
+
+ if (off + count > sizeof(struct kone_settings))
+ count = sizeof(struct kone_settings) - off;
+
+ mutex_lock(&kone->kone_lock);
+ memcpy(buf, ((char const *)&kone->settings) + off, count);
+ mutex_unlock(&kone->kone_lock);
+
+ return count;
+}
+
+/*
+ * Writing settings automatically activates startup_profile.
+ * This function keeps values in kone_device up to date and assumes that in
+ * case of error the old data is still valid
+ */
+static ssize_t kone_sysfs_write_settings(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval = 0, difference;
+
+ /* I need to get my data in one piece */
+ if (off != 0 || count != sizeof(struct kone_settings))
+ return -EINVAL;
+
+ mutex_lock(&kone->kone_lock);
+ difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings));
+ if (difference) {
+ retval = kone_set_settings(usb_dev,
+ (struct kone_settings const *)buf);
+ if (!retval)
+ memcpy(&kone->settings, buf,
+ sizeof(struct kone_settings));
+ }
+ mutex_unlock(&kone->kone_lock);
+
+ if (retval)
+ return retval;
+
+ /*
+ * If we get here, treat settings as okay and update actual values
+ * according to startup_profile
+ */
+ kone->actual_profile = kone->settings.startup_profile;
+ kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+
+ return sizeof(struct kone_settings);
+}
+
+static ssize_t kone_sysfs_read_profilex(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count, int number) {
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+
+ if (off >= sizeof(struct kone_profile))
+ return 0;
+
+ if (off + count > sizeof(struct kone_profile))
+ count = sizeof(struct kone_profile) - off;
+
+ mutex_lock(&kone->kone_lock);
+ memcpy(buf, ((char const *)&kone->profiles[number - 1]) + off, count);
+ mutex_unlock(&kone->kone_lock);
+
+ return count;
+}
+
+static ssize_t kone_sysfs_read_profile1(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 1);
+}
+
+static ssize_t kone_sysfs_read_profile2(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 2);
+}
+
+static ssize_t kone_sysfs_read_profile3(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 3);
+}
+
+static ssize_t kone_sysfs_read_profile4(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 4);
+}
+
+static ssize_t kone_sysfs_read_profile5(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 5);
+}
+
+/* Writes data only if different to stored data */
+static ssize_t kone_sysfs_write_profilex(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count, int number) {
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ struct kone_profile *profile;
+ int retval = 0, difference;
+
+ /* I need to get my data in one piece */
+ if (off != 0 || count != sizeof(struct kone_profile))
+ return -EINVAL;
+
+ profile = &kone->profiles[number - 1];
+
+ mutex_lock(&kone->kone_lock);
+ difference = memcmp(buf, profile, sizeof(struct kone_profile));
+ if (difference) {
+ retval = kone_set_profile(usb_dev,
+ (struct kone_profile const *)buf, number);
+ if (!retval)
+ memcpy(profile, buf, sizeof(struct kone_profile));
+ }
+ mutex_unlock(&kone->kone_lock);
+
+ if (retval)
+ return retval;
+
+ return sizeof(struct kone_profile);
+}
+
+static ssize_t kone_sysfs_write_profile1(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 1);
+}
+
+static ssize_t kone_sysfs_write_profile2(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 2);
+}
+
+static ssize_t kone_sysfs_write_profile3(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 3);
+}
+
+static ssize_t kone_sysfs_write_profile4(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 4);
+}
+
+static ssize_t kone_sysfs_write_profile5(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 5);
+}
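/*
 * Sketch only, not part of this patch: the five pairs of wrappers above
 * could equally be generated by a macro keyed on the profile number, e.g.
 * KONE_PROFILE_SYSFS(1) .. KONE_PROFILE_SYSFS(5).  The macro name is an
 * assumption used purely for illustration.
 */
#define KONE_PROFILE_SYSFS(number) \
static ssize_t kone_sysfs_read_profile##number(struct kobject *kobj, \
		struct bin_attribute *attr, char *buf, \
		loff_t off, size_t count) { \
	return kone_sysfs_read_profilex(kobj, attr, buf, off, count, number); \
} \
static ssize_t kone_sysfs_write_profile##number(struct kobject *kobj, \
		struct bin_attribute *attr, char *buf, \
		loff_t off, size_t count) { \
	return kone_sysfs_write_profilex(kobj, attr, buf, off, count, number); \
}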
+
+static ssize_t kone_sysfs_show_actual_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_profile);
+}
+
+static ssize_t kone_sysfs_show_actual_dpi(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_dpi);
+}
+
+/* weight is read each time, since we don't get informed when it's changed */
+static ssize_t kone_sysfs_show_weight(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int weight = 0;
+ int retval;
+
+ mutex_lock(&kone->kone_lock);
+ retval = kone_get_weight(usb_dev, &weight);
+ mutex_unlock(&kone->kone_lock);
+
+ if (retval)
+ return retval;
+ return snprintf(buf, PAGE_SIZE, "%d\n", weight);
+}
+
+static ssize_t kone_sysfs_show_firmware_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->firmware_version);
+}
+
+static ssize_t kone_sysfs_show_tcu(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.tcu);
+}
+
+static int kone_tcu_command(struct usb_device *usb_dev, int number)
+{
+ int len;
+ char *value;
+
+ value = kmalloc(1, GFP_KERNEL);
+ if (!value)
+ return -ENOMEM;
+
+ *value = number;
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_CONFIGURATION,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ kone_command_calibrate, 0, value, 1,
+ USB_CTRL_SET_TIMEOUT);
+
+ kfree(value);
+ return ((len != 1) ? -EIO : 0);
+}
+
+/*
+ * Calibrating the tcu is the only action that changes settings data inside the
+ * mouse, so this data needs to be reread
+ */
+static ssize_t kone_sysfs_set_tcu(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+ unsigned long state;
+
+ retval = strict_strtoul(buf, 10, &state);
+ if (retval)
+ return retval;
+
+ if (state != 0 && state != 1)
+ return -EINVAL;
+
+ mutex_lock(&kone->kone_lock);
+
+ if (state == 1) { /* state activate */
+ retval = kone_tcu_command(usb_dev, 1);
+ if (retval)
+ goto exit_unlock;
+ retval = kone_tcu_command(usb_dev, 2);
+ if (retval)
+ goto exit_unlock;
+ ssleep(5); /* tcu needs this time for calibration */
+ retval = kone_tcu_command(usb_dev, 3);
+ if (retval)
+ goto exit_unlock;
+ retval = kone_tcu_command(usb_dev, 0);
+ if (retval)
+ goto exit_unlock;
+ retval = kone_tcu_command(usb_dev, 4);
+ if (retval)
+ goto exit_unlock;
+ /*
+ * Kone needs this time to settle things.
+ * Reading settings too early will result in invalid data.
+ * Roccat's driver waits 1 sec, maybe this time could be
+ * shortened.
+ */
+ ssleep(1);
+ }
+
+ /* calibration changes values in settings, so reread */
+ retval = kone_get_settings(usb_dev, &kone->settings);
+ if (retval)
+ goto exit_no_settings;
+
+ /* only write settings back if activation state is different */
+ if (kone->settings.tcu != state) {
+ kone->settings.tcu = state;
+ kone_set_settings_checksum(&kone->settings);
+
+ retval = kone_set_settings(usb_dev, &kone->settings);
+ if (retval) {
+ dev_err(&usb_dev->dev, "couldn't set tcu state\n");
+ /*
+ * try to reread valid settings into buffer overwriting
+ * first error code
+ */
+ retval = kone_get_settings(usb_dev, &kone->settings);
+ if (retval)
+ goto exit_no_settings;
+ goto exit_unlock;
+ }
+ }
+
+ retval = size;
+ goto exit_unlock;
+exit_no_settings:
+ dev_err(&usb_dev->dev, "couldn't read settings\n");
+exit_unlock:
+ mutex_unlock(&kone->kone_lock);
+ return retval;
+}
+
+static ssize_t kone_sysfs_show_startup_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.startup_profile);
+}
+
+static ssize_t kone_sysfs_set_startup_profile(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+ unsigned long new_startup_profile;
+
+ retval = strict_strtoul(buf, 10, &new_startup_profile);
+ if (retval)
+ return retval;
+
+ if (new_startup_profile < 1 || new_startup_profile > 5)
+ return -EINVAL;
+
+ mutex_lock(&kone->kone_lock);
+
+ kone->settings.startup_profile = new_startup_profile;
+ kone_set_settings_checksum(&kone->settings);
+
+ retval = kone_set_settings(usb_dev, &kone->settings);
+
+ mutex_unlock(&kone->kone_lock);
+
+ if (retval)
+ return retval;
+
+ /* changing the startup profile immediately activates this profile */
+ kone->actual_profile = new_startup_profile;
+ kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+
+ return size;
+}
+
+/*
+ * This file is used by userland software to find devices that are handled by
+ * this driver. This provides a consistent way for current and older kernels,
+ * where this driver replaced usbhid instead of generic-usb.
+ * Driver capabilities are determined by version number.
+ */
+static ssize_t kone_sysfs_show_driver_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, ROCCAT_KONE_DRIVER_VERSION "\n");
+}
+
+/*
+ * Read actual dpi settings.
+ * Returns raw value for further processing. Refer to enum kone_polling_rates to
+ * get real value.
+ */
+static DEVICE_ATTR(actual_dpi, 0440, kone_sysfs_show_actual_dpi, NULL);
+
+static DEVICE_ATTR(actual_profile, 0440, kone_sysfs_show_actual_profile, NULL);
+
+/*
+ * The mouse can be equipped with one of four supplied weights from 5 to 20
+ * grams, which is recognized by the mouse and whose value can be read out.
+ * This returns the raw value reported by the mouse for easy evaluation by
+ * software. Refer to enum kone_weights to get corresponding real weight.
+ */
+static DEVICE_ATTR(weight, 0440, kone_sysfs_show_weight, NULL);
+
+/*
+ * Prints firmware version stored in mouse as integer.
+ * The raw value reported by the mouse is returned for easy evaluation, to get
+ * the real version number the decimal point has to be shifted 2 positions to
+ * the left. E.g. a value of 138 means 1.38.
+ */
+static DEVICE_ATTR(firmware_version, 0440,
+ kone_sysfs_show_firmware_version, NULL);
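/*
 * Worked example, not part of this patch: a minimal userspace reader for
 * the firmware_version attribute above.  The program is assumed to run
 * inside the sysfs directory of the USB interface the attribute group is
 * attached to; the conversion simply shifts the decimal point two places,
 * as described in the comment above.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("firmware_version", "r");
	int raw;

	if (!f || fscanf(f, "%d", &raw) != 1)
		return 1;
	/* e.g. raw == 138 prints "1.38" */
	printf("%d.%02d\n", raw / 100, raw % 100);
	fclose(f);
	return 0;
}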
+
+/*
+ * Prints the state of the Tracking Control Unit as a number where 0 = off
+ * and 1 = on. Writing 0 deactivates the tcu; writing 1 calibrates and
+ * activates it.
+ */
+static DEVICE_ATTR(tcu, 0660, kone_sysfs_show_tcu, kone_sysfs_set_tcu);
+
+/* Prints and takes the number of the profile the mouse starts with */
+static DEVICE_ATTR(startup_profile, 0660,
+ kone_sysfs_show_startup_profile,
+ kone_sysfs_set_startup_profile);
+
+static DEVICE_ATTR(kone_driver_version, 0440,
+ kone_sysfs_show_driver_version, NULL);
+
+static struct attribute *kone_attributes[] = {
+ &dev_attr_actual_dpi.attr,
+ &dev_attr_actual_profile.attr,
+ &dev_attr_weight.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_tcu.attr,
+ &dev_attr_startup_profile.attr,
+ &dev_attr_kone_driver_version.attr,
+ NULL
+};
+
+static struct attribute_group kone_attribute_group = {
+ .attrs = kone_attributes
+};
+
+static struct bin_attribute kone_settings_attr = {
+ .attr = { .name = "settings", .mode = 0660 },
+ .size = sizeof(struct kone_settings),
+ .read = kone_sysfs_read_settings,
+ .write = kone_sysfs_write_settings
+};
+
+static struct bin_attribute kone_profile1_attr = {
+ .attr = { .name = "profile1", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile1,
+ .write = kone_sysfs_write_profile1
+};
+
+static struct bin_attribute kone_profile2_attr = {
+ .attr = { .name = "profile2", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile2,
+ .write = kone_sysfs_write_profile2
+};
+
+static struct bin_attribute kone_profile3_attr = {
+ .attr = { .name = "profile3", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile3,
+ .write = kone_sysfs_write_profile3
+};
+
+static struct bin_attribute kone_profile4_attr = {
+ .attr = { .name = "profile4", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile4,
+ .write = kone_sysfs_write_profile4
+};
+
+static struct bin_attribute kone_profile5_attr = {
+ .attr = { .name = "profile5", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile5,
+ .write = kone_sysfs_write_profile5
+};
+
+static int kone_create_sysfs_attributes(struct usb_interface *intf)
+{
+ int retval;
+
+ retval = sysfs_create_group(&intf->dev.kobj, &kone_attribute_group);
+ if (retval)
+ goto exit_1;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_settings_attr);
+ if (retval)
+ goto exit_2;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile1_attr);
+ if (retval)
+ goto exit_3;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile2_attr);
+ if (retval)
+ goto exit_4;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile3_attr);
+ if (retval)
+ goto exit_5;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile4_attr);
+ if (retval)
+ goto exit_6;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile5_attr);
+ if (retval)
+ goto exit_7;
+
+ return 0;
+
+exit_7:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile4_attr);
+exit_6:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile3_attr);
+exit_5:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile2_attr);
+exit_4:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile1_attr);
+exit_3:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_settings_attr);
+exit_2:
+ sysfs_remove_group(&intf->dev.kobj, &kone_attribute_group);
+exit_1:
+ return retval;
+}
+
+static void kone_remove_sysfs_attributes(struct usb_interface *intf)
+{
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile5_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile4_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile3_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile2_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile1_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_settings_attr);
+ sysfs_remove_group(&intf->dev.kobj, &kone_attribute_group);
+}
+
+static int kone_init_kone_device_struct(struct usb_device *usb_dev,
+ struct kone_device *kone)
+{
+ uint i;
+ int retval;
+
+ mutex_init(&kone->kone_lock);
+
+ for (i = 0; i < 5; ++i) {
+ retval = kone_get_profile(usb_dev, &kone->profiles[i], i + 1);
+ if (retval)
+ return retval;
+ }
+
+ retval = kone_get_settings(usb_dev, &kone->settings);
+ if (retval)
+ return retval;
+
+ retval = kone_get_firmware_version(usb_dev, &kone->firmware_version);
+ if (retval)
+ return retval;
+
+ kone->actual_profile = kone->settings.startup_profile;
+ kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+
+ return 0;
+}
+
+/*
+ * Since IGNORE_MOUSE quirk moved to hid-apple, there is no way to bind only to
+ * mousepart if usbhid is compiled into the kernel and kone is compiled as
+ * a module.
+ * Special behaviour is bound only to the mousepart since only mouse events
+ * contain additional notifications.
+ */
+static int kone_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct kone_device *kone;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_MOUSE) {
+
+ kone = kzalloc(sizeof(*kone), GFP_KERNEL);
+ if (!kone) {
+ dev_err(&hdev->dev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, kone);
+
+ retval = kone_init_kone_device_struct(usb_dev, kone);
+ if (retval) {
+ dev_err(&hdev->dev,
+ "couldn't init struct kone_device\n");
+ goto exit_free;
+ }
+ retval = kone_create_sysfs_attributes(intf);
+ if (retval) {
+ dev_err(&hdev->dev, "cannot create sysfs files\n");
+ goto exit_free;
+ }
+ } else {
+ hid_set_drvdata(hdev, NULL);
+ }
+
+ return 0;
+exit_free:
+ kfree(kone);
+ return retval;
+}
+
+
+static void kone_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_MOUSE) {
+ kone_remove_sysfs_attributes(intf);
+ kfree(hid_get_drvdata(hdev));
+ }
+}
+
+static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ dev_err(&hdev->dev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ dev_err(&hdev->dev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = kone_init_specials(hdev);
+ if (retval) {
+ dev_err(&hdev->dev, "couldn't install mouse\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void kone_remove(struct hid_device *hdev)
+{
+ kone_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+/* handle special events and keep actual profile and dpi values up to date */
+static void kone_keep_values_up_to_date(struct kone_device *kone,
+ struct kone_mouse_event const *event)
+{
+ switch (event->event) {
+ case kone_mouse_event_switch_profile:
+ case kone_mouse_event_osd_profile:
+ kone->actual_profile = event->value;
+ kone->actual_dpi = kone->profiles[kone->actual_profile - 1].
+ startup_dpi;
+ break;
+ case kone_mouse_event_switch_dpi:
+ case kone_mouse_event_osd_dpi:
+ kone->actual_dpi = event->value;
+ break;
+ }
+}
+
+/*
+ * Is called for keyboard- and mousepart.
+ * Only the mousepart gets information about special events in its extended
+ * event structure.
+ */
+static int kone_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct kone_device *kone = hid_get_drvdata(hdev);
+ struct kone_mouse_event *event = (struct kone_mouse_event *)data;
+
+ /* keyboard events are always processed by default handler */
+ if (size != sizeof(struct kone_mouse_event))
+ return 0;
+
+ /*
+ * Firmware 1.38 introduced new behaviour for tilt and special buttons.
+ * Pressed button is reported in each movement event.
+ * Workaround sends only one event per press.
+ */
+ if (memcmp(&kone->last_mouse_event.tilt, &event->tilt, 5))
+ memcpy(&kone->last_mouse_event, event,
+ sizeof(struct kone_mouse_event));
+ else
+ memset(&event->tilt, 0, 5);
+
+ kone_keep_values_up_to_date(kone, event);
+
+ return 0; /* always do further processing */
+}
+
+static const struct hid_device_id kone_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, kone_devices);
+
+static struct hid_driver kone_driver = {
+ .name = "kone",
+ .id_table = kone_devices,
+ .probe = kone_probe,
+ .remove = kone_remove,
+ .raw_event = kone_raw_event
+};
+
+static int __init kone_init(void)
+{
+ return hid_register_driver(&kone_driver);
+}
+
+static void __exit kone_exit(void)
+{
+ hid_unregister_driver(&kone_driver);
+}
+
+module_init(kone_init);
+module_exit(kone_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Kone driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h
new file mode 100644
index 0000000..b413b10
--- /dev/null
+++ b/drivers/hid/hid-roccat-kone.h
@@ -0,0 +1,224 @@
+#ifndef __HID_ROCCAT_KONE_H
+#define __HID_ROCCAT_KONE_H
+
+/*
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+#define ROCCAT_KONE_DRIVER_VERSION "v0.3.1"
+
+#pragma pack(push)
+#pragma pack(1)
+
+struct kone_keystroke {
+ uint8_t key;
+ uint8_t action;
+ uint16_t period; /* in milliseconds */
+};
+
+enum kone_keystroke_buttons {
+ kone_keystroke_button_1 = 0xf0, /* left mouse button */
+ kone_keystroke_button_2 = 0xf1, /* right mouse button */
+ kone_keystroke_button_3 = 0xf2, /* wheel */
+ kone_keystroke_button_9 = 0xf3, /* side button up */
+ kone_keystroke_button_8 = 0xf4 /* side button down */
+};
+
+enum kone_keystroke_actions {
+ kone_keystroke_action_press = 0,
+ kone_keystroke_action_release = 1
+};
+
+struct kone_button_info {
+ uint8_t number; /* range 1-8 */
+ uint8_t type;
+ uint8_t macro_type; /* 0 = short, 1 = overlong */
+ uint8_t macro_set_name[16]; /* can be max 15 chars long */
+ uint8_t macro_name[16]; /* can be max 15 chars long */
+ uint8_t count;
+ struct kone_keystroke keystrokes[20];
+};
+
+enum kone_button_info_types {
+ /* valid button types until firmware 1.32 */
+ kone_button_info_type_button_1 = 0x1, /* click (left mouse button) */
+ kone_button_info_type_button_2 = 0x2, /* menu (right mouse button)*/
+ kone_button_info_type_button_3 = 0x3, /* scroll (wheel) */
+ kone_button_info_type_double_click = 0x4,
+ kone_button_info_type_key = 0x5,
+ kone_button_info_type_macro = 0x6,
+ kone_button_info_type_off = 0x7,
+ /* TODO clarify function and rename */
+ kone_button_info_type_osd_xy_prescaling = 0x8,
+ kone_button_info_type_osd_dpi = 0x9,
+ kone_button_info_type_osd_profile = 0xa,
+ kone_button_info_type_button_9 = 0xb, /* ie forward */
+ kone_button_info_type_button_8 = 0xc, /* ie backward */
+ kone_button_info_type_dpi_up = 0xd, /* internal */
+ kone_button_info_type_dpi_down = 0xe, /* internal */
+ kone_button_info_type_button_7 = 0xf, /* tilt left */
+ kone_button_info_type_button_6 = 0x10, /* tilt right */
+ kone_button_info_type_profile_up = 0x11, /* internal */
+ kone_button_info_type_profile_down = 0x12, /* internal */
+ /* additional valid button types since firmware 1.38 */
+ kone_button_info_type_multimedia_open_player = 0x20,
+ kone_button_info_type_multimedia_next_track = 0x21,
+ kone_button_info_type_multimedia_prev_track = 0x22,
+ kone_button_info_type_multimedia_play_pause = 0x23,
+ kone_button_info_type_multimedia_stop = 0x24,
+ kone_button_info_type_multimedia_mute = 0x25,
+ kone_button_info_type_multimedia_volume_up = 0x26,
+ kone_button_info_type_multimedia_volume_down = 0x27
+};
+
+enum kone_button_info_numbers {
+ kone_button_top = 1,
+ kone_button_wheel_tilt_left = 2,
+ kone_button_wheel_tilt_right = 3,
+ kone_button_forward = 4,
+ kone_button_backward = 5,
+ kone_button_middle = 6,
+ kone_button_plus = 7,
+ kone_button_minus = 8,
+};
+
+struct kone_light_info {
+ uint8_t number; /* number of light 1-5 */
+ uint8_t mod; /* 1 = on, 2 = off */
+ uint8_t red; /* range 0x00-0xff */
+ uint8_t green; /* range 0x00-0xff */
+ uint8_t blue; /* range 0x00-0xff */
+};
+
+struct kone_profile {
+ uint16_t size; /* always 975 */
+ uint16_t unused; /* always 0 */
+
+ /*
+ * range 1-5
+ * This number does not need to correspond to the location where the
+ * profile is saved
+ */
+ uint8_t profile; /* range 1-5 */
+
+ uint16_t main_sensitivity; /* range 100-1000 */
+ uint8_t xy_sensitivity_enabled; /* 1 = on, 2 = off */
+ uint16_t x_sensitivity; /* range 100-1000 */
+ uint16_t y_sensitivity; /* range 100-1000 */
+ uint8_t dpi_rate; /* bit 1 = 800, ... */
+ uint8_t startup_dpi; /* range 1-6 */
+ uint8_t polling_rate; /* 1 = 125Hz, 2 = 500Hz, 3 = 1000Hz */
+ /* kone has no dcu
+ * value is always 2 in firmwares <= 1.32 and
+ * 1 in firmwares > 1.32
+ */
+ uint8_t dcu_flag;
+ uint8_t light_effect_1; /* range 1-3 */
+ uint8_t light_effect_2; /* range 1-5 */
+ uint8_t light_effect_3; /* range 1-4 */
+ uint8_t light_effect_speed; /* range 0-255 */
+
+ struct kone_light_info light_infos[5];
+ /* offset is kone_button_info_numbers - 1 */
+ struct kone_button_info button_infos[8];
+
+ uint16_t checksum; /* holds checksum of struct */
+};
+
+enum kone_polling_rates {
+ kone_polling_rate_125 = 1,
+ kone_polling_rate_500 = 2,
+ kone_polling_rate_1000 = 3
+};
+
+struct kone_settings {
+ uint16_t size; /* always 36 */
+ uint8_t startup_profile; /* 1-5 */
+ uint8_t unknown1;
+ uint8_t tcu; /* 0 = off, 1 = on */
+ uint8_t unknown2[23];
+ uint8_t calibration_data[4];
+ uint8_t unknown3[2];
+ uint16_t checksum;
+};
+
+/*
+ * 12 byte mouse event read by interrupt_read
+ */
+struct kone_mouse_event {
+ uint8_t report_number; /* always 1 */
+ uint8_t button;
+ uint16_t x;
+ uint16_t y;
+ uint8_t wheel; /* up = 1, down = -1 */
+ uint8_t tilt; /* right = 1, left = -1 */
+ uint8_t unknown;
+ uint8_t event;
+ uint8_t value; /* press = 0, release = 1 */
+ uint8_t macro_key; /* 0 to 8 */
+};
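/*
 * Sketch only, not part of this patch: with the packed layout above the
 * event is exactly 12 bytes, which the driver's raw_event handler relies on
 * when comparing the report size.  A compile-time assertion documenting
 * that could look like this (BUILD_BUG_ON needs linux/kernel.h; the helper
 * name is an assumption):
 */
static inline void kone_assert_event_size(void)
{
	BUILD_BUG_ON(sizeof(struct kone_mouse_event) != 12);
}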
+
+enum kone_mouse_events {
+ /* osd events are thought to be displayed on screen */
+ kone_mouse_event_osd_dpi = 0xa0,
+ kone_mouse_event_osd_profile = 0xb0,
+ /* TODO clarify meaning and occurrence of kone_mouse_event_calibration */
+ kone_mouse_event_calibration = 0xc0,
+ kone_mouse_event_call_overlong_macro = 0xe0,
+ /* switch events notify if user changed values with mousebutton click */
+ kone_mouse_event_switch_dpi = 0xf0,
+ kone_mouse_event_switch_profile = 0xf1
+};
+
+enum kone_commands {
+ kone_command_profile = 0x5a,
+ kone_command_settings = 0x15a,
+ kone_command_firmware_version = 0x25a,
+ kone_command_weight = 0x45a,
+ kone_command_calibrate = 0x55a,
+ kone_command_confirm_write = 0x65a,
+ kone_command_firmware = 0xe5a
+};
+
+#pragma pack(pop)
+
+struct kone_device {
+ /*
+ * Storing actual values when we get informed about changes since there
+ * is no way of getting this information from the device on demand
+ */
+ int actual_profile, actual_dpi;
+ /* Used for neutralizing abnormal button behaviour */
+ struct kone_mouse_event last_mouse_event;
+
+ /*
+ * It's unlikely that multiple sysfs attributes are accessed at a time,
+ * so only one mutex is used to secure hardware access and profiles and
+ * settings of this struct.
+ */
+ struct mutex kone_lock;
+
+ /*
+ * Storing the data here reduces IO and ensures that data is available
+ * when it's needed (e.g. by the interrupt handler).
+ */
+ struct kone_profile profiles[5];
+ struct kone_settings settings;
+
+ /*
+ * the firmware version doesn't change (no firmware update is implemented),
+ * so it's read only once
+ */
+ int firmware_version;
+};
+
+#endif
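/*
 * Example only, not part of this patch: userspace can fetch a complete
 * profile through one of the binary sysfs attributes registered by the
 * driver ("profile1" .. "profile5").  The file name and location are
 * assumptions based on where the attributes are created; the 975-byte size
 * follows from struct kone_profile, and the size field is assumed to be
 * little-endian as usual for USB devices.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t buf[975];	/* sizeof(struct kone_profile) */
	FILE *f = fopen("profile1", "rb");

	if (!f)
		return 1;
	if (fread(buf, 1, sizeof(buf), f) != sizeof(buf)) {
		fclose(f);
		return 1;
	}
	fclose(f);
	/* size field (offset 0), always 975; profile number (offset 4) */
	printf("size field: %u\n", (unsigned int)(buf[0] | (buf[1] << 8)));
	printf("profile number: %u\n", (unsigned int)buf[4]);
	return 0;
}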
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index 510dd13..bda0fd6 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -7,6 +7,18 @@
* Copyright (c) 2006-2007 Jiri Kosina
* Copyright (c) 2007 Paul Walmsley
* Copyright (c) 2008 Jiri Slaby
+ * Copyright (c) 2010 Don Prince <dhprince.devel@yahoo.co.uk>
+ *
+ *
+ * This driver supports several HID devices:
+ *
+ * [0419:0001] Samsung IrDA remote controller (reports as Cypress USB Mouse).
+ * Various HID report fixups for different variants.
+ *
+ * [0419:0600] Creative Desktop Wireless 6000 keyboard/mouse combo.
+ * Several key mappings used from the consumer usage page
+ * deviate from the USB HUT 1.12 standard.
+ *
*/
/*
@@ -17,14 +29,13 @@
*/
#include <linux/device.h>
+#include <linux/usb.h>
#include <linux/hid.h>
#include <linux/module.h>
#include "hid-ids.h"
/*
- * Samsung IrDA remote controller (reports as Cypress USB Mouse).
- *
* There are several variants for 0419:0001:
*
* 1. 184 byte report descriptor
@@ -43,21 +54,21 @@
* 4. 171 byte report descriptor
* Report #3 has an array field with logical range 0..1 instead of 1..3.
*/
-static inline void samsung_dev_trace(struct hid_device *hdev,
+static inline void samsung_irda_dev_trace(struct hid_device *hdev,
unsigned int rsize)
{
dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
"descriptor\n", rsize);
}
-static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static void samsung_irda_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int rsize)
{
if (rsize == 184 && rdesc[175] == 0x25 && rdesc[176] == 0x40 &&
rdesc[177] == 0x75 && rdesc[178] == 0x30 &&
rdesc[179] == 0x95 && rdesc[180] == 0x01 &&
rdesc[182] == 0x40) {
- samsung_dev_trace(hdev, 184);
+ samsung_irda_dev_trace(hdev, 184);
rdesc[176] = 0xff;
rdesc[178] = 0x08;
rdesc[180] = 0x06;
@@ -65,24 +76,80 @@ static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
} else
if (rsize == 203 && rdesc[192] == 0x15 && rdesc[193] == 0x0 &&
rdesc[194] == 0x25 && rdesc[195] == 0x12) {
- samsung_dev_trace(hdev, 203);
+ samsung_irda_dev_trace(hdev, 203);
rdesc[193] = 0x1;
rdesc[195] = 0xf;
} else
if (rsize == 135 && rdesc[124] == 0x15 && rdesc[125] == 0x0 &&
rdesc[126] == 0x25 && rdesc[127] == 0x11) {
- samsung_dev_trace(hdev, 135);
+ samsung_irda_dev_trace(hdev, 135);
rdesc[125] = 0x1;
rdesc[127] = 0xe;
} else
if (rsize == 171 && rdesc[160] == 0x15 && rdesc[161] == 0x0 &&
rdesc[162] == 0x25 && rdesc[163] == 0x01) {
- samsung_dev_trace(hdev, 171);
+ samsung_irda_dev_trace(hdev, 171);
rdesc[161] = 0x1;
rdesc[163] = 0x3;
}
}
+#define samsung_kbd_mouse_map_key_clear(c) \
+ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
+
+static int samsung_kbd_mouse_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
+ if (1 != ifnum || HID_UP_CONSUMER != (usage->hid & HID_USAGE_PAGE))
+ return 0;
+
+ dbg_hid("samsung wireless keyboard/mouse input mapping event [0x%x]\n",
+ usage->hid & HID_USAGE);
+
+ switch (usage->hid & HID_USAGE) {
+ /* report 2 */
+ case 0x183: samsung_kbd_mouse_map_key_clear(KEY_MEDIA); break;
+ case 0x195: samsung_kbd_mouse_map_key_clear(KEY_EMAIL); break;
+ case 0x196: samsung_kbd_mouse_map_key_clear(KEY_CALC); break;
+ case 0x197: samsung_kbd_mouse_map_key_clear(KEY_COMPUTER); break;
+ case 0x22b: samsung_kbd_mouse_map_key_clear(KEY_SEARCH); break;
+ case 0x22c: samsung_kbd_mouse_map_key_clear(KEY_WWW); break;
+ case 0x22d: samsung_kbd_mouse_map_key_clear(KEY_BACK); break;
+ case 0x22e: samsung_kbd_mouse_map_key_clear(KEY_FORWARD); break;
+ case 0x22f: samsung_kbd_mouse_map_key_clear(KEY_FAVORITES); break;
+ case 0x230: samsung_kbd_mouse_map_key_clear(KEY_REFRESH); break;
+ case 0x231: samsung_kbd_mouse_map_key_clear(KEY_STOP); break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int rsize)
+{
+ if (USB_DEVICE_ID_SAMSUNG_IR_REMOTE == hdev->product)
+ samsung_irda_report_fixup(hdev, rdesc, rsize);
+}
+
+static int samsung_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ int ret = 0;
+
+ if (USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE == hdev->product)
+ ret = samsung_kbd_mouse_input_mapping(hdev,
+ hi, field, usage, bit, max);
+
+ return ret;
+}
+
static int samsung_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -95,10 +162,12 @@ static int samsung_probe(struct hid_device *hdev,
goto err_free;
}
- if (hdev->rsize == 184) {
- /* disable hidinput, force hiddev */
- cmask = (cmask & ~HID_CONNECT_HIDINPUT) |
- HID_CONNECT_HIDDEV_FORCE;
+ if (USB_DEVICE_ID_SAMSUNG_IR_REMOTE == hdev->product) {
+ if (hdev->rsize == 184) {
+ /* disable hidinput, force hiddev */
+ cmask = (cmask & ~HID_CONNECT_HIDINPUT) |
+ HID_CONNECT_HIDDEV_FORCE;
+ }
}
ret = hid_hw_start(hdev, cmask);
@@ -114,6 +183,7 @@ err_free:
static const struct hid_device_id samsung_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
{ }
};
MODULE_DEVICE_TABLE(hid, samsung_devices);
@@ -122,6 +192,7 @@ static struct hid_driver samsung_driver = {
.name = "samsung",
.id_table = samsung_devices,
.report_fixup = samsung_report_fixup,
+ .input_mapping = samsung_input_mapping,
.probe = samsung_probe,
};
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 6925eda..2eebdcc 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -3,6 +3,9 @@
*
* Copyright (c) 2008 Lev Babiev
* based on hid-cherry driver
+ *
+ * Modified to also support BTC "Emprex 3009URF III Vista MCE Remote" by
+ * Wayne Thomas 2010.
*/
/*
@@ -24,23 +27,29 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- if ((usage->hid & HID_USAGE_PAGE) != 0x0ffbc0000)
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
return 0;
switch (usage->hid & HID_USAGE) {
- case 0x00d: ts_map_key_clear(KEY_HOME); break;
- case 0x024: ts_map_key_clear(KEY_MENU); break;
- case 0x025: ts_map_key_clear(KEY_TV); break;
- case 0x048: ts_map_key_clear(KEY_RED); break;
- case 0x047: ts_map_key_clear(KEY_GREEN); break;
- case 0x049: ts_map_key_clear(KEY_YELLOW); break;
- case 0x04a: ts_map_key_clear(KEY_BLUE); break;
- case 0x04b: ts_map_key_clear(KEY_ANGLE); break;
- case 0x04c: ts_map_key_clear(KEY_LANGUAGE); break;
- case 0x04d: ts_map_key_clear(KEY_SUBTITLE); break;
- case 0x031: ts_map_key_clear(KEY_AUDIO); break;
- case 0x032: ts_map_key_clear(KEY_TEXT); break;
- case 0x033: ts_map_key_clear(KEY_CHANNEL); break;
+ case 0x00d: ts_map_key_clear(KEY_MEDIA); break;
+ case 0x024: ts_map_key_clear(KEY_MENU); break;
+ case 0x025: ts_map_key_clear(KEY_TV); break;
+ case 0x031: ts_map_key_clear(KEY_AUDIO); break;
+ case 0x032: ts_map_key_clear(KEY_TEXT); break;
+ case 0x033: ts_map_key_clear(KEY_CHANNEL); break;
+ case 0x047: ts_map_key_clear(KEY_MP3); break;
+ case 0x048: ts_map_key_clear(KEY_TV2); break;
+ case 0x049: ts_map_key_clear(KEY_CAMERA); break;
+ case 0x04a: ts_map_key_clear(KEY_VIDEO); break;
+ case 0x04b: ts_map_key_clear(KEY_ANGLE); break;
+ case 0x04c: ts_map_key_clear(KEY_LANGUAGE); break;
+ case 0x04d: ts_map_key_clear(KEY_SUBTITLE); break;
+ case 0x050: ts_map_key_clear(KEY_RADIO); break;
+ case 0x05a: ts_map_key_clear(KEY_TEXT); break;
+ case 0x05b: ts_map_key_clear(KEY_RED); break;
+ case 0x05c: ts_map_key_clear(KEY_GREEN); break;
+ case 0x05d: ts_map_key_clear(KEY_YELLOW); break;
+ case 0x05e: ts_map_key_clear(KEY_BLUE); break;
default:
return 0;
}
@@ -50,6 +59,7 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
static const struct hid_device_id ts_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
{ }
};
MODULE_DEVICE_TABLE(hid, ts_devices);
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index f947d83..1e051f1 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -22,14 +22,159 @@
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+#include <linux/power_supply.h>
+#endif
#include "hid-ids.h"
struct wacom_data {
__u16 tool;
unsigned char butstate;
+ unsigned char high_speed;
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ int battery_capacity;
+ struct power_supply battery;
+ struct power_supply ac;
+#endif
};
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+/* percent of battery capacity, 0 means AC online */
+static unsigned short batcap[8] = { 1, 15, 25, 35, 50, 70, 100, 0 };
+
+static enum power_supply_property wacom_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CAPACITY
+};
+
+static enum power_supply_property wacom_ac_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE
+};
+
+static int wacom_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wacom_data *wdata = container_of(psy,
+ struct wacom_data, battery);
+ int power_state = batcap[wdata->battery_capacity];
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ /* show 100% battery capacity when charging */
+ if (power_state == 0)
+ val->intval = 100;
+ else
+ val->intval = power_state;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int wacom_ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wacom_data *wdata = container_of(psy, struct wacom_data, ac);
+ int power_state = batcap[wdata->battery_capacity];
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ /* fall through */
+ case POWER_SUPPLY_PROP_ONLINE:
+ if (power_state == 0)
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+#endif
+
+static void wacom_poke(struct hid_device *hdev, u8 speed)
+{
+ struct wacom_data *wdata = hid_get_drvdata(hdev);
+ int limit, ret;
+ char rep_data[2];
+
+ rep_data[0] = 0x03; rep_data[1] = 0x00;
+ limit = 3;
+ do {
+ ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
+ HID_FEATURE_REPORT);
+ } while (ret < 0 && limit-- > 0);
+
+ if (ret >= 0) {
+ if (speed == 0)
+ rep_data[0] = 0x05;
+ else
+ rep_data[0] = 0x06;
+
+ rep_data[1] = 0x00;
+ limit = 3;
+ do {
+ ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
+ HID_FEATURE_REPORT);
+ } while (ret < 0 && limit-- > 0);
+
+ if (ret >= 0) {
+ wdata->high_speed = speed;
+ return;
+ }
+ }
+
+ /*
+ * Note that if the raw queries fail, it's not a hard failure and it
+ * is safe to continue
+ */
+ dev_warn(&hdev->dev, "failed to poke device, command %d, err %d\n",
+ rep_data[0], ret);
+ return;
+}
+
+static ssize_t wacom_show_speed(struct device *dev,
+ struct device_attribute
+ *attr, char *buf)
+{
+ struct wacom_data *wdata = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%i\n", wdata->high_speed);
+}
+
+static ssize_t wacom_store_speed(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ int new_speed;
+
+ if (sscanf(buf, "%1d", &new_speed) != 1)
+ return -EINVAL;
+
+ if (new_speed == 0 || new_speed == 1) {
+ wacom_poke(hdev, new_speed);
+ return strnlen(buf, PAGE_SIZE);
+ } else
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(speed, S_IRUGO | S_IWUGO,
+ wacom_show_speed, wacom_store_speed);
+
static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
u8 *raw_data, int size)
{
@@ -148,6 +293,12 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
input_sync(input);
}
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ /* Store current battery capacity */
+ rw = (data[7] >> 2 & 0x07);
+ if (rw != wdata->battery_capacity)
+ wdata->battery_capacity = rw;
+#endif
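/*
 * Worked example, not part of this patch: how the raw status byte above
 * maps to a capacity percentage via the batcap[] table.  The example byte
 * is made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	static const unsigned short batcap[8] = { 1, 15, 25, 35, 50, 70, 100, 0 };
	unsigned char data7 = 0x18;		/* hypothetical raw data[7] */
	unsigned int idx = (data7 >> 2) & 0x07;	/* == 6 */

	/* index 6 -> 100 percent; index 7 would mean AC online */
	printf("capacity index %u -> %u%%\n", idx, (unsigned int)batcap[idx]);
	return 0;
}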
return 1;
}
@@ -157,9 +308,7 @@ static int wacom_probe(struct hid_device *hdev,
struct hid_input *hidinput;
struct input_dev *input;
struct wacom_data *wdata;
- char rep_data[2];
int ret;
- int limit;
wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
if (wdata == NULL) {
@@ -182,31 +331,53 @@ static int wacom_probe(struct hid_device *hdev,
goto err_free;
}
- /*
- * Note that if the raw queries fail, it's not a hard failure and it
- * is safe to continue
- */
+ ret = device_create_file(&hdev->dev, &dev_attr_speed);
+ if (ret)
+ dev_warn(&hdev->dev,
+ "can't create sysfs speed attribute err: %d\n", ret);
- /* Set Wacom mode2 */
- rep_data[0] = 0x03; rep_data[1] = 0x00;
- limit = 3;
- do {
- ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
- HID_FEATURE_REPORT);
- } while (ret < 0 && limit-- > 0);
- if (ret < 0)
- dev_warn(&hdev->dev, "failed to poke device #1, %d\n", ret);
+ /* Set Wacom mode 2 with high reporting speed */
+ wacom_poke(hdev, 1);
- /* 0x06 - high reporting speed, 0x05 - low speed */
- rep_data[0] = 0x06; rep_data[1] = 0x00;
- limit = 3;
- do {
- ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
- HID_FEATURE_REPORT);
- } while (ret < 0 && limit-- > 0);
- if (ret < 0)
- dev_warn(&hdev->dev, "failed to poke device #2, %d\n", ret);
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ wdata->battery.properties = wacom_battery_props;
+ wdata->battery.num_properties = ARRAY_SIZE(wacom_battery_props);
+ wdata->battery.get_property = wacom_battery_get_property;
+ wdata->battery.name = "wacom_battery";
+ wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ wdata->battery.use_for_apm = 0;
+ ret = power_supply_register(&hdev->dev, &wdata->battery);
+ if (ret) {
+ dev_warn(&hdev->dev,
+ "can't create sysfs battery attribute, err: %d\n", ret);
+ /*
+ * battery attribute is not critical for the tablet, but if it
+ * failed then there is no need to create ac attribute
+ */
+ goto move_on;
+ }
+
+ wdata->ac.properties = wacom_ac_props;
+ wdata->ac.num_properties = ARRAY_SIZE(wacom_ac_props);
+ wdata->ac.get_property = wacom_ac_get_property;
+ wdata->ac.name = "wacom_ac";
+ wdata->ac.type = POWER_SUPPLY_TYPE_MAINS;
+ wdata->ac.use_for_apm = 0;
+
+ ret = power_supply_register(&hdev->dev, &wdata->ac);
+ if (ret) {
+ dev_warn(&hdev->dev,
+ "can't create ac battery attribute, err: %d\n", ret);
+ /*
+ * ac attribute is not critical for the tablet, but if it
+ * failed then we don't want the battery attribute to exist
+ */
+ power_supply_unregister(&wdata->battery);
+ }
+
+move_on:
+#endif
hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
input = hidinput->input;
@@ -251,13 +422,21 @@ err_free:
static void wacom_remove(struct hid_device *hdev)
{
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ struct wacom_data *wdata = hid_get_drvdata(hdev);
+#endif
hid_hw_stop(hdev);
+
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ power_supply_unregister(&wdata->battery);
+ power_supply_unregister(&wdata->ac);
+#endif
kfree(hid_get_drvdata(hdev));
}
static const struct hid_device_id wacom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
-
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
{ }
};
MODULE_DEVICE_TABLE(hid, wacom_devices);
diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c
new file mode 100644
index 0000000..9e8d35a
--- /dev/null
+++ b/drivers/hid/hid-zydacron.c
@@ -0,0 +1,237 @@
+/*
+* HID driver for zydacron remote control
+*
+* Copyright (c) 2010 Don Prince <dhprince.devel@yahoo.co.uk>
+*/
+
+/*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the Free
+* Software Foundation; either version 2 of the License, or (at your option)
+* any later version.
+*/
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+struct zc_device {
+ struct input_dev *input_ep81;
+ unsigned short last_key[4];
+};
+
+
+/*
+* Zydacron remote control has an invalid HID report descriptor
+* that needs fixing before we can parse it.
+*/
+static void zc_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int rsize)
+{
+ if (rsize >= 253 &&
+ rdesc[0x96] == 0xbc && rdesc[0x97] == 0xff &&
+ rdesc[0xca] == 0xbc && rdesc[0xcb] == 0xff &&
+ rdesc[0xe1] == 0xbc && rdesc[0xe2] == 0xff) {
+ dev_info(&hdev->dev,
+ "fixing up zydacron remote control report "
+ "descriptor\n");
+ rdesc[0x96] = rdesc[0xca] = rdesc[0xe1] = 0x0c;
+ rdesc[0x97] = rdesc[0xcb] = rdesc[0xe2] = 0x00;
+ }
+}
+
+#define zc_map_key_clear(c) \
+ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
+
+static int zc_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ int i;
+ struct zc_device *zc = hid_get_drvdata(hdev);
+ zc->input_ep81 = hi->input;
+
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
+ return 0;
+
+ dbg_hid("zynacron input mapping event [0x%x]\n",
+ usage->hid & HID_USAGE);
+
+ switch (usage->hid & HID_USAGE) {
+ /* report 2 */
+ case 0x10:
+ zc_map_key_clear(KEY_MODE);
+ break;
+ case 0x30:
+ zc_map_key_clear(KEY_SCREEN);
+ break;
+ case 0x70:
+ zc_map_key_clear(KEY_INFO);
+ break;
+ /* report 3 */
+ case 0x04:
+ zc_map_key_clear(KEY_RADIO);
+ break;
+ /* report 4 */
+ case 0x0d:
+ zc_map_key_clear(KEY_PVR);
+ break;
+ case 0x25:
+ zc_map_key_clear(KEY_TV);
+ break;
+ case 0x47:
+ zc_map_key_clear(KEY_AUDIO);
+ break;
+ case 0x49:
+ zc_map_key_clear(KEY_AUX);
+ break;
+ case 0x4a:
+ zc_map_key_clear(KEY_VIDEO);
+ break;
+ case 0x48:
+ zc_map_key_clear(KEY_DVD);
+ break;
+ case 0x24:
+ zc_map_key_clear(KEY_MENU);
+ break;
+ case 0x32:
+ zc_map_key_clear(KEY_TEXT);
+ break;
+ default:
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++)
+ zc->last_key[i] = 0;
+
+ return 1;
+}
+
+static int zc_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct zc_device *zc = hid_get_drvdata(hdev);
+ int ret = 0;
+ unsigned key;
+ unsigned short index;
+
+ if (report->id == data[0]) {
+
+ /* break keys */
+ for (index = 0; index < 4; index++) {
+ key = zc->last_key[index];
+ if (key) {
+ input_event(zc->input_ep81, EV_KEY, key, 0);
+ zc->last_key[index] = 0;
+ }
+ }
+
+ key = 0;
+ switch (report->id) {
+ case 0x02:
+ case 0x03:
+ switch (data[1]) {
+ case 0x10:
+ key = KEY_MODE;
+ index = 0;
+ break;
+ case 0x30:
+ key = KEY_SCREEN;
+ index = 1;
+ break;
+ case 0x70:
+ key = KEY_INFO;
+ index = 2;
+ break;
+ case 0x04:
+ key = KEY_RADIO;
+ index = 3;
+ break;
+ }
+
+ if (key) {
+ input_event(zc->input_ep81, EV_KEY, key, 1);
+ zc->last_key[index] = key;
+ }
+
+ ret = 1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int zc_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct zc_device *zc;
+
+ zc = kzalloc(sizeof(*zc), GFP_KERNEL);
+ if (zc == NULL) {
+ dev_err(&hdev->dev, "zydacron: can't alloc descriptor\n");
+ return -ENOMEM;
+ }
+
+ hid_set_drvdata(hdev, zc);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ dev_err(&hdev->dev, "zydacron: parse failed\n");
+ goto err_free;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ dev_err(&hdev->dev, "zydacron: hw start failed\n");
+ goto err_free;
+ }
+
+ return 0;
+err_free:
+ kfree(zc);
+
+ return ret;
+}
+
+static void zc_remove(struct hid_device *hdev)
+{
+ struct zc_device *zc = hid_get_drvdata(hdev);
+
+ hid_hw_stop(hdev);
+
+ kfree(zc);
+}
+
+static const struct hid_device_id zc_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, zc_devices);
+
+static struct hid_driver zc_driver = {
+ .name = "zydacron",
+ .id_table = zc_devices,
+ .report_fixup = zc_report_fixup,
+ .input_mapping = zc_input_mapping,
+ .raw_event = zc_raw_event,
+ .probe = zc_probe,
+ .remove = zc_remove,
+};
+
+static int __init zc_init(void)
+{
+ return hid_register_driver(&zc_driver);
+}
+
+static void __exit zc_exit(void)
+{
+ hid_unregister_driver(&zc_driver);
+}
+
+module_init(zc_init);
+module_exit(zc_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 6eadf1a..3ccd478 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -106,38 +106,48 @@ out:
static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
unsigned int minor = iminor(file->f_path.dentry->d_inode);
- /* FIXME: What stops hidraw_table going NULL */
- struct hid_device *dev = hidraw_table[minor]->hid;
+ struct hid_device *dev;
__u8 *buf;
int ret = 0;
- if (!dev->hid_output_raw_report)
- return -ENODEV;
+ mutex_lock(&minors_lock);
+ dev = hidraw_table[minor]->hid;
+
+ if (!dev->hid_output_raw_report) {
+ ret = -ENODEV;
+ goto out;
+ }
if (count > HID_MAX_BUFFER_SIZE) {
printk(KERN_WARNING "hidraw: pid %d passed too large report\n",
task_pid_nr(current));
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (count < 2) {
printk(KERN_WARNING "hidraw: pid %d passed too short report\n",
task_pid_nr(current));
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
buf = kmalloc(count * sizeof(__u8), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (copy_from_user(buf, buffer, count)) {
ret = -EFAULT;
- goto out;
+ goto out_free;
}
ret = dev->hid_output_raw_report(dev, buf, count, HID_OUTPUT_REPORT);
-out:
+out_free:
kfree(buf);
+out:
+ mutex_unlock(&minors_lock);
return ret;
}
@@ -165,11 +175,8 @@ static int hidraw_open(struct inode *inode, struct file *file)
goto out;
}
- lock_kernel();
mutex_lock(&minors_lock);
if (!hidraw_table[minor]) {
- printk(KERN_EMERG "hidraw device with minor %d doesn't exist\n",
- minor);
kfree(list);
err = -ENODEV;
goto out_unlock;
@@ -197,7 +204,6 @@ static int hidraw_open(struct inode *inode, struct file *file)
out_unlock:
mutex_unlock(&minors_lock);
- unlock_kernel();
out:
return err;
@@ -209,11 +215,8 @@ static int hidraw_release(struct inode * inode, struct file * file)
struct hidraw *dev;
struct hidraw_list *list = file->private_data;
- if (!hidraw_table[minor]) {
- printk(KERN_EMERG "hidraw device with minor %d doesn't exist\n",
- minor);
+ if (!hidraw_table[minor])
return -ENODEV;
- }
list_del(&list->node);
dev = hidraw_table[minor];
@@ -238,11 +241,12 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
struct inode *inode = file->f_path.dentry->d_inode;
unsigned int minor = iminor(inode);
long ret = 0;
- /* FIXME: What stops hidraw_table going NULL */
- struct hidraw *dev = hidraw_table[minor];
+ struct hidraw *dev;
void __user *user_arg = (void __user*) arg;
- lock_kernel();
+ mutex_lock(&minors_lock);
+ dev = hidraw_table[minor];
+
switch (cmd) {
case HIDIOCGRDESCSIZE:
if (put_user(dev->hid->rsize, (int __user *)arg))
@@ -311,11 +315,11 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
-EFAULT : len;
break;
}
- }
+ }
ret = -ENOTTY;
}
- unlock_kernel();
+ mutex_unlock(&minors_lock);
return ret;
}
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 7b85b69..1ebd324 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -623,6 +623,7 @@ int usbhid_wait_io(struct hid_device *hid)
return 0;
}
+EXPORT_SYMBOL_GPL(usbhid_wait_io);
static int hid_set_idle(struct usb_device *dev, int ifnum, int report, int idle)
{
@@ -783,13 +784,12 @@ static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
- usbhid->inbuf = usb_buffer_alloc(dev, usbhid->bufsize, GFP_KERNEL,
+ usbhid->inbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL,
&usbhid->inbuf_dma);
- usbhid->outbuf = usb_buffer_alloc(dev, usbhid->bufsize, GFP_KERNEL,
+ usbhid->outbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL,
&usbhid->outbuf_dma);
- usbhid->cr = usb_buffer_alloc(dev, sizeof(*usbhid->cr), GFP_KERNEL,
- &usbhid->cr_dma);
- usbhid->ctrlbuf = usb_buffer_alloc(dev, usbhid->bufsize, GFP_KERNEL,
+ usbhid->cr = kmalloc(sizeof(*usbhid->cr), GFP_KERNEL);
+ usbhid->ctrlbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL,
&usbhid->ctrlbuf_dma);
if (!usbhid->inbuf || !usbhid->outbuf || !usbhid->cr ||
!usbhid->ctrlbuf)
@@ -807,16 +807,36 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
struct usb_host_interface *interface = intf->cur_altsetting;
int ret;
- ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- HID_REQ_SET_REPORT,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- ((report_type + 1) << 8) | *buf,
- interface->desc.bInterfaceNumber, buf + 1, count - 1,
- USB_CTRL_SET_TIMEOUT);
-
- /* count also the report id */
- if (ret > 0)
- ret++;
+ if (usbhid->urbout) {
+ int actual_length;
+ int skipped_report_id = 0;
+ if (buf[0] == 0x0) {
+ /* Don't send the Report ID */
+ buf++;
+ count--;
+ skipped_report_id = 1;
+ }
+ ret = usb_interrupt_msg(dev, usbhid->urbout->pipe,
+ buf, count, &actual_length,
+ USB_CTRL_SET_TIMEOUT);
+ /* return the number of bytes transferred */
+ if (ret == 0) {
+ ret = actual_length;
+ /* count also the report id */
+ if (skipped_report_id)
+ ret++;
+ }
+ } else {
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ HID_REQ_SET_REPORT,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ ((report_type + 1) << 8) | *buf,
+ interface->desc.bInterfaceNumber, buf + 1, count - 1,
+ USB_CTRL_SET_TIMEOUT);
+ /* count also the report id */
+ if (ret > 0)
+ ret++;
+ }
return ret;
}
@@ -844,10 +864,10 @@ static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
- usb_buffer_free(dev, usbhid->bufsize, usbhid->inbuf, usbhid->inbuf_dma);
- usb_buffer_free(dev, usbhid->bufsize, usbhid->outbuf, usbhid->outbuf_dma);
- usb_buffer_free(dev, sizeof(*(usbhid->cr)), usbhid->cr, usbhid->cr_dma);
- usb_buffer_free(dev, usbhid->bufsize, usbhid->ctrlbuf, usbhid->ctrlbuf_dma);
+ usb_free_coherent(dev, usbhid->bufsize, usbhid->inbuf, usbhid->inbuf_dma);
+ usb_free_coherent(dev, usbhid->bufsize, usbhid->outbuf, usbhid->outbuf_dma);
+ kfree(usbhid->cr);
+ usb_free_coherent(dev, usbhid->bufsize, usbhid->ctrlbuf, usbhid->ctrlbuf_dma);
}
static int usbhid_parse(struct hid_device *hid)
@@ -1007,9 +1027,8 @@ static int usbhid_start(struct hid_device *hid)
usb_fill_control_urb(usbhid->urbctrl, dev, 0, (void *) usbhid->cr,
usbhid->ctrlbuf, 1, hid_ctrl, hid);
- usbhid->urbctrl->setup_dma = usbhid->cr_dma;
usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma;
- usbhid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
+ usbhid->urbctrl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
if (!(hid->quirks & HID_QUIRK_NO_INIT_REPORTS))
usbhid_init_reports(hid);
@@ -1019,12 +1038,15 @@ static int usbhid_start(struct hid_device *hid)
/* Some keyboards don't work until their LEDs have been set.
* Since BIOSes do set the LEDs, it must be safe for any device
* that supports the keyboard boot protocol.
+ * In addition, enable remote wakeup by default for all keyboard
+ * devices supporting the boot protocol.
*/
if (interface->desc.bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT &&
interface->desc.bInterfaceProtocol ==
- USB_INTERFACE_PROTOCOL_KEYBOARD)
+ USB_INTERFACE_PROTOCOL_KEYBOARD) {
usbhid_set_leds(hid);
-
+ device_set_wakeup_enable(&dev->dev, 1);
+ }
return 0;
fail:
@@ -1133,6 +1155,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
hid->product = le16_to_cpu(dev->descriptor.idProduct);
hid->name[0] = 0;
+ hid->quirks = usbhid_lookup_quirk(hid->vendor, hid->product);
if (intf->cur_altsetting->desc.bInterfaceProtocol ==
USB_INTERFACE_PROTOCOL_MOUSE)
hid->type = HID_TYPE_USBMOUSE;
@@ -1289,6 +1312,11 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
{
set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
+ if (hid->driver && hid->driver->suspend) {
+ status = hid->driver->suspend(hid, message);
+ if (status < 0)
+ return status;
+ }
} else {
usbhid_mark_busy(usbhid);
spin_unlock_irq(&usbhid->lock);
@@ -1296,6 +1324,11 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
}
} else {
+ if (hid->driver && hid->driver->suspend) {
+ status = hid->driver->suspend(hid, message);
+ if (status < 0)
+ return status;
+ }
spin_lock_irq(&usbhid->lock);
set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
@@ -1350,6 +1383,11 @@ static int hid_resume(struct usb_interface *intf)
hid_io_error(hid);
usbhid_restart_queues(usbhid);
+ if (status >= 0 && hid->driver && hid->driver->resume) {
+ int ret = hid->driver->resume(hid);
+ if (ret < 0)
+ status = ret;
+ }
dev_dbg(&intf->dev, "resume status %d\n", status);
return 0;
}
@@ -1358,9 +1396,16 @@ static int hid_reset_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata(intf);
struct usbhid_device *usbhid = hid->driver_data;
+ int status;
clear_bit(HID_REPORTED_IDLE, &usbhid->iofl);
- return hid_post_reset(intf);
+ status = hid_post_reset(intf);
+ if (status >= 0 && hid->driver && hid->driver->reset_resume) {
+ int ret = hid->driver->reset_resume(hid);
+ if (ret < 0)
+ status = ret;
+ }
+ return status;
}
#endif /* CONFIG_PM */
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 1152f9b..5ff8d32 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -33,6 +33,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
+ { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 433602a..c24d2fa 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -267,6 +267,7 @@ static int hiddev_open(struct inode *inode, struct file *file)
struct hiddev_list *list;
int res, i;
+ /* See comment in hiddev_connect() for BKL explanation */
lock_kernel();
i = iminor(inode) - HIDDEV_MINOR_BASE;
@@ -894,8 +895,22 @@ int hiddev_connect(struct hid_device *hid, unsigned int force)
hiddev->hid = hid;
hiddev->exist = 1;
- /* when lock_kernel() usage is fixed in usb_open(),
- * we could also fix it here */
+ /*
+ * BKL here is used to avoid race after usb_register_dev().
+ * Once the device node has been created, open() could happen on it.
+ * The code below will then fail, as hiddev_table hasn't been
+ * updated.
+ *
+ * The obvious fix -- introducing mutex to guard hiddev_table[]
+ * doesn't work, as usb_open() and usb_register_dev() both take
+ * minor_rwsem, thus we'll have ABBA deadlock.
+ *
+ * Before BKL pushdown, usb_open() had been acquiring it in the right
+ * order, so _open() was safe to use it to protect from this race.
+ * Now the order is different, but AB-BA deadlock still doesn't occur
+ * as BKL is dropped on schedule() (i.e. while sleeping on
+ * minor_rwsem). Fugly.
+ */
lock_kernel();
retval = usb_register_dev(usbhid->intf, &hiddev_class);
if (retval) {
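The lock-ordering argument in the comment above can be sketched as follows (hypothetical table_mutex, shown only to illustrate the AB-BA ordering the comment rules out):

    /*
     *   usb_open()                        hiddev_connect()
     *     down(minor_rwsem)                 mutex_lock(table_mutex)
     *     ->open() == hiddev_open()         usb_register_dev()
     *       mutex_lock(table_mutex)           down(minor_rwsem)
     *
     * One path takes minor_rwsem and then the mutex, the other takes the
     * mutex and then minor_rwsem, so each can block the other. The BKL
     * escapes this only because it is released while sleeping on
     * minor_rwsem.
     */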
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index ec20400..693fd3e 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -75,7 +75,6 @@ struct usbhid_device {
struct urb *urbctrl; /* Control URB */
struct usb_ctrlrequest *cr; /* Control request struct */
- dma_addr_t cr_dma; /* Control request struct dma */
struct hid_control_fifo ctrl[HID_CONTROL_FIFO_SIZE]; /* Control fifo */
unsigned char ctrlhead, ctrltail; /* Control fifo head & tail */
char *ctrlbuf; /* Control buffer */
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index f843443..a948605 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -74,7 +74,6 @@ struct usb_kbd {
unsigned char *new;
struct usb_ctrlrequest *cr;
unsigned char *leds;
- dma_addr_t cr_dma;
dma_addr_t new_dma;
dma_addr_t leds_dma;
};
@@ -197,11 +196,11 @@ static int usb_kbd_alloc_mem(struct usb_device *dev, struct usb_kbd *kbd)
return -1;
if (!(kbd->led = usb_alloc_urb(0, GFP_KERNEL)))
return -1;
- if (!(kbd->new = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &kbd->new_dma)))
+ if (!(kbd->new = usb_alloc_coherent(dev, 8, GFP_ATOMIC, &kbd->new_dma)))
return -1;
- if (!(kbd->cr = usb_buffer_alloc(dev, sizeof(struct usb_ctrlrequest), GFP_ATOMIC, &kbd->cr_dma)))
+ if (!(kbd->cr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL)))
return -1;
- if (!(kbd->leds = usb_buffer_alloc(dev, 1, GFP_ATOMIC, &kbd->leds_dma)))
+ if (!(kbd->leds = usb_alloc_coherent(dev, 1, GFP_ATOMIC, &kbd->leds_dma)))
return -1;
return 0;
@@ -211,9 +210,9 @@ static void usb_kbd_free_mem(struct usb_device *dev, struct usb_kbd *kbd)
{
usb_free_urb(kbd->irq);
usb_free_urb(kbd->led);
- usb_buffer_free(dev, 8, kbd->new, kbd->new_dma);
- usb_buffer_free(dev, sizeof(struct usb_ctrlrequest), kbd->cr, kbd->cr_dma);
- usb_buffer_free(dev, 1, kbd->leds, kbd->leds_dma);
+ usb_free_coherent(dev, 8, kbd->new, kbd->new_dma);
+ kfree(kbd->cr);
+ usb_free_coherent(dev, 1, kbd->leds, kbd->leds_dma);
}
static int usb_kbd_probe(struct usb_interface *iface,
@@ -304,15 +303,15 @@ static int usb_kbd_probe(struct usb_interface *iface,
usb_fill_control_urb(kbd->led, dev, usb_sndctrlpipe(dev, 0),
(void *) kbd->cr, kbd->leds, 1,
usb_kbd_led, kbd);
- kbd->led->setup_dma = kbd->cr_dma;
kbd->led->transfer_dma = kbd->leds_dma;
- kbd->led->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
+ kbd->led->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
error = input_register_device(kbd->dev);
if (error)
goto fail2;
usb_set_intfdata(iface, kbd);
+ device_set_wakeup_enable(&dev->dev, 1);
return 0;
fail2:
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index 72ab4b2..79b2bf8 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -142,7 +142,7 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
if (!mouse || !input_dev)
goto fail1;
- mouse->data = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &mouse->data_dma);
+ mouse->data = usb_alloc_coherent(dev, 8, GFP_ATOMIC, &mouse->data_dma);
if (!mouse->data)
goto fail1;
@@ -205,7 +205,7 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
fail3:
usb_free_urb(mouse->irq);
fail2:
- usb_buffer_free(dev, 8, mouse->data, mouse->data_dma);
+ usb_free_coherent(dev, 8, mouse->data, mouse->data_dma);
fail1:
input_free_device(input_dev);
kfree(mouse);
@@ -221,7 +221,7 @@ static void usb_mouse_disconnect(struct usb_interface *intf)
usb_kill_urb(mouse->irq);
input_unregister_device(mouse->dev);
usb_free_urb(mouse->irq);
- usb_buffer_free(interface_to_usbdev(intf), 8, mouse->data, mouse->data_dma);
+ usb_free_coherent(interface_to_usbdev(intf), 8, mouse->data, mouse->data_dma);
kfree(mouse);
}
}
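The same mechanical conversion recurs across the HID and input drivers in this patch (usbkbd and usbmouse above; xpad, ati_remote, cm109, powermate, yealink, appletouch and bcm5974 below): usb_buffer_alloc()/usb_buffer_free() become usb_alloc_coherent()/usb_free_coherent() with identical arguments, and control-request (setup packet) buffers move to plain kmalloc(), dropping the urb setup_dma field and the URB_NO_SETUP_DMA_MAP flag on the assumption that the USB core now maps setup packets itself. A condensed sketch of the resulting pattern, with hypothetical dev->... fields:

    dev->data = usb_alloc_coherent(udev, len, GFP_KERNEL, &dev->data_dma);
    dev->cr   = kmalloc(sizeof(*dev->cr), GFP_KERNEL); /* setup packet, mapped by core */

    usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
                         (void *)dev->cr, dev->data, len, complete_fn, dev);
    urb->transfer_dma    = dev->data_dma;
    urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* URB_NO_SETUP_DMA_MAP no longer set */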
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index dcdaf8e..2b9a8f5 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -109,13 +109,13 @@ static void pca_stop(struct i2c_algo_pca_data *adap)
* returns after the address has been sent
*/
static int pca_address(struct i2c_algo_pca_data *adap,
- struct i2c_msg *msg)
+ struct i2c_msg *msg)
{
int sta = pca_get_con(adap);
int addr;
- addr = ( (0x7f & msg->addr) << 1 );
- if (msg->flags & I2C_M_RD )
+ addr = ((0x7f & msg->addr) << 1);
+ if (msg->flags & I2C_M_RD)
addr |= 1;
DEB2("=== SLAVE ADDRESS %#04x+%c=%#04x\n",
msg->addr, msg->flags & I2C_M_RD ? 'R' : 'W', addr);
@@ -134,7 +134,7 @@ static int pca_address(struct i2c_algo_pca_data *adap,
* Returns after the byte has been transmitted
*/
static int pca_tx_byte(struct i2c_algo_pca_data *adap,
- __u8 b)
+ __u8 b)
{
int sta = pca_get_con(adap);
DEB2("=== WRITE %#04x\n", b);
@@ -164,13 +164,13 @@ static void pca_rx_byte(struct i2c_algo_pca_data *adap,
* Returns after next byte has arrived.
*/
static int pca_rx_ack(struct i2c_algo_pca_data *adap,
- int ack)
+ int ack)
{
int sta = pca_get_con(adap);
sta &= ~(I2C_PCA_CON_STO|I2C_PCA_CON_STA|I2C_PCA_CON_SI|I2C_PCA_CON_AA);
- if ( ack )
+ if (ack)
sta |= I2C_PCA_CON_AA;
pca_set_con(adap, sta);
@@ -178,12 +178,12 @@ static int pca_rx_ack(struct i2c_algo_pca_data *adap,
}
static int pca_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg *msgs,
- int num)
+ struct i2c_msg *msgs,
+ int num)
{
- struct i2c_algo_pca_data *adap = i2c_adap->algo_data;
- struct i2c_msg *msg = NULL;
- int curmsg;
+ struct i2c_algo_pca_data *adap = i2c_adap->algo_data;
+ struct i2c_msg *msg = NULL;
+ int curmsg;
int numbytes = 0;
int state;
int ret;
@@ -202,21 +202,21 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
DEB1("{{{ XFER %d messages\n", num);
- if (i2c_debug>=2) {
+ if (i2c_debug >= 2) {
for (curmsg = 0; curmsg < num; curmsg++) {
int addr, i;
msg = &msgs[curmsg];
addr = (0x7f & msg->addr) ;
- if (msg->flags & I2C_M_RD )
+ if (msg->flags & I2C_M_RD)
printk(KERN_INFO " [%02d] RD %d bytes from %#02x [%#02x, ...]\n",
- curmsg, msg->len, addr, (addr<<1) | 1);
+ curmsg, msg->len, addr, (addr << 1) | 1);
else {
printk(KERN_INFO " [%02d] WR %d bytes to %#02x [%#02x%s",
- curmsg, msg->len, addr, addr<<1,
+ curmsg, msg->len, addr, addr << 1,
msg->len == 0 ? "" : ", ");
- for(i=0; i < msg->len; i++)
+ for (i = 0; i < msg->len; i++)
printk("%#04x%s", msg->buf[i], i == msg->len - 1 ? "" : ", ");
printk("]\n");
}
@@ -305,7 +305,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
goto out;
case 0x58: /* Data byte has been received; NOT ACK has been returned */
- if ( numbytes == msg->len - 1 ) {
+ if (numbytes == msg->len - 1) {
pca_rx_byte(adap, &msg->buf[numbytes], 0);
curmsg++; numbytes = 0;
if (curmsg == num)
@@ -352,7 +352,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
static u32 pca_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm pca_algo = {
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index bd8f1e4..906a3ca5 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -60,7 +60,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* ALI1535 SMBus address offsets */
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 659f63f..b14f6d6 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -67,7 +67,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* ALI15X3 SMBus address offsets */
#define SMBHSTSTS (0 + ali15x3_smba)
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index c5a9fa4..03bcd07 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -43,7 +43,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* AMD756 SMBus address offsets */
#define SMB_ADDR_OFFSET 0xE0
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 2fbef27..af1e5e2 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -18,7 +18,7 @@
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Vojtech Pavlik <vojtech@suse.cz>");
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 06e1ecb..305c075 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -23,8 +23,7 @@
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#include <mach/at91_twi.h>
#include <mach/board.h>
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 6122556..e5b1a3b 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -37,8 +37,8 @@
#include <linux/isa.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pcf.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/irq.h>
#include "../algos/i2c-algo-pcf.h"
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index c21077d..d9aa9a6 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -211,7 +211,7 @@ static int __init i2c_gpio_init(void)
return ret;
}
-module_init(i2c_gpio_init);
+subsys_initcall(i2c_gpio_init);
static void __exit i2c_gpio_exit(void)
{
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index c767295..9ff1695 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -28,7 +28,7 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/hydra.h>
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 299b918..f4b21f2 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -138,6 +138,17 @@ static struct pci_dev *I801_dev;
#define FEATURE_I2C_BLOCK_READ (1 << 3)
static unsigned int i801_features;
+static const char *i801_feature_names[] = {
+ "SMBus PEC",
+ "Block buffer",
+ "Block process call",
+ "I2C block read",
+};
+
+static unsigned int disable_features;
+module_param(disable_features, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_features, "Disable selected driver features");
+
/* Make sure the SMBus host is ready to start transmitting.
Return 0 if it is, -EBUSY if it is not. */
static int i801_check_pre(void)
@@ -341,9 +352,8 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
do {
msleep(1);
status = inb_p(SMBHSTSTS);
- }
- while ((!(status & SMBHSTSTS_BYTE_DONE))
- && (timeout++ < MAX_TIMEOUT));
+ } while ((!(status & SMBHSTSTS_BYTE_DONE))
+ && (timeout++ < MAX_TIMEOUT));
result = i801_check_post(status, timeout > MAX_TIMEOUT);
if (result < 0)
@@ -440,9 +450,9 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
}
/* Return negative errno on error. */
-static s32 i801_access(struct i2c_adapter * adap, u16 addr,
+static s32 i801_access(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write, u8 command,
- int size, union i2c_smbus_data * data)
+ int size, union i2c_smbus_data *data)
{
int hwpec;
int block = 0;
@@ -511,7 +521,7 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
else
outb_p(inb_p(SMBAUXCTL) & (~SMBAUXCTL_CRC), SMBAUXCTL);
- if(block)
+ if (block)
ret = i801_block_transaction(data, read_write, size, hwpec);
else
ret = i801_transaction(xact | ENABLE_INT9);
@@ -523,9 +533,9 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B),
SMBAUXCTL);
- if(block)
+ if (block)
return ret;
- if(ret)
+ if (ret)
return ret;
if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK))
return 0;
@@ -585,7 +595,7 @@ static const struct pci_device_id i801_ids[] = {
{ 0, }
};
-MODULE_DEVICE_TABLE (pci, i801_ids);
+MODULE_DEVICE_TABLE(pci, i801_ids);
#if defined CONFIG_INPUT_APANEL || defined CONFIG_INPUT_APANEL_MODULE
static unsigned char apanel_addr;
@@ -689,10 +699,11 @@ static void __devinit dmi_check_onboard_devices(const struct dmi_header *dm,
}
#endif
-static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int __devinit i801_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
unsigned char temp;
- int err;
+ int err, i;
#if defined CONFIG_SENSORS_FSCHMD || defined CONFIG_SENSORS_FSCHMD_MODULE
const char *vendor;
#endif
@@ -700,26 +711,28 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
I801_dev = dev;
i801_features = 0;
switch (dev->device) {
- case PCI_DEVICE_ID_INTEL_82801EB_3:
- case PCI_DEVICE_ID_INTEL_ESB_4:
- case PCI_DEVICE_ID_INTEL_ICH6_16:
- case PCI_DEVICE_ID_INTEL_ICH7_17:
- case PCI_DEVICE_ID_INTEL_ESB2_17:
- case PCI_DEVICE_ID_INTEL_ICH8_5:
- case PCI_DEVICE_ID_INTEL_ICH9_6:
- case PCI_DEVICE_ID_INTEL_TOLAPAI_1:
- case PCI_DEVICE_ID_INTEL_ICH10_4:
- case PCI_DEVICE_ID_INTEL_ICH10_5:
- case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
- case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
+ default:
i801_features |= FEATURE_I2C_BLOCK_READ;
/* fall through */
case PCI_DEVICE_ID_INTEL_82801DB_3:
i801_features |= FEATURE_SMBUS_PEC;
i801_features |= FEATURE_BLOCK_BUFFER;
+ /* fall through */
+ case PCI_DEVICE_ID_INTEL_82801CA_3:
+ case PCI_DEVICE_ID_INTEL_82801BA_2:
+ case PCI_DEVICE_ID_INTEL_82801AB_3:
+ case PCI_DEVICE_ID_INTEL_82801AA_3:
break;
}
+ /* Disable features on user request */
+ for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) {
+ if (i801_features & disable_features & (1 << i))
+ dev_notice(&dev->dev, "%s disabled by user\n",
+ i801_feature_names[i]);
+ }
+ i801_features &= ~disable_features;
+
err = pci_enable_device(dev);
if (err) {
dev_err(&dev->dev, "Failed to enable SMBus PCI device (%d)\n",
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 2bef534..f8ccc0f 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -39,7 +39,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/of_platform.h>
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 5901707..112c61f 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -38,8 +38,7 @@
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#include "i2c-iop3xx.h"
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 3623a44..1624206 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -17,8 +17,7 @@
#include <linux/interrupt.h>
#include <linux/mv643xx_i2c.h>
#include <linux/platform_device.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
/* Register defines */
#define MV64XXX_I2C_REG_SLAVE_ADDR 0x00
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 4a48dd4..a605a50 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -57,7 +57,7 @@
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Hans-Frieder Vogt <hfvogt@gmx.net>");
@@ -404,10 +404,9 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_
/* SMBus adapter 1 */
res1 = nforce2_probe_smb(dev, 4, NFORCE_PCI_SMB1, &smbuses[0], "SMB1");
- if (res1 < 0) {
- dev_err(&dev->dev, "Error probing SMB1.\n");
+ if (res1 < 0)
smbuses[0].base = 0; /* to have a check value */
- }
+
/* SMBus adapter 2 */
if (dmi_check_system(nforce2_dmi_blacklist2)) {
dev_err(&dev->dev, "Disabling SMB2 for safety reasons.\n");
@@ -416,11 +415,10 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_
} else {
res2 = nforce2_probe_smb(dev, 5, NFORCE_PCI_SMB2, &smbuses[1],
"SMB2");
- if (res2 < 0) {
- dev_err(&dev->dev, "Error probing SMB2.\n");
+ if (res2 < 0)
smbuses[1].base = 0; /* to have a check value */
- }
}
+
if ((res1 < 0) && (res2 < 0)) {
/* we did not find even one of the SMBuses, so we give up */
kfree(smbuses);
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index b4ed4ca..0070371 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -19,7 +19,7 @@
#include <linux/wait.h>
#include <linux/i2c-ocores.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
struct ocores_i2c {
void __iomem *base;
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index 5f41ec0..fc5fbd1 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -33,7 +33,7 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c-smbus.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "i2c-parport.h"
#define DEFAULT_BASE 0x378
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 846583e..0eb1515 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -137,7 +137,7 @@ static int parport_getsda(void *data)
copied. The attaching code will set getscl to NULL for adapters that
cannot read SCL back, and will also make the data field point to
the parallel port structure. */
-static struct i2c_algo_bit_data parport_algo_data = {
+static const struct i2c_algo_bit_data parport_algo_data = {
.setsda = parport_setsda,
.setscl = parport_setscl,
.getsda = parport_getsda,
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index d3d4a4b..4174101 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -25,7 +25,7 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
static struct pci_driver pasemi_smb_driver;
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index f7346a9..bbd7760 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -30,8 +30,8 @@
#include <linux/isa.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pca.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/irq.h>
#define DRIVER "i2c-pca-isa"
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 5b2213d..ef5c784 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -23,9 +23,9 @@
#include <linux/i2c-algo-pca.h>
#include <linux/i2c-pca-platform.h>
#include <linux/gpio.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
struct i2c_pca_pf_data {
void __iomem *reg_base;
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index ee9da6f..6d14ac2 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -39,7 +39,7 @@
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* PIIX4 SMBus address offsets */
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 7b57d5f..dfa7ae9 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -33,7 +33,7 @@
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/delay.h>
-#include <asm/io.h>
+#include <linux/io.h>
#define DRV_NAME "pmcmsptwi"
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index fbde6f6..020ff23 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -34,9 +34,9 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
#include <plat/i2c.h>
/*
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index ec3256c..72902e0 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -35,9 +35,9 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
#include <plat/regs-iic.h>
#include <plat/iic.h>
diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c
index c91359f..cadc021 100644
--- a/drivers/i2c/busses/i2c-s6000.c
+++ b/drivers/i2c/busses/i2c-s6000.c
@@ -36,8 +36,8 @@
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include "i2c-s6000.h"
#define DRV_NAME "i2c-s6000"
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index b9680f5..4f93da3 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -16,10 +16,10 @@
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <asm/clock.h>
#include <asm/i2c-sh7760.h>
-#include <asm/io.h>
/* register offsets */
#define I2CSCR 0x0 /* slave ctrl */
diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c
index 98b1ec4..3d76a18 100644
--- a/drivers/i2c/busses/i2c-sibyte.c
+++ b/drivers/i2c/busses/i2c-sibyte.c
@@ -22,7 +22,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_smbus.h>
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 78b0610..2fc08fb 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -24,12 +24,11 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <asm/io.h>
-
struct simtec_i2c_data {
struct resource *ioarea;
void __iomem *reg;
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 55a7137..4375866 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -61,7 +61,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
static int blacklist[] = {
PCI_DEVICE_ID_SI_540,
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 2309c7f..e6f539e 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -53,7 +53,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* SIS630 SMBus registers */
#define SMB_STS 0x80 /* status */
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index d43d8f8..86837f0 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -38,7 +38,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* base address register in PCI config space */
#define SIS96x_BAR 0x04
diff --git a/drivers/i2c/busses/i2c-stub.c b/drivers/i2c/busses/i2c-stub.c
index 0c770ea..b1b34479 100644
--- a/drivers/i2c/busses/i2c-stub.c
+++ b/drivers/i2c/busses/i2c-stub.c
@@ -29,13 +29,16 @@
#include <linux/i2c.h>
#define MAX_CHIPS 10
+#define STUB_FUNC (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | \
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | \
+ I2C_FUNC_SMBUS_I2C_BLOCK)
static unsigned short chip_addr[MAX_CHIPS];
module_param_array(chip_addr, ushort, NULL, S_IRUGO);
MODULE_PARM_DESC(chip_addr,
"Chip addresses (up to 10, between 0x03 and 0x77)");
-static unsigned long functionality = ~0UL;
+static unsigned long functionality = STUB_FUNC;
module_param(functionality, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(functionality, "Override functionality bitfield");
@@ -156,9 +159,7 @@ static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
static u32 stub_func(struct i2c_adapter *adapter)
{
- return (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK) & functionality;
+ return STUB_FUNC & functionality;
}
static const struct i2c_algorithm smbus_algorithm = {
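With this default change, loading i2c-stub without a functionality argument now advertises only the SMBus transfers the stub actually emulates (quick, byte, byte data, word data and I2C block, per STUB_FUNC); the module parameter can still narrow that set further, but bits outside STUB_FUNC no longer claim unsupported transfer types.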
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index 5c47383..6055601 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -15,8 +15,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#define I2C_CONTROL 0x00
#define I2C_CONTROLS 0x00
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index de78283..7799fe5 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -25,7 +25,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* Power management registers */
#define PM_CFG_REVID 0x08 /* silicon revision code */
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index d57292e..4c6fff5 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -51,7 +51,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
static struct pci_dev *vt596_pdev;
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 684395b..4cb4bb0 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -32,7 +32,7 @@
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/scx200.h>
@@ -552,7 +552,7 @@ static int __init scx200_create_isa(const char *text, unsigned long base,
* the name and the BAR where the I/O address resource is located. ISA
* devices are flagged with a bar value of -1 */
-static struct pci_device_id scx200_pci[] = {
+static const struct pci_device_id scx200_pci[] __initconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE),
.driver_data = 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE),
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
index 42df0ec..7ee0d50 100644
--- a/drivers/i2c/busses/scx200_i2c.c
+++ b/drivers/i2c/busses/scx200_i2c.c
@@ -27,7 +27,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/scx200_gpio.h>
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 7c469a6..db3c9f3 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1221,10 +1221,10 @@ EXPORT_SYMBOL(i2c_transfer);
*
* Returns negative errno, or else the number of bytes written.
*/
-int i2c_master_send(struct i2c_client *client,const char *buf ,int count)
+int i2c_master_send(struct i2c_client *client, const char *buf, int count)
{
int ret;
- struct i2c_adapter *adap=client->adapter;
+ struct i2c_adapter *adap = client->adapter;
struct i2c_msg msg;
msg.addr = client->addr;
@@ -1248,9 +1248,9 @@ EXPORT_SYMBOL(i2c_master_send);
*
* Returns negative errno, or else the number of bytes read.
*/
-int i2c_master_recv(struct i2c_client *client, char *buf ,int count)
+int i2c_master_recv(struct i2c_client *client, char *buf, int count)
{
- struct i2c_adapter *adap=client->adapter;
+ struct i2c_adapter *adap = client->adapter;
struct i2c_msg msg;
int ret;
@@ -1452,7 +1452,7 @@ i2c_new_probed_device(struct i2c_adapter *adap,
}
EXPORT_SYMBOL_GPL(i2c_new_probed_device);
-struct i2c_adapter* i2c_get_adapter(int id)
+struct i2c_adapter *i2c_get_adapter(int id)
{
struct i2c_adapter *adapter;
@@ -1479,7 +1479,7 @@ static u8 crc8(u16 data)
{
int i;
- for(i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++) {
if (data & 0x8000)
data = data ^ POLY;
data = data << 1;
@@ -1492,7 +1492,7 @@ static u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count)
{
int i;
- for(i = 0; i < count; i++)
+ for (i = 0; i < count; i++)
crc = crc8((crc ^ p[i]) << 8);
return crc;
}
@@ -1562,7 +1562,7 @@ EXPORT_SYMBOL(i2c_smbus_read_byte);
*/
s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value)
{
- return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL);
}
EXPORT_SYMBOL(i2c_smbus_write_byte);
@@ -1600,9 +1600,9 @@ s32 i2c_smbus_write_byte_data(struct i2c_client *client, u8 command, u8 value)
{
union i2c_smbus_data data;
data.byte = value;
- return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
- I2C_SMBUS_WRITE,command,
- I2C_SMBUS_BYTE_DATA,&data);
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_BYTE_DATA, &data);
}
EXPORT_SYMBOL(i2c_smbus_write_byte_data);
@@ -1639,9 +1639,9 @@ s32 i2c_smbus_write_word_data(struct i2c_client *client, u8 command, u16 value)
{
union i2c_smbus_data data;
data.word = value;
- return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
- I2C_SMBUS_WRITE,command,
- I2C_SMBUS_WORD_DATA,&data);
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_WORD_DATA, &data);
}
EXPORT_SYMBOL(i2c_smbus_write_word_data);
@@ -1718,9 +1718,9 @@ s32 i2c_smbus_write_block_data(struct i2c_client *client, u8 command,
length = I2C_SMBUS_BLOCK_MAX;
data.block[0] = length;
memcpy(&data.block[1], values, length);
- return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
- I2C_SMBUS_WRITE,command,
- I2C_SMBUS_BLOCK_DATA,&data);
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_BLOCK_DATA, &data);
}
EXPORT_SYMBOL(i2c_smbus_write_block_data);
@@ -1762,10 +1762,10 @@ EXPORT_SYMBOL(i2c_smbus_write_i2c_block_data);
/* Simulate a SMBus command using the i2c protocol
No checking of parameters is done! */
-static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
- unsigned short flags,
- char read_write, u8 command, int size,
- union i2c_smbus_data * data)
+static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags,
+ char read_write, u8 command, int size,
+ union i2c_smbus_data *data)
{
/* So we need to generate a series of msgs. In the case of writing, we
need to use only one message; when reading, we need two. We initialize
@@ -1773,7 +1773,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
simpler. */
unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX+3];
unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX+2];
- int num = read_write == I2C_SMBUS_READ?2:1;
+ int num = read_write == I2C_SMBUS_READ ? 2 : 1;
struct i2c_msg msg[2] = { { addr, flags, 1, msgbuf0 },
{ addr, flags | I2C_M_RD, 0, msgbuf1 }
};
@@ -1782,7 +1782,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
int status;
msgbuf0[0] = command;
- switch(size) {
+ switch (size) {
case I2C_SMBUS_QUICK:
msg[0].len = 0;
/* Special case: The read/write field is used as data */
@@ -1809,7 +1809,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
if (read_write == I2C_SMBUS_READ)
msg[1].len = 2;
else {
- msg[0].len=3;
+ msg[0].len = 3;
msgbuf0[1] = data->word & 0xff;
msgbuf0[2] = data->word >> 8;
}
@@ -1902,26 +1902,26 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
}
if (read_write == I2C_SMBUS_READ)
- switch(size) {
- case I2C_SMBUS_BYTE:
- data->byte = msgbuf0[0];
- break;
- case I2C_SMBUS_BYTE_DATA:
- data->byte = msgbuf1[0];
- break;
- case I2C_SMBUS_WORD_DATA:
- case I2C_SMBUS_PROC_CALL:
- data->word = msgbuf1[0] | (msgbuf1[1] << 8);
- break;
- case I2C_SMBUS_I2C_BLOCK_DATA:
- for (i = 0; i < data->block[0]; i++)
- data->block[i+1] = msgbuf1[i];
- break;
- case I2C_SMBUS_BLOCK_DATA:
- case I2C_SMBUS_BLOCK_PROC_CALL:
- for (i = 0; i < msgbuf1[0] + 1; i++)
- data->block[i] = msgbuf1[i];
- break;
+ switch (size) {
+ case I2C_SMBUS_BYTE:
+ data->byte = msgbuf0[0];
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ data->byte = msgbuf1[0];
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ case I2C_SMBUS_PROC_CALL:
+ data->word = msgbuf1[0] | (msgbuf1[1] << 8);
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ for (i = 0; i < data->block[0]; i++)
+ data->block[i+1] = msgbuf1[i];
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ case I2C_SMBUS_BLOCK_PROC_CALL:
+ for (i = 0; i < msgbuf1[0] + 1; i++)
+ data->block[i] = msgbuf1[i];
+ break;
}
return 0;
}
@@ -1966,7 +1966,7 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
}
rt_mutex_unlock(&adapter->bus_lock);
} else
- res = i2c_smbus_xfer_emulated(adapter,addr,flags,read_write,
+ res = i2c_smbus_xfer_emulated(adapter, addr, flags, read_write,
command, protocol, data);
return res;
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index f4110aa..e0694e4 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -35,7 +35,7 @@
#include <linux/i2c.h>
#include <linux/i2c-dev.h>
#include <linux/jiffies.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static struct i2c_driver i2cdev_driver;
@@ -132,45 +132,45 @@ static DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL);
* needed by those system calls and by this SMBus interface.
*/
-static ssize_t i2cdev_read (struct file *file, char __user *buf, size_t count,
- loff_t *offset)
+static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
+ loff_t *offset)
{
char *tmp;
int ret;
- struct i2c_client *client = (struct i2c_client *)file->private_data;
+ struct i2c_client *client = file->private_data;
if (count > 8192)
count = 8192;
- tmp = kmalloc(count,GFP_KERNEL);
- if (tmp==NULL)
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (tmp == NULL)
return -ENOMEM;
pr_debug("i2c-dev: i2c-%d reading %zu bytes.\n",
iminor(file->f_path.dentry->d_inode), count);
- ret = i2c_master_recv(client,tmp,count);
+ ret = i2c_master_recv(client, tmp, count);
if (ret >= 0)
- ret = copy_to_user(buf,tmp,count)?-EFAULT:ret;
+ ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret;
kfree(tmp);
return ret;
}
-static ssize_t i2cdev_write (struct file *file, const char __user *buf, size_t count,
- loff_t *offset)
+static ssize_t i2cdev_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offset)
{
int ret;
char *tmp;
- struct i2c_client *client = (struct i2c_client *)file->private_data;
+ struct i2c_client *client = file->private_data;
if (count > 8192)
count = 8192;
- tmp = kmalloc(count,GFP_KERNEL);
- if (tmp==NULL)
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (tmp == NULL)
return -ENOMEM;
- if (copy_from_user(tmp,buf,count)) {
+ if (copy_from_user(tmp, buf, count)) {
kfree(tmp);
return -EFAULT;
}
@@ -178,7 +178,7 @@ static ssize_t i2cdev_write (struct file *file, const char __user *buf, size_t c
pr_debug("i2c-dev: i2c-%d writing %zu bytes.\n",
iminor(file->f_path.dentry->d_inode), count);
- ret = i2c_master_send(client,tmp,count);
+ ret = i2c_master_send(client, tmp, count);
kfree(tmp);
return ret;
}
@@ -369,13 +369,13 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct i2c_client *client = (struct i2c_client *)file->private_data;
+ struct i2c_client *client = file->private_data;
unsigned long funcs;
dev_dbg(&client->adapter->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n",
cmd, arg);
- switch ( cmd ) {
+ switch (cmd) {
case I2C_SLAVE:
case I2C_SLAVE_FORCE:
/* NOTE: devices set up to work with "new style" drivers
@@ -601,7 +601,7 @@ static void __exit i2c_dev_exit(void)
{
i2c_del_driver(&i2cdev_driver);
class_destroy(i2c_dev_class);
- unregister_chrdev(I2C_MAJOR,"i2c");
+ unregister_chrdev(I2C_MAJOR, "i2c");
}
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 9f4cadf..e95e8d0 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -877,7 +877,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
if (!mc_all_on) {
char *addrs;
int i;
- struct dev_mc_list *mcaddr;
+ struct netdev_hw_addr *ha;
addrs = kmalloc(ETH_ALEN * mc_count, GFP_ATOMIC);
if (!addrs) {
@@ -885,9 +885,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
goto unlock;
}
i = 0;
- netdev_for_each_mc_addr(mcaddr, netdev)
- memcpy(get_addr(addrs, i++),
- mcaddr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(get_addr(addrs, i++), ha->addr, ETH_ALEN);
perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW +
pft_entries_preallocated * 0x8;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index b166bb7..3871ac6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -768,11 +768,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
}
}
-static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen,
- const u8 *broadcast)
+static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
{
- if (addrlen != INFINIBAND_ALEN)
- return 0;
/* reserved QPN, prefix, scope */
if (memcmp(addr, broadcast, 6))
return 0;
@@ -787,7 +784,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, restart_task);
struct net_device *dev = priv->dev;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct ipoib_mcast *mcast, *tmcast;
LIST_HEAD(remove_list);
unsigned long flags;
@@ -812,15 +809,13 @@ void ipoib_mcast_restart_task(struct work_struct *work)
clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
/* Mark all of the entries that are found or don't exist */
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
union ib_gid mgid;
- if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr,
- mclist->dmi_addrlen,
- dev->broadcast))
+ if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
continue;
- memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
+ memcpy(mgid.raw, ha->addr + 4, sizeof mgid);
mcast = __ipoib_mcast_find(dev, &mgid);
if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 9b3353b..c1087ce 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -533,8 +533,8 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX)
return 0;
- xpad->odata = usb_buffer_alloc(xpad->udev, XPAD_PKT_LEN,
- GFP_KERNEL, &xpad->odata_dma);
+ xpad->odata = usb_alloc_coherent(xpad->udev, XPAD_PKT_LEN,
+ GFP_KERNEL, &xpad->odata_dma);
if (!xpad->odata)
goto fail1;
@@ -554,7 +554,7 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
return 0;
- fail2: usb_buffer_free(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma);
+ fail2: usb_free_coherent(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma);
fail1: return error;
}
@@ -568,7 +568,7 @@ static void xpad_deinit_output(struct usb_xpad *xpad)
{
if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX) {
usb_free_urb(xpad->irq_out);
- usb_buffer_free(xpad->udev, XPAD_PKT_LEN,
+ usb_free_coherent(xpad->udev, XPAD_PKT_LEN,
xpad->odata, xpad->odata_dma);
}
}
@@ -788,8 +788,8 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
if (!xpad || !input_dev)
goto fail1;
- xpad->idata = usb_buffer_alloc(udev, XPAD_PKT_LEN,
- GFP_KERNEL, &xpad->idata_dma);
+ xpad->idata = usb_alloc_coherent(udev, XPAD_PKT_LEN,
+ GFP_KERNEL, &xpad->idata_dma);
if (!xpad->idata)
goto fail1;
@@ -942,7 +942,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
fail5: usb_kill_urb(xpad->irq_in);
fail4: usb_free_urb(xpad->irq_in);
fail3: xpad_deinit_output(xpad);
- fail2: usb_buffer_free(udev, XPAD_PKT_LEN, xpad->idata, xpad->idata_dma);
+ fail2: usb_free_coherent(udev, XPAD_PKT_LEN, xpad->idata, xpad->idata_dma);
fail1: input_free_device(input_dev);
kfree(xpad);
return error;
@@ -964,7 +964,7 @@ static void xpad_disconnect(struct usb_interface *intf)
usb_kill_urb(xpad->irq_in);
}
usb_free_urb(xpad->irq_in);
- usb_buffer_free(xpad->udev, XPAD_PKT_LEN,
+ usb_free_coherent(xpad->udev, XPAD_PKT_LEN,
xpad->idata, xpad->idata_dma);
kfree(xpad);
}
diff --git a/drivers/input/misc/ati_remote.c b/drivers/input/misc/ati_remote.c
index e8bbc61..bce5712 100644
--- a/drivers/input/misc/ati_remote.c
+++ b/drivers/input/misc/ati_remote.c
@@ -624,13 +624,13 @@ static void ati_remote_irq_in(struct urb *urb)
static int ati_remote_alloc_buffers(struct usb_device *udev,
struct ati_remote *ati_remote)
{
- ati_remote->inbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, GFP_ATOMIC,
- &ati_remote->inbuf_dma);
+ ati_remote->inbuf = usb_alloc_coherent(udev, DATA_BUFSIZE, GFP_ATOMIC,
+ &ati_remote->inbuf_dma);
if (!ati_remote->inbuf)
return -1;
- ati_remote->outbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, GFP_ATOMIC,
- &ati_remote->outbuf_dma);
+ ati_remote->outbuf = usb_alloc_coherent(udev, DATA_BUFSIZE, GFP_ATOMIC,
+ &ati_remote->outbuf_dma);
if (!ati_remote->outbuf)
return -1;
@@ -653,10 +653,10 @@ static void ati_remote_free_buffers(struct ati_remote *ati_remote)
usb_free_urb(ati_remote->irq_urb);
usb_free_urb(ati_remote->out_urb);
- usb_buffer_free(ati_remote->udev, DATA_BUFSIZE,
+ usb_free_coherent(ati_remote->udev, DATA_BUFSIZE,
ati_remote->inbuf, ati_remote->inbuf_dma);
- usb_buffer_free(ati_remote->udev, DATA_BUFSIZE,
+ usb_free_coherent(ati_remote->udev, DATA_BUFSIZE,
ati_remote->outbuf, ati_remote->outbuf_dma);
}
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 2124b99..e148749 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -589,7 +589,7 @@ static int ati_remote2_urb_init(struct ati_remote2 *ar2)
int i, pipe, maxp;
for (i = 0; i < 2; i++) {
- ar2->buf[i] = usb_buffer_alloc(udev, 4, GFP_KERNEL, &ar2->buf_dma[i]);
+ ar2->buf[i] = usb_alloc_coherent(udev, 4, GFP_KERNEL, &ar2->buf_dma[i]);
if (!ar2->buf[i])
return -ENOMEM;
@@ -617,7 +617,7 @@ static void ati_remote2_urb_cleanup(struct ati_remote2 *ar2)
for (i = 0; i < 2; i++) {
usb_free_urb(ar2->urb[i]);
- usb_buffer_free(ar2->udev, 4, ar2->buf[i], ar2->buf_dma[i]);
+ usb_free_coherent(ar2->udev, 4, ar2->buf[i], ar2->buf_dma[i]);
}
}
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 86457fe..2b0eba6 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -102,7 +102,6 @@ struct cm109_dev {
struct cm109_ctl_packet *ctl_data;
dma_addr_t ctl_dma;
struct usb_ctrlrequest *ctl_req;
- dma_addr_t ctl_req_dma;
struct urb *urb_ctl;
/*
* The 3 bitfields below are protected by ctl_submit_lock.
@@ -629,15 +628,13 @@ static const struct usb_device_id cm109_usb_table[] = {
static void cm109_usb_cleanup(struct cm109_dev *dev)
{
- if (dev->ctl_req)
- usb_buffer_free(dev->udev, sizeof(*(dev->ctl_req)),
- dev->ctl_req, dev->ctl_req_dma);
+ kfree(dev->ctl_req);
if (dev->ctl_data)
- usb_buffer_free(dev->udev, USB_PKT_LEN,
- dev->ctl_data, dev->ctl_dma);
+ usb_free_coherent(dev->udev, USB_PKT_LEN,
+ dev->ctl_data, dev->ctl_dma);
if (dev->irq_data)
- usb_buffer_free(dev->udev, USB_PKT_LEN,
- dev->irq_data, dev->irq_dma);
+ usb_free_coherent(dev->udev, USB_PKT_LEN,
+ dev->irq_data, dev->irq_dma);
usb_free_urb(dev->urb_irq); /* parameter validation in core/urb */
usb_free_urb(dev->urb_ctl); /* parameter validation in core/urb */
@@ -686,18 +683,17 @@ static int cm109_usb_probe(struct usb_interface *intf,
goto err_out;
/* allocate usb buffers */
- dev->irq_data = usb_buffer_alloc(udev, USB_PKT_LEN,
- GFP_KERNEL, &dev->irq_dma);
+ dev->irq_data = usb_alloc_coherent(udev, USB_PKT_LEN,
+ GFP_KERNEL, &dev->irq_dma);
if (!dev->irq_data)
goto err_out;
- dev->ctl_data = usb_buffer_alloc(udev, USB_PKT_LEN,
- GFP_KERNEL, &dev->ctl_dma);
+ dev->ctl_data = usb_alloc_coherent(udev, USB_PKT_LEN,
+ GFP_KERNEL, &dev->ctl_dma);
if (!dev->ctl_data)
goto err_out;
- dev->ctl_req = usb_buffer_alloc(udev, sizeof(*(dev->ctl_req)),
- GFP_KERNEL, &dev->ctl_req_dma);
+ dev->ctl_req = kmalloc(sizeof(*(dev->ctl_req)), GFP_KERNEL);
if (!dev->ctl_req)
goto err_out;
@@ -735,10 +731,8 @@ static int cm109_usb_probe(struct usb_interface *intf,
usb_fill_control_urb(dev->urb_ctl, udev, usb_sndctrlpipe(udev, 0),
(void *)dev->ctl_req, dev->ctl_data, USB_PKT_LEN,
cm109_urb_ctl_callback, dev);
- dev->urb_ctl->setup_dma = dev->ctl_req_dma;
dev->urb_ctl->transfer_dma = dev->ctl_dma;
- dev->urb_ctl->transfer_flags |= URB_NO_SETUP_DMA_MAP |
- URB_NO_TRANSFER_DMA_MAP;
+ dev->urb_ctl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
dev->urb_ctl->dev = udev;
/* find out the physical bus location */
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index 86afdd1..a93c525 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -464,7 +464,7 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic
remote->in_endpoint = endpoint;
remote->toggle = -1; /* Set to -1 so we will always not match the toggle from the first remote message. */
- remote->in_buffer = usb_buffer_alloc(udev, RECV_SIZE, GFP_ATOMIC, &remote->in_dma);
+ remote->in_buffer = usb_alloc_coherent(udev, RECV_SIZE, GFP_ATOMIC, &remote->in_dma);
if (!remote->in_buffer) {
error = -ENOMEM;
goto fail1;
@@ -543,7 +543,7 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic
return 0;
fail3: usb_free_urb(remote->irq_urb);
- fail2: usb_buffer_free(udev, RECV_SIZE, remote->in_buffer, remote->in_dma);
+ fail2: usb_free_coherent(udev, RECV_SIZE, remote->in_buffer, remote->in_dma);
fail1: kfree(remote);
input_free_device(input_dev);
@@ -564,7 +564,7 @@ static void keyspan_disconnect(struct usb_interface *interface)
input_unregister_device(remote->input);
usb_kill_urb(remote->irq_urb);
usb_free_urb(remote->irq_urb);
- usb_buffer_free(remote->udev, RECV_SIZE, remote->in_buffer, remote->in_dma);
+ usb_free_coherent(remote->udev, RECV_SIZE, remote->in_buffer, remote->in_dma);
kfree(remote);
}
}
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index 668913d..bf170f6 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -64,7 +64,6 @@ struct powermate_device {
dma_addr_t data_dma;
struct urb *irq, *config;
struct usb_ctrlrequest *configcr;
- dma_addr_t configcr_dma;
struct usb_device *udev;
struct input_dev *input;
spinlock_t lock;
@@ -182,8 +181,6 @@ static void powermate_sync_state(struct powermate_device *pm)
usb_fill_control_urb(pm->config, pm->udev, usb_sndctrlpipe(pm->udev, 0),
(void *) pm->configcr, NULL, 0,
powermate_config_complete, pm);
- pm->config->setup_dma = pm->configcr_dma;
- pm->config->transfer_flags |= URB_NO_SETUP_DMA_MAP;
if (usb_submit_urb(pm->config, GFP_ATOMIC))
printk(KERN_ERR "powermate: usb_submit_urb(config) failed");
@@ -276,13 +273,12 @@ static int powermate_input_event(struct input_dev *dev, unsigned int type, unsig
static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_device *pm)
{
- pm->data = usb_buffer_alloc(udev, POWERMATE_PAYLOAD_SIZE_MAX,
- GFP_ATOMIC, &pm->data_dma);
+ pm->data = usb_alloc_coherent(udev, POWERMATE_PAYLOAD_SIZE_MAX,
+ GFP_ATOMIC, &pm->data_dma);
if (!pm->data)
return -1;
- pm->configcr = usb_buffer_alloc(udev, sizeof(*(pm->configcr)),
- GFP_ATOMIC, &pm->configcr_dma);
+ pm->configcr = kmalloc(sizeof(*(pm->configcr)), GFP_KERNEL);
if (!pm->configcr)
return -1;
@@ -291,10 +287,9 @@ static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_dev
static void powermate_free_buffers(struct usb_device *udev, struct powermate_device *pm)
{
- usb_buffer_free(udev, POWERMATE_PAYLOAD_SIZE_MAX,
- pm->data, pm->data_dma);
- usb_buffer_free(udev, sizeof(*(pm->configcr)),
- pm->configcr, pm->configcr_dma);
+ usb_free_coherent(udev, POWERMATE_PAYLOAD_SIZE_MAX,
+ pm->data, pm->data_dma);
+ kfree(pm->configcr);
}
/* Called whenever a USB device matching one in our supported devices table is connected */
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 93a22ac..41201c6 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -111,7 +111,6 @@ struct yealink_dev {
struct yld_ctl_packet *ctl_data;
dma_addr_t ctl_dma;
struct usb_ctrlrequest *ctl_req;
- dma_addr_t ctl_req_dma;
struct urb *urb_ctl;
char phys[64]; /* physical device path */
@@ -836,12 +835,9 @@ static int usb_cleanup(struct yealink_dev *yld, int err)
usb_free_urb(yld->urb_irq);
usb_free_urb(yld->urb_ctl);
- usb_buffer_free(yld->udev, sizeof(*(yld->ctl_req)),
- yld->ctl_req, yld->ctl_req_dma);
- usb_buffer_free(yld->udev, USB_PKT_LEN,
- yld->ctl_data, yld->ctl_dma);
- usb_buffer_free(yld->udev, USB_PKT_LEN,
- yld->irq_data, yld->irq_dma);
+ kfree(yld->ctl_req);
+ usb_free_coherent(yld->udev, USB_PKT_LEN, yld->ctl_data, yld->ctl_dma);
+ usb_free_coherent(yld->udev, USB_PKT_LEN, yld->irq_data, yld->irq_dma);
kfree(yld);
return err;
@@ -886,18 +882,17 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
return usb_cleanup(yld, -ENOMEM);
/* allocate usb buffers */
- yld->irq_data = usb_buffer_alloc(udev, USB_PKT_LEN,
- GFP_ATOMIC, &yld->irq_dma);
+ yld->irq_data = usb_alloc_coherent(udev, USB_PKT_LEN,
+ GFP_ATOMIC, &yld->irq_dma);
if (yld->irq_data == NULL)
return usb_cleanup(yld, -ENOMEM);
- yld->ctl_data = usb_buffer_alloc(udev, USB_PKT_LEN,
- GFP_ATOMIC, &yld->ctl_dma);
+ yld->ctl_data = usb_alloc_coherent(udev, USB_PKT_LEN,
+ GFP_ATOMIC, &yld->ctl_dma);
if (!yld->ctl_data)
return usb_cleanup(yld, -ENOMEM);
- yld->ctl_req = usb_buffer_alloc(udev, sizeof(*(yld->ctl_req)),
- GFP_ATOMIC, &yld->ctl_req_dma);
+ yld->ctl_req = kmalloc(sizeof(*(yld->ctl_req)), GFP_KERNEL);
if (yld->ctl_req == NULL)
return usb_cleanup(yld, -ENOMEM);
@@ -936,10 +931,8 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
usb_fill_control_urb(yld->urb_ctl, udev, usb_sndctrlpipe(udev, 0),
(void *)yld->ctl_req, yld->ctl_data, USB_PKT_LEN,
urb_ctl_callback, yld);
- yld->urb_ctl->setup_dma = yld->ctl_req_dma;
yld->urb_ctl->transfer_dma = yld->ctl_dma;
- yld->urb_ctl->transfer_flags |= URB_NO_SETUP_DMA_MAP |
- URB_NO_TRANSFER_DMA_MAP;
+ yld->urb_ctl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
yld->urb_ctl->dev = udev;
/* find out the physical bus location */
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index 53ec7dd..05edd75 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -806,8 +806,8 @@ static int atp_probe(struct usb_interface *iface,
if (!dev->urb)
goto err_free_devs;
- dev->data = usb_buffer_alloc(dev->udev, dev->info->datalen, GFP_KERNEL,
- &dev->urb->transfer_dma);
+ dev->data = usb_alloc_coherent(dev->udev, dev->info->datalen, GFP_KERNEL,
+ &dev->urb->transfer_dma);
if (!dev->data)
goto err_free_urb;
@@ -862,8 +862,8 @@ static int atp_probe(struct usb_interface *iface,
return 0;
err_free_buffer:
- usb_buffer_free(dev->udev, dev->info->datalen,
- dev->data, dev->urb->transfer_dma);
+ usb_free_coherent(dev->udev, dev->info->datalen,
+ dev->data, dev->urb->transfer_dma);
err_free_urb:
usb_free_urb(dev->urb);
err_free_devs:
@@ -881,8 +881,8 @@ static void atp_disconnect(struct usb_interface *iface)
if (dev) {
usb_kill_urb(dev->urb);
input_unregister_device(dev->input);
- usb_buffer_free(dev->udev, dev->info->datalen,
- dev->data, dev->urb->transfer_dma);
+ usb_free_coherent(dev->udev, dev->info->datalen,
+ dev->data, dev->urb->transfer_dma);
usb_free_urb(dev->urb);
kfree(dev);
}
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index b89879b..6dedded 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -715,15 +715,15 @@ static int bcm5974_probe(struct usb_interface *iface,
if (!dev->tp_urb)
goto err_free_bt_urb;
- dev->bt_data = usb_buffer_alloc(dev->udev,
- dev->cfg.bt_datalen, GFP_KERNEL,
- &dev->bt_urb->transfer_dma);
+ dev->bt_data = usb_alloc_coherent(dev->udev,
+ dev->cfg.bt_datalen, GFP_KERNEL,
+ &dev->bt_urb->transfer_dma);
if (!dev->bt_data)
goto err_free_urb;
- dev->tp_data = usb_buffer_alloc(dev->udev,
- dev->cfg.tp_datalen, GFP_KERNEL,
- &dev->tp_urb->transfer_dma);
+ dev->tp_data = usb_alloc_coherent(dev->udev,
+ dev->cfg.tp_datalen, GFP_KERNEL,
+ &dev->tp_urb->transfer_dma);
if (!dev->tp_data)
goto err_free_bt_buffer;
@@ -765,10 +765,10 @@ static int bcm5974_probe(struct usb_interface *iface,
return 0;
err_free_buffer:
- usb_buffer_free(dev->udev, dev->cfg.tp_datalen,
+ usb_free_coherent(dev->udev, dev->cfg.tp_datalen,
dev->tp_data, dev->tp_urb->transfer_dma);
err_free_bt_buffer:
- usb_buffer_free(dev->udev, dev->cfg.bt_datalen,
+ usb_free_coherent(dev->udev, dev->cfg.bt_datalen,
dev->bt_data, dev->bt_urb->transfer_dma);
err_free_urb:
usb_free_urb(dev->tp_urb);
@@ -788,10 +788,10 @@ static void bcm5974_disconnect(struct usb_interface *iface)
usb_set_intfdata(iface, NULL);
input_unregister_device(dev->input);
- usb_buffer_free(dev->udev, dev->cfg.tp_datalen,
- dev->tp_data, dev->tp_urb->transfer_dma);
- usb_buffer_free(dev->udev, dev->cfg.bt_datalen,
- dev->bt_data, dev->bt_urb->transfer_dma);
+ usb_free_coherent(dev->udev, dev->cfg.tp_datalen,
+ dev->tp_data, dev->tp_urb->transfer_dma);
+ usb_free_coherent(dev->udev, dev->cfg.bt_datalen,
+ dev->bt_data, dev->bt_urb->transfer_dma);
usb_free_urb(dev->tp_urb);
usb_free_urb(dev->bt_urb);
kfree(dev);
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 7e319d6..f34f1db 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -209,4 +209,20 @@ config SERIO_ALTERA_PS2
To compile this driver as a module, choose M here: the
module will be called altera_ps2.
+config SERIO_AMS_DELTA
+ tristate "Amstrad Delta (E3) mailboard support"
+ depends on MACH_AMS_DELTA
+ default y
+ select AMS_DELTA_FIQ
+ ---help---
+ Say Y here if you have an E3 and want to use its mailboard,
+ or any standard AT keyboard connected to the mailboard port.
+
+ When used for the E3 mailboard, a non-standard key table
+ must be loaded from userspace, possibly using the keymap
+ helper utility provided by udev extras.
+
+ To compile this driver as a module, choose M here;
+ the module will be called ams_delta_serio.
+
endif
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index bf945f7..84c80bf 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -21,5 +21,6 @@ obj-$(CONFIG_SERIO_PCIPS2) += pcips2.o
obj-$(CONFIG_SERIO_MACEPS2) += maceps2.o
obj-$(CONFIG_SERIO_LIBPS2) += libps2.o
obj-$(CONFIG_SERIO_RAW) += serio_raw.o
+obj-$(CONFIG_SERIO_AMS_DELTA) += ams_delta_serio.o
obj-$(CONFIG_SERIO_XILINX_XPS_PS2) += xilinx_ps2.o
obj-$(CONFIG_SERIO_ALTERA_PS2) += altera_ps2.o
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
new file mode 100644
index 0000000..8f1770e
--- /dev/null
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -0,0 +1,177 @@
+/*
+ * Amstrad E3 (Delta) keyboard port driver
+ *
+ * Copyright (c) 2006 Matt Callow
+ * Copyright (c) 2010 Janusz Krzysztofik
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Thanks to Cliff Lawson for his help
+ *
+ * The Amstrad Delta keyboard (aka mailboard) uses normal PC-AT style serial
+ * transmission. The keyboard port is formed of two GPIO lines, for clock
+ * and data. Due to strict timing requirements of the interface,
+ * the serial data stream is read and processed by a FIQ handler.
+ * The resulting words are fetched by this driver from a circular buffer.
+ *
+ * Standard AT keyboard driver (atkbd) is used for handling the keyboard data.
+ * However, when used with the E3 mailboard that produces non-standard
+ * scancodes, a custom key table must be prepared and loaded from userspace.
+ */
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+
+#include <asm/mach-types.h>
+#include <plat/board-ams-delta.h>
+
+#include <mach/ams-delta-fiq.h>
+
+MODULE_AUTHOR("Matt Callow");
+MODULE_DESCRIPTION("AMS Delta (E3) keyboard port driver");
+MODULE_LICENSE("GPL");
+
+static struct serio *ams_delta_serio;
+
+static int check_data(int data)
+{
+ int i, parity = 0;
+
+ /* check valid stop bit */
+ if (!(data & 0x400)) {
+ dev_warn(&ams_delta_serio->dev,
+ "invalid stop bit, data=0x%X\n",
+ data);
+ return SERIO_FRAME;
+ }
+ /* calculate the parity */
+ for (i = 1; i < 10; i++) {
+ if (data & (1 << i))
+ parity++;
+ }
+ /* it should be odd */
+ if (!(parity & 0x01)) {
+ dev_warn(&ams_delta_serio->dev,
+ "paritiy check failed, data=0x%X parity=0x%X\n",
+ data, parity);
+ return SERIO_PARITY;
+ }
+ return 0;
+}
+
+static irqreturn_t ams_delta_serio_interrupt(int irq, void *dev_id)
+{
+ int *circ_buff = &fiq_buffer[FIQ_CIRC_BUFF];
+ int data, dfl;
+ u8 scancode;
+
+ fiq_buffer[FIQ_IRQ_PEND] = 0;
+
+ /*
+ * Read data from the circular buffer, check it
+ * and then pass it on to the serio layer
+ */
+ while (fiq_buffer[FIQ_KEYS_CNT] > 0) {
+
+ data = circ_buff[fiq_buffer[FIQ_HEAD_OFFSET]++];
+ fiq_buffer[FIQ_KEYS_CNT]--;
+ if (fiq_buffer[FIQ_HEAD_OFFSET] == fiq_buffer[FIQ_BUF_LEN])
+ fiq_buffer[FIQ_HEAD_OFFSET] = 0;
+
+ dfl = check_data(data);
+ scancode = (u8) (data >> 1) & 0xFF;
+ serio_interrupt(ams_delta_serio, scancode, dfl);
+ }
+ return IRQ_HANDLED;
+}
+
+static int ams_delta_serio_open(struct serio *serio)
+{
+ /* enable keyboard */
+ ams_delta_latch2_write(AMD_DELTA_LATCH2_KEYBRD_PWR,
+ AMD_DELTA_LATCH2_KEYBRD_PWR);
+
+ return 0;
+}
+
+static void ams_delta_serio_close(struct serio *serio)
+{
+ /* disable keyboard */
+ ams_delta_latch2_write(AMD_DELTA_LATCH2_KEYBRD_PWR, 0);
+}
+
+static int __init ams_delta_serio_init(void)
+{
+ int err;
+
+ if (!machine_is_ams_delta())
+ return -ENODEV;
+
+ ams_delta_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!ams_delta_serio)
+ return -ENOMEM;
+
+ ams_delta_serio->id.type = SERIO_8042;
+ ams_delta_serio->open = ams_delta_serio_open;
+ ams_delta_serio->close = ams_delta_serio_close;
+ strlcpy(ams_delta_serio->name, "AMS DELTA keyboard adapter",
+ sizeof(ams_delta_serio->name));
+ strlcpy(ams_delta_serio->phys, "GPIO/serio0",
+ sizeof(ams_delta_serio->phys));
+
+ err = gpio_request(AMS_DELTA_GPIO_PIN_KEYBRD_DATA, "serio-data");
+ if (err) {
+ pr_err("ams_delta_serio: Couldn't request gpio pin for data\n");
+ goto serio;
+ }
+ gpio_direction_input(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+
+ err = gpio_request(AMS_DELTA_GPIO_PIN_KEYBRD_CLK, "serio-clock");
+ if (err) {
+ pr_err("ams_delta_serio: couldn't request gpio pin for clock\n");
+ goto gpio_data;
+ }
+ gpio_direction_input(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
+
+ err = request_irq(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
+ ams_delta_serio_interrupt, IRQ_TYPE_EDGE_RISING,
+ "ams-delta-serio", 0);
+ if (err < 0) {
+ pr_err("ams_delta_serio: couldn't request gpio interrupt %d\n",
+ gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
+ goto gpio_clk;
+ }
+ /*
+ * Since GPIO register handling for keyboard clock pin is performed
+ * at FIQ level, switch back from edge to simple interrupt handler
+ * to avoid bad interaction.
+ */
+ set_irq_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
+ handle_simple_irq);
+
+ serio_register_port(ams_delta_serio);
+ dev_info(&ams_delta_serio->dev, "%s\n", ams_delta_serio->name);
+
+ return 0;
+gpio_clk:
+ gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
+gpio_data:
+ gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+serio:
+ kfree(ams_delta_serio);
+ return err;
+}
+module_init(ams_delta_serio_init);
+
+static void __exit ams_delta_serio_exit(void)
+{
+ serio_unregister_port(ams_delta_serio);
+ free_irq(OMAP_GPIO_IRQ(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), 0);
+ gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
+ gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+ kfree(ams_delta_serio);
+}
+module_exit(ams_delta_serio_exit);
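For illustration only, not part of the patch above: a minimal, self-contained C sketch of how the 11-bit AT-style word assembled by the FIQ handler is laid out and validated, mirroring the stop-bit check, odd-parity check, and scancode extraction performed by check_data() and ams_delta_serio_interrupt() in the new driver. The exact frame layout shown (start bit, eight data bits LSB first, parity, stop) and the sample scancode value are assumptions used for demonstration.

	/* Illustrative sketch, not part of the kernel patch: decode one word
	 * as assembled by the FIQ handler (bit 0 start, bits 1-8 data LSB
	 * first, bit 9 odd parity, bit 10 stop).
	 */
	#include <stdio.h>

	static int decode_at_word(int data)
	{
		int i, parity = 0;

		if (!(data & 0x400))		/* stop bit must be high */
			return -1;
		for (i = 1; i < 10; i++)	/* 8 data bits + parity bit */
			if (data & (1 << i))
				parity++;
		if (!(parity & 0x01))		/* total number of ones must be odd */
			return -1;
		return (data >> 1) & 0xFF;	/* recover the scancode */
	}

	int main(void)
	{
		/* 0x1E has four data ones, so the parity bit (bit 9) is set */
		int word = (1 << 10) | (1 << 9) | (0x1E << 1);

		printf("scancode: 0x%02X\n", decode_at_word(word));
		return 0;
	}

Building and running this prints "scancode: 0x1E", the same value the driver would hand to serio_interrupt() for that word.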
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index 37d0539..aea9a93 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -155,7 +155,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
goto fail1;
}
- acecad->data = usb_buffer_alloc(dev, 8, GFP_KERNEL, &acecad->data_dma);
+ acecad->data = usb_alloc_coherent(dev, 8, GFP_KERNEL, &acecad->data_dma);
if (!acecad->data) {
err= -ENOMEM;
goto fail1;
@@ -235,7 +235,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
return 0;
- fail2: usb_buffer_free(dev, 8, acecad->data, acecad->data_dma);
+ fail2: usb_free_coherent(dev, 8, acecad->data, acecad->data_dma);
fail1: input_free_device(input_dev);
kfree(acecad);
return err;
@@ -249,7 +249,7 @@ static void usb_acecad_disconnect(struct usb_interface *intf)
input_unregister_device(acecad->input);
usb_free_urb(acecad->irq);
- usb_buffer_free(acecad->usbdev, 8, acecad->data, acecad->data_dma);
+ usb_free_coherent(acecad->usbdev, 8, acecad->data, acecad->data_dma);
kfree(acecad);
}
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 4be039d..51b80b0 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1711,8 +1711,8 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto fail1;
}
- aiptek->data = usb_buffer_alloc(usbdev, AIPTEK_PACKET_LENGTH,
- GFP_ATOMIC, &aiptek->data_dma);
+ aiptek->data = usb_alloc_coherent(usbdev, AIPTEK_PACKET_LENGTH,
+ GFP_ATOMIC, &aiptek->data_dma);
if (!aiptek->data) {
dev_warn(&intf->dev, "cannot allocate usb buffer\n");
goto fail1;
@@ -1884,8 +1884,8 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
fail4: sysfs_remove_group(&intf->dev.kobj, &aiptek_attribute_group);
fail3: usb_free_urb(aiptek->urb);
- fail2: usb_buffer_free(usbdev, AIPTEK_PACKET_LENGTH, aiptek->data,
- aiptek->data_dma);
+ fail2: usb_free_coherent(usbdev, AIPTEK_PACKET_LENGTH, aiptek->data,
+ aiptek->data_dma);
fail1: usb_set_intfdata(intf, NULL);
input_free_device(inputdev);
kfree(aiptek);
@@ -1909,9 +1909,9 @@ static void aiptek_disconnect(struct usb_interface *intf)
input_unregister_device(aiptek->inputdev);
sysfs_remove_group(&intf->dev.kobj, &aiptek_attribute_group);
usb_free_urb(aiptek->urb);
- usb_buffer_free(interface_to_usbdev(intf),
- AIPTEK_PACKET_LENGTH,
- aiptek->data, aiptek->data_dma);
+ usb_free_coherent(interface_to_usbdev(intf),
+ AIPTEK_PACKET_LENGTH,
+ aiptek->data, aiptek->data_dma);
kfree(aiptek);
}
}
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 866a9ee..8ea6afe 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -850,8 +850,8 @@ static int gtco_probe(struct usb_interface *usbinterface,
gtco->usbdev = usb_get_dev(interface_to_usbdev(usbinterface));
/* Allocate some data for incoming reports */
- gtco->buffer = usb_buffer_alloc(gtco->usbdev, REPORT_MAX_SIZE,
- GFP_KERNEL, &gtco->buf_dma);
+ gtco->buffer = usb_alloc_coherent(gtco->usbdev, REPORT_MAX_SIZE,
+ GFP_KERNEL, &gtco->buf_dma);
if (!gtco->buffer) {
err("No more memory for us buffers");
error = -ENOMEM;
@@ -982,8 +982,8 @@ static int gtco_probe(struct usb_interface *usbinterface,
err_free_urb:
usb_free_urb(gtco->urbinfo);
err_free_buf:
- usb_buffer_free(gtco->usbdev, REPORT_MAX_SIZE,
- gtco->buffer, gtco->buf_dma);
+ usb_free_coherent(gtco->usbdev, REPORT_MAX_SIZE,
+ gtco->buffer, gtco->buf_dma);
err_free_devs:
input_free_device(input_dev);
kfree(gtco);
@@ -1005,8 +1005,8 @@ static void gtco_disconnect(struct usb_interface *interface)
input_unregister_device(gtco->inputdevice);
usb_kill_urb(gtco->urbinfo);
usb_free_urb(gtco->urbinfo);
- usb_buffer_free(gtco->usbdev, REPORT_MAX_SIZE,
- gtco->buffer, gtco->buf_dma);
+ usb_free_coherent(gtco->usbdev, REPORT_MAX_SIZE,
+ gtco->buffer, gtco->buf_dma);
kfree(gtco);
}
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index b9969f1..290f4e5 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -122,7 +122,7 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
if (!kbtab || !input_dev)
goto fail1;
- kbtab->data = usb_buffer_alloc(dev, 8, GFP_KERNEL, &kbtab->data_dma);
+ kbtab->data = usb_alloc_coherent(dev, 8, GFP_KERNEL, &kbtab->data_dma);
if (!kbtab->data)
goto fail1;
@@ -173,7 +173,7 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
return 0;
fail3: usb_free_urb(kbtab->irq);
- fail2: usb_buffer_free(dev, 8, kbtab->data, kbtab->data_dma);
+ fail2: usb_free_coherent(dev, 8, kbtab->data, kbtab->data_dma);
fail1: input_free_device(input_dev);
kfree(kbtab);
return error;
@@ -187,7 +187,7 @@ static void kbtab_disconnect(struct usb_interface *intf)
input_unregister_device(kbtab->dev);
usb_free_urb(kbtab->irq);
- usb_buffer_free(kbtab->usbdev, 8, kbtab->data, kbtab->data_dma);
+ usb_free_coherent(kbtab->usbdev, 8, kbtab->data, kbtab->data_dma);
kfree(kbtab);
}
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index d90f4e0..2dc0c07 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -465,8 +465,8 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
goto fail1;
}
- wacom_wac->data = usb_buffer_alloc(dev, WACOM_PKGLEN_MAX,
- GFP_KERNEL, &wacom->data_dma);
+ wacom_wac->data = usb_alloc_coherent(dev, WACOM_PKGLEN_MAX,
+ GFP_KERNEL, &wacom->data_dma);
if (!wacom_wac->data) {
error = -ENOMEM;
goto fail1;
@@ -536,7 +536,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
fail4: wacom_remove_shared_data(wacom_wac);
fail3: usb_free_urb(wacom->irq);
- fail2: usb_buffer_free(dev, WACOM_PKGLEN_MAX, wacom_wac->data, wacom->data_dma);
+ fail2: usb_free_coherent(dev, WACOM_PKGLEN_MAX, wacom_wac->data, wacom->data_dma);
fail1: input_free_device(input_dev);
kfree(wacom);
return error;
@@ -551,7 +551,7 @@ static void wacom_disconnect(struct usb_interface *intf)
usb_kill_urb(wacom->irq);
input_unregister_device(wacom->wacom_wac.input);
usb_free_urb(wacom->irq);
- usb_buffer_free(interface_to_usbdev(intf), WACOM_PKGLEN_MAX,
+ usb_free_coherent(interface_to_usbdev(intf), WACOM_PKGLEN_MAX,
wacom->wacom_wac.data, wacom->data_dma);
wacom_remove_shared_data(&wacom->wacom_wac);
kfree(wacom);
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 0b0ae2e..29a8bbf 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -1290,8 +1290,8 @@ static void usbtouch_close(struct input_dev *input)
static void usbtouch_free_buffers(struct usb_device *udev,
struct usbtouch_usb *usbtouch)
{
- usb_buffer_free(udev, usbtouch->type->rept_size,
- usbtouch->data, usbtouch->data_dma);
+ usb_free_coherent(udev, usbtouch->type->rept_size,
+ usbtouch->data, usbtouch->data_dma);
kfree(usbtouch->buffer);
}
@@ -1335,8 +1335,8 @@ static int usbtouch_probe(struct usb_interface *intf,
if (!type->process_pkt)
type->process_pkt = usbtouch_process_pkt;
- usbtouch->data = usb_buffer_alloc(udev, type->rept_size,
- GFP_KERNEL, &usbtouch->data_dma);
+ usbtouch->data = usb_alloc_coherent(udev, type->rept_size,
+ GFP_KERNEL, &usbtouch->data_dma);
if (!usbtouch->data)
goto out_free;
diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
index efcf1f9..fd10d7c 100644
--- a/drivers/isdn/i4l/isdn_x25iface.c
+++ b/drivers/isdn/i4l/isdn_x25iface.c
@@ -194,7 +194,7 @@ static int isdn_x25iface_receive(struct concap_proto *cprot, struct sk_buff *skb
if ( ( (ix25_pdata_t*) (cprot->proto_data) )
-> state == WAN_CONNECTED ){
if( skb_push(skb, 1)){
- skb -> data[0]=0x00;
+ skb->data[0] = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, cprot->net_dev);
netif_rx(skb);
return 0;
@@ -224,7 +224,7 @@ static int isdn_x25iface_connect_ind(struct concap_proto *cprot)
skb = dev_alloc_skb(1);
if( skb ){
- *( skb_put(skb, 1) ) = 0x01;
+ *(skb_put(skb, 1)) = X25_IFACE_CONNECT;
skb->protocol = x25_type_trans(skb, cprot->net_dev);
netif_rx(skb);
return 0;
@@ -253,7 +253,7 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *cprot)
*state_p = WAN_DISCONNECTED;
skb = dev_alloc_skb(1);
if( skb ){
- *( skb_put(skb, 1) ) = 0x02;
+ *(skb_put(skb, 1)) = X25_IFACE_DISCONNECT;
skb->protocol = x25_type_trans(skb, cprot->net_dev);
netif_rx(skb);
return 0;
@@ -272,9 +272,10 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
unsigned char firstbyte = skb->data[0];
enum wan_states *state = &((ix25_pdata_t*)cprot->proto_data)->state;
int ret = 0;
- IX25DEBUG( "isdn_x25iface_xmit: %s first=%x state=%d \n", MY_DEVNAME(cprot -> net_dev), firstbyte, *state );
+ IX25DEBUG("isdn_x25iface_xmit: %s first=%x state=%d\n",
+ MY_DEVNAME(cprot->net_dev), firstbyte, *state);
switch ( firstbyte ){
- case 0x00: /* dl_data request */
+ case X25_IFACE_DATA:
if( *state == WAN_CONNECTED ){
skb_pull(skb, 1);
cprot -> net_dev -> trans_start = jiffies;
@@ -285,7 +286,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
}
illegal_state_warn( *state, firstbyte );
break;
- case 0x01: /* dl_connect request */
+ case X25_IFACE_CONNECT:
if( *state == WAN_DISCONNECTED ){
*state = WAN_CONNECTING;
ret = cprot -> dops -> connect_req(cprot);
@@ -298,7 +299,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
illegal_state_warn( *state, firstbyte );
}
break;
- case 0x02: /* dl_disconnect request */
+ case X25_IFACE_DISCONNECT:
switch ( *state ){
case WAN_DISCONNECTED:
/* Should not happen. However, give upper layer a
@@ -318,7 +319,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
illegal_state_warn( *state, firstbyte );
}
break;
- case 0x03: /* changing lapb parameters requested */
+ case X25_IFACE_PARAMS:
printk(KERN_WARNING "isdn_x25iface_xmit: setting of lapb"
" options not yet supported\n");
break;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 26ac8aa..f084249 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1678,9 +1678,9 @@ int bitmap_create(mddev_t *mddev)
bitmap->mddev = mddev;
- bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
+ bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
if (bm) {
- bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
+ bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear");
sysfs_put(bm);
} else
bitmap->sysfs_can_clear = NULL;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cefd63d..a9fd491 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1766,7 +1766,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
kobject_del(&rdev->kobj);
goto fail;
}
- rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");
+ rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, NULL, "state");
list_add_rcu(&rdev->same_set, &mddev->disks);
bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
@@ -4189,7 +4189,7 @@ static int md_alloc(dev_t dev, char *name)
mutex_unlock(&disks_mutex);
if (!error) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
- mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
+ mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, NULL, "array_state");
}
mddev_put(mddev);
return error;
@@ -4398,7 +4398,7 @@ static int do_md_run(mddev_t * mddev)
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
- mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
+ mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 441c064..cccea41 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -1109,14 +1109,14 @@ static int dvb_net_feed_stop(struct net_device *dev)
}
-static int dvb_set_mc_filter (struct net_device *dev, struct dev_mc_list *mc)
+static int dvb_set_mc_filter(struct net_device *dev, unsigned char *addr)
{
struct dvb_net_priv *priv = netdev_priv(dev);
if (priv->multi_num == DVB_NET_MULTICAST_MAX)
return -ENOMEM;
- memcpy(priv->multi_macs[priv->multi_num], mc->dmi_addr, 6);
+ memcpy(priv->multi_macs[priv->multi_num], addr, ETH_ALEN);
priv->multi_num++;
return 0;
@@ -1140,8 +1140,7 @@ static void wq_set_multicast_list (struct work_struct *work)
dprintk("%s: allmulti mode\n", dev->name);
priv->rx_mode = RX_MODE_ALL_MULTI;
} else if (!netdev_mc_empty(dev)) {
- int mci;
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
dprintk("%s: set_mc_list, %d entries\n",
dev->name, netdev_mc_count(dev));
@@ -1149,11 +1148,8 @@ static void wq_set_multicast_list (struct work_struct *work)
priv->rx_mode = RX_MODE_MULTI;
priv->multi_num = 0;
- for (mci = 0, mc=dev->mc_list;
- mci < netdev_mc_count(dev);
- mc = mc->next, mci++) {
- dvb_set_mc_filter(dev, mc);
- }
+ netdev_for_each_mc_addr(ha, dev)
+ dvb_set_mc_filter(dev, ha->addr);
}
netif_addr_unlock_bh(dev);
diff --git a/drivers/media/dvb/dvb-usb/usb-urb.c b/drivers/media/dvb/dvb-usb/usb-urb.c
index f9702e3..86d6893 100644
--- a/drivers/media/dvb/dvb-usb/usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/usb-urb.c
@@ -96,8 +96,9 @@ static int usb_free_stream_buffers(struct usb_data_stream *stream)
while (stream->buf_num) {
stream->buf_num--;
deb_mem("freeing buffer %d\n",stream->buf_num);
- usb_buffer_free(stream->udev, stream->buf_size,
- stream->buf_list[stream->buf_num], stream->dma_addr[stream->buf_num]);
+ usb_free_coherent(stream->udev, stream->buf_size,
+ stream->buf_list[stream->buf_num],
+ stream->dma_addr[stream->buf_num]);
}
}
@@ -116,7 +117,7 @@ static int usb_allocate_stream_buffers(struct usb_data_stream *stream, int num,
for (stream->buf_num = 0; stream->buf_num < num; stream->buf_num++) {
deb_mem("allocating buffer %d\n",stream->buf_num);
if (( stream->buf_list[stream->buf_num] =
- usb_buffer_alloc(stream->udev, size, GFP_ATOMIC,
+ usb_alloc_coherent(stream->udev, size, GFP_ATOMIC,
&stream->dma_addr[stream->buf_num]) ) == NULL) {
deb_mem("not enough memory for urb-buffer allocation.\n");
usb_free_stream_buffers(stream);
diff --git a/drivers/media/dvb/ttusb-dec/ttusb_dec.c b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
index 53baccb..fe1b803 100644
--- a/drivers/media/dvb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
@@ -1257,7 +1257,7 @@ static int ttusb_dec_init_usb(struct ttusb_dec *dec)
if(!dec->irq_urb) {
return -ENOMEM;
}
- dec->irq_buffer = usb_buffer_alloc(dec->udev,IRQ_PACKET_SIZE,
+ dec->irq_buffer = usb_alloc_coherent(dec->udev,IRQ_PACKET_SIZE,
GFP_ATOMIC, &dec->irq_dma_handle);
if(!dec->irq_buffer) {
usb_free_urb(dec->irq_urb);
@@ -1550,8 +1550,8 @@ static void ttusb_dec_exit_rc(struct ttusb_dec *dec)
usb_free_urb(dec->irq_urb);
- usb_buffer_free(dec->udev,IRQ_PACKET_SIZE,
- dec->irq_buffer, dec->irq_dma_handle);
+ usb_free_coherent(dec->udev,IRQ_PACKET_SIZE,
+ dec->irq_buffer, dec->irq_dma_handle);
if (dec->rc_input_dev) {
input_unregister_device(dec->rc_input_dev);
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index 6615021..52f25aa 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -177,7 +177,7 @@ void au0828_uninit_isoc(struct au0828_dev *dev)
usb_unlink_urb(urb);
if (dev->isoc_ctl.transfer_buffer[i]) {
- usb_buffer_free(dev->usbdev,
+ usb_free_coherent(dev->usbdev,
urb->transfer_buffer_length,
dev->isoc_ctl.transfer_buffer[i],
urb->transfer_dma);
@@ -247,7 +247,7 @@ int au0828_init_isoc(struct au0828_dev *dev, int max_packets,
}
dev->isoc_ctl.urb[i] = urb;
- dev->isoc_ctl.transfer_buffer[i] = usb_buffer_alloc(dev->usbdev,
+ dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->usbdev,
sb_size, GFP_KERNEL, &urb->transfer_dma);
if (!dev->isoc_ctl.transfer_buffer[i]) {
printk("unable to allocate %i bytes for transfer"
diff --git a/drivers/media/video/cx231xx/cx231xx-core.c b/drivers/media/video/cx231xx/cx231xx-core.c
index f5e1a23..912a4d7 100644
--- a/drivers/media/video/cx231xx/cx231xx-core.c
+++ b/drivers/media/video/cx231xx/cx231xx-core.c
@@ -676,11 +676,11 @@ void cx231xx_uninit_isoc(struct cx231xx *dev)
usb_unlink_urb(urb);
if (dev->video_mode.isoc_ctl.transfer_buffer[i]) {
- usb_buffer_free(dev->udev,
- urb->transfer_buffer_length,
- dev->video_mode.isoc_ctl.
- transfer_buffer[i],
- urb->transfer_dma);
+ usb_free_coherent(dev->udev,
+ urb->transfer_buffer_length,
+ dev->video_mode.isoc_ctl.
+ transfer_buffer[i],
+ urb->transfer_dma);
}
usb_free_urb(urb);
dev->video_mode.isoc_ctl.urb[i] = NULL;
@@ -767,8 +767,8 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
dev->video_mode.isoc_ctl.urb[i] = urb;
dev->video_mode.isoc_ctl.transfer_buffer[i] =
- usb_buffer_alloc(dev->udev, sb_size, GFP_KERNEL,
- &urb->transfer_dma);
+ usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL,
+ &urb->transfer_dma);
if (!dev->video_mode.isoc_ctl.transfer_buffer[i]) {
cx231xx_err("unable to allocate %i bytes for transfer"
" buffer %i%s\n",
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index d3813ed..331e1ca 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -970,7 +970,7 @@ void em28xx_uninit_isoc(struct em28xx *dev)
usb_unlink_urb(urb);
if (dev->isoc_ctl.transfer_buffer[i]) {
- usb_buffer_free(dev->udev,
+ usb_free_coherent(dev->udev,
urb->transfer_buffer_length,
dev->isoc_ctl.transfer_buffer[i],
urb->transfer_dma);
@@ -1045,7 +1045,7 @@ int em28xx_init_isoc(struct em28xx *dev, int max_packets,
}
dev->isoc_ctl.urb[i] = urb;
- dev->isoc_ctl.transfer_buffer[i] = usb_buffer_alloc(dev->udev,
+ dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->udev,
sb_size, GFP_KERNEL, &urb->transfer_dma);
if (!dev->isoc_ctl.transfer_buffer[i]) {
em28xx_err("unable to allocate %i bytes for transfer"
diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c
index 43ac4af..fce8d94 100644
--- a/drivers/media/video/gspca/benq.c
+++ b/drivers/media/video/gspca/benq.c
@@ -117,13 +117,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
return -ENOMEM;
}
gspca_dev->urb[n] = urb;
- urb->transfer_buffer = usb_buffer_alloc(gspca_dev->dev,
+ urb->transfer_buffer = usb_alloc_coherent(gspca_dev->dev,
SD_PKT_SZ * SD_NPKT,
GFP_KERNEL,
&urb->transfer_dma);
if (urb->transfer_buffer == NULL) {
- err("usb_buffer_alloc failed");
+ err("usb_alloc_coherent failed");
return -ENOMEM;
}
urb->dev = gspca_dev->dev;
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index efe6159..678675b 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -213,7 +213,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
goto error;
}
- buffer = usb_buffer_alloc(dev, buffer_len,
+ buffer = usb_alloc_coherent(dev, buffer_len,
GFP_KERNEL, &urb->transfer_dma);
if (!buffer) {
ret = -ENOMEM;
@@ -232,10 +232,10 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
return ret;
error_submit:
- usb_buffer_free(dev,
- urb->transfer_buffer_length,
- urb->transfer_buffer,
- urb->transfer_dma);
+ usb_free_coherent(dev,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer,
+ urb->transfer_dma);
error_buffer:
usb_free_urb(urb);
error:
@@ -272,10 +272,10 @@ static void gspca_input_destroy_urb(struct gspca_dev *gspca_dev)
if (urb) {
gspca_dev->int_urb = NULL;
usb_kill_urb(urb);
- usb_buffer_free(gspca_dev->dev,
- urb->transfer_buffer_length,
- urb->transfer_buffer,
- urb->transfer_dma);
+ usb_free_coherent(gspca_dev->dev,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer,
+ urb->transfer_dma);
usb_free_urb(urb);
}
}
@@ -605,10 +605,10 @@ static void destroy_urbs(struct gspca_dev *gspca_dev)
gspca_dev->urb[i] = NULL;
usb_kill_urb(urb);
if (urb->transfer_buffer != NULL)
- usb_buffer_free(gspca_dev->dev,
- urb->transfer_buffer_length,
- urb->transfer_buffer,
- urb->transfer_dma);
+ usb_free_coherent(gspca_dev->dev,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer,
+ urb->transfer_dma);
usb_free_urb(urb);
}
}
@@ -760,13 +760,13 @@ static int create_urbs(struct gspca_dev *gspca_dev,
return -ENOMEM;
}
gspca_dev->urb[n] = urb;
- urb->transfer_buffer = usb_buffer_alloc(gspca_dev->dev,
+ urb->transfer_buffer = usb_alloc_coherent(gspca_dev->dev,
bsize,
GFP_KERNEL,
&urb->transfer_dma);
if (urb->transfer_buffer == NULL) {
- err("usb_buffer_alloc failed");
+ err("usb_alloc_coherent failed");
return -ENOMEM;
}
urb->dev = gspca_dev->dev;
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index d2f0ee2..7cfccfd 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -92,8 +92,8 @@ static int hdpvr_free_queue(struct list_head *q)
buf = list_entry(p, struct hdpvr_buffer, buff_list);
urb = buf->urb;
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
usb_free_urb(urb);
tmp = p->next;
list_del(p);
@@ -143,8 +143,8 @@ int hdpvr_alloc_buffers(struct hdpvr_device *dev, uint count)
}
buf->urb = urb;
- mem = usb_buffer_alloc(dev->udev, dev->bulk_in_size, GFP_KERNEL,
- &urb->transfer_dma);
+ mem = usb_alloc_coherent(dev->udev, dev->bulk_in_size, GFP_KERNEL,
+ &urb->transfer_dma);
if (!mem) {
v4l2_err(&dev->v4l2_dev,
"cannot allocate usb transfer buffer\n");
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index c267e0c..256cc55 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -454,8 +454,8 @@ static int poseidon_probe(struct usb_interface *interface,
device_init_wakeup(&udev->dev, 1);
#ifdef CONFIG_PM
- pd->udev->autosuspend_disabled = 0;
pd->udev->autosuspend_delay = HZ * PM_SUSPEND_DELAY;
+ usb_enable_autosuspend(pd->udev);
if (in_hibernation(pd)) {
INIT_WORK(&pd->pm_work, hibernation_resume);
diff --git a/drivers/media/video/tlg2300/pd-video.c b/drivers/media/video/tlg2300/pd-video.c
index c750fd1..d0cc012 100644
--- a/drivers/media/video/tlg2300/pd-video.c
+++ b/drivers/media/video/tlg2300/pd-video.c
@@ -478,10 +478,10 @@ static int prepare_iso_urb(struct video_data *video)
goto out;
video->urb_array[i] = urb;
- mem = usb_buffer_alloc(udev,
- ISO_PKT_SIZE * PK_PER_URB,
- GFP_KERNEL,
- &urb->transfer_dma);
+ mem = usb_alloc_coherent(udev,
+ ISO_PKT_SIZE * PK_PER_URB,
+ GFP_KERNEL,
+ &urb->transfer_dma);
urb->complete = urb_complete_iso; /* handler */
urb->dev = udev;
@@ -521,8 +521,8 @@ int alloc_bulk_urbs_generic(struct urb **urb_array, int num,
if (urb == NULL)
return i;
- mem = usb_buffer_alloc(udev, buf_size, gfp_flags,
- &urb->transfer_dma);
+ mem = usb_alloc_coherent(udev, buf_size, gfp_flags,
+ &urb->transfer_dma);
if (mem == NULL)
return i;
@@ -542,7 +542,7 @@ void free_all_urb_generic(struct urb **urb_array, int num)
for (i = 0; i < num; i++) {
urb = urb_array[i];
if (urb) {
- usb_buffer_free(urb->dev,
+ usb_free_coherent(urb->dev,
urb->transfer_buffer_length,
urb->transfer_buffer,
urb->transfer_dma);
diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
index f7aae22..b9dd74f 100644
--- a/drivers/media/video/usbvision/usbvision-core.c
+++ b/drivers/media/video/usbvision/usbvision-core.c
@@ -2493,10 +2493,10 @@ int usbvision_init_isoc(struct usb_usbvision *usbvision)
}
usbvision->sbuf[bufIdx].urb = urb;
usbvision->sbuf[bufIdx].data =
- usb_buffer_alloc(usbvision->dev,
- sb_size,
- GFP_KERNEL,
- &urb->transfer_dma);
+ usb_alloc_coherent(usbvision->dev,
+ sb_size,
+ GFP_KERNEL,
+ &urb->transfer_dma);
urb->dev = dev;
urb->context = usbvision;
urb->pipe = usb_rcvisocpipe(dev, usbvision->video_endp);
@@ -2552,10 +2552,10 @@ void usbvision_stop_isoc(struct usb_usbvision *usbvision)
for (bufIdx = 0; bufIdx < USBVISION_NUMSBUF; bufIdx++) {
usb_kill_urb(usbvision->sbuf[bufIdx].urb);
if (usbvision->sbuf[bufIdx].data){
- usb_buffer_free(usbvision->dev,
- sb_size,
- usbvision->sbuf[bufIdx].data,
- usbvision->sbuf[bufIdx].urb->transfer_dma);
+ usb_free_coherent(usbvision->dev,
+ sb_size,
+ usbvision->sbuf[bufIdx].data,
+ usbvision->sbuf[bufIdx].urb->transfer_dma);
}
usb_free_urb(usbvision->sbuf[bufIdx].urb);
usbvision->sbuf[bufIdx].urb = NULL;
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 821a996..53f3ef4 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -739,7 +739,7 @@ static void uvc_free_urb_buffers(struct uvc_streaming *stream)
for (i = 0; i < UVC_URBS; ++i) {
if (stream->urb_buffer[i]) {
- usb_buffer_free(stream->dev->udev, stream->urb_size,
+ usb_free_coherent(stream->dev->udev, stream->urb_size,
stream->urb_buffer[i], stream->urb_dma[i]);
stream->urb_buffer[i] = NULL;
}
@@ -780,7 +780,7 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
for (; npackets > 1; npackets /= 2) {
for (i = 0; i < UVC_URBS; ++i) {
stream->urb_size = psize * npackets;
- stream->urb_buffer[i] = usb_buffer_alloc(
+ stream->urb_buffer[i] = usb_alloc_coherent(
stream->dev->udev, stream->urb_size,
gfp_flags | __GFP_NOWARN, &stream->urb_dma[i]);
if (!stream->urb_buffer[i]) {
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5382b5a..a6a5701 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -5064,7 +5064,7 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
if (!timeleft) {
printk(KERN_DEBUG "%s: Issuing Reset from %s!!\n",
ioc->name, __func__);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
goto out;
@@ -6456,10 +6456,15 @@ out:
issue_hard_reset = 0;
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ if (retry_count == 0) {
+ if (mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP) != 0)
+ retry_count++;
+ } else
+ mpt_HardResetHandler(ioc, CAN_SLEEP);
+
mpt_free_msg_frame(ioc, mf);
/* attempt one retry for a timed out command */
- if (!retry_count) {
+ if (retry_count < 2) {
printk(MYIOC_s_INFO_FMT
"Attempting Retry Config request"
" type 0x%x, page 0x%x,"
@@ -6904,6 +6909,172 @@ mpt_halt_firmware(MPT_ADAPTER *ioc)
}
EXPORT_SYMBOL(mpt_halt_firmware);
+/**
+ * mpt_SoftResetHandler - Issues a less expensive reset
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleepFlag: Indicates if sleep or schedule must be called.
+ *
+ * Returns 0 for SUCCESS or -1 if FAILED.
+ *
+ * Message Unit Reset - instructs the IOC to reset the Reply Post and
+ * Free FIFOs. All Message Frames on the Reply Free FIFO are discarded.
+ * All posted buffers are freed, and event notification is turned off.
+ * The IOC doesn't reply to any outstanding requests. This will transfer
+ * the IOC to the READY state.
+ **/
+int
+mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
+{
+ int rc;
+ int ii;
+ u8 cb_idx;
+ unsigned long flags;
+ u32 ioc_state;
+ unsigned long time_count;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SoftResetHandler Entered!\n",
+ ioc->name));
+
+ ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK;
+
+ if (mpt_fwfault_debug)
+ mpt_halt_firmware(ioc);
+
+ if (ioc_state == MPI_IOC_STATE_FAULT ||
+ ioc_state == MPI_IOC_STATE_RESET) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "skipping, either in FAULT or RESET state!\n", ioc->name));
+ return -1;
+ }
+
+ if (ioc->bus_type == FC) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "skipping, because the bus type is FC!\n", ioc->name));
+ return -1;
+ }
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return -1;
+ }
+ ioc->ioc_reset_in_progress = 1;
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ rc = -1;
+
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx])
+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
+ }
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->taskmgmt_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return -1;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ /* Disable reply interrupts (also blocks FreeQ) */
+ CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
+ ioc->active = 0;
+ time_count = jiffies;
+
+ rc = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag);
+
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx])
+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_PRE_RESET);
+ }
+
+ if (rc)
+ goto out;
+
+ ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK;
+ if (ioc_state != MPI_IOC_STATE_READY)
+ goto out;
+
+ for (ii = 0; ii < 5; ii++) {
+ /* Get IOC facts! Allow 5 retries */
+ rc = GetIocFacts(ioc, sleepFlag,
+ MPT_HOSTEVENT_IOC_RECOVER);
+ if (rc == 0)
+ break;
+ if (sleepFlag == CAN_SLEEP)
+ msleep(100);
+ else
+ mdelay(100);
+ }
+ if (ii == 5)
+ goto out;
+
+ rc = PrimeIocFifos(ioc);
+ if (rc != 0)
+ goto out;
+
+ rc = SendIocInit(ioc, sleepFlag);
+ if (rc != 0)
+ goto out;
+
+ rc = SendEventNotification(ioc, 1, sleepFlag);
+ if (rc != 0)
+ goto out;
+
+ if (ioc->hard_resets < -1)
+ ioc->hard_resets++;
+
+ /*
+ * At this point, we know soft reset succeeded.
+ */
+
+ ioc->active = 1;
+ CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
+
+ out:
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ ioc->ioc_reset_in_progress = 0;
+ ioc->taskmgmt_quiesce_io = 0;
+ ioc->taskmgmt_in_progress = 0;
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ if (ioc->active) { /* otherwise, hard reset coming */
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx])
+ mpt_signal_reset(cb_idx, ioc,
+ MPT_IOC_POST_RESET);
+ }
+ }
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "SoftResetHandler: completed (%d seconds): %s\n",
+ ioc->name, jiffies_to_msecs(jiffies - time_count)/1000,
+ ((rc == 0) ? "SUCCESS" : "FAILED")));
+
+ return rc;
+}
+
+/**
+ * mpt_Soft_Hard_ResetHandler - Try less expensive reset
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleepFlag: Indicates if sleep or schedule must be called.
+ *
+ * Returns 0 for SUCCESS or -1 if FAILED.
+ * Try a soft reset first; only if that fails, fall back to the more
+ * expensive hard reset.
+ **/
+int
+mpt_Soft_Hard_ResetHandler(MPT_ADAPTER *ioc, int sleepFlag) {
+ int ret = -1;
+
+ ret = mpt_SoftResetHandler(ioc, sleepFlag);
+ if (ret == 0)
+ return ret;
+ ret = mpt_HardResetHandler(ioc, sleepFlag);
+ return ret;
+}
+EXPORT_SYMBOL(mpt_Soft_Hard_ResetHandler);
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Reset Handling
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 9718c8f..b613eb3 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
#endif
-#define MPT_LINUX_VERSION_COMMON "3.04.14"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.14"
+#define MPT_LINUX_VERSION_COMMON "3.04.15"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.15"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
@@ -940,6 +940,7 @@ extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp);
extern u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan);
extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
+extern int mpt_Soft_Hard_ResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
extern int mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index caa8f56..f06b291 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -128,7 +128,6 @@ static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags
struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
struct buflist *buflist, MPT_ADAPTER *ioc);
-static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function);
/*
* Reset Handler cleanup function
@@ -275,45 +274,6 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
return 1;
}
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/* mptctl_timeout_expired
- *
- * Expecting an interrupt, however timed out.
- *
- */
-static void
-mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
-{
- unsigned long flags;
-
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
- ioc->name, __func__));
-
- if (mpt_fwfault_debug)
- mpt_halt_firmware(ioc);
-
- spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
- if (ioc->ioc_reset_in_progress) {
- spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
- CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
- mpt_free_msg_frame(ioc, mf);
- return;
- }
- spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
-
-
- if (!mptctl_bus_reset(ioc, mf->u.hdr.Function))
- return;
-
- /* Issue a reset for this device.
- * The IOC is not responding.
- */
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n",
- ioc->name));
- CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
- mpt_HardResetHandler(ioc, CAN_SLEEP);
- mpt_free_msg_frame(ioc, mf);
-}
static int
mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
@@ -343,12 +303,8 @@ mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
return 0;
}
-/* mptctl_bus_reset
- *
- * Bus reset code.
- *
- */
-static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
+static int
+mptctl_do_taskmgmt(MPT_ADAPTER *ioc, u8 tm_type, u8 bus_id, u8 target_id)
{
MPT_FRAME_HDR *mf;
SCSITaskMgmt_t *pScsiTm;
@@ -359,13 +315,6 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
unsigned long time_count;
u16 iocstatus;
- /* bus reset is only good for SCSI IO, RAID PASSTHRU */
- if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
- function == MPI_FUNCTION_SCSI_IO_REQUEST)) {
- dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
- "TaskMgmt, not SCSI_IO!!\n", ioc->name));
- return -EPERM;
- }
mutex_lock(&ioc->taskmgmt_cmds.mutex);
if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
@@ -375,15 +324,14 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
retval = 0;
- /* Send request
- */
mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc);
if (mf == NULL) {
- dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
- "TaskMgmt, no msg frames!!\n", ioc->name));
+ dtmprintk(ioc,
+ printk(MYIOC_s_WARN_FMT "TaskMgmt, no msg frames!!\n",
+ ioc->name));
mpt_clear_taskmgmt_in_progress_flag(ioc);
retval = -ENOMEM;
- goto mptctl_bus_reset_done;
+ goto tm_done;
}
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
@@ -392,10 +340,13 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
pScsiTm = (SCSITaskMgmt_t *) mf;
memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
- pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
- pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
- pScsiTm->TargetID = 0;
- pScsiTm->Bus = 0;
+ pScsiTm->TaskType = tm_type;
+ if ((tm_type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) &&
+ (ioc->bus_type == FC))
+ pScsiTm->MsgFlags =
+ MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
+ pScsiTm->TargetID = target_id;
+ pScsiTm->Bus = bus_id;
pScsiTm->ChainOffset = 0;
pScsiTm->Reserved = 0;
pScsiTm->Reserved1 = 0;
@@ -413,17 +364,16 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
timeout = 30;
break;
case SPI:
- default:
- timeout = 2;
+ default:
+ timeout = 10;
break;
}
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- "TaskMgmt type=%d timeout=%ld\n",
- ioc->name, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, timeout));
+ dtmprintk(ioc,
+ printk(MYIOC_s_DEBUG_FMT "TaskMgmt type=%d timeout=%ld\n",
+ ioc->name, tm_type, timeout));
INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
- CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
time_count = jiffies;
if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
(ioc->facts.MsgVersion >= MPI_VERSION_01_05))
@@ -432,17 +382,20 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc,
sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP);
if (retval != 0) {
- dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ dfailprintk(ioc,
+ printk(MYIOC_s_ERR_FMT
"TaskMgmt send_handshake FAILED!"
" (ioc %p, mf %p, rc=%d) \n", ioc->name,
ioc, mf, retval));
+ mpt_free_msg_frame(ioc, mf);
mpt_clear_taskmgmt_in_progress_flag(ioc);
- goto mptctl_bus_reset_done;
+ goto tm_done;
}
}
/* Now wait for the command to complete */
ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ);
+
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt failed\n", ioc->name));
@@ -452,14 +405,14 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
retval = 0;
else
retval = -1; /* return failure */
- goto mptctl_bus_reset_done;
+ goto tm_done;
}
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt failed\n", ioc->name));
retval = -1; /* return failure */
- goto mptctl_bus_reset_done;
+ goto tm_done;
}
pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
@@ -467,7 +420,7 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
"TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, "
"iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, "
"term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus,
- pScsiTmReply->TargetID, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ pScsiTmReply->TargetID, tm_type,
le16_to_cpu(pScsiTmReply->IOCStatus),
le32_to_cpu(pScsiTmReply->IOCLogInfo),
pScsiTmReply->ResponseCode,
@@ -485,13 +438,71 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
retval = -1; /* return failure */
}
-
- mptctl_bus_reset_done:
+ tm_done:
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
return retval;
}
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* mptctl_timeout_expired
+ *
+ * Expecting an interrupt, however timed out.
+ *
+ */
+static void
+mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
+{
+ unsigned long flags;
+ int ret_val = -1;
+ SCSIIORequest_t *scsi_req = (SCSIIORequest_t *) mf;
+ u8 function = mf->u.hdr.Function;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
+ ioc->name, __func__));
+
+ if (mpt_fwfault_debug)
+ mpt_halt_firmware(ioc);
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
+ mpt_free_msg_frame(ioc, mf);
+ return;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+
+ CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
+
+ if (ioc->bus_type == SAS) {
+ if (function == MPI_FUNCTION_SCSI_IO_REQUEST)
+ ret_val = mptctl_do_taskmgmt(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+ scsi_req->Bus, scsi_req->TargetID);
+ else if (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)
+ ret_val = mptctl_do_taskmgmt(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ scsi_req->Bus, 0);
+ if (!ret_val)
+ return;
+ } else {
+ if ((function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
+ (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH))
+ ret_val = mptctl_do_taskmgmt(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ scsi_req->Bus, 0);
+ if (!ret_val)
+ return;
+ }
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling Reset! \n",
+ ioc->name));
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+ mpt_free_msg_frame(ioc, mf);
+}
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptctl_ioc_reset
@@ -1318,6 +1329,8 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
if (ioc->sh) {
shost_for_each_device(sdev, ioc->sh) {
vdevice = sdev->hostdata;
+ if (vdevice == NULL || vdevice->vtarget == NULL)
+ continue;
if (vdevice->vtarget->tflags &
MPT_TARGET_FLAGS_RAID_COMPONENT)
continue;
@@ -1439,6 +1452,8 @@ mptctl_gettargetinfo (unsigned long arg)
if (!maxWordsLeft)
continue;
vdevice = sdev->hostdata;
+ if (vdevice == NULL || vdevice->vtarget == NULL)
+ continue;
if (vdevice->vtarget->tflags &
MPT_TARGET_FLAGS_RAID_COMPONENT)
continue;
@@ -1967,6 +1982,9 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
struct scsi_target *starget = scsi_target(sdev);
VirtTarget *vtarget = starget->hostdata;
+ if (vtarget == NULL)
+ continue;
+
if ((pScsiReq->TargetID == vtarget->id) &&
(pScsiReq->Bus == vtarget->channel) &&
(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
@@ -2991,6 +3009,14 @@ static int __init mptctl_init(void)
}
mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER);
+ if (!mptctl_taskmgmt_id || mptctl_taskmgmt_id >= MPT_MAX_PROTOCOL_DRIVERS) {
+ printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
+ mpt_deregister(mptctl_id);
+ misc_deregister(&mptctl_miscdev);
+ err = -EBUSY;
+ goto out_fail;
+ }
+
mpt_reset_register(mptctl_id, mptctl_ioc_reset);
mpt_event_register(mptctl_id, mptctl_event_process);
@@ -3010,12 +3036,15 @@ static void mptctl_exit(void)
printk(KERN_INFO MYNAM ": Deregistered /dev/%s @ (major,minor=%d,%d)\n",
mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor);
+ /* De-register event handler from base module */
+ mpt_event_deregister(mptctl_id);
+
/* De-register reset handler from base module */
mpt_reset_deregister(mptctl_id);
/* De-register callback handler from base module */
+ mpt_deregister(mptctl_taskmgmt_id);
mpt_deregister(mptctl_id);
- mpt_reset_deregister(mptctl_taskmgmt_id);
mpt_device_driver_deregister(MPTCTL_DRIVER);
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 33f7256..b5f03ad 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -482,6 +482,7 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
if (vtarget) {
vtarget->id = pg0->CurrentTargetID;
vtarget->channel = pg0->CurrentBus;
+ vtarget->deleted = 0;
}
}
*((struct mptfc_rport_info **)rport->dd_data) = ri;
@@ -1092,6 +1093,8 @@ mptfc_setup_reset(struct work_struct *work)
container_of(work, MPT_ADAPTER, fc_setup_reset_work);
u64 pn;
struct mptfc_rport_info *ri;
+ struct scsi_target *starget;
+ VirtTarget *vtarget;
/* reset about to happen, delete (block) all rports */
list_for_each_entry(ri, &ioc->fc_rports, list) {
@@ -1099,6 +1102,12 @@ mptfc_setup_reset(struct work_struct *work)
ri->flags &= ~MPT_RPORT_INFO_FLAGS_REGISTERED;
fc_remote_port_delete(ri->rport); /* won't sleep */
ri->rport = NULL;
+ starget = ri->starget;
+ if (starget) {
+ vtarget = starget->hostdata;
+ if (vtarget)
+ vtarget->deleted = 1;
+ }
pn = (u64)ri->pg0.WWPN.High << 32 |
(u64)ri->pg0.WWPN.Low;
@@ -1119,6 +1128,8 @@ mptfc_rescan_devices(struct work_struct *work)
int ii;
u64 pn;
struct mptfc_rport_info *ri;
+ struct scsi_target *starget;
+ VirtTarget *vtarget;
/* start by tagging all ports as missing */
list_for_each_entry(ri, &ioc->fc_rports, list) {
@@ -1146,6 +1157,12 @@ mptfc_rescan_devices(struct work_struct *work)
MPT_RPORT_INFO_FLAGS_MISSING);
fc_remote_port_delete(ri->rport); /* won't sleep */
ri->rport = NULL;
+ starget = ri->starget;
+ if (starget) {
+ vtarget = starget->hostdata;
+ if (vtarget)
+ vtarget->deleted = 1;
+ }
pn = (u64)ri->pg0.WWPN.High << 32 |
(u64)ri->pg0.WWPN.Low;
@@ -1358,6 +1375,9 @@ mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
unsigned long flags;
int rc=1;
+ if (ioc->bus_type != FC)
+ return 0;
+
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
ioc->name, event));
@@ -1396,7 +1416,7 @@ mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
unsigned long flags;
rc = mptscsih_ioc_reset(ioc,reset_phase);
- if (rc == 0)
+ if ((ioc->bus_type != FC) || (!rc))
return rc;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7668712..ac000e8 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1894,7 +1894,7 @@ static struct scsi_host_template mptsas_driver_template = {
.module = THIS_MODULE,
.proc_name = "mptsas",
.proc_info = mptscsih_proc_info,
- .name = "MPT SPI Host",
+ .name = "MPT SAS Host",
.info = mptscsih_info,
.queuecommand = mptsas_qcmd,
.target_alloc = mptsas_target_alloc,
@@ -2038,11 +2038,13 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
10 * HZ);
- if (!timeleft) {
- /* On timeout reset the board */
+ if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ error = -ETIME;
mpt_free_msg_frame(ioc, mf);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
- error = -ETIMEDOUT;
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out_unlock;
+ if (!timeleft)
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto out_unlock;
}
@@ -2223,11 +2225,14 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
- if (!timeleft) {
- printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __func__);
- /* On timeout reset the board */
- mpt_HardResetHandler(ioc, CAN_SLEEP);
- ret = -ETIMEDOUT;
+ if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = -ETIME;
+ mpt_free_msg_frame(ioc, mf);
+ mf = NULL;
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto unmap;
+ if (!timeleft)
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto unmap;
}
mf = NULL;
@@ -2518,6 +2523,12 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
+
+ if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
+ error = -ENODEV;
+ goto out_free_consistent;
+ }
+
if (error)
goto out_free_consistent;
@@ -2594,14 +2605,14 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
- if (error)
- goto out_free_consistent;
-
- if (!buffer->NumPhys) {
+ if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
error = -ENODEV;
goto out_free_consistent;
}
+ if (error)
+ goto out_free_consistent;
+
/* save config data */
port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1;
port_info->phy_info = kcalloc(port_info->num_phys,
@@ -2677,7 +2688,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
error = -ENODEV;
- goto out;
+ goto out_free_consistent;
}
if (error)
@@ -2833,7 +2844,7 @@ mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out_free;
if (!timeleft)
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto out_free;
}
@@ -4098,6 +4109,7 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
cfg.pageAddr = (channel << 8) + id;
cfg.cfghdr.hdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
if (mpt_config(ioc, &cfg) != 0)
goto out;
@@ -4717,7 +4729,7 @@ mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
if (issue_reset) {
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
}
mptsas_free_fw_event(ioc, fw_event);
}
@@ -4779,6 +4791,9 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
struct fw_event_work *fw_event;
unsigned long delay;
+ if (ioc->bus_type != SAS)
+ return 0;
+
/* events turned off due to host reset or driver unloading */
if (ioc->fw_events_off)
return 0;
@@ -5073,6 +5088,12 @@ static void __devexit mptsas_remove(struct pci_dev *pdev)
struct mptsas_portinfo *p, *n;
int i;
+ if (!ioc->sh) {
+ printk(MYIOC_s_INFO_FMT "IOC is in Target mode\n", ioc->name);
+ mpt_detach(pdev);
+ return;
+ }
+
mptsas_shutdown(pdev);
mptsas_del_device_components(ioc);
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index 953c2bf..7b249ed 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -110,7 +110,7 @@ struct fw_event_work {
MPT_ADAPTER *ioc;
u32 event;
u8 retries;
- u8 event_data[1];
+ u8 __attribute__((aligned(4))) event_data[1];
};
struct mptsas_discovery_event {
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 6796597..7bd4c0f 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1149,11 +1149,6 @@ mptscsih_remove(struct pci_dev *pdev)
MPT_SCSI_HOST *hd;
int sz1;
- if(!host) {
- mpt_detach(pdev);
- return;
- }
-
scsi_remove_host(host);
if((hd = shost_priv(host)) == NULL)
@@ -1711,7 +1706,7 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
if (issue_hard_reset) {
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
- retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
+ retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
@@ -1728,6 +1723,7 @@ mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
case FC:
return 40;
case SAS:
+ return 30;
case SPI:
default:
return 10;
@@ -1777,7 +1773,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
ioc->name, SCpnt));
SCpnt->result = DID_NO_CONNECT << 16;
SCpnt->scsi_done(SCpnt);
- retval = 0;
+ retval = SUCCESS;
goto out;
}
@@ -1792,6 +1788,17 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
goto out;
}
+ /* Task aborts are not supported for volumes.
+ */
+ if (vdevice->vtarget->raidVolume) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "task abort: raid volume (sc=%p)\n",
+ ioc->name, SCpnt));
+ SCpnt->result = DID_RESET << 16;
+ retval = FAILED;
+ goto out;
+ }
+
/* Find this command
*/
if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(ioc, SCpnt)) < 0) {
@@ -1991,7 +1998,7 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
/* If our attempts to reset the host failed, then return a failed
* status. The host will be taken off line by the SCSI mid-layer.
*/
- retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
+ retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
if (retval < 0)
status = FAILED;
else
@@ -2344,6 +2351,8 @@ mptscsih_slave_destroy(struct scsi_device *sdev)
starget = scsi_target(sdev);
vtarget = starget->hostdata;
vdevice = sdev->hostdata;
+ if (!vdevice)
+ return;
mptscsih_search_running_cmds(hd, vdevice);
vtarget->num_luns--;
@@ -3040,7 +3049,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
if (!timeleft) {
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
goto out;
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index e443651..1abaa5d 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -210,6 +210,10 @@ mptspi_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target,
target->maxOffset = offset;
target->maxWidth = width;
+ spi_min_period(scsi_target(sdev)) = factor;
+ spi_max_offset(scsi_target(sdev)) = offset;
+ spi_max_width(scsi_target(sdev)) = width;
+
target->tflags |= MPT_TARGET_FLAGS_VALID_NEGO;
/* Disable unused features.
@@ -558,6 +562,7 @@ static int mptspi_read_spi_device_pg0(struct scsi_target *starget,
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.dir = 0;
cfg.pageAddr = starget->id;
+ cfg.timeout = 60;
if (mpt_config(ioc, &cfg)) {
starget_printk(KERN_ERR, starget, MYIOC_s_FMT "mpt_config failed\n", ioc->name);
@@ -1152,6 +1157,9 @@ mptspi_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
+ if (ioc->bus_type != SPI)
+ return 0;
+
if (hd && event == MPI_EVENT_INTEGRATED_RAID) {
int reason
= (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16;
@@ -1283,6 +1291,8 @@ mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
int rc;
rc = mptscsih_ioc_reset(ioc, reset_phase);
+ if ((ioc->bus_type != SPI) || (!rc))
+ return rc;
/* only try to do a renegotiation if we're properly set up
* if we get an ioc fault on bringup, ioc->sh will be NULL */
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index ed090e7..19fc7c1 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -707,7 +707,7 @@ static ssize_t __c2port_read_flash_data(struct c2port_device *dev,
return nread;
}
-static ssize_t c2port_read_flash_data(struct kobject *kobj,
+static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
@@ -824,7 +824,7 @@ static ssize_t __c2port_write_flash_data(struct c2port_device *dev,
return nwrite;
}
-static ssize_t c2port_write_flash_data(struct kobject *kobj,
+static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 9197cfc..a513f0a 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -140,7 +140,8 @@ static const struct attribute_group ds1682_group = {
/*
* User data attribute
*/
-static ssize_t ds1682_eeprom_read(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
@@ -163,7 +164,8 @@ static ssize_t ds1682_eeprom_read(struct kobject *kobj, struct bin_attribute *at
return count;
}
-static ssize_t ds1682_eeprom_write(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index db7d0f2..f7ca3a4 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -54,7 +54,7 @@
struct at24_data {
struct at24_platform_data chip;
struct memory_accessor macc;
- bool use_smbus;
+ int use_smbus;
/*
* Lock protects against activities from other Linux tasks,
@@ -184,11 +184,19 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
if (count > io_limit)
count = io_limit;
- if (at24->use_smbus) {
+ switch (at24->use_smbus) {
+ case I2C_SMBUS_I2C_BLOCK_DATA:
/* Smaller eeproms can work given some SMBus extension calls */
if (count > I2C_SMBUS_BLOCK_MAX)
count = I2C_SMBUS_BLOCK_MAX;
- } else {
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ count = 2;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ count = 1;
+ break;
+ default:
/*
* When we have a better choice than SMBus calls, use a
* combined I2C message. Write address; then read up to
@@ -219,10 +227,27 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
timeout = jiffies + msecs_to_jiffies(write_timeout);
do {
read_time = jiffies;
- if (at24->use_smbus) {
+ switch (at24->use_smbus) {
+ case I2C_SMBUS_I2C_BLOCK_DATA:
status = i2c_smbus_read_i2c_block_data(client, offset,
count, buf);
- } else {
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ status = i2c_smbus_read_word_data(client, offset);
+ if (status >= 0) {
+ buf[0] = status & 0xff;
+ buf[1] = status >> 8;
+ status = count;
+ }
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ status = i2c_smbus_read_byte_data(client, offset);
+ if (status >= 0) {
+ buf[0] = status;
+ status = count;
+ }
+ break;
+ default:
status = i2c_transfer(client->adapter, msg, 2);
if (status == 2)
status = count;
@@ -274,7 +299,8 @@ static ssize_t at24_read(struct at24_data *at24,
return retval;
}
-static ssize_t at24_bin_read(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t at24_bin_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct at24_data *at24;
@@ -395,7 +421,8 @@ static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
return retval;
}
-static ssize_t at24_bin_write(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct at24_data *at24;
@@ -434,7 +461,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct at24_platform_data chip;
bool writable;
- bool use_smbus = false;
+ int use_smbus = 0;
struct at24_data *at24;
int err;
unsigned i, num_addresses;
@@ -475,12 +502,19 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
err = -EPFNOSUPPORT;
goto err_out;
}
- if (!i2c_check_functionality(client->adapter,
+ if (i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ use_smbus = I2C_SMBUS_I2C_BLOCK_DATA;
+ } else if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+ use_smbus = I2C_SMBUS_WORD_DATA;
+ } else if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
+ use_smbus = I2C_SMBUS_BYTE_DATA;
+ } else {
err = -EPFNOSUPPORT;
goto err_out;
}
- use_smbus = true;
}
if (chip.flags & AT24_FLAG_TAKE8ADDR)
@@ -566,11 +600,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
dev_info(&client->dev, "%zu byte %s EEPROM %s\n",
at24->bin.size, client->name,
writable ? "(writable)" : "(read-only)");
+ if (use_smbus == I2C_SMBUS_WORD_DATA ||
+ use_smbus == I2C_SMBUS_BYTE_DATA) {
+ dev_notice(&client->dev, "Falling back to %s reads, "
+ "performance will suffer\n", use_smbus ==
+ I2C_SMBUS_WORD_DATA ? "word" : "byte");
+ }
dev_dbg(&client->dev,
- "page_size %d, num_addresses %d, write_max %d%s\n",
+ "page_size %d, num_addresses %d, write_max %d, use_smbus %d\n",
chip.page_size, num_addresses,
- at24->write_max,
- use_smbus ? ", use_smbus" : "");
+ at24->write_max, use_smbus);
/* export data to kernel code */
if (chip.setup)
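As an editorial aside (not part of the patch), the probe-time fallback order introduced in the at24 hunks above reduces to the minimal sketch below; the helper name is hypothetical, and only the adapter capability checks mirror the hunk.

	/* Sketch only: choose the best available SMBus read primitive, as in at24_probe() above. */
	#include <linux/errno.h>
	#include <linux/i2c.h>

	static int pick_smbus_read_mode(struct i2c_adapter *adap)
	{
		if (i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_I2C_BLOCK))
			return I2C_SMBUS_I2C_BLOCK_DATA;	/* up to I2C_SMBUS_BLOCK_MAX bytes per read */
		if (i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_WORD_DATA))
			return I2C_SMBUS_WORD_DATA;		/* two bytes per read */
		if (i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_BYTE_DATA))
			return I2C_SMBUS_BYTE_DATA;		/* one byte per read, slowest */
		return -EPFNOSUPPORT;				/* no usable read primitive */
	}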
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index d194212..c627e41 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -126,7 +126,8 @@ at25_ee_read(
}
static ssize_t
-at25_bin_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+at25_bin_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev;
@@ -253,7 +254,8 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
}
static ssize_t
-at25_bin_write(struct kobject *kobj, struct bin_attribute *bin_attr,
+at25_bin_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev;
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index e306a8c..45060dd 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -81,7 +81,8 @@ exit:
mutex_unlock(&data->update_lock);
}
-static ssize_t eeprom_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj));
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index fe29092..5653a3c 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -107,7 +107,7 @@ exit_up:
mutex_unlock(&data->update_lock);
}
-static ssize_t max6875_read(struct kobject *kobj,
+static ssize_t max6875_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
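For reference (not part of the patch), every hunk in this series that touches a sysfs binary attribute converts its callbacks to the new prototype taking a struct file *; here is a minimal standalone sketch with a purely illustrative attribute name.

	/* Sketch only: new-style bin_attribute read callback. */
	#include <linux/fs.h>
	#include <linux/kobject.h>
	#include <linux/sysfs.h>
	#include <linux/types.h>

	static ssize_t example_bin_read(struct file *filp, struct kobject *kobj,
					struct bin_attribute *attr,
					char *buf, loff_t off, size_t count)
	{
		/* Copy up to 'count' bytes of device data starting at 'off' into buf. */
		return 0;	/* number of bytes actually placed in buf */
	}

	static struct bin_attribute example_attr = {
		.attr	= { .name = "example_data", .mode = 0444 },
		.size	= 256,
		.read	= example_bin_read,
	};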
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index ecf90f5..f8210bf 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -304,6 +304,19 @@ config SSFDC
This enables read only access to SmartMedia formatted NAND
flash. You can mount it with FAT file system.
+
+config SM_FTL
+ tristate "SmartMedia/xD new translation layer"
+ depends on EXPERIMENTAL && BLOCK
+ select MTD_BLKDEVS
+ select MTD_NAND_ECC
+ help
+	  This enables new and very EXPERIMENTAL support for SmartMedia/xD
+	  FTL (Flash translation layer).
+	  Write support isn't yet well tested, so this code IS likely to
+	  eat your card; please don't use it together with valuable data.
+	  Use the read-only driver (CONFIG_SSFDC) instead.
+
config MTD_OOPS
tristate "Log panic/oops to an MTD buffer"
depends on MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 4521b1e..760abc5 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_NFTL) += nftl.o
obj-$(CONFIG_INFTL) += inftl.o
obj-$(CONFIG_RFD_FTL) += rfd_ftl.o
obj-$(CONFIG_SSFDC) += ssfdc.o
+obj-$(CONFIG_SM_FTL) += sm_ftl.o
obj-$(CONFIG_MTD_OOPS) += mtdoops.o
nftl-objs := nftlcore.o nftlmount.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 5fbf29e..62f3ea9 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -615,10 +615,8 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
return mtd;
setup_err:
- if(mtd) {
- kfree(mtd->eraseregions);
- kfree(mtd);
- }
+ kfree(mtd->eraseregions);
+ kfree(mtd);
kfree(cfi->cmdset_priv);
return NULL;
}
@@ -727,8 +725,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
/* those should be reset too since
they create memory references. */
init_waitqueue_head(&chip->wq);
- spin_lock_init(&chip->_spinlock);
- chip->mutex = &chip->_spinlock;
+ mutex_init(&chip->mutex);
chip++;
}
}
@@ -774,9 +771,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
break;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Someone else might have been playing with it. */
return -EAGAIN;
}
@@ -823,9 +820,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
return -EIO;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
So we can just loop here. */
}
@@ -852,10 +849,10 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
return -EAGAIN;
}
}
@@ -901,20 +898,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* it'll happily send us to sleep. In any case, when
* get_chip returns success we're clear to go ahead.
*/
- ret = spin_trylock(contender->mutex);
+ ret = mutex_trylock(&contender->mutex);
spin_unlock(&shared->lock);
if (!ret)
goto retry;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
ret = chip_ready(map, contender, contender->start, mode);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (ret == -EAGAIN) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
if (ret) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
return ret;
}
spin_lock(&shared->lock);
@@ -923,10 +920,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* in FL_SYNCING state. Put contender and retry. */
if (chip->state == FL_SYNCING) {
put_chip(map, contender, contender->start);
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
}
/* Check if we already have suspended erase
@@ -936,10 +933,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
spin_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
goto retry;
}
@@ -969,12 +966,12 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
if (shared->writing && shared->writing != chip) {
/* give back ownership to who we loaned it from */
struct flchip *loaner = shared->writing;
- spin_lock(loaner->mutex);
+ mutex_lock(&loaner->mutex);
spin_unlock(&shared->lock);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
put_chip(map, loaner, loaner->start);
- spin_lock(chip->mutex);
- spin_unlock(loaner->mutex);
+ mutex_lock(&chip->mutex);
+ mutex_unlock(&loaner->mutex);
wake_up(&chip->wq);
return;
}
@@ -1144,7 +1141,7 @@ static int __xipram xip_wait_for_operation(
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
xip_iprefetch();
cond_resched();
@@ -1154,15 +1151,15 @@ static int __xipram xip_wait_for_operation(
* a suspended erase state. If so let's wait
* until it's done.
*/
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != newstate) {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Disallow XIP again */
local_irq_disable();
@@ -1218,10 +1215,10 @@ static int inval_cache_and_wait_for_operation(
int chip_state = chip->state;
unsigned int timeo, sleep_time, reset_timeo;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (inval_len)
INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
timeo = chip_op_time_max;
if (!timeo)
@@ -1241,7 +1238,7 @@ static int inval_cache_and_wait_for_operation(
}
/* OK Still waiting. Drop the lock, wait a while and retry. */
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (sleep_time >= 1000000/HZ) {
/*
* Half of the normal delay still remaining
@@ -1256,17 +1253,17 @@ static int inval_cache_and_wait_for_operation(
cond_resched();
timeo--;
}
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != chip_state) {
/* Someone's suspended the operation: sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
if (chip->erase_suspended && chip_state == FL_ERASING) {
/* Erase suspend occurred while sleeping: reset timeout */
@@ -1302,7 +1299,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_POINT);
@@ -1313,7 +1310,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
chip->state = FL_POINT;
chip->ref_point_counter++;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1398,7 +1395,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
else
thislen = len;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_POINT) {
chip->ref_point_counter--;
if(chip->ref_point_counter == 0)
@@ -1407,7 +1404,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
put_chip(map, chip, chip->start);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
len -= thislen;
ofs = 0;
@@ -1426,10 +1423,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_READY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1443,7 +1440,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
put_chip(map, chip, cmd_addr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -1506,10 +1503,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
return -EINVAL;
}
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, mode);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1555,7 +1552,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1664,10 +1661,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
/* Let's determine this according to the interleave only once */
write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1798,7 +1795,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, cmd_adr);
out: put_chip(map, chip, cmd_adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1877,10 +1874,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
adr += chip->start;
retry:
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_ERASING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1936,7 +1933,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
} else if (chipstatus & 0x20 && retries--) {
printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
goto retry;
} else {
printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
@@ -1948,7 +1945,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1981,7 +1978,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SYNCING);
if (!ret) {
@@ -1992,7 +1989,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
* with the chip now anyway.
*/
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -2000,14 +1997,14 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -2053,10 +2050,10 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
adr += chip->start;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -2090,7 +2087,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -2155,10 +2152,10 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
struct cfi_private *cfi = map->fldrv_priv;
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -2177,7 +2174,7 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
put_chip(map, chip, chip->start);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -2452,7 +2449,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
switch (chip->state) {
case FL_READY:
@@ -2484,7 +2481,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
case FL_PM_SUSPENDED:
break;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -2493,7 +2490,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
/* No need to force it into a known state here,
@@ -2503,7 +2500,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -2544,7 +2541,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
@@ -2553,7 +2550,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
if ((mtd->flags & MTD_POWERUP_LOCK)
@@ -2573,14 +2570,14 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
/* force the completion of any ongoing operation
and switch to array mode so any bootloader in
flash is accessible for soft reboot. */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
if (!ret) {
map_write(map, CMD(0xff), chip->start);
chip->state = FL_SHUTDOWN;
put_chip(map, chip, chip->start);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
return 0;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index f3600e8..d81079e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -32,6 +32,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/reboot.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
@@ -43,10 +44,6 @@
#define MAX_WORD_RETRIES 3
-#define MANUFACTURER_AMD 0x0001
-#define MANUFACTURER_ATMEL 0x001F
-#define MANUFACTURER_MACRONIX 0x00C2
-#define MANUFACTURER_SST 0x00BF
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
@@ -60,6 +57,7 @@ static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
+static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static void cfi_amdstd_destroy(struct mtd_info *);
@@ -168,7 +166,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
* This reduces the risk of false detection due to
* the 8-bit device ID.
*/
- (cfi->mfr == MANUFACTURER_MACRONIX)) {
+ (cfi->mfr == CFI_MFR_MACRONIX)) {
DEBUG(MTD_DEBUG_LEVEL1,
"%s: Macronix MX29LV400C with bottom boot block"
" detected\n", map->name);
@@ -260,6 +258,42 @@ static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
mtd->flags |= MTD_POWERUP_LOCK;
}
+static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ /*
+	 * These flashes report two separate eraseblock regions based on the
+ * sector_erase-size and block_erase-size, although they both operate on the
+ * same memory. This is not allowed according to CFI, so we just pick the
+ * sector_erase-size.
+ */
+ cfi->cfiq->NumEraseRegions = 1;
+}
+
+static void fixup_sst39vf(struct mtd_info *mtd, void *param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ fixup_old_sst_eraseregion(mtd);
+
+ cfi->addr_unlock1 = 0x5555;
+ cfi->addr_unlock2 = 0x2AAA;
+}
+
+static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ fixup_old_sst_eraseregion(mtd);
+
+ cfi->addr_unlock1 = 0x555;
+ cfi->addr_unlock2 = 0x2AA;
+}
+
static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
struct map_info *map = mtd->priv;
@@ -282,11 +316,24 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
}
}
+/* Used to fix CFI-Tables of chips without Extended Query Tables */
+static struct cfi_fixup cfi_nopri_fixup_table[] = {
+ { CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, // SST39VF1602
+ { CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, // SST39VF1601
+ { CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, // SST39VF3202
+ { CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, // SST39VF3201
+ { CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, // SST39VF3202B
+ { CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, // SST39VF3201B
+ { CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, // SST39VF6402B
+ { CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, // SST39VF6401B
+ { 0, 0, NULL, NULL }
+};
+
static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
- { MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
+ { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
@@ -304,9 +351,9 @@ static struct cfi_fixup cfi_fixup_table[] = {
{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
- { MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
- { MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
- { MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
{ 0, 0, NULL, NULL }
};
@@ -355,67 +402,72 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
mtd->name = map->name;
mtd->writesize = 1;
+ mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
+
if (cfi->cfi_mode==CFI_MODE_CFI){
unsigned char bootloc;
- /*
- * It's a real CFI chip, not one for which the probe
- * routine faked a CFI structure. So we read the feature
- * table from it.
- */
__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
struct cfi_pri_amdstd *extp;
extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
- if (!extp) {
- kfree(mtd);
- return NULL;
- }
-
- cfi_fixup_major_minor(cfi, extp);
-
- if (extp->MajorVersion != '1' ||
- (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
- printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
- "version %c.%c.\n", extp->MajorVersion,
- extp->MinorVersion);
- kfree(extp);
- kfree(mtd);
- return NULL;
- }
+ if (extp) {
+ /*
+ * It's a real CFI chip, not one for which the probe
+ * routine faked a CFI structure.
+ */
+ cfi_fixup_major_minor(cfi, extp);
+
+ if (extp->MajorVersion != '1' ||
+ (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+ printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
+ "version %c.%c.\n", extp->MajorVersion,
+ extp->MinorVersion);
+ kfree(extp);
+ kfree(mtd);
+ return NULL;
+ }
- /* Install our own private info structure */
- cfi->cmdset_priv = extp;
+ /* Install our own private info structure */
+ cfi->cmdset_priv = extp;
- /* Apply cfi device specific fixups */
- cfi_fixup(mtd, cfi_fixup_table);
+ /* Apply cfi device specific fixups */
+ cfi_fixup(mtd, cfi_fixup_table);
#ifdef DEBUG_CFI_FEATURES
- /* Tell the user about it in lots of lovely detail */
- cfi_tell_features(extp);
+ /* Tell the user about it in lots of lovely detail */
+ cfi_tell_features(extp);
#endif
- bootloc = extp->TopBottom;
- if ((bootloc != 2) && (bootloc != 3)) {
- printk(KERN_WARNING "%s: CFI does not contain boot "
- "bank location. Assuming top.\n", map->name);
- bootloc = 2;
- }
+ bootloc = extp->TopBottom;
+ if ((bootloc < 2) || (bootloc > 5)) {
+ printk(KERN_WARNING "%s: CFI contains unrecognised boot "
+ "bank location (%d). Assuming bottom.\n",
+ map->name, bootloc);
+ bootloc = 2;
+ }
- if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
- printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
+ if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
+ printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
- for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
- int j = (cfi->cfiq->NumEraseRegions-1)-i;
- __u32 swap;
+ for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
+ int j = (cfi->cfiq->NumEraseRegions-1)-i;
+ __u32 swap;
- swap = cfi->cfiq->EraseRegionInfo[i];
- cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
- cfi->cfiq->EraseRegionInfo[j] = swap;
+ swap = cfi->cfiq->EraseRegionInfo[i];
+ cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
+ cfi->cfiq->EraseRegionInfo[j] = swap;
+ }
}
+ /* Set the default CFI lock/unlock addresses */
+ cfi->addr_unlock1 = 0x555;
+ cfi->addr_unlock2 = 0x2aa;
+ }
+ cfi_fixup(mtd, cfi_nopri_fixup_table);
+
+ if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
+ kfree(mtd);
+ return NULL;
}
- /* Set the default CFI lock/unlock addresses */
- cfi->addr_unlock1 = 0x555;
- cfi->addr_unlock2 = 0x2aa;
} /* CFI mode */
else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
@@ -437,7 +489,11 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
return cfi_amdstd_setup(mtd);
}
+struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
+struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
@@ -491,13 +547,12 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
#endif
__module_get(THIS_MODULE);
+ register_reboot_notifier(&mtd->reboot_notifier);
return mtd;
setup_err:
- if(mtd) {
- kfree(mtd->eraseregions);
- kfree(mtd);
- }
+ kfree(mtd->eraseregions);
+ kfree(mtd);
kfree(cfi->cmdset_priv);
kfree(cfi->cfiq);
return NULL;
@@ -571,9 +626,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
return -EIO;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Someone else might have been playing with it. */
goto retry;
}
@@ -617,9 +672,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
return -EIO;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
So we can just loop here. */
}
@@ -634,6 +689,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
chip->state = FL_READY;
return 0;
+ case FL_SHUTDOWN:
+ /* The machine is rebooting */
+ return -EIO;
+
case FL_POINT:
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
@@ -643,10 +702,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
goto resettime;
}
}
@@ -778,7 +837,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
xip_iprefetch();
cond_resched();
@@ -788,15 +847,15 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
* a suspended erase state. If so let's wait
* until it's done.
*/
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != FL_XIP_WHILE_ERASING) {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Disallow XIP again */
local_irq_disable();
@@ -858,17 +917,17 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
#define UDELAY(map, chip, adr, usec) \
do { \
- spin_unlock(chip->mutex); \
+ mutex_unlock(&chip->mutex); \
cfi_udelay(usec); \
- spin_lock(chip->mutex); \
+ mutex_lock(&chip->mutex); \
} while (0)
#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
do { \
- spin_unlock(chip->mutex); \
+ mutex_unlock(&chip->mutex); \
INVALIDATE_CACHED_RANGE(map, adr, len); \
cfi_udelay(usec); \
- spin_lock(chip->mutex); \
+ mutex_lock(&chip->mutex); \
} while (0)
#endif
@@ -884,10 +943,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_READY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -900,7 +959,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
put_chip(map, chip, cmd_addr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -954,7 +1013,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
struct cfi_private *cfi = map->fldrv_priv;
retry:
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state != FL_READY){
#if 0
@@ -963,7 +1022,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
@@ -992,7 +1051,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
wake_up(&chip->wq);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -1061,10 +1120,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
adr += chip->start;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1107,11 +1166,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
@@ -1143,7 +1202,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
op_done:
chip->state = FL_READY;
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1175,7 +1234,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
map_word tmp_buf;
retry:
- spin_lock(cfi->chips[chipnum].mutex);
+ mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
#if 0
@@ -1184,7 +1243,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
- spin_unlock(cfi->chips[chipnum].mutex);
+ mutex_unlock(&cfi->chips[chipnum].mutex);
schedule();
remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1198,7 +1257,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
/* Load 'tmp_buf' with old contents of flash */
tmp_buf = map_read(map, bus_ofs+chipstart);
- spin_unlock(cfi->chips[chipnum].mutex);
+ mutex_unlock(&cfi->chips[chipnum].mutex);
/* Number of bytes to copy from buffer */
n = min_t(int, len, map_bankwidth(map)-i);
@@ -1253,7 +1312,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
map_word tmp_buf;
retry1:
- spin_lock(cfi->chips[chipnum].mutex);
+ mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
#if 0
@@ -1262,7 +1321,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
- spin_unlock(cfi->chips[chipnum].mutex);
+ mutex_unlock(&cfi->chips[chipnum].mutex);
schedule();
remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1275,7 +1334,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
tmp_buf = map_read(map, ofs + chipstart);
- spin_unlock(cfi->chips[chipnum].mutex);
+ mutex_unlock(&cfi->chips[chipnum].mutex);
tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
@@ -1310,10 +1369,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
adr += chip->start;
cmd_adr = adr;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1368,11 +1427,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
@@ -1400,7 +1459,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
op_done:
chip->state = FL_READY;
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1500,10 +1559,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
adr = cfi->addr_unlock1;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1536,10 +1595,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
if (chip->erase_suspended) {
@@ -1573,7 +1632,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->state = FL_READY;
xip_enable(map, chip, adr);
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1588,10 +1647,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
adr += chip->start;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_ERASING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1624,10 +1683,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
if (chip->erase_suspended) {
@@ -1663,7 +1722,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->state = FL_READY;
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1715,7 +1774,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
struct cfi_private *cfi = map->fldrv_priv;
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
if (ret)
goto out_unlock;
@@ -1741,7 +1800,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
ret = 0;
out_unlock:
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1751,7 +1810,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
struct cfi_private *cfi = map->fldrv_priv;
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
if (ret)
goto out_unlock;
@@ -1769,7 +1828,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
ret = 0;
out_unlock:
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1797,7 +1856,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
chip = &cfi->chips[i];
retry:
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -1811,7 +1870,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
* with the chip now anyway.
*/
case FL_SYNCING:
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
break;
default:
@@ -1819,7 +1878,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
@@ -1834,13 +1893,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -1856,7 +1915,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -1876,7 +1935,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
ret = -EAGAIN;
break;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -1885,13 +1944,13 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -1910,7 +1969,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
chip->state = FL_READY;
@@ -1920,15 +1979,62 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
else
printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
+
+/*
+ * Ensure that the flash device is put back into read array mode before
+ * unloading the driver or rebooting. On some systems, rebooting while
+ * the flash is in query/program/erase mode will prevent the CPU from
+ * fetching the bootloader code, requiring a hard reset or power cycle.
+ */
+static int cfi_amdstd_reset(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i, ret;
+ struct flchip *chip;
+
+ for (i = 0; i < cfi->numchips; i++) {
+
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
+ if (!ret) {
+ map_write(map, CMD(0xF0), chip->start);
+ chip->state = FL_SHUTDOWN;
+ put_chip(map, chip, chip->start);
+ }
+
+ mutex_unlock(&chip->mutex);
+ }
+
+ return 0;
+}
+
+
+static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
+ void *v)
+{
+ struct mtd_info *mtd;
+
+ mtd = container_of(nb, struct mtd_info, reboot_notifier);
+ cfi_amdstd_reset(mtd);
+ return NOTIFY_DONE;
+}
+
+
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
+ cfi_amdstd_reset(mtd);
+ unregister_reboot_notifier(&mtd->reboot_notifier);
kfree(cfi->cmdset_priv);
kfree(cfi->cfiq);
kfree(cfi);
@@ -1938,3 +2044,5 @@ static void cfi_amdstd_destroy(struct mtd_info *mtd)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
+MODULE_ALIAS("cfi_cmdset_0006");
+MODULE_ALIAS("cfi_cmdset_0701");
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 0667a67..e54e8c1 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -265,7 +265,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us.
* If it's in FL_ERASING state, suspend it and make it talk now.
@@ -296,15 +296,15 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* make sure we're in 'read status' mode */
map_write(map, CMD(0x70), cmd_addr);
chip->state = FL_ERASING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "Chip not ready after erase "
"suspended: status = 0x%lx\n", status.x[0]);
return -EIO;
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
suspended = 1;
@@ -335,13 +335,13 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -351,7 +351,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -376,7 +376,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
}
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -445,7 +445,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
#ifdef DEBUG_CFI_FEATURES
printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us.
* Later, we can actually think about interrupting it
@@ -470,14 +470,14 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
break;
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
status.x[0], map_read(map, cmd_adr).x[0]);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -486,7 +486,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -503,16 +503,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
if (map_word_andequal(map, status, status_OK, status_OK))
break;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
if (++z > 100) {
/* Argh. Not ready for write to buffer */
DISABLE_VPP(map);
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
return -EIO;
}
@@ -532,9 +532,9 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0xd0), cmd_adr);
chip->state = FL_WRITING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(chip->buffer_write_time);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
timeo = jiffies + (HZ/2);
z = 0;
@@ -543,11 +543,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
/* Someone's suspended the write. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
@@ -563,16 +563,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
z++;
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
if (!z) {
chip->buffer_write_time--;
@@ -596,11 +596,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
/* put back into read status register mode */
map_write(map, CMD(0x70), adr);
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
}
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -749,7 +749,7 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
@@ -766,13 +766,13 @@ retry:
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -781,7 +781,7 @@ retry:
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -797,9 +797,9 @@ retry:
map_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
msleep(1000);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
@@ -810,11 +810,11 @@ retry:
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ*20); /* FIXME */
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
@@ -828,14 +828,14 @@ retry:
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
DISABLE_VPP(map);
@@ -878,7 +878,7 @@ retry:
printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
timeo = jiffies + HZ;
chip->state = FL_STATUS;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
goto retry;
}
printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
@@ -887,7 +887,7 @@ retry:
}
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -995,7 +995,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
chip = &cfi->chips[i];
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -1009,7 +1009,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
* with the chip now anyway.
*/
case FL_SYNCING:
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
break;
default:
@@ -1017,7 +1017,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
@@ -1030,13 +1030,13 @@ static void cfi_staa_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -1054,7 +1054,7 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
@@ -1071,13 +1071,13 @@ retry:
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -1086,7 +1086,7 @@ retry:
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -1098,9 +1098,9 @@ retry:
map_write(map, CMD(0x01), adr);
chip->state = FL_LOCKING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
msleep(1000);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
@@ -1118,21 +1118,21 @@ retry:
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Done and happy. */
chip->state = FL_STATUS;
DISABLE_VPP(map);
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -1203,7 +1203,7 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
@@ -1220,13 +1220,13 @@ retry:
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -1235,7 +1235,7 @@ retry:
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -1247,9 +1247,9 @@ retry:
map_write(map, CMD(0xD0), adr);
chip->state = FL_UNLOCKING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
msleep(1000);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
@@ -1267,21 +1267,21 @@ retry:
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Done and happy. */
chip->state = FL_STATUS;
DISABLE_VPP(map);
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -1334,7 +1334,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -1354,7 +1354,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
ret = -EAGAIN;
break;
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -1363,7 +1363,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
/* No need to force it into a known state here,
@@ -1372,7 +1372,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -1390,7 +1390,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
@@ -1399,7 +1399,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
wake_up(&chip->wq);
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
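/*
 * Illustrative sketch, not part of the patch: the cfi_cmdset_0020.c and
 * gen_probe.c hunks above convert the per-chip lock from a spinlock
 * (spin_lock_bh on chip->mutex) to a sleepable struct mutex initialised
 * with mutex_init(&chip->mutex).  Under that assumption, the resulting
 * wait pattern looks like the sketch below: take the mutex, and if the
 * chip is busy, register on chip->wq, drop the mutex, sleep, and retry.
 * The helper name my_wait_for_chip is hypothetical; struct flchip,
 * FL_READY and the waitqueue calls are the ones used in the diff.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mtd/flashchip.h>

static int my_wait_for_chip(struct flchip *chip)
{
 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY) {
		/* Chip busy: sleep on the chip's waitqueue, then retry. */
		DECLARE_WAITQUEUE(wait, current);

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		goto retry;
	}

	/* Caller now owns the chip; release with mutex_unlock(&chip->mutex). */
	return 0;
}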
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index e63e674..b2acd32f 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -158,6 +158,7 @@ static int __xipram cfi_chip_setup(struct map_info *map,
__u32 base = 0;
int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
int i;
+ int addr_unlock1 = 0x555, addr_unlock2 = 0x2AA;
xip_enable(base, map, cfi);
#ifdef DEBUG_CFI
@@ -181,29 +182,6 @@ static int __xipram cfi_chip_setup(struct map_info *map,
for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
- /* Note we put the device back into Read Mode BEFORE going into Auto
- * Select Mode, as some devices support nesting of modes, others
- * don't. This way should always work.
- * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
- * so should be treated as nops or illegal (and so put the device
- * back into Read Mode, which is a nop in this case).
- */
- cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
- cfi->mfr = cfi_read_query16(map, base);
- cfi->id = cfi_read_query16(map, base + ofs_factor);
-
- /* Get AMD/Spansion extended JEDEC ID */
- if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
- cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
- cfi_read_query(map, base + 0xf * ofs_factor);
-
- /* Put it back into Read Mode */
- cfi_qry_mode_off(base, map, cfi);
- xip_allowed(base, map);
-
/* Do any necessary byteswapping */
cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID);
@@ -228,6 +206,35 @@ static int __xipram cfi_chip_setup(struct map_info *map,
#endif
}
+ if (cfi->cfiq->P_ID == P_ID_SST_OLD) {
+ addr_unlock1 = 0x5555;
+ addr_unlock2 = 0x2AAA;
+ }
+
+ /*
+ * Note we put the device back into Read Mode BEFORE going into Auto
+ * Select Mode, as some devices support nesting of modes, others
+ * don't. This way should always work.
+ * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
+ * so should be treated as nops or illegal (and so put the device
+ * back into Read Mode, which is a nop in this case).
+ */
+ cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xaa, addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, addr_unlock2, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x90, addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi->mfr = cfi_read_query16(map, base);
+ cfi->id = cfi_read_query16(map, base + ofs_factor);
+
+ /* Get AMD/Spansion extended JEDEC ID */
+ if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
+ cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
+ cfi_read_query(map, base + 0xf * ofs_factor);
+
+ /* Put it back into Read Mode */
+ cfi_qry_mode_off(base, map, cfi);
+ xip_allowed(base, map);
+
printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
map->name, cfi->interleave, cfi->device_type*8, base,
map->bankwidth*8);
@@ -269,6 +276,9 @@ static char *vendorname(__u16 vendor)
case P_ID_SST_PAGE:
return "SST Page Write";
+ case P_ID_SST_OLD:
+ return "SST 39VF160x/39VF320x";
+
case P_ID_INTEL_PERFORMANCE:
return "Intel Performance Code";
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index ca584d0..d7c2c67 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -104,10 +104,11 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n
int i;
struct cfi_extquery *extp = NULL;
- printk(" %s Extended Query Table at 0x%4.4X\n", name, adr);
if (!adr)
goto out;
+ printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);
+
extp = kmalloc(size, GFP_KERNEL);
if (!extp) {
printk(KERN_ERR "Failed to allocate memory\n");
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index 57e0e4e..d180649 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -58,10 +58,10 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
* to flash memory - that means that we don't have to check status
* and timeout.
*/
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -72,7 +72,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
/* Done and happy. */
chip->state = chip->oldstate;
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index e2dc964..3b9a284 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -155,8 +155,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
pchip->start = (i << cfi.chipshift);
pchip->state = FL_READY;
init_waitqueue_head(&pchip->wq);
- spin_lock_init(&pchip->_spinlock);
- pchip->mutex = &pchip->_spinlock;
+ mutex_init(&pchip->mutex);
}
}
@@ -242,17 +241,19 @@ static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
/* We need these for the !CONFIG_MODULES case,
because symbol_get() doesn't work there */
#ifdef CONFIG_MTD_CFI_INTELEXT
- case 0x0001:
- case 0x0003:
- case 0x0200:
+ case P_ID_INTEL_EXT:
+ case P_ID_INTEL_STD:
+ case P_ID_INTEL_PERFORMANCE:
return cfi_cmdset_0001(map, primary);
#endif
#ifdef CONFIG_MTD_CFI_AMDSTD
- case 0x0002:
+ case P_ID_AMD_STD:
+ case P_ID_SST_OLD:
+ case P_ID_WINBOND:
return cfi_cmdset_0002(map, primary);
#endif
#ifdef CONFIG_MTD_CFI_STAA
- case 0x0020:
+ case P_ID_ST_ADV:
return cfi_cmdset_0020(map, primary);
#endif
default:
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 8db1148..d72a5fb 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -22,24 +22,6 @@
#include <linux/mtd/cfi.h>
#include <linux/mtd/gen_probe.h>
-/* Manufacturers */
-#define MANUFACTURER_AMD 0x0001
-#define MANUFACTURER_ATMEL 0x001f
-#define MANUFACTURER_EON 0x001c
-#define MANUFACTURER_FUJITSU 0x0004
-#define MANUFACTURER_HYUNDAI 0x00AD
-#define MANUFACTURER_INTEL 0x0089
-#define MANUFACTURER_MACRONIX 0x00C2
-#define MANUFACTURER_NEC 0x0010
-#define MANUFACTURER_PMC 0x009D
-#define MANUFACTURER_SHARP 0x00b0
-#define MANUFACTURER_SST 0x00BF
-#define MANUFACTURER_ST 0x0020
-#define MANUFACTURER_TOSHIBA 0x0098
-#define MANUFACTURER_WINBOND 0x00da
-#define CONTINUATION_CODE 0x007f
-
-
/* AMD */
#define AM29DL800BB 0x22CB
#define AM29DL800BT 0x224A
@@ -166,6 +148,8 @@
#define SST39LF160 0x2782
#define SST39VF1601 0x234b
#define SST39VF3201 0x235b
+#define SST39WF1601 0x274b
+#define SST39WF1602 0x274a
#define SST39LF512 0x00D4
#define SST39LF010 0x00D5
#define SST39LF020 0x00D6
@@ -309,7 +293,7 @@ struct amd_flash_info {
*/
static const struct amd_flash_info jedec_table[] = {
{
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F032B,
.name = "AMD AM29F032B",
.uaddr = MTD_UADDR_0x0555_0x02AA,
@@ -321,7 +305,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,64)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV160DT,
.name = "AMD AM29LV160DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -336,7 +320,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV160DB,
.name = "AMD AM29LV160DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -351,7 +335,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV400BB,
.name = "AMD AM29LV400BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -366,7 +350,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,7)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV400BT,
.name = "AMD AM29LV400BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -381,7 +365,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV800BB,
.name = "AMD AM29LV800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -397,7 +381,7 @@ static const struct amd_flash_info jedec_table[] = {
}
}, {
/* add DL */
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29DL800BB,
.name = "AMD AM29DL800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -414,7 +398,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,14)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29DL800BT,
.name = "AMD AM29DL800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -431,7 +415,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F800BB,
.name = "AMD AM29F800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -446,7 +430,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV800BT,
.name = "AMD AM29LV800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -461,7 +445,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F800BT,
.name = "AMD AM29F800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -476,7 +460,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F017D,
.name = "AMD AM29F017D",
.devtypes = CFI_DEVICETYPE_X8,
@@ -488,7 +472,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,32),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F016D,
.name = "AMD AM29F016D",
.devtypes = CFI_DEVICETYPE_X8,
@@ -500,7 +484,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,32),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F080,
.name = "AMD AM29F080",
.devtypes = CFI_DEVICETYPE_X8,
@@ -512,7 +496,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F040,
.name = "AMD AM29F040",
.devtypes = CFI_DEVICETYPE_X8,
@@ -524,7 +508,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV040B,
.name = "AMD AM29LV040B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -536,7 +520,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F002T,
.name = "AMD AM29F002T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -551,7 +535,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29SL800DT,
.name = "AMD AM29SL800DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -566,7 +550,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29SL800DB,
.name = "AMD AM29SL800DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -581,7 +565,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15),
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV512,
.name = "Atmel AT49BV512",
.devtypes = CFI_DEVICETYPE_X8,
@@ -593,7 +577,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,1)
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT29LV512,
.name = "Atmel AT29LV512",
.devtypes = CFI_DEVICETYPE_X8,
@@ -606,7 +590,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x80,256)
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV16X,
.name = "Atmel AT49BV16X",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -619,7 +603,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV16XT,
.name = "Atmel AT49BV16XT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -632,7 +616,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,8)
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV32X,
.name = "Atmel AT49BV32X",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -645,7 +629,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,63)
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV32XT,
.name = "Atmel AT49BV32XT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -658,7 +642,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,8)
}
}, {
- .mfr_id = MANUFACTURER_EON,
+ .mfr_id = CFI_MFR_EON,
.dev_id = EN29SL800BT,
.name = "Eon EN29SL800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -673,7 +657,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_EON,
+ .mfr_id = CFI_MFR_EON,
.dev_id = EN29SL800BB,
.name = "Eon EN29SL800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -688,7 +672,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15),
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29F040C,
.name = "Fujitsu MBM29F040C",
.devtypes = CFI_DEVICETYPE_X8,
@@ -700,7 +684,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29F800BA,
.name = "Fujitsu MBM29F800BA",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -715,7 +699,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15),
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV650UE,
.name = "Fujitsu MBM29LV650UE",
.devtypes = CFI_DEVICETYPE_X8,
@@ -727,7 +711,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,128)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV320TE,
.name = "Fujitsu MBM29LV320TE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -740,7 +724,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,8)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV320BE,
.name = "Fujitsu MBM29LV320BE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -753,7 +737,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,63)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV160TE,
.name = "Fujitsu MBM29LV160TE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -768,7 +752,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV160BE,
.name = "Fujitsu MBM29LV160BE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -783,7 +767,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV800BA,
.name = "Fujitsu MBM29LV800BA",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -798,7 +782,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV800TA,
.name = "Fujitsu MBM29LV800TA",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -813,7 +797,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV400BC,
.name = "Fujitsu MBM29LV400BC",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -828,7 +812,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,7)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV400TC,
.name = "Fujitsu MBM29LV400TC",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -843,7 +827,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_HYUNDAI,
+ .mfr_id = CFI_MFR_HYUNDAI,
.dev_id = HY29F002T,
.name = "Hyundai HY29F002T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -858,7 +842,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F004B3B,
.name = "Intel 28F004B3B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -871,7 +855,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 7),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F004B3T,
.name = "Intel 28F004B3T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -884,7 +868,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F400B3B,
.name = "Intel 28F400B3B",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -897,7 +881,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 7),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F400B3T,
.name = "Intel 28F400B3T",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -910,7 +894,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008B3B,
.name = "Intel 28F008B3B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -923,7 +907,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 15),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008B3T,
.name = "Intel 28F008B3T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -936,7 +920,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008S5,
.name = "Intel 28F008S5",
.devtypes = CFI_DEVICETYPE_X8,
@@ -948,7 +932,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016S5,
.name = "Intel 28F016S5",
.devtypes = CFI_DEVICETYPE_X8,
@@ -960,7 +944,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,32),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008SA,
.name = "Intel 28F008SA",
.devtypes = CFI_DEVICETYPE_X8,
@@ -972,7 +956,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 16),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F800B3B,
.name = "Intel 28F800B3B",
.devtypes = CFI_DEVICETYPE_X16,
@@ -985,7 +969,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 15),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F800B3T,
.name = "Intel 28F800B3T",
.devtypes = CFI_DEVICETYPE_X16,
@@ -998,7 +982,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016B3B,
.name = "Intel 28F016B3B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1011,7 +995,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 31),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016S3,
.name = "Intel I28F016S3",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1023,7 +1007,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 32),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016B3T,
.name = "Intel 28F016B3T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1036,7 +1020,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F160B3B,
.name = "Intel 28F160B3B",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1049,7 +1033,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 31),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F160B3T,
.name = "Intel 28F160B3T",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1062,7 +1046,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F320B3B,
.name = "Intel 28F320B3B",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1075,7 +1059,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 63),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F320B3T,
.name = "Intel 28F320B3T",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1088,7 +1072,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F640B3B,
.name = "Intel 28F640B3B",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1101,7 +1085,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 127),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F640B3T,
.name = "Intel 28F640B3T",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1114,7 +1098,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F640C3B,
.name = "Intel 28F640C3B",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1127,7 +1111,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 127),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I82802AB,
.name = "Intel 82802AB",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1139,7 +1123,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I82802AC,
.name = "Intel 82802AC",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1151,7 +1135,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29LV040C,
.name = "Macronix MX29LV040C",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1163,7 +1147,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29LV160T,
.name = "MXIC MX29LV160T",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1178,7 +1162,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_NEC,
+ .mfr_id = CFI_MFR_NEC,
.dev_id = UPD29F064115,
.name = "NEC uPD29F064115",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1192,7 +1176,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x2000,8),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29LV160B,
.name = "MXIC MX29LV160B",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1207,7 +1191,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F040,
.name = "Macronix MX29F040",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1219,7 +1203,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F016,
.name = "Macronix MX29F016",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1231,7 +1215,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,32),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F004T,
.name = "Macronix MX29F004T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1246,7 +1230,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F004B,
.name = "Macronix MX29F004B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1261,7 +1245,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,7),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F002T,
.name = "Macronix MX29F002T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1276,7 +1260,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_PMC,
+ .mfr_id = CFI_MFR_PMC,
.dev_id = PM49FL002,
.name = "PMC Pm49FL002",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1288,7 +1272,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO( 0x01000, 64 )
}
}, {
- .mfr_id = MANUFACTURER_PMC,
+ .mfr_id = CFI_MFR_PMC,
.dev_id = PM49FL004,
.name = "PMC Pm49FL004",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1300,7 +1284,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO( 0x01000, 128 )
}
}, {
- .mfr_id = MANUFACTURER_PMC,
+ .mfr_id = CFI_MFR_PMC,
.dev_id = PM49FL008,
.name = "PMC Pm49FL008",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1312,7 +1296,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO( 0x01000, 256 )
}
}, {
- .mfr_id = MANUFACTURER_SHARP,
+ .mfr_id = CFI_MFR_SHARP,
.dev_id = LH28F640BF,
.name = "LH28F640BF",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1324,7 +1308,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x40000,16),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39LF512,
.name = "SST 39LF512",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1336,7 +1320,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,16),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39LF010,
.name = "SST 39LF010",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1348,8 +1332,8 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,32),
}
}, {
- .mfr_id = MANUFACTURER_SST,
- .dev_id = SST29EE020,
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST29EE020,
.name = "SST 29EE020",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
@@ -1359,9 +1343,9 @@ static const struct amd_flash_info jedec_table[] = {
.regions = {ERASEINFO(0x01000,64),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST29LE020,
- .name = "SST 29LE020",
+ .name = "SST 29LE020",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_256KiB,
@@ -1370,7 +1354,7 @@ static const struct amd_flash_info jedec_table[] = {
.regions = {ERASEINFO(0x01000,64),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39LF020,
.name = "SST 39LF020",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1382,7 +1366,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,64),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39LF040,
.name = "SST 39LF040",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1394,7 +1378,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,128),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39SF010A,
.name = "SST 39SF010A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1406,7 +1390,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,32),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39SF020A,
.name = "SST 39SF020A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1418,7 +1402,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,64),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39SF040,
.name = "SST 39SF040",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1430,7 +1414,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,128),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF040B,
.name = "SST 49LF040B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1443,7 +1427,7 @@ static const struct amd_flash_info jedec_table[] = {
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF004B,
.name = "SST 49LF004B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1455,7 +1439,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,128),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF008A,
.name = "SST 49LF008A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1467,7 +1451,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,256),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF030A,
.name = "SST 49LF030A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1479,7 +1463,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,96),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF040A,
.name = "SST 49LF040A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1491,7 +1475,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,128),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF080A,
.name = "SST 49LF080A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1503,7 +1487,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,256),
}
}, {
- .mfr_id = MANUFACTURER_SST, /* should be CFI */
+ .mfr_id = CFI_MFR_SST, /* should be CFI */
.dev_id = SST39LF160,
.name = "SST 39LF160",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1516,7 +1500,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x1000,256)
}
}, {
- .mfr_id = MANUFACTURER_SST, /* should be CFI */
+ .mfr_id = CFI_MFR_SST, /* should be CFI */
.dev_id = SST39VF1601,
.name = "SST 39VF1601",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1529,7 +1513,35 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x1000,256)
}
}, {
- .mfr_id = MANUFACTURER_SST, /* should be CFI */
+ /* CFI is broken: reports AMD_STD, but needs custom uaddr */
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39WF1601,
+ .name = "SST 39WF1601",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ /* CFI is broken: reports AMD_STD, but needs custom uaddr */
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39WF1602,
+ .name = "SST 39WF1602",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST, /* should be CFI */
.dev_id = SST39VF3201,
.name = "SST 39VF3201",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1544,7 +1556,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x1000,256)
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST36VF3203,
.name = "SST 36VF3203",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1556,7 +1568,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,64),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M29F800AB,
.name = "ST M29F800AB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1571,7 +1583,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15),
}
}, {
- .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W800DT,
.name = "ST M29W800DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1586,7 +1598,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W800DB,
.name = "ST M29W800DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1601,7 +1613,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15)
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M29W400DT,
.name = "ST M29W400DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1616,7 +1628,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,1)
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M29W400DB,
.name = "ST M29W400DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1631,7 +1643,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,7)
}
}, {
- .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W160DT,
.name = "ST M29W160DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1646,7 +1658,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W160DB,
.name = "ST M29W160DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1661,7 +1673,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M29W040B,
.name = "ST M29W040B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1673,7 +1685,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50FW040,
.name = "ST M50FW040",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1685,7 +1697,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50FW080,
.name = "ST M50FW080",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1697,7 +1709,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50FW016,
.name = "ST M50FW016",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1709,7 +1721,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,32),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50LPW080,
.name = "ST M50LPW080",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1721,7 +1733,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
},
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50FLW080A,
.name = "ST M50FLW080A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1736,7 +1748,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x1000,16),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50FLW080B,
.name = "ST M50FLW080B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1751,7 +1763,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x1000,16),
}
}, {
- .mfr_id = 0xff00 | MANUFACTURER_ST,
+ .mfr_id = 0xff00 | CFI_MFR_ST,
.dev_id = 0xff00 | PSD4256G6V,
.name = "ST PSD4256G6V",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1763,7 +1775,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVT160,
.name = "Toshiba TC58FVT160",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1778,7 +1790,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVB160,
.name = "Toshiba TC58FVB160",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1793,7 +1805,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVB321,
.name = "Toshiba TC58FVB321",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1806,7 +1818,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,63)
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVT321,
.name = "Toshiba TC58FVT321",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1819,7 +1831,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,8)
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVB641,
.name = "Toshiba TC58FVB641",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1832,7 +1844,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,127)
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVT641,
.name = "Toshiba TC58FVT641",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1845,7 +1857,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,8)
}
}, {
- .mfr_id = MANUFACTURER_WINBOND,
+ .mfr_id = CFI_MFR_WINBOND,
.dev_id = W49V002A,
.name = "Winbond W49V002A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1878,7 +1890,7 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
mask = (1 << (cfi->device_type * 8)) - 1;
result = map_read(map, base + ofs);
bank++;
- } while ((result.x[0] & mask) == CONTINUATION_CODE);
+ } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION);
return result.x[0] & mask;
}
@@ -1969,7 +1981,7 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type;
p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type;
- return 1; /* ok */
+ return 1; /* ok */
}
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index ab5c9b9..f3226b1 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -1,5 +1,5 @@
#
-# linux/drivers/devices/Makefile
+# linux/drivers/mtd/devices/Makefile
#
obj-$(CONFIG_MTD_DOC2000) += doc2000.o
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index ce64240..9365186 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -276,12 +276,10 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
/* Setup the MTD structure */
/* make the name contain the block device in */
- name = kmalloc(sizeof("block2mtd: ") + strlen(devname) + 1,
- GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
if (!name)
goto devinit_err;
- sprintf(name, "block2mtd: %s", devname);
dev->mtd.name = name;
dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index d2fd550..fc8ea0a 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -668,7 +668,7 @@ static int __init init_pmc551(void)
{
struct pci_dev *PCI_Device = NULL;
struct mypriv *priv;
- int count, found = 0;
+ int found = 0;
struct mtd_info *mtd;
u32 length = 0;
@@ -695,7 +695,7 @@ static int __init init_pmc551(void)
/*
* PCU-bus chipset probe.
*/
- for (count = 0; count < MAX_MTD_DEVICES; count++) {
+ for (;;) {
if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI,
PCI_DEVICE_ID_V3_SEMI_V370PDC,
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index fe17054..ab5d8cd 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -73,15 +73,25 @@ static struct flash_info __initdata sst25l_flash_info[] = {
static int sst25l_status(struct sst25l_flash *flash, int *status)
{
- unsigned char command, response;
+ struct spi_message m;
+ struct spi_transfer t;
+ unsigned char cmd_resp[2];
int err;
- command = SST25L_CMD_RDSR;
- err = spi_write_then_read(flash->spi, &command, 1, &response, 1);
+ spi_message_init(&m);
+ memset(&t, 0, sizeof(struct spi_transfer));
+
+ cmd_resp[0] = SST25L_CMD_RDSR;
+ cmd_resp[1] = 0xff;
+ t.tx_buf = cmd_resp;
+ t.rx_buf = cmd_resp;
+ t.len = sizeof(cmd_resp);
+ spi_message_add_tail(&t, &m);
+ err = spi_sync(flash->spi, &m);
if (err < 0)
return err;
- *status = response;
+ *status = cmd_resp[1];
return 0;
}
@@ -328,33 +338,32 @@ out:
static struct flash_info *__init sst25l_match_device(struct spi_device *spi)
{
struct flash_info *flash_info = NULL;
- unsigned char command[4], response;
+ struct spi_message m;
+ struct spi_transfer t;
+ unsigned char cmd_resp[6];
int i, err;
uint16_t id;
- command[0] = SST25L_CMD_READ_ID;
- command[1] = 0;
- command[2] = 0;
- command[3] = 0;
- err = spi_write_then_read(spi, command, sizeof(command), &response, 1);
+ spi_message_init(&m);
+ memset(&t, 0, sizeof(struct spi_transfer));
+
+ cmd_resp[0] = SST25L_CMD_READ_ID;
+ cmd_resp[1] = 0;
+ cmd_resp[2] = 0;
+ cmd_resp[3] = 0;
+ cmd_resp[4] = 0xff;
+ cmd_resp[5] = 0xff;
+ t.tx_buf = cmd_resp;
+ t.rx_buf = cmd_resp;
+ t.len = sizeof(cmd_resp);
+ spi_message_add_tail(&t, &m);
+ err = spi_sync(spi, &m);
if (err < 0) {
- dev_err(&spi->dev, "error reading device id msb\n");
+ dev_err(&spi->dev, "error reading device id\n");
return NULL;
}
- id = response << 8;
-
- command[0] = SST25L_CMD_READ_ID;
- command[1] = 0;
- command[2] = 0;
- command[3] = 1;
- err = spi_write_then_read(spi, command, sizeof(command), &response, 1);
- if (err < 0) {
- dev_err(&spi->dev, "error reading device id lsb\n");
- return NULL;
- }
-
- id |= response;
+ id = (cmd_resp[4] << 8) | cmd_resp[5];
for (i = 0; i < ARRAY_SIZE(sst25l_flash_info); i++)
if (sst25l_flash_info[i].device_id == id)
@@ -411,17 +420,6 @@ static int __init sst25l_probe(struct spi_device *spi)
flash->mtd.erasesize, flash->mtd.erasesize / 1024,
flash->mtd.numeraseregions);
- if (flash->mtd.numeraseregions)
- for (i = 0; i < flash->mtd.numeraseregions; i++)
- DEBUG(MTD_DEBUG_LEVEL2,
- "mtd.eraseregions[%d] = { .offset = 0x%llx, "
- ".erasesize = 0x%.8x (%uKiB), "
- ".numblocks = %d }\n",
- i, (long long)flash->mtd.eraseregions[i].offset,
- flash->mtd.eraseregions[i].erasesize,
- flash->mtd.eraseregions[i].erasesize / 1024,
- flash->mtd.eraseregions[i].numblocks);
-
if (mtd_has_partitions()) {
struct mtd_partition *parts = NULL;
int nr_parts = 0;
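/*
 * Illustrative sketch, not part of the patch: the sst25l.c hunks above
 * replace two spi_write_then_read() calls with a single full-duplex
 * spi_sync() message in which tx_buf and rx_buf point at the same
 * buffer, so the opcode is clocked out while the reply is clocked back
 * in.  read_reg_fullduplex is a hypothetical helper written to the same
 * pattern; the spi_message/spi_transfer API is the one used in the diff.
 */
#include <linux/spi/spi.h>
#include <linux/string.h>

static int read_reg_fullduplex(struct spi_device *spi, u8 opcode, u8 *val)
{
	struct spi_message m;
	struct spi_transfer t;
	u8 buf[2];
	int err;

	spi_message_init(&m);
	memset(&t, 0, sizeof(t));

	buf[0] = opcode;	/* command byte clocked out on MOSI */
	buf[1] = 0xff;		/* dummy byte; the reply arrives on MISO here */
	t.tx_buf = buf;
	t.rx_buf = buf;		/* full duplex: transmit and receive share buf */
	t.len = sizeof(buf);
	spi_message_add_tail(&t, &m);

	err = spi_sync(spi, &m);
	if (err < 0)
		return err;

	*val = buf[1];
	return 0;
}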
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index e56d6b4..62da9eb 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -1082,7 +1082,6 @@ static void ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
ftl_freepart((partition_t *)dev);
- kfree(dev);
}
static struct mtd_blktrans_ops ftl_tr = {
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 8aca552..015a7fe 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -139,7 +139,6 @@ static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
kfree(inftl->PUtable);
kfree(inftl->VUtable);
- kfree(inftl);
}
/*
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 32e82ae..8f988d7 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -100,9 +100,10 @@ static int find_boot_record(struct INFTLrecord *inftl)
}
/* To be safer with BIOS, also use erase mark as discriminant */
- if ((ret = inftl_read_oob(mtd, block * inftl->EraseSize +
- SECTORSIZE + 8, 8, &retlen,
- (char *)&h1) < 0)) {
+ ret = inftl_read_oob(mtd,
+ block * inftl->EraseSize + SECTORSIZE + 8,
+ 8, &retlen, (char *)&h1);
+ if (ret < 0) {
printk(KERN_WARNING "INFTL: ANAND header found at "
"0x%x in mtd%d, but OOB data read failed "
"(err %d)\n", block * inftl->EraseSize,
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index a73ee12..fece5be 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -107,8 +107,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
/* those should be reset too since
they create memory references. */
init_waitqueue_head(&chip->wq);
- spin_lock_init(&chip->_spinlock);
- chip->mutex = &chip->_spinlock;
+ mutex_init(&chip->mutex);
chip++;
}
}
@@ -144,7 +143,7 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip,
}
/* OK Still waiting. Drop the lock, wait a while and retry. */
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (sleep_time >= 1000000/HZ) {
/*
* Half of the normal delay still remaining
@@ -159,17 +158,17 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip,
cond_resched();
timeo--;
}
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != chip_state) {
/* Someone's suspended the operation: sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
if (chip->erase_suspended || chip->write_suspended) {
/* Suspend has occurred while sleeping: reset timeout */
@@ -230,20 +229,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
* it'll happily send us to sleep. In any case, when
* get_chip returns success we're clear to go ahead.
*/
- ret = spin_trylock(contender->mutex);
+ ret = mutex_trylock(&contender->mutex);
spin_unlock(&shared->lock);
if (!ret)
goto retry;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
ret = chip_ready(map, contender, mode);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (ret == -EAGAIN) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
if (ret) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
return ret;
}
spin_lock(&shared->lock);
@@ -252,10 +251,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
* state. Put contender and retry. */
if (chip->state == FL_SYNCING) {
put_chip(map, contender);
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
}
/* Check if we have suspended erase on this chip.
@@ -265,10 +264,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
spin_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
goto retry;
}
@@ -337,10 +336,10 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
return -EAGAIN;
}
}
@@ -356,12 +355,12 @@ static void put_chip(struct map_info *map, struct flchip *chip)
if (shared->writing && shared->writing != chip) {
/* give back the ownership */
struct flchip *loaner = shared->writing;
- spin_lock(loaner->mutex);
+ mutex_lock(&loaner->mutex);
spin_unlock(&shared->lock);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
put_chip(map, loaner);
- spin_lock(chip->mutex);
- spin_unlock(loaner->mutex);
+ mutex_lock(&chip->mutex);
+ mutex_unlock(&loaner->mutex);
wake_up(&chip->wq);
return;
}
@@ -414,10 +413,10 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,
wbufsize = 1 << lpddr->qinfo->BufSizeShift;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
/* Figure out the number of words to write */
@@ -478,7 +477,7 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,
}
out: put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -490,10 +489,10 @@ int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
struct flchip *chip = &lpddr->chips[chipnum];
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_ERASING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
@@ -505,7 +504,7 @@ int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
goto out;
}
out: put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -518,10 +517,10 @@ static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
struct flchip *chip = &lpddr->chips[chipnum];
int ret = 0;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_READY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -529,7 +528,7 @@ static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
*retlen = len;
put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -569,9 +568,9 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
else
thislen = len;
/* get the chip */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_POINT);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (ret)
break;
@@ -611,7 +610,7 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
else
thislen = len;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_POINT) {
chip->ref_point_counter--;
if (chip->ref_point_counter == 0)
@@ -621,7 +620,7 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
"pointed region\n", map->name);
put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
len -= thislen;
ofs = 0;
@@ -727,10 +726,10 @@ int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
int chipnum = adr >> lpddr->chipshift;
struct flchip *chip = &lpddr->chips[chipnum];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_LOCKING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -750,7 +749,7 @@ int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
goto out;
}
out: put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -771,10 +770,10 @@ int word_program(struct map_info *map, loff_t adr, uint32_t curval)
int chipnum = adr >> lpddr->chipshift;
struct flchip *chip = &lpddr->chips[chipnum];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -788,7 +787,7 @@ int word_program(struct map_info *map, loff_t adr, uint32_t curval)
}
out: put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
index 79bf40f..dbfe17b 100644
--- a/drivers/mtd/lpddr/qinfo_probe.c
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -134,13 +134,12 @@ out:
static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr)
{
- lpddr->qinfo = kmalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
+ lpddr->qinfo = kzalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
if (!lpddr->qinfo) {
printk(KERN_WARNING "%s: no memory for LPDDR qinfo structure\n",
map->name);
return 0;
}
- memset(lpddr->qinfo, 0, sizeof(struct qinfo_chip));
/* Get the ManuID */
lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID));
@@ -185,13 +184,11 @@ static struct lpddr_private *lpddr_probe_chip(struct map_info *map)
lpddr.numchips = 1;
numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum;
- retlpddr = kmalloc(sizeof(struct lpddr_private) +
+ retlpddr = kzalloc(sizeof(struct lpddr_private) +
numvirtchips * sizeof(struct flchip), GFP_KERNEL);
if (!retlpddr)
return NULL;
- memset(retlpddr, 0, sizeof(struct lpddr_private) +
- numvirtchips * sizeof(struct flchip));
memcpy(retlpddr, &lpddr, sizeof(struct lpddr_private));
retlpddr->numchips = numvirtchips;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index aa2807d..f22bc9f 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -435,7 +435,7 @@ config MTD_PCI
config MTD_PCMCIA
tristate "PCMCIA MTD driver"
- depends on PCMCIA && MTD_COMPLEX_MAPPINGS && BROKEN
+ depends on PCMCIA && MTD_COMPLEX_MAPPINGS
help
Map driver for accessing PCMCIA linear flash memory cards. These
cards are usually around 4-16MiB in size. This does not include
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index c0fd99b..85dd181 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -70,7 +70,7 @@ static void switch_back(struct async_state *state)
local_irq_restore(state->irq_flags);
}
-static map_word bfin_read(struct map_info *map, unsigned long ofs)
+static map_word bfin_flash_read(struct map_info *map, unsigned long ofs)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
uint16_t word;
@@ -86,7 +86,7 @@ static map_word bfin_read(struct map_info *map, unsigned long ofs)
return test;
}
-static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+static void bfin_flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
@@ -97,7 +97,7 @@ static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, s
switch_back(state);
}
-static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs)
+static void bfin_flash_write(struct map_info *map, map_word d1, unsigned long ofs)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
uint16_t d;
@@ -112,7 +112,7 @@ static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs)
switch_back(state);
}
-static void bfin_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
@@ -141,10 +141,10 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
return -ENOMEM;
state->map.name = DRIVER_NAME;
- state->map.read = bfin_read;
- state->map.copy_from = bfin_copy_from;
- state->map.write = bfin_write;
- state->map.copy_to = bfin_copy_to;
+ state->map.read = bfin_flash_read;
+ state->map.copy_from = bfin_flash_copy_from;
+ state->map.write = bfin_flash_write;
+ state->map.copy_to = bfin_flash_copy_to;
state->map.bankwidth = pdata->width;
state->map.size = memory->end - memory->start + 1;
state->map.virt = (void __iomem *)memory->start;
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
index d41f347..c09f4f5 100644
--- a/drivers/mtd/maps/ceiva.c
+++ b/drivers/mtd/maps/ceiva.c
@@ -253,7 +253,7 @@ static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd
static int __init clps_setup_flash(void)
{
- int nr;
+ int nr = 0;
#ifdef CONFIG_ARCH_CEIVA
if (machine_is_ceiva()) {
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 1bdf0ee..9639d83 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -165,12 +165,11 @@ static int ixp2000_flash_probe(struct platform_device *dev)
return -EIO;
}
- info = kmalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL);
if(!info) {
err = -ENOMEM;
goto Error;
}
- memset(info, 0, sizeof(struct ixp2000_flash_info));
platform_set_drvdata(dev, info);
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 7b05152..e0a5e04 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -107,8 +107,8 @@ static void ixp4xx_copy_from(struct map_info *map, void *to,
return;
if (from & 1) {
- *dest++ = BYTE1(flash_read16(src));
- src++;
+ *dest++ = BYTE1(flash_read16(src-1));
+ src++;
--len;
}
@@ -196,12 +196,11 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
return err;
}
- info = kmalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL);
if(!info) {
err = -ENOMEM;
goto Error;
}
- memset(info, 0, sizeof(struct ixp4xx_flash_info));
platform_set_drvdata(dev, info);
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 87b2b8f..e699e6a 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -40,10 +40,7 @@ MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy");
static const int debug = 0;
#endif
-#define err(format, arg...) printk(KERN_ERR "pcmciamtd: " format "\n" , ## arg)
#define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg)
-#define warn(format, arg...) printk(KERN_WARNING "pcmciamtd: " format "\n" , ## arg)
-
#define DRIVER_DESC "PCMCIA Flash memory card driver"
@@ -99,7 +96,9 @@ module_param(mem_type, int, 0);
MODULE_PARM_DESC(mem_type, "Set Memory type (0=Flash, 1=RAM, 2=ROM, default=0)");
-/* read/write{8,16} copy_{from,to} routines with window remapping to access whole card */
+/* read/write{8,16} copy_{from,to} routines with window remapping
+ * to access whole card
+ */
static caddr_t remap_window(struct map_info *map, unsigned long to)
{
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
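
The remapping helpers split a whole-card offset into the window-aligned base that must be mapped in and the remainder inside the window, relying on the window size being a power of two. A minimal userspace sketch of that split with illustrative values (the names below are not taken from the driver):

#include <stdio.h>

int main(void)
{
	unsigned long win_size = 0x2000;	/* 8 KiB window, power of two */
	unsigned long card_ofs = 0x12345;	/* offset into the whole card */

	/* Start of the window that has to be mapped in for this access. */
	unsigned long win_base = card_ofs & ~(win_size - 1);
	/* Offset of the access inside that window. */
	unsigned long in_win = card_ofs & (win_size - 1);

	printf("card 0x%05lx -> window base 0x%05lx + 0x%04lx\n",
	       card_ofs, win_base, in_win);
	return 0;
}
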
@@ -136,7 +135,7 @@ static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readb(addr);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, addr, d.x[0]);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", ofs, addr, d.x[0]);
return d;
}
@@ -151,7 +150,7 @@ static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readw(addr);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, addr, d.x[0]);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", ofs, addr, d.x[0]);
return d;
}
@@ -161,7 +160,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
unsigned long win_size = dev->win_size;
- DEBUG(3, "to = %p from = %lu len = %u", to, from, len);
+ DEBUG(3, "to = %p from = %lu len = %zd", to, from, len);
while(len) {
int toread = win_size - (from & (win_size-1));
caddr_t addr;
@@ -189,7 +188,7 @@ static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long
if(!addr)
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, addr, d.x[0]);
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", adr, addr, d.x[0]);
writeb(d.x[0], addr);
}
@@ -200,7 +199,7 @@ static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long
if(!addr)
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, addr, d.x[0]);
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", adr, addr, d.x[0]);
writew(d.x[0], addr);
}
@@ -210,7 +209,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
unsigned long win_size = dev->win_size;
- DEBUG(3, "to = %lu from = %p len = %u", to, from, len);
+ DEBUG(3, "to = %lu from = %p len = %zd", to, from, len);
while(len) {
int towrite = win_size - (to & (win_size-1));
caddr_t addr;
@@ -244,7 +243,8 @@ static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readb(win_base + ofs);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, win_base + ofs, d.x[0]);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx",
+ ofs, win_base + ofs, d.x[0]);
return d;
}
@@ -258,7 +258,8 @@ static map_word pcmcia_read16(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readw(win_base + ofs);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, win_base + ofs, d.x[0]);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx",
+ ofs, win_base + ofs, d.x[0]);
return d;
}
@@ -270,32 +271,34 @@ static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from,
if(DEV_REMOVED(map))
return;
- DEBUG(3, "to = %p from = %lu len = %u", to, from, len);
+ DEBUG(3, "to = %p from = %lu len = %zd", to, from, len);
memcpy_fromio(to, win_base + from, len);
}
-static void pcmcia_write8(struct map_info *map, u8 d, unsigned long adr)
+static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr)
{
caddr_t win_base = (caddr_t)map->map_priv_2;
if(DEV_REMOVED(map))
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, win_base + adr, d);
- writeb(d, win_base + adr);
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx",
+ adr, win_base + adr, d.x[0]);
+ writeb(d.x[0], win_base + adr);
}
-static void pcmcia_write16(struct map_info *map, u16 d, unsigned long adr)
+static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr)
{
caddr_t win_base = (caddr_t)map->map_priv_2;
if(DEV_REMOVED(map))
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, win_base + adr, d);
- writew(d, win_base + adr);
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx",
+ adr, win_base + adr, d.x[0]);
+ writew(d.x[0], win_base + adr);
}
@@ -306,7 +309,7 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f
if(DEV_REMOVED(map))
return;
- DEBUG(3, "to = %lu from = %p len = %u", to, from, len);
+ DEBUG(3, "to = %lu from = %p len = %zd", to, from, len);
memcpy_toio(win_base + to, from, len);
}
@@ -375,7 +378,8 @@ static int pcmciamtd_cistpl_jedec(struct pcmcia_device *p_dev,
if (!pcmcia_parse_tuple(tuple, &parse)) {
cistpl_jedec_t *t = &parse.jedec;
for (i = 0; i < t->nid; i++)
- DEBUG(2, "JEDEC: 0x%02x 0x%02x", t->id[i].mfr, t->id[i].info);
+ DEBUG(2, "JEDEC: 0x%02x 0x%02x",
+ t->id[i].mfr, t->id[i].info);
}
return -ENOSPC;
}
@@ -431,7 +435,7 @@ static int pcmciamtd_cistpl_geo(struct pcmcia_device *p_dev,
}
-static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link, int *new_name)
+static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev, int *new_name)
{
int i;
@@ -476,7 +480,8 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link,
}
DEBUG(1, "Device: Size: %lu Width:%d Name: %s",
- dev->pcmcia_map.size, dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
+ dev->pcmcia_map.size,
+ dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
}
@@ -489,7 +494,6 @@ static int pcmciamtd_config(struct pcmcia_device *link)
{
struct pcmciamtd_dev *dev = link->priv;
struct mtd_info *mtd = NULL;
- cs_status_t status;
win_req_t req;
int ret;
int i;
@@ -513,9 +517,11 @@ static int pcmciamtd_config(struct pcmcia_device *link)
if(setvpp == 1)
dev->pcmcia_map.set_vpp = pcmciamtd_set_vpp;
- /* Request a memory window for PCMCIA. Some architeures can map windows upto the maximum
- that PCMCIA can support (64MiB) - this is ideal and we aim for a window the size of the
- whole card - otherwise we try smaller windows until we succeed */
+	/* Request a memory window for PCMCIA. Some architectures can map windows
+	 * up to the maximum that PCMCIA can support (64MiB) - this is ideal and
+ * we aim for a window the size of the whole card - otherwise we try
+ * smaller windows until we succeed
+ */
req.Attributes = WIN_MEMORY_TYPE_CM | WIN_ENABLE;
req.Attributes |= (dev->pcmcia_map.bankwidth == 1) ? WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16;
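
The strategy described in the comment is a simple halving loop: ask for a window as large as the card (capped at the 64MiB PCMCIA limit) and keep halving the request until the socket grants one. A hedged userspace sketch of that loop, where request_window() is only a stand-in that pretends the socket grants at most 1MiB:

#include <stdio.h>

/* Stand-in for the real request; here the "socket" grants at most 1 MiB. */
static int request_window(unsigned long size)
{
	return size <= 0x100000 ? 0 : -1;
}

int main(void)
{
	unsigned long size = 64UL << 20;	/* start at the 64 MiB PCMCIA limit */

	while (size >= 0x1000 && request_window(size) != 0)
		size >>= 1;			/* halve until the request succeeds */

	printf("window granted: 0x%lx\n", size >= 0x1000 ? size : 0);
	return 0;
}
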
@@ -543,7 +549,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
DEBUG(2, "dev->win_size = %d", dev->win_size);
if(!dev->win_size) {
- err("Cant allocate memory window");
+ dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n");
pcmciamtd_release(link);
return -ENODEV;
}
@@ -553,7 +559,8 @@ static int pcmciamtd_config(struct pcmcia_device *link)
DEBUG(2, "window handle = 0x%8.8lx", (unsigned long)link->win);
dev->win_base = ioremap(req.Base, req.Size);
if(!dev->win_base) {
- err("ioremap(%lu, %u) failed", req.Base, req.Size);
+ dev_err(&dev->p_dev->dev, "ioremap(%lu, %u) failed\n",
+ req.Base, req.Size);
pcmciamtd_release(link);
return -ENODEV;
}
@@ -564,7 +571,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
dev->pcmcia_map.map_priv_1 = (unsigned long)dev;
dev->pcmcia_map.map_priv_2 = (unsigned long)link->win;
- dev->vpp = (vpp) ? vpp : link->socket.socket.Vpp;
+ dev->vpp = (vpp) ? vpp : link->socket->socket.Vpp;
link->conf.Attributes = 0;
if(setvpp == 2) {
link->conf.Vpp = dev->vpp;
@@ -600,7 +607,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
}
if(!mtd) {
- DEBUG(1, "Cant find an MTD");
+		DEBUG(1, "Cannot find an MTD");
pcmciamtd_release(link);
return -ENODEV;
}
@@ -611,8 +618,9 @@ static int pcmciamtd_config(struct pcmcia_device *link)
if(new_name) {
int size = 0;
char unit = ' ';
- /* Since we are using a default name, make it better by adding in the
- size */
+ /* Since we are using a default name, make it better by adding
+ * in the size
+ */
if(mtd->size < 1048576) { /* <1MiB in size, show size in KiB */
size = mtd->size >> 10;
unit = 'K';
@@ -642,15 +650,15 @@ static int pcmciamtd_config(struct pcmcia_device *link)
if(add_mtd_device(mtd)) {
map_destroy(mtd);
dev->mtd_info = NULL;
- err("Couldnt register MTD device");
+ dev_err(&dev->p_dev->dev,
+ "Could not register the MTD device\n");
pcmciamtd_release(link);
return -ENODEV;
}
- info("mtd%d: %s", mtd->index, mtd->name);
+ dev_info(&dev->p_dev->dev, "mtd%d: %s\n", mtd->index, mtd->name);
return 0;
- failed:
- err("CS Error, exiting");
+ dev_err(&dev->p_dev->dev, "CS Error, exiting\n");
pcmciamtd_release(link);
return -ENODEV;
}
@@ -689,8 +697,9 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
if(dev->mtd_info) {
del_mtd_device(dev->mtd_info);
+ dev_info(&dev->p_dev->dev, "mtd%d: Removing\n",
+ dev->mtd_info->index);
map_destroy(dev->mtd_info);
- info("mtd%d: Removed", dev->mtd_info->index);
}
pcmciamtd_release(link);
@@ -734,8 +743,11 @@ static struct pcmcia_device_id pcmciamtd_ids[] = {
PCMCIA_DEVICE_PROD_ID12("intel", "VALUE SERIES 100 ", 0x40ade711, 0xdf8506d8),
PCMCIA_DEVICE_PROD_ID12("KINGMAX TECHNOLOGY INC.", "SRAM 256K Bytes", 0x54d0c69c, 0xad12c29c),
PCMCIA_DEVICE_PROD_ID12("Maxtor", "MAXFL MobileMax Flash Memory Card", 0xb68968c8, 0x2dfb47b0),
+ PCMCIA_DEVICE_PROD_ID123("M-Systems", "M-SYS Flash Memory Card", "(c) M-Systems", 0x7ed2ad87, 0x675dc3fb, 0x7aef3965),
+ PCMCIA_DEVICE_PROD_ID12("PRETEC", " 2MB SRAM CARD", 0xebf91155, 0x805360ca),
PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB101EN20", 0xf9876baf, 0xad0b207b),
PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB513EN20", 0xf9876baf, 0xe8d884ad),
+ PCMCIA_DEVICE_PROD_ID12("SMART Modular Technologies", " 4MB FLASH Card", 0x96fd8277, 0x737a5b05),
PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-3000", 0x05ddca47, 0xe7d67bca),
PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-4100", 0x05ddca47, 0x7bc32944),
/* the following was commented out in pcmcia-cs-3.2.7 */
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index d9603f7..426461a 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -264,8 +264,11 @@ static int __init physmap_init(void)
err = platform_driver_register(&physmap_flash_driver);
#ifdef CONFIG_MTD_PHYSMAP_COMPAT
- if (err == 0)
- platform_device_register(&physmap_flash);
+ if (err == 0) {
+ err = platform_device_register(&physmap_flash);
+ if (err)
+ platform_driver_unregister(&physmap_flash_driver);
+ }
#endif
return err;
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 101ee6e..36dbcee 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -173,12 +173,53 @@ static struct mtd_info * __devinit obsolete_probe(struct of_device *dev,
}
}
+#ifdef CONFIG_MTD_PARTITIONS
+/* When partitions are set we look for a linux,part-probe property which
+ specifies the list of partition probers to use. If none is given then the
+   default is used. These take precedence over other device tree
+ information. */
+static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", NULL };
+static const char ** __devinit of_get_probes(struct device_node *dp)
+{
+ const char *cp;
+ int cplen;
+ unsigned int l;
+ unsigned int count;
+ const char **res;
+
+ cp = of_get_property(dp, "linux,part-probe", &cplen);
+ if (cp == NULL)
+ return part_probe_types_def;
+
+ count = 0;
+ for (l = 0; l != cplen; l++)
+ if (cp[l] == 0)
+ count++;
+
+ res = kzalloc((count + 1)*sizeof(*res), GFP_KERNEL);
+ count = 0;
+ while (cplen > 0) {
+ res[count] = cp;
+ l = strlen(cp) + 1;
+ cp += l;
+ cplen -= l;
+ count++;
+ }
+ return res;
+}
+
+static void __devinit of_free_probes(const char **probes)
+{
+ if (probes != part_probe_types_def)
+ kfree(probes);
+}
+#endif
+
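
The linux,part-probe property is a device-tree string list: several NUL-terminated names packed into one buffer. of_get_probes() counts the NULs to size the pointer array and then walks the buffer again to fill it in, terminating the array with NULL. A small userspace sketch of the same two passes over an illustrative buffer:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* Packed like a device-tree string list property. */
	const char buf[] = "cmdlinepart\0RedBoot";
	int buflen = sizeof(buf);		/* includes the final NUL */
	int i, count = 0;
	const char **res;
	const char *p = buf;

	for (i = 0; i < buflen; i++)		/* pass 1: count the strings */
		if (buf[i] == '\0')
			count++;

	res = calloc(count + 1, sizeof(*res));	/* pass 2: fill the pointers */
	if (!res)
		return 1;
	for (i = 0; i < count; i++) {
		res[i] = p;
		p += strlen(p) + 1;
	}

	for (i = 0; res[i]; i++)
		printf("probe[%d] = %s\n", i, res[i]);
	free(res);
	return 0;
}
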
static int __devinit of_flash_probe(struct of_device *dev,
const struct of_device_id *match)
{
#ifdef CONFIG_MTD_PARTITIONS
- static const char *part_probe_types[]
- = { "cmdlinepart", "RedBoot", NULL };
+ const char **part_probe_types;
#endif
struct device_node *dp = dev->node;
struct resource res;
@@ -218,7 +259,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
dev_set_drvdata(&dev->dev, info);
- mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL);
+ mtd_list = kzalloc(sizeof(*mtd_list) * count, GFP_KERNEL);
if (!mtd_list)
goto err_flash_remove;
@@ -307,12 +348,14 @@ static int __devinit of_flash_probe(struct of_device *dev,
goto err_out;
#ifdef CONFIG_MTD_PARTITIONS
- /* First look for RedBoot table or partitions on the command
- * line, these take precedence over device tree information */
+ part_probe_types = of_get_probes(dp);
err = parse_mtd_partitions(info->cmtd, part_probe_types,
&info->parts, 0);
- if (err < 0)
+ if (err < 0) {
+ of_free_probes(part_probe_types);
return err;
+ }
+ of_free_probes(part_probe_types);
#ifdef CONFIG_MTD_OF_PARTS
if (err == 0) {
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
index 60c068d..eb476b7 100644
--- a/drivers/mtd/maps/pismo.c
+++ b/drivers/mtd/maps/pismo.c
@@ -234,6 +234,7 @@ static int __devexit pismo_remove(struct i2c_client *client)
/* FIXME: set_vpp needs saner arguments */
pismo_setvpp_remove_fix(pismo);
+ i2c_set_clientdata(client, NULL);
kfree(pismo);
return 0;
@@ -272,7 +273,7 @@ static int __devinit pismo_probe(struct i2c_client *client,
ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom));
if (ret < 0) {
dev_err(&client->dev, "error reading EEPROM: %d\n", ret);
- return ret;
+ goto exit_free;
}
dev_info(&client->dev, "%.15s board found\n", eeprom.board);
@@ -283,6 +284,11 @@ static int __devinit pismo_probe(struct i2c_client *client,
pdata->cs_addrs[i]);
return 0;
+
+ exit_free:
+ i2c_set_clientdata(client, NULL);
+ kfree(pismo);
+ return ret;
}
static const struct i2c_device_id pismo_id[] = {
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 91dc633..dd90880 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -63,11 +63,10 @@ static int __init pxa2xx_flash_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- info = kmalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- memset(info, 0, sizeof(struct pxa2xx_flash_info));
info->map.name = (char *) flash->name;
info->map.bankwidth = flash->width;
info->map.phys = res->start;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index c82e09b..03e19c1 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -14,7 +14,6 @@
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
-#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
@@ -25,12 +24,42 @@
#include "mtdcore.h"
static LIST_HEAD(blktrans_majors);
+static DEFINE_MUTEX(blktrans_ref_mutex);
+
+void blktrans_dev_release(struct kref *kref)
+{
+ struct mtd_blktrans_dev *dev =
+ container_of(kref, struct mtd_blktrans_dev, ref);
+
+ dev->disk->private_data = NULL;
+ blk_cleanup_queue(dev->rq);
+ put_disk(dev->disk);
+ list_del(&dev->list);
+ kfree(dev);
+}
+
+static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
+{
+ struct mtd_blktrans_dev *dev;
+
+ mutex_lock(&blktrans_ref_mutex);
+ dev = disk->private_data;
+
+ if (!dev)
+ goto unlock;
+ kref_get(&dev->ref);
+unlock:
+ mutex_unlock(&blktrans_ref_mutex);
+ return dev;
+}
+
+void blktrans_dev_put(struct mtd_blktrans_dev *dev)
+{
+ mutex_lock(&blktrans_ref_mutex);
+ kref_put(&dev->ref, blktrans_dev_release);
+ mutex_unlock(&blktrans_ref_mutex);
+}
-struct mtd_blkcore_priv {
- struct task_struct *thread;
- struct request_queue *rq;
- spinlock_t queue_lock;
-};
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
struct mtd_blktrans_dev *dev,
@@ -61,7 +90,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
return -EIO;
rq_flush_dcache_pages(req);
return 0;
-
case WRITE:
if (!tr->writesect)
return -EIO;
@@ -71,7 +99,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
if (tr->writesect(dev, block, buf))
return -EIO;
return 0;
-
default:
printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
return -EIO;
@@ -80,14 +107,13 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
static int mtd_blktrans_thread(void *arg)
{
- struct mtd_blktrans_ops *tr = arg;
- struct request_queue *rq = tr->blkcore_priv->rq;
+ struct mtd_blktrans_dev *dev = arg;
+ struct request_queue *rq = dev->rq;
struct request *req = NULL;
spin_lock_irq(rq->queue_lock);
while (!kthread_should_stop()) {
- struct mtd_blktrans_dev *dev;
int res;
if (!req && !(req = blk_fetch_request(rq))) {
@@ -98,13 +124,10 @@ static int mtd_blktrans_thread(void *arg)
continue;
}
- dev = req->rq_disk->private_data;
- tr = dev->tr;
-
spin_unlock_irq(rq->queue_lock);
mutex_lock(&dev->lock);
- res = do_blktrans_request(tr, dev, req);
+ res = do_blktrans_request(dev->tr, dev, req);
mutex_unlock(&dev->lock);
spin_lock_irq(rq->queue_lock);
@@ -123,81 +146,112 @@ static int mtd_blktrans_thread(void *arg)
static void mtd_blktrans_request(struct request_queue *rq)
{
- struct mtd_blktrans_ops *tr = rq->queuedata;
- wake_up_process(tr->blkcore_priv->thread);
-}
+ struct mtd_blktrans_dev *dev;
+ struct request *req = NULL;
+
+ dev = rq->queuedata;
+ if (!dev)
+ while ((req = blk_fetch_request(rq)) != NULL)
+ __blk_end_request_all(req, -ENODEV);
+ else
+ wake_up_process(dev->thread);
+}
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
- struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
- struct mtd_blktrans_ops *tr = dev->tr;
- int ret = -ENODEV;
-
- if (!get_mtd_device(NULL, dev->mtd->index))
- goto out;
-
- if (!try_module_get(tr->owner))
- goto out_tr;
-
- /* FIXME: Locking. A hot pluggable device can go away
- (del_mtd_device can be called for it) without its module
- being unloaded. */
- dev->mtd->usecount++;
-
- ret = 0;
- if (tr->open && (ret = tr->open(dev))) {
- dev->mtd->usecount--;
- put_mtd_device(dev->mtd);
- out_tr:
- module_put(tr->owner);
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ int ret;
+
+ if (!dev)
+ return -ERESTARTSYS;
+
+ mutex_lock(&dev->lock);
+
+ if (!dev->mtd) {
+ ret = -ENXIO;
+ goto unlock;
}
- out:
+
+ ret = !dev->open++ && dev->tr->open ? dev->tr->open(dev) : 0;
+
+	/* Take another reference on the device so it won't go away
+	   until the last release */
+ if (!ret)
+ kref_get(&dev->ref);
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
return ret;
}
static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
- struct mtd_blktrans_dev *dev = disk->private_data;
- struct mtd_blktrans_ops *tr = dev->tr;
- int ret = 0;
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
+ int ret = -ENXIO;
- if (tr->release)
- ret = tr->release(dev);
+ if (!dev)
+ return ret;
- if (!ret) {
- dev->mtd->usecount--;
- put_mtd_device(dev->mtd);
- module_put(tr->owner);
- }
+ mutex_lock(&dev->lock);
+
+	/* Release one reference; we are sure it's not the last one here */
+ kref_put(&dev->ref, blktrans_dev_release);
+ if (!dev->mtd)
+ goto unlock;
+
+ ret = !--dev->open && dev->tr->release ? dev->tr->release(dev) : 0;
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
return ret;
}
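
Open and release now bracket every user of the block device with a reference: blktrans_open() takes an extra reference once the driver open succeeds, and blktrans_release() drops it, so the last put, wherever it happens, is what frees the structure. A minimal userspace analogy of that get-on-open / put-on-release pattern, using a plain counter and a mutex; this illustrates the idea only and is not the kernel kref API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dev {
	pthread_mutex_t lock;
	int refs;	/* 1 while registered, +1 for every opener */
	int open;	/* current number of openers */
};

static void dev_put(struct dev *d)
{
	int last;

	pthread_mutex_lock(&d->lock);
	last = (--d->refs == 0);
	pthread_mutex_unlock(&d->lock);
	if (last)
		free(d);	/* nobody can reach d any more */
}

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d));

	if (!d)
		return 1;
	pthread_mutex_init(&d->lock, NULL);
	d->refs = 1;		/* reference held while the device is registered */

	/* open(): pin the structure for as long as the node stays open */
	pthread_mutex_lock(&d->lock);
	d->open++;
	d->refs++;
	pthread_mutex_unlock(&d->lock);

	dev_put(d);		/* deregistration drops its reference; d survives */

	/* release(): the last opener drops its pin, which now frees d */
	pthread_mutex_lock(&d->lock);
	d->open--;
	pthread_mutex_unlock(&d->lock);
	dev_put(d);

	printf("freed after the last release\n");
	return 0;
}
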
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ int ret = -ENXIO;
+
+ if (!dev)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ if (!dev->mtd)
+ goto unlock;
- if (dev->tr->getgeo)
- return dev->tr->getgeo(dev, geo);
- return -ENOTTY;
+ ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
}
static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
- struct mtd_blktrans_ops *tr = dev->tr;
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ int ret = -ENXIO;
+
+ if (!dev)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ if (!dev->mtd)
+ goto unlock;
switch (cmd) {
case BLKFLSBUF:
- if (tr->flush)
- return tr->flush(dev);
- /* The core code did the work, we had nothing to do. */
- return 0;
+ ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
default:
- return -ENOTTY;
+ ret = -ENOTTY;
}
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
}
static const struct block_device_operations mtd_blktrans_ops = {
@@ -214,12 +268,14 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
struct mtd_blktrans_dev *d;
int last_devnum = -1;
struct gendisk *gd;
+ int ret;
if (mutex_trylock(&mtd_table_mutex)) {
mutex_unlock(&mtd_table_mutex);
BUG();
}
+ mutex_lock(&blktrans_ref_mutex);
list_for_each_entry(d, &tr->devs, list) {
if (new->devnum == -1) {
/* Use first free number */
@@ -231,6 +287,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
} else if (d->devnum == new->devnum) {
/* Required number taken */
+ mutex_unlock(&blktrans_ref_mutex);
return -EBUSY;
} else if (d->devnum > new->devnum) {
/* Required number was free */
@@ -239,24 +296,38 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
last_devnum = d->devnum;
}
+
+ ret = -EBUSY;
if (new->devnum == -1)
new->devnum = last_devnum+1;
- if ((new->devnum << tr->part_bits) > 256) {
- return -EBUSY;
+ /* Check that the device and any partitions will get valid
+ * minor numbers and that the disk naming code below can cope
+ * with this number. */
+ if (new->devnum > (MINORMASK >> tr->part_bits) ||
+ (tr->part_bits && new->devnum >= 27 * 26)) {
+ mutex_unlock(&blktrans_ref_mutex);
+ goto error1;
}
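
The new bound replaces the old hard-coded devnum << part_bits > 256 test. MINORMASK >> tr->part_bits keeps the device and all of its partition minors inside the minor-number space, and 27 * 26 (= 702) is how many names a one- or two-letter disk suffix ("a".."z", "aa".."zz") can express for partitioned devices. A quick arithmetic check, assuming the usual 20-bit minor space and an illustrative part_bits value:

#include <stdio.h>

int main(void)
{
	int minorbits = 20;			/* MINORBITS on Linux */
	unsigned int minormask = (1u << minorbits) - 1;
	int part_bits = 4;			/* illustrative: 16 minors per device */

	/* Largest devnum whose partition minors still fit in the minor space. */
	printf("max devnum by minors: %u\n", minormask >> part_bits);

	/* One- or two-letter suffixes: 26 + 26*26 = 702 = 27 * 26 names. */
	printf("names expressible:    %d\n", 26 + 26 * 26);
	return 0;
}
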
list_add_tail(&new->list, &tr->devs);
added:
+ mutex_unlock(&blktrans_ref_mutex);
+
mutex_init(&new->lock);
+ kref_init(&new->ref);
if (!tr->writesect)
new->readonly = 1;
+ /* Create gendisk */
+ ret = -ENOMEM;
gd = alloc_disk(1 << tr->part_bits);
- if (!gd) {
- list_del(&new->list);
- return -ENOMEM;
- }
+
+ if (!gd)
+ goto error2;
+
+ new->disk = gd;
+ gd->private_data = new;
gd->major = tr->major;
gd->first_minor = (new->devnum) << tr->part_bits;
gd->fops = &mtd_blktrans_ops;
@@ -274,13 +345,35 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%d", tr->name, new->devnum);
- /* 2.5 has capacity in units of 512 bytes while still
- having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
set_capacity(gd, (new->size * tr->blksize) >> 9);
- gd->private_data = new;
- new->blkcore_priv = gd;
- gd->queue = tr->blkcore_priv->rq;
+ /* Create the request queue */
+ spin_lock_init(&new->queue_lock);
+ new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);
+
+ if (!new->rq)
+ goto error3;
+
+ new->rq->queuedata = new;
+ blk_queue_logical_block_size(new->rq, tr->blksize);
+
+ if (tr->discard)
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+ new->rq);
+
+ gd->queue = new->rq;
+
+ __get_mtd_device(new->mtd);
+ __module_get(tr->owner);
+
+ /* Create processing thread */
+ /* TODO: workqueue ? */
+ new->thread = kthread_run(mtd_blktrans_thread, new,
+ "%s%d", tr->name, new->mtd->index);
+ if (IS_ERR(new->thread)) {
+ ret = PTR_ERR(new->thread);
+ goto error4;
+ }
gd->driverfs_dev = &new->mtd->dev;
if (new->readonly)
@@ -288,21 +381,65 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
add_disk(gd);
+ if (new->disk_attributes) {
+ ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
+ new->disk_attributes);
+ WARN_ON(ret);
+ }
return 0;
+error4:
+ module_put(tr->owner);
+ __put_mtd_device(new->mtd);
+ blk_cleanup_queue(new->rq);
+error3:
+ put_disk(new->disk);
+error2:
+ list_del(&new->list);
+error1:
+ kfree(new);
+ return ret;
}
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
+ unsigned long flags;
+
if (mutex_trylock(&mtd_table_mutex)) {
mutex_unlock(&mtd_table_mutex);
BUG();
}
- list_del(&old->list);
+	/* Stop new requests from arriving */
+ del_gendisk(old->disk);
+
+ if (old->disk_attributes)
+ sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
+ old->disk_attributes);
+
+ /* Stop the thread */
+ kthread_stop(old->thread);
+
+ /* Kill current requests */
+ spin_lock_irqsave(&old->queue_lock, flags);
+ old->rq->queuedata = NULL;
+ blk_start_queue(old->rq);
+ spin_unlock_irqrestore(&old->queue_lock, flags);
+
+	/* Ask the trans driver to release the mtd device */
+ mutex_lock(&old->lock);
+ if (old->open && old->tr->release) {
+ old->tr->release(old);
+ old->open = 0;
+ }
+
+ __put_mtd_device(old->mtd);
+ module_put(old->tr->owner);
- del_gendisk(old->blkcore_priv);
- put_disk(old->blkcore_priv);
+	/* At this point, we don't touch the mtd anymore */
+ old->mtd = NULL;
+ mutex_unlock(&old->lock);
+ blktrans_dev_put(old);
return 0;
}
@@ -335,7 +472,8 @@ static struct mtd_notifier blktrans_notifier = {
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
- int ret, i;
+ struct mtd_info *mtd;
+ int ret;
/* Register the notifier if/when the first device type is
registered, to prevent the link/init ordering from fucking
@@ -343,9 +481,6 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
if (!blktrans_notifier.list.next)
register_mtd_user(&blktrans_notifier);
- tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
- if (!tr->blkcore_priv)
- return -ENOMEM;
mutex_lock(&mtd_table_mutex);
@@ -353,49 +488,20 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
if (ret) {
printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
tr->name, tr->major, ret);
- kfree(tr->blkcore_priv);
mutex_unlock(&mtd_table_mutex);
return ret;
}
- spin_lock_init(&tr->blkcore_priv->queue_lock);
-
- tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
- if (!tr->blkcore_priv->rq) {
- unregister_blkdev(tr->major, tr->name);
- kfree(tr->blkcore_priv);
- mutex_unlock(&mtd_table_mutex);
- return -ENOMEM;
- }
-
- tr->blkcore_priv->rq->queuedata = tr;
- blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
- if (tr->discard)
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
- tr->blkcore_priv->rq);
tr->blkshift = ffs(tr->blksize) - 1;
- tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
- "%sd", tr->name);
- if (IS_ERR(tr->blkcore_priv->thread)) {
- ret = PTR_ERR(tr->blkcore_priv->thread);
- blk_cleanup_queue(tr->blkcore_priv->rq);
- unregister_blkdev(tr->major, tr->name);
- kfree(tr->blkcore_priv);
- mutex_unlock(&mtd_table_mutex);
- return ret;
- }
-
INIT_LIST_HEAD(&tr->devs);
list_add(&tr->list, &blktrans_majors);
- for (i=0; i<MAX_MTD_DEVICES; i++) {
- if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
- tr->add_mtd(tr, mtd_table[i]);
- }
+ mtd_for_each_device(mtd)
+ if (mtd->type != MTD_ABSENT)
+ tr->add_mtd(tr, mtd);
mutex_unlock(&mtd_table_mutex);
-
return 0;
}
@@ -405,22 +511,15 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
mutex_lock(&mtd_table_mutex);
- /* Clean up the kernel thread */
- kthread_stop(tr->blkcore_priv->thread);
-
/* Remove it from the list of active majors */
list_del(&tr->list);
list_for_each_entry_safe(dev, next, &tr->devs, list)
tr->remove_dev(dev);
- blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
-
mutex_unlock(&mtd_table_mutex);
- kfree(tr->blkcore_priv);
-
BUG_ON(!list_empty(&tr->devs));
return 0;
}
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 9f41b1a..e6edbec 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -19,15 +19,15 @@
#include <linux/mutex.h>
-static struct mtdblk_dev {
- struct mtd_info *mtd;
+struct mtdblk_dev {
+ struct mtd_blktrans_dev mbd;
int count;
struct mutex cache_mutex;
unsigned char *cache_data;
unsigned long cache_offset;
unsigned int cache_size;
enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
-} *mtdblks[MAX_MTD_DEVICES];
+};
static struct mutex mtdblks_lock;
@@ -98,7 +98,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
static int write_cached_data (struct mtdblk_dev *mtdblk)
{
- struct mtd_info *mtd = mtdblk->mtd;
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
int ret;
if (mtdblk->cache_state != STATE_DIRTY)
@@ -128,7 +128,7 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, const char *buf)
{
- struct mtd_info *mtd = mtdblk->mtd;
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
unsigned int sect_size = mtdblk->cache_size;
size_t retlen;
int ret;
@@ -198,7 +198,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, char *buf)
{
- struct mtd_info *mtd = mtdblk->mtd;
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
unsigned int sect_size = mtdblk->cache_size;
size_t retlen;
int ret;
@@ -244,16 +244,16 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
- struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
return do_cached_read(mtdblk, block<<9, 512, buf);
}
static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
- struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
- mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize);
+ mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
if (!mtdblk->cache_data)
return -EINTR;
/* -EINTR is not really correct, but it is the best match
@@ -266,37 +266,26 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
- struct mtdblk_dev *mtdblk;
- struct mtd_info *mtd = mbd->mtd;
- int dev = mbd->devnum;
+ struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
mutex_lock(&mtdblks_lock);
- if (mtdblks[dev]) {
- mtdblks[dev]->count++;
+ if (mtdblk->count) {
+ mtdblk->count++;
mutex_unlock(&mtdblks_lock);
return 0;
}
/* OK, it's not open. Create cache info for it */
- mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
- if (!mtdblk) {
- mutex_unlock(&mtdblks_lock);
- return -ENOMEM;
- }
-
mtdblk->count = 1;
- mtdblk->mtd = mtd;
-
mutex_init(&mtdblk->cache_mutex);
mtdblk->cache_state = STATE_EMPTY;
- if ( !(mtdblk->mtd->flags & MTD_NO_ERASE) && mtdblk->mtd->erasesize) {
- mtdblk->cache_size = mtdblk->mtd->erasesize;
+ if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
+ mtdblk->cache_size = mbd->mtd->erasesize;
mtdblk->cache_data = NULL;
}
- mtdblks[dev] = mtdblk;
mutex_unlock(&mtdblks_lock);
DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
@@ -306,8 +295,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
static int mtdblock_release(struct mtd_blktrans_dev *mbd)
{
- int dev = mbd->devnum;
- struct mtdblk_dev *mtdblk = mtdblks[dev];
+ struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
@@ -318,12 +306,10 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
mutex_unlock(&mtdblk->cache_mutex);
if (!--mtdblk->count) {
- /* It was the last usage. Free the device */
- mtdblks[dev] = NULL;
- if (mtdblk->mtd->sync)
- mtdblk->mtd->sync(mtdblk->mtd);
+ /* It was the last usage. Free the cache */
+ if (mbd->mtd->sync)
+ mbd->mtd->sync(mbd->mtd);
vfree(mtdblk->cache_data);
- kfree(mtdblk);
}
mutex_unlock(&mtdblks_lock);
@@ -335,40 +321,40 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
- struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
mutex_unlock(&mtdblk->cache_mutex);
- if (mtdblk->mtd->sync)
- mtdblk->mtd->sync(mtdblk->mtd);
+ if (dev->mtd->sync)
+ dev->mtd->sync(dev->mtd);
return 0;
}
static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
- struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return;
- dev->mtd = mtd;
- dev->devnum = mtd->index;
+ dev->mbd.mtd = mtd;
+ dev->mbd.devnum = mtd->index;
- dev->size = mtd->size >> 9;
- dev->tr = tr;
+ dev->mbd.size = mtd->size >> 9;
+ dev->mbd.tr = tr;
if (!(mtd->flags & MTD_WRITEABLE))
- dev->readonly = 1;
+ dev->mbd.readonly = 1;
- add_mtd_blktrans_dev(dev);
+ if (add_mtd_blktrans_dev(&dev->mbd))
+ kfree(dev);
}
static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
- kfree(dev);
}
static struct mtd_blktrans_ops mtdblock_tr = {
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 852165f..d0d3f79 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -43,13 +43,13 @@ static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
dev->tr = tr;
dev->readonly = 1;
- add_mtd_blktrans_dev(dev);
+ if (add_mtd_blktrans_dev(dev))
+ kfree(dev);
}
static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
- kfree(dev);
}
static struct mtd_blktrans_ops mtdblock_tr = {
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 5b081cb..8bb5e4a 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -15,12 +15,15 @@
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
+#include <linux/mount.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <asm/uaccess.h>
+#define MTD_INODE_FS_MAGIC 0x11307854
+static struct vfsmount *mtd_inode_mnt __read_mostly;
/*
* Data structure to hold the pointer to the mtd device as well
@@ -28,6 +31,7 @@
*/
struct mtd_file_info {
struct mtd_info *mtd;
+ struct inode *ino;
enum mtd_file_modes mode;
};
@@ -64,12 +68,10 @@ static int mtd_open(struct inode *inode, struct file *file)
int ret = 0;
struct mtd_info *mtd;
struct mtd_file_info *mfi;
+ struct inode *mtd_ino;
DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
- if (devnum >= MAX_MTD_DEVICES)
- return -ENODEV;
-
/* You can't open the RO devices RW */
if ((file->f_mode & FMODE_WRITE) && (minor & 1))
return -EACCES;
@@ -88,11 +90,23 @@ static int mtd_open(struct inode *inode, struct file *file)
goto out;
}
- if (mtd->backing_dev_info)
- file->f_mapping->backing_dev_info = mtd->backing_dev_info;
+ mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
+ if (!mtd_ino) {
+ put_mtd_device(mtd);
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (mtd_ino->i_state & I_NEW) {
+ mtd_ino->i_private = mtd;
+ mtd_ino->i_mode = S_IFCHR;
+ mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
+ unlock_new_inode(mtd_ino);
+ }
+ file->f_mapping = mtd_ino->i_mapping;
/* You can't open it RW if it's not a writeable device */
if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
+ iput(mtd_ino);
put_mtd_device(mtd);
ret = -EACCES;
goto out;
@@ -100,10 +114,12 @@ static int mtd_open(struct inode *inode, struct file *file)
mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
if (!mfi) {
+ iput(mtd_ino);
put_mtd_device(mtd);
ret = -ENOMEM;
goto out;
}
+ mfi->ino = mtd_ino;
mfi->mtd = mtd;
file->private_data = mfi;
@@ -125,6 +141,8 @@ static int mtd_close(struct inode *inode, struct file *file)
if ((file->f_mode & FMODE_WRITE) && mtd->sync)
mtd->sync(mtd);
+ iput(mfi->ino);
+
put_mtd_device(mtd);
file->private_data = NULL;
kfree(mfi);
@@ -373,7 +391,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
if (!mtd->write_oob)
ret = -EOPNOTSUPP;
else
- ret = access_ok(VERIFY_READ, ptr, length) ? 0 : EFAULT;
+ ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
if (ret)
return ret;
@@ -482,7 +500,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
{
uint32_t ur_idx;
struct mtd_erase_region_info *kr;
- struct region_info_user *ur = (struct region_info_user *) argp;
+ struct region_info_user __user *ur = argp;
if (get_user(ur_idx, &(ur->regionindex)))
return -EFAULT;
@@ -954,22 +972,81 @@ static const struct file_operations mtd_fops = {
#endif
};
+static int mtd_inodefs_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data,
+ struct vfsmount *mnt)
+{
+ return get_sb_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC,
+ mnt);
+}
+
+static struct file_system_type mtd_inodefs_type = {
+ .name = "mtd_inodefs",
+ .get_sb = mtd_inodefs_get_sb,
+ .kill_sb = kill_anon_super,
+};
+
+static void mtdchar_notify_add(struct mtd_info *mtd)
+{
+}
+
+static void mtdchar_notify_remove(struct mtd_info *mtd)
+{
+ struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);
+
+ if (mtd_ino) {
+ /* Destroy the inode if it exists */
+ mtd_ino->i_nlink = 0;
+ iput(mtd_ino);
+ }
+}
+
+static struct mtd_notifier mtdchar_notifier = {
+ .add = mtdchar_notify_add,
+ .remove = mtdchar_notify_remove,
+};
+
static int __init init_mtdchar(void)
{
- int status;
+ int ret;
- status = register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops);
- if (status < 0) {
- printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
- MTD_CHAR_MAJOR);
+ ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
+ "mtd", &mtd_fops);
+ if (ret < 0) {
+ pr_notice("Can't allocate major number %d for "
+ "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
+ return ret;
}
- return status;
+ ret = register_filesystem(&mtd_inodefs_type);
+ if (ret) {
+ pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
+ goto err_unregister_chdev;
+ }
+
+ mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
+ if (IS_ERR(mtd_inode_mnt)) {
+ ret = PTR_ERR(mtd_inode_mnt);
+ pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
+ goto err_unregister_filesystem;
+ }
+ register_mtd_user(&mtdchar_notifier);
+
+ return ret;
+
+err_unregister_filesystem:
+ unregister_filesystem(&mtd_inodefs_type);
+err_unregister_chdev:
+ __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
+ return ret;
}
static void __exit cleanup_mtdchar(void)
{
- unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
+ unregister_mtd_user(&mtdchar_notifier);
+ mntput(mtd_inode_mnt);
+ unregister_filesystem(&mtd_inodefs_type);
+ __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
module_init(init_mtdchar);
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index db6de74..7e07562 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -183,10 +183,9 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
}
/* make a copy of vecs */
- vecs_copy = kmalloc(sizeof(struct kvec) * count, GFP_KERNEL);
+ vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
if (!vecs_copy)
return -ENOMEM;
- memcpy(vecs_copy, vecs, sizeof(struct kvec) * count);
entry_low = 0;
for (i = 0; i < concat->num_subdev; i++) {
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index b177e75..a1b8b70 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -19,7 +19,9 @@
#include <linux/init.h>
#include <linux/mtd/compatmac.h>
#include <linux/proc_fs.h>
+#include <linux/idr.h>
#include <linux/backing-dev.h>
+#include <linux/gfp.h>
#include <linux/mtd/mtd.h>
@@ -63,13 +65,18 @@ static struct class mtd_class = {
.resume = mtd_cls_resume,
};
+static DEFINE_IDR(mtd_idr);
+
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
-struct mtd_info *mtd_table[MAX_MTD_DEVICES];
-
EXPORT_SYMBOL_GPL(mtd_table_mutex);
-EXPORT_SYMBOL_GPL(mtd_table);
+
+struct mtd_info *__mtd_next_device(int i)
+{
+ return idr_get_next(&mtd_idr, &i);
+}
+EXPORT_SYMBOL_GPL(__mtd_next_device);
static LIST_HEAD(mtd_notifiers);
@@ -265,13 +272,13 @@ static struct device_type mtd_devtype = {
* Add a device to the list of MTD devices present in the system, and
* notify each currently active MTD 'user' of its arrival. Returns
* zero on success or 1 on failure, which currently will only happen
- * if the number of present devices exceeds MAX_MTD_DEVICES (i.e. 16)
- * or there's a sysfs error.
+ * if there is insufficient memory or a sysfs error.
*/
int add_mtd_device(struct mtd_info *mtd)
{
- int i;
+ struct mtd_notifier *not;
+ int i, error;
if (!mtd->backing_dev_info) {
switch (mtd->type) {
@@ -290,70 +297,73 @@ int add_mtd_device(struct mtd_info *mtd)
BUG_ON(mtd->writesize == 0);
mutex_lock(&mtd_table_mutex);
- for (i=0; i < MAX_MTD_DEVICES; i++)
- if (!mtd_table[i]) {
- struct mtd_notifier *not;
-
- mtd_table[i] = mtd;
- mtd->index = i;
- mtd->usecount = 0;
-
- if (is_power_of_2(mtd->erasesize))
- mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
- else
- mtd->erasesize_shift = 0;
-
- if (is_power_of_2(mtd->writesize))
- mtd->writesize_shift = ffs(mtd->writesize) - 1;
- else
- mtd->writesize_shift = 0;
-
- mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
- mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
-
- /* Some chips always power up locked. Unlock them now */
- if ((mtd->flags & MTD_WRITEABLE)
- && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
- if (mtd->unlock(mtd, 0, mtd->size))
- printk(KERN_WARNING
- "%s: unlock failed, "
- "writes may not work\n",
- mtd->name);
- }
+ do {
+ if (!idr_pre_get(&mtd_idr, GFP_KERNEL))
+ goto fail_locked;
+ error = idr_get_new(&mtd_idr, mtd, &i);
+ } while (error == -EAGAIN);
- /* Caller should have set dev.parent to match the
- * physical device.
- */
- mtd->dev.type = &mtd_devtype;
- mtd->dev.class = &mtd_class;
- mtd->dev.devt = MTD_DEVT(i);
- dev_set_name(&mtd->dev, "mtd%d", i);
- dev_set_drvdata(&mtd->dev, mtd);
- if (device_register(&mtd->dev) != 0) {
- mtd_table[i] = NULL;
- break;
- }
+ if (error)
+ goto fail_locked;
- if (MTD_DEVT(i))
- device_create(&mtd_class, mtd->dev.parent,
- MTD_DEVT(i) + 1,
- NULL, "mtd%dro", i);
-
- DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
- /* No need to get a refcount on the module containing
- the notifier, since we hold the mtd_table_mutex */
- list_for_each_entry(not, &mtd_notifiers, list)
- not->add(mtd);
-
- mutex_unlock(&mtd_table_mutex);
- /* We _know_ we aren't being removed, because
- our caller is still holding us here. So none
- of this try_ nonsense, and no bitching about it
- either. :) */
- __module_get(THIS_MODULE);
- return 0;
- }
+ mtd->index = i;
+ mtd->usecount = 0;
+
+ if (is_power_of_2(mtd->erasesize))
+ mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
+ else
+ mtd->erasesize_shift = 0;
+
+ if (is_power_of_2(mtd->writesize))
+ mtd->writesize_shift = ffs(mtd->writesize) - 1;
+ else
+ mtd->writesize_shift = 0;
+
+ mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
+ mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
+
+ /* Some chips always power up locked. Unlock them now */
+ if ((mtd->flags & MTD_WRITEABLE)
+ && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
+ if (mtd->unlock(mtd, 0, mtd->size))
+ printk(KERN_WARNING
+ "%s: unlock failed, writes may not work\n",
+ mtd->name);
+ }
+
+ /* Caller should have set dev.parent to match the
+ * physical device.
+ */
+ mtd->dev.type = &mtd_devtype;
+ mtd->dev.class = &mtd_class;
+ mtd->dev.devt = MTD_DEVT(i);
+ dev_set_name(&mtd->dev, "mtd%d", i);
+ dev_set_drvdata(&mtd->dev, mtd);
+ if (device_register(&mtd->dev) != 0)
+ goto fail_added;
+
+ if (MTD_DEVT(i))
+ device_create(&mtd_class, mtd->dev.parent,
+ MTD_DEVT(i) + 1,
+ NULL, "mtd%dro", i);
+
+ DEBUG(0, "mtd: Giving out device %d to %s\n", i, mtd->name);
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+ list_for_each_entry(not, &mtd_notifiers, list)
+ not->add(mtd);
+
+ mutex_unlock(&mtd_table_mutex);
+ /* We _know_ we aren't being removed, because
+ our caller is still holding us here. So none
+ of this try_ nonsense, and no bitching about it
+ either. :) */
+ __module_get(THIS_MODULE);
+ return 0;
+fail_added:
+ idr_remove(&mtd_idr, i);
+fail_locked:
mutex_unlock(&mtd_table_mutex);
return 1;
}
@@ -371,31 +381,34 @@ int add_mtd_device(struct mtd_info *mtd)
int del_mtd_device (struct mtd_info *mtd)
{
int ret;
+ struct mtd_notifier *not;
mutex_lock(&mtd_table_mutex);
- if (mtd_table[mtd->index] != mtd) {
+ if (idr_find(&mtd_idr, mtd->index) != mtd) {
ret = -ENODEV;
- } else if (mtd->usecount) {
+ goto out_error;
+ }
+
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+ list_for_each_entry(not, &mtd_notifiers, list)
+ not->remove(mtd);
+
+ if (mtd->usecount) {
printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
mtd->index, mtd->name, mtd->usecount);
ret = -EBUSY;
} else {
- struct mtd_notifier *not;
-
device_unregister(&mtd->dev);
- /* No need to get a refcount on the module containing
- the notifier, since we hold the mtd_table_mutex */
- list_for_each_entry(not, &mtd_notifiers, list)
- not->remove(mtd);
-
- mtd_table[mtd->index] = NULL;
+ idr_remove(&mtd_idr, mtd->index);
module_put(THIS_MODULE);
ret = 0;
}
+out_error:
mutex_unlock(&mtd_table_mutex);
return ret;
}
@@ -411,7 +424,7 @@ int del_mtd_device (struct mtd_info *mtd)
void register_mtd_user (struct mtd_notifier *new)
{
- int i;
+ struct mtd_info *mtd;
mutex_lock(&mtd_table_mutex);
@@ -419,9 +432,8 @@ void register_mtd_user (struct mtd_notifier *new)
__module_get(THIS_MODULE);
- for (i=0; i< MAX_MTD_DEVICES; i++)
- if (mtd_table[i])
- new->add(mtd_table[i]);
+ mtd_for_each_device(mtd)
+ new->add(mtd);
mutex_unlock(&mtd_table_mutex);
}
@@ -438,15 +450,14 @@ void register_mtd_user (struct mtd_notifier *new)
int unregister_mtd_user (struct mtd_notifier *old)
{
- int i;
+ struct mtd_info *mtd;
mutex_lock(&mtd_table_mutex);
module_put(THIS_MODULE);
- for (i=0; i< MAX_MTD_DEVICES; i++)
- if (mtd_table[i])
- old->remove(mtd_table[i]);
+ mtd_for_each_device(mtd)
+ old->remove(mtd);
list_del(&old->list);
mutex_unlock(&mtd_table_mutex);
@@ -468,42 +479,56 @@ int unregister_mtd_user (struct mtd_notifier *old)
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
- struct mtd_info *ret = NULL;
- int i, err = -ENODEV;
+ struct mtd_info *ret = NULL, *other;
+ int err = -ENODEV;
mutex_lock(&mtd_table_mutex);
if (num == -1) {
- for (i=0; i< MAX_MTD_DEVICES; i++)
- if (mtd_table[i] == mtd)
- ret = mtd_table[i];
- } else if (num >= 0 && num < MAX_MTD_DEVICES) {
- ret = mtd_table[num];
+ mtd_for_each_device(other) {
+ if (other == mtd) {
+ ret = mtd;
+ break;
+ }
+ }
+ } else if (num >= 0) {
+ ret = idr_find(&mtd_idr, num);
if (mtd && mtd != ret)
ret = NULL;
}
- if (!ret)
- goto out_unlock;
-
- if (!try_module_get(ret->owner))
- goto out_unlock;
-
- if (ret->get_device) {
- err = ret->get_device(ret);
- if (err)
- goto out_put;
+ if (!ret) {
+ ret = ERR_PTR(err);
+ goto out;
}
- ret->usecount++;
+ err = __get_mtd_device(ret);
+ if (err)
+ ret = ERR_PTR(err);
+out:
mutex_unlock(&mtd_table_mutex);
return ret;
+}
-out_put:
- module_put(ret->owner);
-out_unlock:
- mutex_unlock(&mtd_table_mutex);
- return ERR_PTR(err);
+
+int __get_mtd_device(struct mtd_info *mtd)
+{
+ int err;
+
+ if (!try_module_get(mtd->owner))
+ return -ENODEV;
+
+ if (mtd->get_device) {
+
+ err = mtd->get_device(mtd);
+
+ if (err) {
+ module_put(mtd->owner);
+ return err;
+ }
+ }
+ mtd->usecount++;
+ return 0;
}
/**
@@ -517,14 +542,14 @@ out_unlock:
struct mtd_info *get_mtd_device_nm(const char *name)
{
- int i, err = -ENODEV;
- struct mtd_info *mtd = NULL;
+ int err = -ENODEV;
+ struct mtd_info *mtd = NULL, *other;
mutex_lock(&mtd_table_mutex);
- for (i = 0; i < MAX_MTD_DEVICES; i++) {
- if (mtd_table[i] && !strcmp(name, mtd_table[i]->name)) {
- mtd = mtd_table[i];
+ mtd_for_each_device(other) {
+ if (!strcmp(name, other->name)) {
+ mtd = other;
break;
}
}
@@ -554,14 +579,19 @@ out_unlock:
void put_mtd_device(struct mtd_info *mtd)
{
- int c;
-
mutex_lock(&mtd_table_mutex);
- c = --mtd->usecount;
+ __put_mtd_device(mtd);
+ mutex_unlock(&mtd_table_mutex);
+
+}
+
+void __put_mtd_device(struct mtd_info *mtd)
+{
+ --mtd->usecount;
+ BUG_ON(mtd->usecount < 0);
+
if (mtd->put_device)
mtd->put_device(mtd);
- mutex_unlock(&mtd_table_mutex);
- BUG_ON(c < 0);
module_put(mtd->owner);
}
@@ -599,7 +629,9 @@ EXPORT_SYMBOL_GPL(add_mtd_device);
EXPORT_SYMBOL_GPL(del_mtd_device);
EXPORT_SYMBOL_GPL(get_mtd_device);
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
+EXPORT_SYMBOL_GPL(__get_mtd_device);
EXPORT_SYMBOL_GPL(put_mtd_device);
+EXPORT_SYMBOL_GPL(__put_mtd_device);
EXPORT_SYMBOL_GPL(register_mtd_user);
EXPORT_SYMBOL_GPL(unregister_mtd_user);
EXPORT_SYMBOL_GPL(default_mtd_writev);
@@ -611,14 +643,9 @@ EXPORT_SYMBOL_GPL(default_mtd_writev);
static struct proc_dir_entry *proc_mtd;
-static inline int mtd_proc_info (char *buf, int i)
+static inline int mtd_proc_info(char *buf, struct mtd_info *this)
{
- struct mtd_info *this = mtd_table[i];
-
- if (!this)
- return 0;
-
- return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", i,
+ return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", this->index,
(unsigned long long)this->size,
this->erasesize, this->name);
}
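
With the fixed table gone, each /proc/mtd entry is produced per registered device; the format string above yields lines such as mtd0: 00800000 00020000 "boot". A tiny standalone printf with illustrative values shows the shape of one line:

#include <stdio.h>

int main(void)
{
	unsigned long long size = 0x00800000;	/* 8 MiB, illustrative */
	unsigned int erasesize = 0x00020000;	/* 128 KiB, illustrative */

	/* Same format string as mtd_proc_info() above. */
	printf("mtd%d: %8.8llx %8.8x \"%s\"\n", 0, size, erasesize, "boot");
	return 0;
}
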
@@ -626,15 +653,15 @@ static inline int mtd_proc_info (char *buf, int i)
static int mtd_read_proc (char *page, char **start, off_t off, int count,
int *eof, void *data_unused)
{
- int len, l, i;
+ struct mtd_info *mtd;
+ int len, l;
off_t begin = 0;
mutex_lock(&mtd_table_mutex);
len = sprintf(page, "dev: size erasesize name\n");
- for (i=0; i< MAX_MTD_DEVICES; i++) {
-
- l = mtd_proc_info(page + len, i);
+ mtd_for_each_device(mtd) {
+ l = mtd_proc_info(page + len, mtd);
len += l;
if (len+begin > off+count)
goto done;
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index a33251f..6a64fde 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -8,4 +8,9 @@
should not use them for _anything_ else */
extern struct mutex mtd_table_mutex;
-extern struct mtd_info *mtd_table[MAX_MTD_DEVICES];
+extern struct mtd_info *__mtd_next_device(int i);
+
+#define mtd_for_each_device(mtd) \
+ for ((mtd) = __mtd_next_device(0); \
+ (mtd) != NULL; \
+ (mtd) = __mtd_next_device(mtd->index + 1))
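
mtd_for_each_device() walks whatever indices are populated in the IDR: __mtd_next_device(i) returns the first registered device whose index is at least i, and the macro then continues from index + 1. A small userspace model of that sparse walk, with an array standing in for the IDR and an out-parameter standing in for mtd->index (all names here are illustrative):

#include <stdio.h>

#define MAX_DEV 8

static const char *table[MAX_DEV] = {
	[1] = "mtd1", [4] = "mtd4", [5] = "mtd5",	/* sparse, like an IDR */
};

/* Model of __mtd_next_device(): first populated slot at index >= i. */
static const char *next_device(int i, int *idx)
{
	for (; i < MAX_DEV; i++)
		if (table[i]) {
			*idx = i;
			return table[i];
		}
	return NULL;
}

#define for_each_device(d, i) \
	for ((d) = next_device(0, &(i)); (d); (d) = next_device((i) + 1, &(i)))

int main(void)
{
	const char *d;
	int i;

	for_each_device(d, i)
		printf("%s at index %d\n", d, i);
	return 0;
}
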
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 92e12df..328313c 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -429,11 +429,6 @@ static int __init mtdoops_init(void)
mtd_index = simple_strtoul(mtddev, &endp, 0);
if (*endp == '\0')
cxt->mtd_index = mtd_index;
- if (cxt->mtd_index > MAX_MTD_DEVICES) {
- printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n",
- mtd_index);
- return -EINVAL;
- }
cxt->oops_buf = vmalloc(record_size);
if (!cxt->oops_buf) {
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 7c00319..bd9a443 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -152,18 +152,12 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n",
dev_name + 4);
- for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) {
- mtd = get_mtd_device(NULL, mtdnr);
- if (!IS_ERR(mtd)) {
- if (!strcmp(mtd->name, dev_name + 4))
- return get_sb_mtd_aux(
- fs_type, flags,
- dev_name, data, mtd,
- fill_super, mnt);
-
- put_mtd_device(mtd);
- }
- }
+ mtd = get_mtd_device_nm(dev_name + 4);
+ if (!IS_ERR(mtd))
+ return get_sb_mtd_aux(
+ fs_type, flags,
+ dev_name, data, mtd,
+ fill_super, mnt);
printk(KERN_NOTICE "MTD:"
" MTD device with name \"%s\" not found.\n",
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 42e5ea49..98a04b3 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -2,11 +2,23 @@ menuconfig MTD_NAND
tristate "NAND Device Support"
depends on MTD
select MTD_NAND_IDS
+ select MTD_NAND_ECC
help
This enables support for accessing all type of NAND flash
devices. For further information see
<http://www.linux-mtd.infradead.org/doc/nand.html>.
+config MTD_NAND_ECC
+ tristate
+
+config MTD_NAND_ECC_SMC
+ bool "NAND ECC Smart Media byte order"
+ depends on MTD_NAND_ECC
+ default n
+ help
+ Software ECC according to the Smart Media Specification.
+ The original Linux implementation had byte 0 and 1 swapped.
+
if MTD_NAND
config MTD_NAND_VERIFY_WRITE
@@ -18,12 +30,9 @@ config MTD_NAND_VERIFY_WRITE
device thinks the write was successful, a bit could have been
flipped accidentally due to device wear or something else.
-config MTD_NAND_ECC_SMC
- bool "NAND ECC Smart Media byte order"
+config MTD_SM_COMMON
+ tristate
default n
- help
- Software ECC according to the Smart Media Specification.
- The original Linux implementation had byte 0 and 1 swapped.
config MTD_NAND_MUSEUM_IDS
bool "Enable chip ids for obsolete ancient NAND devices"
@@ -41,6 +50,23 @@ config MTD_NAND_AUTCPU12
This enables the driver for the autronix autcpu12 board to
access the SmartMediaCard.
+config MTD_NAND_DENALI
+ depends on PCI
+ tristate "Support Denali NAND controller on Intel Moorestown"
+ help
+ Enable the driver for NAND flash on Intel Moorestown, using the
+ Denali NAND controller core.
+
+config MTD_NAND_DENALI_SCRATCH_REG_ADDR
+ hex "Denali NAND size scratch register address"
+ default "0xFF108018"
+ help
+ Some platforms place the NAND chip size in a scratch register
+ because (some versions of) the driver aren't able to automatically
+ determine the size of certain chips. Set the address of the
+ scratch register here to enable this feature. On Intel Moorestown
+ boards, the scratch register is at 0xFF108018.
+
config MTD_NAND_EDB7312
tristate "Support for Cirrus Logic EBD7312 evaluation board"
depends on ARCH_EDB7312
@@ -95,15 +121,21 @@ config MTD_NAND_OMAP_PREFETCH_DMA
or in DMA interrupt mode.
Say y for DMA mode or MPU mode will be used
-config MTD_NAND_TS7250
- tristate "NAND Flash device on TS-7250 board"
- depends on MACH_TS72XX
- help
- Support for NAND flash on Technologic Systems TS-7250 platform.
-
config MTD_NAND_IDS
tristate
+config MTD_NAND_RICOH
+ tristate "Ricoh xD card reader"
+ default n
+ depends on PCI
+ select MTD_SM_COMMON
+ help
+	  Enable support for the Ricoh R5C852 xD card reader.
+	  You also need to enable either the
+	  'NAND SSFDC (SmartMedia) read only translation layer' or the new,
+	  experimental, read-write
+	  'SmartMedia/xD new translation layer'.
+
config MTD_NAND_AU1550
tristate "Au1550/1200 NAND support"
depends on SOC_AU1200 || SOC_AU1550
@@ -358,8 +390,6 @@ config MTD_NAND_ATMEL_ECC_NONE
If unsure, say N
- endchoice
-
endchoice
config MTD_NAND_PXA3xx
@@ -442,6 +472,13 @@ config MTD_NAND_FSL_UPM
Enables support for NAND Flash chips wired onto Freescale PowerPC
processor localbus with User-Programmable Machine support.
+config MTD_NAND_MPC5121_NFC
+ tristate "MPC5121 built-in NAND Flash Controller support"
+ depends on PPC_MPC512x
+ help
+ This enables the driver for the NAND flash controller on the
+ MPC5121 SoC.
+
config MTD_NAND_MXC
tristate "MXC NAND support"
depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3
@@ -481,11 +518,11 @@ config MTD_NAND_SOCRATES
help
Enables support for NAND Flash chips wired onto Socrates board.
-config MTD_NAND_W90P910
- tristate "Support for NAND on w90p910 evaluation board."
+config MTD_NAND_NUC900
+ tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
depends on ARCH_W90X900 && MTD_PARTITIONS
help
This enables the driver for the NAND Flash on evaluation board based
- on w90p910.
+ on w90p910 / NUC9xx.
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 1407bd1..e8ab884 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -2,13 +2,16 @@
# linux/drivers/nand/Makefile
#
-obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o
+obj-$(CONFIG_MTD_NAND) += nand.o
+obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o
obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
+obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
+obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
@@ -19,7 +22,6 @@ obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
-obj-$(CONFIG_MTD_NAND_TS7250) += ts7250.o
obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
@@ -39,8 +41,10 @@ obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
-obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o
+obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
+obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
+obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 2d67732..8691e04 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -49,7 +49,7 @@
#define TIMEOUT HZ
-static struct usb_device_id alauda_table [] = {
+static const struct usb_device_id alauda_table[] = {
{ USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */
{ USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */
{ }
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 524e6c9..04d30887c 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -474,7 +474,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
}
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1)) {
+ if (nand_scan_ident(mtd, 1, NULL)) {
res = -ENXIO;
goto err_scan_ident;
}
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 43d46e4..3ffe05d 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -451,7 +451,7 @@ static int __init au1xxx_nand_init(void)
u32 nand_phys;
/* Allocate memory for MTD device structure and private data */
- au1550_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
+ au1550_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
if (!au1550_mtd) {
printk("Unable to allocate NAND MTD dev structure.\n");
return -ENOMEM;
@@ -460,10 +460,6 @@ static int __init au1xxx_nand_init(void)
/* Get pointer to private data */
this = (struct nand_chip *)(&au1550_mtd[1]);
- /* Initialize structures */
- memset(au1550_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
/* Link the private data with the MTD structure */
au1550_mtd->priv = this;
au1550_mtd->owner = THIS_MODULE;
@@ -544,7 +540,7 @@ static int __init au1xxx_nand_init(void)
}
nand_phys = (mem_staddr << 4) & 0xFFFC0000;
- p_nand = (void __iomem *)ioremap(nand_phys, 0x1000);
+ p_nand = ioremap(nand_phys, 0x1000);
/* make controller and MTD agree */
if (NAND_CS == 0)
@@ -589,7 +585,7 @@ static int __init au1xxx_nand_init(void)
return 0;
outio:
- iounmap((void *)p_nand);
+ iounmap(p_nand);
outmem:
kfree(au1550_mtd);
@@ -610,7 +606,7 @@ static void __exit au1550_cleanup(void)
kfree(au1550_mtd);
/* Unmap */
- iounmap((void *)p_nand);
+ iounmap(p_nand);
}
module_exit(au1550_cleanup);
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index c997f98..dfe262c 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -13,7 +13,6 @@
*****************************************************************************/
/* ---- Include Files ---------------------------------------------------- */
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -447,7 +446,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
* layout we'll be using.
*/
- err = nand_scan_ident(board_mtd, 1);
+ err = nand_scan_ident(board_mtd, 1, NULL);
if (err) {
printk(KERN_ERR "nand_scan failed: %d\n", err);
iounmap(bcm_umi_io_base);
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 8506e7e..2974995 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -68,6 +68,27 @@
#define DRV_AUTHOR "Bryan Wu <bryan.wu@analog.com>"
#define DRV_DESC "BF5xx on-chip NAND FLash Controller Driver"
+/* NFC_STAT Masks */
+#define NBUSY 0x01 /* Not Busy */
+#define WB_FULL 0x02 /* Write Buffer Full */
+#define PG_WR_STAT 0x04 /* Page Write Pending */
+#define PG_RD_STAT 0x08 /* Page Read Pending */
+#define WB_EMPTY 0x10 /* Write Buffer Empty */
+
+/* NFC_IRQSTAT Masks */
+#define NBUSYIRQ 0x01 /* Not Busy IRQ */
+#define WB_OVF 0x02 /* Write Buffer Overflow */
+#define WB_EDGE 0x04 /* Write Buffer Edge Detect */
+#define RD_RDY 0x08 /* Read Data Ready */
+#define WR_DONE 0x10 /* Page Write Done */
+
+/* NFC_RST Masks */
+#define ECC_RST 0x01 /* ECC (and NFC counters) Reset */
+
+/* NFC_PGCTL Masks */
+#define PG_RD_START 0x01 /* Page Read Start */
+#define PG_WR_START 0x02 /* Page Write Start */
+
#ifdef CONFIG_MTD_NAND_BF5XX_HWECC
static int hardware_ecc = 1;
#else
@@ -487,7 +508,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
* transferred to generate the correct ECC register
* values.
*/
- bfin_write_NFC_RST(0x1);
+ bfin_write_NFC_RST(ECC_RST);
SSYNC();
disable_dma(CH_NFC);
@@ -497,7 +518,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
set_dma_config(CH_NFC, 0x0);
set_dma_start_addr(CH_NFC, (unsigned long) buf);
-/* The DMAs have different size on BF52x and BF54x */
+ /* The DMAs have different size on BF52x and BF54x */
#ifdef CONFIG_BF52x
set_dma_x_count(CH_NFC, (page_size >> 1));
set_dma_x_modify(CH_NFC, 2);
@@ -517,9 +538,9 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
/* Start PAGE read/write operation */
if (is_read)
- bfin_write_NFC_PGCTL(0x1);
+ bfin_write_NFC_PGCTL(PG_RD_START);
else
- bfin_write_NFC_PGCTL(0x2);
+ bfin_write_NFC_PGCTL(PG_WR_START);
wait_for_completion(&info->dma_completion);
}
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index e5a9f9c..db1dfc5 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -762,7 +762,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK));
/* Scan to find existence of the device */
- if (nand_scan_ident(mtd, 2)) {
+ if (nand_scan_ident(mtd, 2, NULL)) {
err = -ENXIO;
goto out_irq;
}
@@ -849,7 +849,7 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
kfree(mtd);
}
-static struct pci_device_id cafe_nand_tbl[] = {
+static const struct pci_device_id cafe_nand_tbl[] = {
{ PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND,
PCI_ANY_ID, PCI_ANY_ID },
{ }
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 76e2dc8..9c9d893 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -567,8 +567,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
goto err_nomem;
}
- vaddr = ioremap(res1->start, res1->end - res1->start);
- base = ioremap(res2->start, res2->end - res2->start);
+ vaddr = ioremap(res1->start, resource_size(res1));
+ base = ioremap(res2->start, resource_size(res2));
if (!vaddr || !base) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -EINVAL;
@@ -691,7 +691,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
- ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1);
+ ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
goto err_scan;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
new file mode 100644
index 0000000..ca03428
--- /dev/null
+++ b/drivers/mtd/nand/denali.c
@@ -0,0 +1,2134 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright © 2009-2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/mtd/mtd.h>
+#include <linux/module.h>
+
+#include "denali.h"
+
+MODULE_LICENSE("GPL");
+
+/* We define a module parameter that allows the user to override
+ * the hardware and decide what timing mode should be used.
+ */
+#define NAND_DEFAULT_TIMINGS -1
+
+static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
+module_param(onfi_timing_mode, int, S_IRUGO);
+MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting. -1 indicates"
+			" use of default timings");
+
+#define DENALI_NAND_NAME "denali-nand"
+
+/* We define a macro here that combines all interrupts this driver uses into
+ * a single constant value, for convenience. */
+#define DENALI_IRQ_ALL (INTR_STATUS0__DMA_CMD_COMP | \
+ INTR_STATUS0__ECC_TRANSACTION_DONE | \
+ INTR_STATUS0__ECC_ERR | \
+ INTR_STATUS0__PROGRAM_FAIL | \
+ INTR_STATUS0__LOAD_COMP | \
+ INTR_STATUS0__PROGRAM_COMP | \
+ INTR_STATUS0__TIME_OUT | \
+ INTR_STATUS0__ERASE_FAIL | \
+ INTR_STATUS0__RST_COMP | \
+ INTR_STATUS0__ERASE_COMP)
+
+/* indicates whether the internal value for the flash bank is valid */
+#define CHIP_SELECT_INVALID -1
+
+#define SUPPORT_8BITECC 1
+
+/* This macro divides two integers and rounds fractional values up
+ * to the nearest integer value. */
+#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
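+/* For example, CEIL_DIV(10, 4) evaluates to 3 and CEIL_DIV(8, 4) to 2. */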
+
+/* this macro allows us to convert from an MTD structure to our own
+ * device context (denali) structure.
+ */
+#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)
+
+/* These constants are defined by the driver to enable common driver
+ configuration options. */
+#define SPARE_ACCESS 0x41
+#define MAIN_ACCESS 0x42
+#define MAIN_SPARE_ACCESS 0x43
+
+#define DENALI_READ 0
+#define DENALI_WRITE 0x100
+
+/* types of device accesses. We can issue commands and get status */
+#define COMMAND_CYCLE 0
+#define ADDR_CYCLE 1
+#define STATUS_CYCLE 2
+
+/* this is a helper macro that allows us to
+ * format the bank into the proper bits for the controller */
+#define BANK(x) ((x) << 24)
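+/* e.g. BANK(1) is 0x01000000, shifting the bank index into bits 31:24 of the
+ * command word passed to index_addr() or written to flash_mem. */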
+
+/* List of platforms this NAND controller has been integrated into */
+static const struct pci_device_id denali_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
+ { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
+ { /* end: all zeroes */ }
+};
+
+
+/* these are static lookup tables that give us easy access to
+ registers in the NAND controller.
+ */
+static const uint32_t intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1,
+ INTR_STATUS2,
+ INTR_STATUS3};
+
+static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0,
+ DEVICE_RESET__BANK1,
+ DEVICE_RESET__BANK2,
+ DEVICE_RESET__BANK3};
+
+static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT,
+ INTR_STATUS1__TIME_OUT,
+ INTR_STATUS2__TIME_OUT,
+ INTR_STATUS3__TIME_OUT};
+
+static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP,
+ INTR_STATUS1__RST_COMP,
+ INTR_STATUS2__RST_COMP,
+ INTR_STATUS3__RST_COMP};
+
+/* specifies the debug level of the driver */
+static int nand_debug_level = 0;
+
+/* forward declarations */
+static void clear_interrupts(struct denali_nand_info *denali);
+static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask);
+static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask);
+static uint32_t read_interrupt_status(struct denali_nand_info *denali);
+
+#define DEBUG_DENALI 0
+
+/* This is a wrapper for writing to the denali registers.
+ * this allows us to create debug information so we can
+ * observe how the driver is programming the device.
+ * it uses standard linux convention for (val, addr) */
+static void denali_write32(uint32_t value, void *addr)
+{
+ iowrite32(value, addr);
+
+#if DEBUG_DENALI
+ printk(KERN_ERR "wrote: 0x%x -> 0x%x\n", value, (uint32_t)((uint32_t)addr & 0x1fff));
+#endif
+}
+
+/* Certain operations for the denali NAND controller use an indexed mode to read/write
+ data. The operation is performed by writing the address value of the command to
+ the device memory followed by the data. This function abstracts this common
+ operation.
+*/
+static void index_addr(struct denali_nand_info *denali, uint32_t address, uint32_t data)
+{
+ denali_write32(address, denali->flash_mem);
+ denali_write32(data, denali->flash_mem + 0x10);
+}
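+/* For example, read_status() below issues the NAND Read Status command via
+ * index_addr(denali, MODE_11 | BANK(bank) | COMMAND_CYCLE, 0x70). */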
+
+/* Perform an indexed read of the device */
+static void index_addr_read_data(struct denali_nand_info *denali,
+ uint32_t address, uint32_t *pdata)
+{
+ denali_write32(address, denali->flash_mem);
+ *pdata = ioread32(denali->flash_mem + 0x10);
+}
+
+/* We need to buffer some data for some of the NAND core routines.
+ * The operations manage buffering that data. */
+static void reset_buf(struct denali_nand_info *denali)
+{
+ denali->buf.head = denali->buf.tail = 0;
+}
+
+static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
+{
+ BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf));
+ denali->buf.buf[denali->buf.tail++] = byte;
+}
+
+/* reads the status of the device */
+static void read_status(struct denali_nand_info *denali)
+{
+ uint32_t cmd = 0x0;
+
+ /* initialize the data buffer to store status */
+ reset_buf(denali);
+
+ /* initiate a device status read */
+ cmd = MODE_11 | BANK(denali->flash_bank);
+ index_addr(denali, cmd | COMMAND_CYCLE, 0x70);
+ denali_write32(cmd | STATUS_CYCLE, denali->flash_mem);
+
+ /* update buffer with status value */
+ write_byte_to_buf(denali, ioread32(denali->flash_mem + 0x10));
+
+#if DEBUG_DENALI
+ printk("device reporting status value of 0x%2x\n", denali->buf.buf[0]);
+#endif
+}
+
+/* resets a specific device connected to the core */
+static void reset_bank(struct denali_nand_info *denali)
+{
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = reset_complete[denali->flash_bank] |
+ operation_timeout[denali->flash_bank];
+ int bank = 0;
+
+ clear_interrupts(denali);
+
+ bank = device_reset_banks[denali->flash_bank];
+ denali_write32(bank, denali->flash_reg + DEVICE_RESET);
+
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status & operation_timeout[denali->flash_bank])
+ {
+ printk(KERN_ERR "reset bank failed.\n");
+ }
+}
+
+/* Reset the flash controller */
+static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali)
+{
+ uint32_t i;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
+ denali_write32(reset_complete[i] | operation_timeout[i],
+ denali->flash_reg + intr_status_addresses[i]);
+
+ for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
+ denali_write32(device_reset_banks[i], denali->flash_reg + DEVICE_RESET);
+ while (!(ioread32(denali->flash_reg + intr_status_addresses[i]) &
+ (reset_complete[i] | operation_timeout[i])))
+ ;
+ if (ioread32(denali->flash_reg + intr_status_addresses[i]) &
+ operation_timeout[i])
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Reset operation timed out on bank %d\n", i);
+ }
+
+ for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
+ denali_write32(reset_complete[i] | operation_timeout[i],
+ denali->flash_reg + intr_status_addresses[i]);
+
+ return PASS;
+}
+
+/* This routine calculates the ONFI timing values for a given mode and
+ * programs the clocking register accordingly. The mode is determined by the
+ * get_onfi_nand_para routine.
+ */
+static void NAND_ONFi_Timing_Mode(struct denali_nand_info *denali, uint16_t mode)
+{
+ uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
+ uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
+ uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
+ uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
+ uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
+ uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
+ uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
+ uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
+ uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
+ uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
+ uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
+ uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};
+
+ uint16_t TclsRising = 1;
+ uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
+ uint16_t dv_window = 0;
+ uint16_t en_lo, en_hi;
+ uint16_t acc_clks;
+ uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ en_lo = CEIL_DIV(Trp[mode], CLK_X);
+ en_hi = CEIL_DIV(Treh[mode], CLK_X);
+#if ONFI_BLOOM_TIME
+ if ((en_hi * CLK_X) < (Treh[mode] + 2))
+ en_hi++;
+#endif
+
+ if ((en_lo + en_hi) * CLK_X < Trc[mode])
+ en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
+
+ if ((en_lo + en_hi) < CLK_MULTI)
+ en_lo += CLK_MULTI - en_lo - en_hi;
+
+ while (dv_window < 8) {
+ data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
+
+ data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
+
+ data_invalid =
+ data_invalid_rhoh <
+ data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
+
+ dv_window = data_invalid - Trea[mode];
+
+ if (dv_window < 8)
+ en_lo++;
+ }
+
+ acc_clks = CEIL_DIV(Trea[mode], CLK_X);
+
+ while (((acc_clks * CLK_X) - Trea[mode]) < 3)
+ acc_clks++;
+
+ if ((data_invalid - acc_clks * CLK_X) < 2)
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
+ __FILE__, __LINE__);
+
+ addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
+ re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
+ re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
+ we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
+ cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
+ if (!TclsRising)
+ cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
+ if (cs_cnt == 0)
+ cs_cnt = 1;
+
+ if (Tcea[mode]) {
+ while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
+ cs_cnt++;
+ }
+
+#if MODE5_WORKAROUND
+ if (mode == 5)
+ acc_clks = 5;
+#endif
+
+ /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
+ if ((ioread32(denali->flash_reg + MANUFACTURER_ID) == 0) &&
+ (ioread32(denali->flash_reg + DEVICE_ID) == 0x88))
+ acc_clks = 6;
+
+ denali_write32(acc_clks, denali->flash_reg + ACC_CLKS);
+ denali_write32(re_2_we, denali->flash_reg + RE_2_WE);
+ denali_write32(re_2_re, denali->flash_reg + RE_2_RE);
+ denali_write32(we_2_re, denali->flash_reg + WE_2_RE);
+ denali_write32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
+ denali_write32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
+ denali_write32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
+ denali_write32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
+}
+
+/* configures the initial ECC settings for the controller */
+static void set_ecc_config(struct denali_nand_info *denali)
+{
+#if SUPPORT_8BITECC
+ if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
+ (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) <= 128))
+ denali_write32(8, denali->flash_reg + ECC_CORRECTION);
+#endif
+
+ if ((ioread32(denali->flash_reg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
+ == 1) {
+ denali->dev_info.wECCBytesPerSector = 4;
+ denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected;
+ denali->dev_info.wNumPageSpareFlag =
+ denali->dev_info.wPageSpareSize -
+ denali->dev_info.wPageDataSize /
+ (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
+ denali->dev_info.wECCBytesPerSector
+ - denali->dev_info.wSpareSkipBytes;
+ } else {
+ denali->dev_info.wECCBytesPerSector =
+ (ioread32(denali->flash_reg + ECC_CORRECTION) &
+ ECC_CORRECTION__VALUE) * 13 / 8;
+ if ((denali->dev_info.wECCBytesPerSector) % 2 == 0)
+ denali->dev_info.wECCBytesPerSector += 2;
+ else
+ denali->dev_info.wECCBytesPerSector += 1;
+
+ denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected;
+ denali->dev_info.wNumPageSpareFlag = denali->dev_info.wPageSpareSize -
+ denali->dev_info.wPageDataSize /
+ (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
+ denali->dev_info.wECCBytesPerSector
+ - denali->dev_info.wSpareSkipBytes;
+ }
+}
+
+/* queries the NAND device to see what ONFI modes it supports. */
+static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
+{
+ int i;
+ uint16_t blks_lun_l, blks_lun_h, n_of_luns;
+ uint32_t blockperlun, id;
+
+ denali_write32(DEVICE_RESET__BANK0, denali->flash_reg + DEVICE_RESET);
+
+ while (!((ioread32(denali->flash_reg + INTR_STATUS0) &
+ INTR_STATUS0__RST_COMP) |
+ (ioread32(denali->flash_reg + INTR_STATUS0) &
+ INTR_STATUS0__TIME_OUT)))
+ ;
+
+ if (ioread32(denali->flash_reg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
+ denali_write32(DEVICE_RESET__BANK1, denali->flash_reg + DEVICE_RESET);
+ while (!((ioread32(denali->flash_reg + INTR_STATUS1) &
+ INTR_STATUS1__RST_COMP) |
+ (ioread32(denali->flash_reg + INTR_STATUS1) &
+ INTR_STATUS1__TIME_OUT)))
+ ;
+
+ if (ioread32(denali->flash_reg + INTR_STATUS1) &
+ INTR_STATUS1__RST_COMP) {
+ denali_write32(DEVICE_RESET__BANK2,
+ denali->flash_reg + DEVICE_RESET);
+ while (!((ioread32(denali->flash_reg + INTR_STATUS2) &
+ INTR_STATUS2__RST_COMP) |
+ (ioread32(denali->flash_reg + INTR_STATUS2) &
+ INTR_STATUS2__TIME_OUT)))
+ ;
+
+ if (ioread32(denali->flash_reg + INTR_STATUS2) &
+ INTR_STATUS2__RST_COMP) {
+ denali_write32(DEVICE_RESET__BANK3,
+ denali->flash_reg + DEVICE_RESET);
+ while (!((ioread32(denali->flash_reg + INTR_STATUS3) &
+ INTR_STATUS3__RST_COMP) |
+ (ioread32(denali->flash_reg + INTR_STATUS3) &
+ INTR_STATUS3__TIME_OUT)))
+ ;
+ } else {
+ printk(KERN_ERR "Getting a time out for bank 2!\n");
+ }
+ } else {
+ printk(KERN_ERR "Getting a time out for bank 1!\n");
+ }
+ }
+
+ denali_write32(INTR_STATUS0__TIME_OUT, denali->flash_reg + INTR_STATUS0);
+ denali_write32(INTR_STATUS1__TIME_OUT, denali->flash_reg + INTR_STATUS1);
+ denali_write32(INTR_STATUS2__TIME_OUT, denali->flash_reg + INTR_STATUS2);
+ denali_write32(INTR_STATUS3__TIME_OUT, denali->flash_reg + INTR_STATUS3);
+
+ denali->dev_info.wONFIDevFeatures =
+ ioread32(denali->flash_reg + ONFI_DEVICE_FEATURES);
+ denali->dev_info.wONFIOptCommands =
+ ioread32(denali->flash_reg + ONFI_OPTIONAL_COMMANDS);
+ denali->dev_info.wONFITimingMode =
+ ioread32(denali->flash_reg + ONFI_TIMING_MODE);
+ denali->dev_info.wONFIPgmCacheTimingMode =
+ ioread32(denali->flash_reg + ONFI_PGM_CACHE_TIMING_MODE);
+
+ n_of_luns = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
+ ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
+ blks_lun_l = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
+ blks_lun_h = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
+
+ blockperlun = (blks_lun_h << 16) | blks_lun_l;
+
+ denali->dev_info.wTotalBlocks = n_of_luns * blockperlun;
+
+ if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
+ ONFI_TIMING_MODE__VALUE))
+ return FAIL;
+
+ for (i = 5; i > 0; i--) {
+ if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) & (0x01 << i))
+ break;
+ }
+
+ NAND_ONFi_Timing_Mode(denali, i);
+
+ index_addr(denali, MODE_11 | 0, 0x90);
+ index_addr(denali, MODE_11 | 1, 0);
+
+ for (i = 0; i < 3; i++)
+ index_addr_read_data(denali, MODE_11 | 2, &id);
+
+ nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
+
+ denali->dev_info.MLCDevice = id & 0x0C;
+
+ /* By now, all the ONFI devices we know support the page cache */
+ /* rw feature. So here we enable the pipeline_rw_ahead feature */
+ /* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
+ /* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */
+
+ return PASS;
+}
+
+static void get_samsung_nand_para(struct denali_nand_info *denali)
+{
+ uint8_t no_of_planes;
+ uint32_t blk_size;
+ uint64_t plane_size, capacity;
+ uint32_t id_bytes[5];
+ int i;
+
+ index_addr(denali, (uint32_t)(MODE_11 | 0), 0x90);
+ index_addr(denali, (uint32_t)(MODE_11 | 1), 0);
+ for (i = 0; i < 5; i++)
+ index_addr_read_data(denali, (uint32_t)(MODE_11 | 2), &id_bytes[i]);
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
+ id_bytes[0], id_bytes[1], id_bytes[2],
+ id_bytes[3], id_bytes[4]);
+
+ if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
+ /* Set timing register values according to datasheet */
+ denali_write32(5, denali->flash_reg + ACC_CLKS);
+ denali_write32(20, denali->flash_reg + RE_2_WE);
+ denali_write32(12, denali->flash_reg + WE_2_RE);
+ denali_write32(14, denali->flash_reg + ADDR_2_DATA);
+ denali_write32(3, denali->flash_reg + RDWR_EN_LO_CNT);
+ denali_write32(2, denali->flash_reg + RDWR_EN_HI_CNT);
+ denali_write32(2, denali->flash_reg + CS_SETUP_CNT);
+ }
+
+ no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
+ plane_size = (uint64_t)64 << ((id_bytes[4] & 0x70) >> 4);
+ blk_size = 64 << ((ioread32(denali->flash_reg + DEVICE_PARAM_1) & 0x30) >> 4);
+ capacity = (uint64_t)128 * plane_size * no_of_planes;
+
+ do_div(capacity, blk_size);
+ denali->dev_info.wTotalBlocks = capacity;
+}
+
+static void get_toshiba_nand_para(struct denali_nand_info *denali)
+{
+ void __iomem *scratch_reg;
+ uint32_t tmp;
+
+ /* Workaround to fix a controller bug which reports a wrong */
+ /* spare area size for some kind of Toshiba NAND device */
+ if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
+ (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
+ denali_write32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
+ ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ denali_write32(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+#if SUPPORT_15BITECC
+ denali_write32(15, denali->flash_reg + ECC_CORRECTION);
+#elif SUPPORT_8BITECC
+ denali_write32(8, denali->flash_reg + ECC_CORRECTION);
+#endif
+ }
+
+	/* Toshiba NAND cannot report its block count, so the user must */
+	/* provide the correct block number in a scratch register before */
+	/* the Linux NAND driver is loaded. If no valid value is found in */
+	/* the scratch register, the default block number is used. */
+ scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
+ if (!scratch_reg) {
+ printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
+ __FILE__, __LINE__);
+ denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ } else {
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
+ denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg);
+ if (denali->dev_info.wTotalBlocks < 512)
+ denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ iounmap(scratch_reg);
+ }
+}
+
+static void get_hynix_nand_para(struct denali_nand_info *denali)
+{
+ void __iomem *scratch_reg;
+ uint32_t main_size, spare_size;
+
+ switch (denali->dev_info.wDeviceID) {
+ case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
+ case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
+ denali_write32(128, denali->flash_reg + PAGES_PER_BLOCK);
+ denali_write32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
+ denali_write32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ main_size = 4096 * ioread32(denali->flash_reg + DEVICES_CONNECTED);
+ spare_size = 224 * ioread32(denali->flash_reg + DEVICES_CONNECTED);
+ denali_write32(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
+ denali_write32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+ denali_write32(0, denali->flash_reg + DEVICE_WIDTH);
+#if SUPPORT_15BITECC
+ denali_write32(15, denali->flash_reg + ECC_CORRECTION);
+#elif SUPPORT_8BITECC
+ denali_write32(8, denali->flash_reg + ECC_CORRECTION);
+#endif
+ denali->dev_info.MLCDevice = 1;
+ break;
+ default:
+ nand_dbg_print(NAND_DBG_WARN,
+			"Spectra: Unknown Hynix NAND (Device ID: 0x%x). "
+			"Will use default parameter values instead.\n",
+ denali->dev_info.wDeviceID);
+ }
+
+ scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
+ if (!scratch_reg) {
+ printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
+ __FILE__, __LINE__);
+ denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ } else {
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
+ denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg);
+ if (denali->dev_info.wTotalBlocks < 512)
+ denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ iounmap(scratch_reg);
+ }
+}
+
+/* determines how many NAND chips are connected to the controller. Note for
+ Intel CE4100 devices we don't support more than one device.
+ */
+static void find_valid_banks(struct denali_nand_info *denali)
+{
+ uint32_t id[LLD_MAX_FLASH_BANKS];
+ int i;
+
+ denali->total_used_banks = 1;
+ for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
+ index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
+ index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
+ index_addr_read_data(denali, (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Return 1st ID for bank[%d]: %x\n", i, id[i]);
+
+ if (i == 0) {
+ if (!(id[i] & 0x0ff))
+ break; /* WTF? */
+ } else {
+ if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
+ denali->total_used_banks++;
+ else
+ break;
+ }
+ }
+
+ if (denali->platform == INTEL_CE4100)
+ {
+ /* Platform limitations of the CE4100 device limit
+ * users to a single chip solution for NAND.
+ * Multichip support is not enabled.
+ */
+ if (denali->total_used_banks != 1)
+ {
+ printk(KERN_ERR "Sorry, Intel CE4100 only supports "
+ "a single NAND device.\n");
+ BUG();
+ }
+ }
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "denali->total_used_banks: %d\n", denali->total_used_banks);
+}
+
+static void detect_partition_feature(struct denali_nand_info *denali)
+{
+ if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
+ if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) &
+ PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
+ denali->dev_info.wSpectraStartBlock =
+ ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
+ MIN_MAX_BANK_1__MIN_VALUE) *
+ denali->dev_info.wTotalBlocks)
+ +
+ (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) &
+ MIN_BLK_ADDR_1__VALUE);
+
+ denali->dev_info.wSpectraEndBlock =
+ (((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
+ MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
+ denali->dev_info.wTotalBlocks)
+ +
+ (ioread32(denali->flash_reg + MAX_BLK_ADDR_1) &
+ MAX_BLK_ADDR_1__VALUE);
+
+ denali->dev_info.wTotalBlocks *= denali->total_used_banks;
+
+ if (denali->dev_info.wSpectraEndBlock >=
+ denali->dev_info.wTotalBlocks) {
+ denali->dev_info.wSpectraEndBlock =
+ denali->dev_info.wTotalBlocks - 1;
+ }
+
+ denali->dev_info.wDataBlockNum =
+ denali->dev_info.wSpectraEndBlock -
+ denali->dev_info.wSpectraStartBlock + 1;
+ } else {
+ denali->dev_info.wTotalBlocks *= denali->total_used_banks;
+ denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK;
+ denali->dev_info.wSpectraEndBlock =
+ denali->dev_info.wTotalBlocks - 1;
+ denali->dev_info.wDataBlockNum =
+ denali->dev_info.wSpectraEndBlock -
+ denali->dev_info.wSpectraStartBlock + 1;
+ }
+ } else {
+ denali->dev_info.wTotalBlocks *= denali->total_used_banks;
+ denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK;
+ denali->dev_info.wSpectraEndBlock = denali->dev_info.wTotalBlocks - 1;
+ denali->dev_info.wDataBlockNum =
+ denali->dev_info.wSpectraEndBlock -
+ denali->dev_info.wSpectraStartBlock + 1;
+ }
+}
+
+static void dump_device_info(struct denali_nand_info *denali)
+{
+ nand_dbg_print(NAND_DBG_DEBUG, "denali->dev_info:\n");
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
+ denali->dev_info.wDeviceMaker);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
+ denali->dev_info.wDeviceID);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
+ denali->dev_info.wDeviceType);
+ nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
+ denali->dev_info.wSpectraStartBlock);
+ nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
+ denali->dev_info.wSpectraEndBlock);
+ nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
+ denali->dev_info.wTotalBlocks);
+ nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
+ denali->dev_info.wPagesPerBlock);
+ nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
+ denali->dev_info.wPageSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
+ denali->dev_info.wPageDataSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
+ denali->dev_info.wPageSpareSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
+ denali->dev_info.wNumPageSpareFlag);
+ nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
+ denali->dev_info.wECCBytesPerSector);
+ nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
+ denali->dev_info.wBlockSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
+ denali->dev_info.wBlockDataSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
+ denali->dev_info.wDataBlockNum);
+ nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
+ denali->dev_info.bPlaneNum);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
+ denali->dev_info.wDeviceMainAreaSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
+ denali->dev_info.wDeviceSpareAreaSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
+ denali->dev_info.wDevicesConnected);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
+ denali->dev_info.wDeviceWidth);
+ nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
+ denali->dev_info.wHWRevision);
+ nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
+ denali->dev_info.wHWFeatures);
+ nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
+ denali->dev_info.wONFIDevFeatures);
+ nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
+ denali->dev_info.wONFIOptCommands);
+ nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
+ denali->dev_info.wONFITimingMode);
+ nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
+ denali->dev_info.wONFIPgmCacheTimingMode);
+ nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
+ denali->dev_info.MLCDevice ? "Yes" : "No");
+ nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
+ denali->dev_info.wSpareSkipBytes);
+ nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
+ denali->dev_info.nBitsInPageNumber);
+ nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
+ denali->dev_info.nBitsInPageDataSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
+ denali->dev_info.nBitsInBlockDataSize);
+}
+
+static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali)
+{
+ uint16_t status = PASS;
+ uint8_t no_of_planes;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ denali->dev_info.wDeviceMaker = ioread32(denali->flash_reg + MANUFACTURER_ID);
+ denali->dev_info.wDeviceID = ioread32(denali->flash_reg + DEVICE_ID);
+ denali->dev_info.bDeviceParam0 = ioread32(denali->flash_reg + DEVICE_PARAM_0);
+ denali->dev_info.bDeviceParam1 = ioread32(denali->flash_reg + DEVICE_PARAM_1);
+ denali->dev_info.bDeviceParam2 = ioread32(denali->flash_reg + DEVICE_PARAM_2);
+
+ denali->dev_info.MLCDevice = ioread32(denali->flash_reg + DEVICE_PARAM_0) & 0x0c;
+
+ if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
+ ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
+ if (FAIL == get_onfi_nand_para(denali))
+ return FAIL;
+ } else if (denali->dev_info.wDeviceMaker == 0xEC) { /* Samsung NAND */
+ get_samsung_nand_para(denali);
+ } else if (denali->dev_info.wDeviceMaker == 0x98) { /* Toshiba NAND */
+ get_toshiba_nand_para(denali);
+ } else if (denali->dev_info.wDeviceMaker == 0xAD) { /* Hynix NAND */
+ get_hynix_nand_para(denali);
+ } else {
+ denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ }
+
+	nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values: "
+			"acc_clks: %d, re_2_we: %d, we_2_re: %d, "
+			"addr_2_data: %d, rdwr_en_lo_cnt: %d, "
+			"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
+ ioread32(denali->flash_reg + ACC_CLKS),
+ ioread32(denali->flash_reg + RE_2_WE),
+ ioread32(denali->flash_reg + WE_2_RE),
+ ioread32(denali->flash_reg + ADDR_2_DATA),
+ ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
+ ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
+ ioread32(denali->flash_reg + CS_SETUP_CNT));
+
+ denali->dev_info.wHWRevision = ioread32(denali->flash_reg + REVISION);
+ denali->dev_info.wHWFeatures = ioread32(denali->flash_reg + FEATURES);
+
+ denali->dev_info.wDeviceMainAreaSize =
+ ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
+ denali->dev_info.wDeviceSpareAreaSize =
+ ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+
+ denali->dev_info.wPageDataSize =
+ ioread32(denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
+
+	/* Note: When using the Micron 4K NAND device, the controller reports
+	 * the Page Spare Size as 216 bytes, but Micron's spec says it is 218
+	 * bytes. Forcing it to 218 bytes keeps the controller from working
+	 * correctly, so leave it as reported. Keep in mind that this bug may
+	 * cause other problems in the future. - Yunpeng 2008-10-10
+	 */
+ denali->dev_info.wPageSpareSize =
+ ioread32(denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+
+ denali->dev_info.wPagesPerBlock = ioread32(denali->flash_reg + PAGES_PER_BLOCK);
+
+ denali->dev_info.wPageSize =
+ denali->dev_info.wPageDataSize + denali->dev_info.wPageSpareSize;
+ denali->dev_info.wBlockSize =
+ denali->dev_info.wPageSize * denali->dev_info.wPagesPerBlock;
+ denali->dev_info.wBlockDataSize =
+ denali->dev_info.wPagesPerBlock * denali->dev_info.wPageDataSize;
+
+ denali->dev_info.wDeviceWidth = ioread32(denali->flash_reg + DEVICE_WIDTH);
+ denali->dev_info.wDeviceType =
+ ((ioread32(denali->flash_reg + DEVICE_WIDTH) > 0) ? 16 : 8);
+
+ denali->dev_info.wDevicesConnected = ioread32(denali->flash_reg + DEVICES_CONNECTED);
+
+ denali->dev_info.wSpareSkipBytes =
+ ioread32(denali->flash_reg + SPARE_AREA_SKIP_BYTES) *
+ denali->dev_info.wDevicesConnected;
+
+ denali->dev_info.nBitsInPageNumber =
+ ilog2(denali->dev_info.wPagesPerBlock);
+ denali->dev_info.nBitsInPageDataSize =
+ ilog2(denali->dev_info.wPageDataSize);
+ denali->dev_info.nBitsInBlockDataSize =
+ ilog2(denali->dev_info.wBlockDataSize);
+
+ set_ecc_config(denali);
+
+ no_of_planes = ioread32(denali->flash_reg + NUMBER_OF_PLANES) &
+ NUMBER_OF_PLANES__VALUE;
+
+ switch (no_of_planes) {
+ case 0:
+ case 1:
+ case 3:
+ case 7:
+ denali->dev_info.bPlaneNum = no_of_planes + 1;
+ break;
+ default:
+ status = FAIL;
+ break;
+ }
+
+ find_valid_banks(denali);
+
+ detect_partition_feature(denali);
+
+ dump_device_info(denali);
+
+ /* If the user specified to override the default timings
+ * with a specific ONFI mode, we apply those changes here.
+ */
+ if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
+ {
+ NAND_ONFi_Timing_Mode(denali, onfi_timing_mode);
+ }
+
+ return status;
+}
+
+static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali,
+ uint16_t INT_ENABLE)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (INT_ENABLE)
+ denali_write32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
+ else
+ denali_write32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
+}
+
+/* validation function to verify that the controlling software is making
+ a valid request
+ */
+static inline bool is_flash_bank_valid(int flash_bank)
+{
+ return (flash_bank >= 0 && flash_bank < 4);
+}
+
+static void denali_irq_init(struct denali_nand_info *denali)
+{
+ uint32_t int_mask = 0;
+
+ /* Disable global interrupts */
+ NAND_LLD_Enable_Disable_Interrupts(denali, false);
+
+ int_mask = DENALI_IRQ_ALL;
+
+ /* Clear all status bits */
+ denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS0);
+ denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS1);
+ denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS2);
+ denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS3);
+
+ denali_irq_enable(denali, int_mask);
+}
+
+static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
+{
+ NAND_LLD_Enable_Disable_Interrupts(denali, false);
+ free_irq(irqnum, denali);
+}
+
+static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask)
+{
+ denali_write32(int_mask, denali->flash_reg + INTR_EN0);
+ denali_write32(int_mask, denali->flash_reg + INTR_EN1);
+ denali_write32(int_mask, denali->flash_reg + INTR_EN2);
+ denali_write32(int_mask, denali->flash_reg + INTR_EN3);
+}
+
+/* This function only returns when an interrupt that this driver cares about
+ * occurs. This is to reduce the overhead of servicing interrupts
+ */
+static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
+{
+ return (read_interrupt_status(denali) & DENALI_IRQ_ALL);
+}
+
+/* Interrupts are cleared by writing a 1 to the appropriate status bit */
+static inline void clear_interrupt(struct denali_nand_info *denali, uint32_t irq_mask)
+{
+ uint32_t intr_status_reg = 0;
+
+ intr_status_reg = intr_status_addresses[denali->flash_bank];
+
+ denali_write32(irq_mask, denali->flash_reg + intr_status_reg);
+}
+
+static void clear_interrupts(struct denali_nand_info *denali)
+{
+ uint32_t status = 0x0;
+ spin_lock_irq(&denali->irq_lock);
+
+ status = read_interrupt_status(denali);
+
+#if DEBUG_DENALI
+ denali->irq_debug_array[denali->idx++] = 0x30000000 | status;
+ denali->idx %= 32;
+#endif
+
+ denali->irq_status = 0x0;
+ spin_unlock_irq(&denali->irq_lock);
+}
+
+static uint32_t read_interrupt_status(struct denali_nand_info *denali)
+{
+ uint32_t intr_status_reg = 0;
+
+ intr_status_reg = intr_status_addresses[denali->flash_bank];
+
+ return ioread32(denali->flash_reg + intr_status_reg);
+}
+
+#if DEBUG_DENALI
+static void print_irq_log(struct denali_nand_info *denali)
+{
+ int i = 0;
+
+ printk("ISR debug log index = %X\n", denali->idx);
+ for (i = 0; i < 32; i++)
+ {
+ printk("%08X: %08X\n", i, denali->irq_debug_array[i]);
+ }
+}
+#endif
+
+/* This is the interrupt service routine. It handles all interrupts
+ * sent to this device. Note that on CE4100, this is a shared
+ * interrupt.
+ */
+static irqreturn_t denali_isr(int irq, void *dev_id)
+{
+ struct denali_nand_info *denali = dev_id;
+ uint32_t irq_status = 0x0;
+ irqreturn_t result = IRQ_NONE;
+
+ spin_lock(&denali->irq_lock);
+
+ /* check to see if a valid NAND chip has
+ * been selected.
+ */
+ if (is_flash_bank_valid(denali->flash_bank))
+ {
+ /* check to see if controller generated
+ * the interrupt, since this is a shared interrupt */
+ if ((irq_status = denali_irq_detected(denali)) != 0)
+ {
+#if DEBUG_DENALI
+ denali->irq_debug_array[denali->idx++] = 0x10000000 | irq_status;
+ denali->idx %= 32;
+
+ printk("IRQ status = 0x%04x\n", irq_status);
+#endif
+ /* handle interrupt */
+ /* first acknowledge it */
+ clear_interrupt(denali, irq_status);
+ /* store the status in the device context for someone
+ to read */
+ denali->irq_status |= irq_status;
+ /* notify anyone who cares that it happened */
+ complete(&denali->complete);
+ /* tell the OS that we've handled this */
+ result = IRQ_HANDLED;
+ }
+ }
+ spin_unlock(&denali->irq_lock);
+ return result;
+}
+#define BANK(x) ((x) << 24)
+
+static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
+{
+ unsigned long comp_res = 0;
+ uint32_t intr_status = 0;
+ bool retry = false;
+ unsigned long timeout = msecs_to_jiffies(1000);
+
+ do
+ {
+#if DEBUG_DENALI
+ printk("waiting for 0x%x\n", irq_mask);
+#endif
+ comp_res = wait_for_completion_timeout(&denali->complete, timeout);
+ spin_lock_irq(&denali->irq_lock);
+ intr_status = denali->irq_status;
+
+#if DEBUG_DENALI
+ denali->irq_debug_array[denali->idx++] = 0x20000000 | (irq_mask << 16) | intr_status;
+ denali->idx %= 32;
+#endif
+
+ if (intr_status & irq_mask)
+ {
+ denali->irq_status &= ~irq_mask;
+ spin_unlock_irq(&denali->irq_lock);
+#if DEBUG_DENALI
+ if (retry) printk("status on retry = 0x%x\n", intr_status);
+#endif
+ /* our interrupt was detected */
+ break;
+ }
+ else
+ {
+ /* these are not the interrupts you are looking for -
+ need to wait again */
+ spin_unlock_irq(&denali->irq_lock);
+#if DEBUG_DENALI
+ print_irq_log(denali);
+ printk("received irq nobody cared: irq_status = 0x%x,"
+ " irq_mask = 0x%x, timeout = %ld\n", intr_status, irq_mask, comp_res);
+#endif
+ retry = true;
+ }
+ } while (comp_res != 0);
+
+ if (comp_res == 0)
+ {
+ /* timeout */
+ printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
+ intr_status, irq_mask);
+
+ intr_status = 0;
+ }
+ return intr_status;
+}
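+/* wait_for_irq() returns the captured interrupt status, which contains at
+ * least one bit from irq_mask, or 0 if the 1000 ms timeout expires. */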
+
+/* This helper function sets up the registers for ECC and controls whether
+   the spare area will be transferred. */
+static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
+ bool transfer_spare)
+{
+ int ecc_en_flag = 0, transfer_spare_flag = 0;
+
+ /* set ECC, transfer spare bits if needed */
+ ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
+ transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
+
+ /* Enable spare area/ECC per user's request. */
+ denali_write32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
+ denali_write32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
+}
+
+/* sends a pipeline command operation to the controller. See the Denali NAND
+ controller's user guide for more information (section 4.2.3.6).
+ */
+static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en,
+ bool transfer_spare, int access_type,
+ int op)
+{
+ int status = PASS;
+ uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0,
+ irq_mask = 0;
+
+ if (op == DENALI_READ) irq_mask = INTR_STATUS0__LOAD_COMP;
+ else if (op == DENALI_WRITE) irq_mask = 0;
+ else BUG();
+
+ setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
+
+#if DEBUG_DENALI
+ spin_lock_irq(&denali->irq_lock);
+ denali->irq_debug_array[denali->idx++] = 0x40000000 | ioread32(denali->flash_reg + ECC_ENABLE) | (access_type << 4);
+ denali->idx %= 32;
+ spin_unlock_irq(&denali->irq_lock);
+#endif
+
+
+ /* clear interrupts */
+ clear_interrupts(denali);
+
+ addr = BANK(denali->flash_bank) | denali->page;
+
+ if (op == DENALI_WRITE && access_type != SPARE_ACCESS)
+ {
+ cmd = MODE_01 | addr;
+ denali_write32(cmd, denali->flash_mem);
+ }
+ else if (op == DENALI_WRITE && access_type == SPARE_ACCESS)
+ {
+ /* read spare area */
+ cmd = MODE_10 | addr;
+ index_addr(denali, (uint32_t)cmd, access_type);
+
+ cmd = MODE_01 | addr;
+ denali_write32(cmd, denali->flash_mem);
+ }
+ else if (op == DENALI_READ)
+ {
+ /* setup page read request for access type */
+ cmd = MODE_10 | addr;
+ index_addr(denali, (uint32_t)cmd, access_type);
+
+ /* page 33 of the NAND controller spec indicates we should not
+ use the pipeline commands in Spare area only mode. So we
+ don't.
+ */
+ if (access_type == SPARE_ACCESS)
+ {
+ cmd = MODE_01 | addr;
+ denali_write32(cmd, denali->flash_mem);
+ }
+ else
+ {
+ index_addr(denali, (uint32_t)cmd, 0x2000 | op | page_count);
+
+ /* wait for command to be accepted
+ * can always use status0 bit as the mask is identical for each
+ * bank. */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0)
+ {
+ printk(KERN_ERR "cmd, page, addr on timeout "
+ "(0x%x, 0x%x, 0x%x)\n", cmd, denali->page, addr);
+ status = FAIL;
+ }
+ else
+ {
+ cmd = MODE_01 | addr;
+ denali_write32(cmd, denali->flash_mem);
+ }
+ }
+ }
+ return status;
+}
+
+/* helper function that simply writes a buffer to the flash */
+static int write_data_to_flash_mem(struct denali_nand_info *denali, const uint8_t *buf,
+ int len)
+{
+ uint32_t i = 0, *buf32;
+
+ /* verify that the len is a multiple of 4. see comment in
+ * read_data_from_flash_mem() */
+ BUG_ON((len % 4) != 0);
+
+ /* write the data to the flash memory */
+ buf32 = (uint32_t *)buf;
+ for (i = 0; i < len / 4; i++)
+ {
+ denali_write32(*buf32++, denali->flash_mem + 0x10);
+ }
+	return i*4; /* intent is to return the number of bytes written */
+}
+
+/* helper function that simply reads a buffer from the flash */
+static int read_data_from_flash_mem(struct denali_nand_info *denali, uint8_t *buf,
+ int len)
+{
+ uint32_t i = 0, *buf32;
+
+ /* we assume that len will be a multiple of 4, if not
+ * it would be nice to know about it ASAP rather than
+ * have random failures...
+ *
+ * This assumption is based on the fact that this
+ * function is designed to be used to read flash pages,
+ * which are typically multiples of 4...
+ */
+
+ BUG_ON((len % 4) != 0);
+
+ /* transfer the data from the flash */
+ buf32 = (uint32_t *)buf;
+ for (i = 0; i < len / 4; i++)
+ {
+ *buf32++ = ioread32(denali->flash_mem + 0x10);
+ }
+ return i*4; /* intent is to return the number of bytes read */
+}
+
+/* writes OOB data to the device */
+static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP |
+ INTR_STATUS0__PROGRAM_FAIL;
+ int status = 0;
+
+ denali->page = page;
+
+ if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
+ DENALI_WRITE) == PASS)
+ {
+ write_data_to_flash_mem(denali, buf, mtd->oobsize);
+
+#if DEBUG_DENALI
+ spin_lock_irq(&denali->irq_lock);
+ denali->irq_debug_array[denali->idx++] = 0x80000000 | mtd->oobsize;
+ denali->idx %= 32;
+ spin_unlock_irq(&denali->irq_lock);
+#endif
+
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0)
+ {
+ printk(KERN_ERR "OOB write failed\n");
+ status = -EIO;
+ }
+ }
+ else
+ {
+ printk(KERN_ERR "unable to send pipeline command\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+/* reads OOB data from the device */
+static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t irq_mask = INTR_STATUS0__LOAD_COMP, irq_status = 0, addr = 0x0, cmd = 0x0;
+
+ denali->page = page;
+
+#if DEBUG_DENALI
+ printk("read_oob %d\n", page);
+#endif
+ if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
+ DENALI_READ) == PASS)
+ {
+ read_data_from_flash_mem(denali, buf, mtd->oobsize);
+
+ /* wait for command to be accepted
+ * can always use status0 bit as the mask is identical for each
+ * bank. */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0)
+ {
+ printk(KERN_ERR "page on OOB timeout %d\n", denali->page);
+ }
+
+ /* We set the device back to MAIN_ACCESS here as I observed
+ * instability with the controller if you do a block erase
+ * and the last transaction was a SPARE_ACCESS. Block erase
+ * is reliable (according to the MTD test infrastructure)
+ * if you are in MAIN_ACCESS.
+ */
+ addr = BANK(denali->flash_bank) | denali->page;
+ cmd = MODE_10 | addr;
+ index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);
+
+#if DEBUG_DENALI
+ spin_lock_irq(&denali->irq_lock);
+ denali->irq_debug_array[denali->idx++] = 0x60000000 | mtd->oobsize;
+ denali->idx %= 32;
+ spin_unlock_irq(&denali->irq_lock);
+#endif
+ }
+}
+
+/* this function examines buffers to see if they contain data that
+ * indicate that the buffer is part of an erased region of flash.
+ */
+bool is_erased(uint8_t *buf, int len)
+{
+ int i = 0;
+ for (i = 0; i < len; i++)
+ {
+ if (buf[i] != 0xFF)
+ {
+ return false;
+ }
+ }
+ return true;
+}
+#define ECC_SECTOR_SIZE 512
+
+#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
+#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
+#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
+#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO))
+#define ECC_ERR_DEVICE(x) ((x) & ERR_CORRECTION_INFO__DEVICE_NR >> 8)
+#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
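+/* ECC_SECTOR() and ECC_BYTE() split ECC_ERROR_ADDRESS into the failing sector
+ * number and the byte offset within that sector; handle_ecc() below rebuilds
+ * the buffer offset as err_sector * ECC_SECTOR_SIZE + err_byte. */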
+
+static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
+ uint8_t *oobbuf, uint32_t irq_status)
+{
+ bool check_erased_page = false;
+
+ if (irq_status & INTR_STATUS0__ECC_ERR)
+ {
+ /* read the ECC errors. we'll ignore them for now */
+ uint32_t err_address = 0, err_correction_info = 0;
+ uint32_t err_byte = 0, err_sector = 0, err_device = 0;
+ uint32_t err_correction_value = 0;
+
+ do
+ {
+ err_address = ioread32(denali->flash_reg +
+ ECC_ERROR_ADDRESS);
+ err_sector = ECC_SECTOR(err_address);
+ err_byte = ECC_BYTE(err_address);
+
+
+ err_correction_info = ioread32(denali->flash_reg +
+ ERR_CORRECTION_INFO);
+ err_correction_value =
+ ECC_CORRECTION_VALUE(err_correction_info);
+ err_device = ECC_ERR_DEVICE(err_correction_info);
+
+ if (ECC_ERROR_CORRECTABLE(err_correction_info))
+ {
+ /* offset in our buffer is computed as:
+ sector number * sector size + offset in
+ sector
+ */
+ int offset = err_sector * ECC_SECTOR_SIZE +
+ err_byte;
+ if (offset < denali->mtd.writesize)
+ {
+ /* correct the ECC error */
+ buf[offset] ^= err_correction_value;
+ denali->mtd.ecc_stats.corrected++;
+ }
+ else
+ {
+ /* bummer, couldn't correct the error */
+ printk(KERN_ERR "ECC offset invalid\n");
+ denali->mtd.ecc_stats.failed++;
+ }
+ }
+ else
+ {
+ /* if the error is not correctable, need to
+ * look at the page to see if it is an erased page.
+ * if so, then it's not a real ECC error */
+ check_erased_page = true;
+ }
+
+#if DEBUG_DENALI
+ printk("Detected ECC error in page %d: err_addr = 0x%08x,"
+ " info to fix is 0x%08x\n", denali->page, err_address,
+ err_correction_info);
+#endif
+ } while (!ECC_LAST_ERR(err_correction_info));
+ }
+ return check_erased_page;
+}
+
+/* programs the controller to either enable/disable DMA transfers */
+static void denali_enable_dma(struct denali_nand_info *denali, bool en)
+{
+ uint32_t reg_val = 0x0;
+
+ if (en) reg_val = DMA_ENABLE__FLAG;
+
+ denali_write32(reg_val, denali->flash_reg + DMA_ENABLE);
+ ioread32(denali->flash_reg + DMA_ENABLE);
+}
+
+/* setups the HW to perform the data DMA */
+static void denali_setup_dma(struct denali_nand_info *denali, int op)
+{
+ uint32_t mode = 0x0;
+ const int page_count = 1;
+ dma_addr_t addr = denali->buf.dma_buf;
+
+ mode = MODE_10 | BANK(denali->flash_bank);
+
+ /* DMA is a four step process */
+
+ /* 1. setup transfer type and # of pages */
+ index_addr(denali, mode | denali->page, 0x2000 | op | page_count);
+
+ /* 2. set memory high address bits 23:8 */
+ index_addr(denali, mode | ((uint16_t)(addr >> 16) << 8), 0x2200);
+
+ /* 3. set memory low address bits 23:8 */
+ index_addr(denali, mode | ((uint16_t)addr << 8), 0x2300);
+
+ /* 4. interrupt when complete, burst len = 64 bytes*/
+ index_addr(denali, mode | 0x14000, 0x2400);
+}
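+/* Steps 2 and 3 each place 16 bits of the DMA buffer address into bits 23:8
+ * of the indexed address word: step 2 carries the upper half (addr >> 16) and
+ * step 3 the lower half, written with the 0x2200 and 0x2300 data values. */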
+
+/* writes a page. user specifies type, and this function handles the
+ configuration details. */
+static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, bool raw_xfer)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct pci_dev *pci_dev = denali->dev;
+
+ dma_addr_t addr = denali->buf.dma_buf;
+ size_t size = denali->mtd.writesize + denali->mtd.oobsize;
+
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP |
+ INTR_STATUS0__PROGRAM_FAIL;
+
+ /* if it is a raw xfer, we want to disable ecc, and send
+ * the spare area.
+ * !raw_xfer - enable ecc
+ * raw_xfer - transfer spare
+ */
+ setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
+
+ /* copy buffer into DMA buffer */
+ memcpy(denali->buf.buf, buf, mtd->writesize);
+
+ if (raw_xfer)
+ {
+ /* transfer the data to the spare area */
+ memcpy(denali->buf.buf + mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize);
+ }
+
+ pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE);
+
+ clear_interrupts(denali);
+ denali_enable_dma(denali, true);
+
+ denali_setup_dma(denali, DENALI_WRITE);
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0)
+ {
+ printk(KERN_ERR "timeout on write_page (type = %d)\n", raw_xfer);
+ denali->status =
+ (irq_status & INTR_STATUS0__PROGRAM_FAIL) ? NAND_STATUS_FAIL :
+ PASS;
+ }
+
+ denali_enable_dma(denali, false);
+ pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE);
+}
+
+/* NAND core entry points */
+
+/* this is the callback that the NAND core calls to write a page. Since
+ writing a page with ECC or without is similar, all the work is done
+ by write_page above. */
+static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ /* for regular page writes, we let HW handle all the ECC
+ * data written to the device. */
+ write_page(mtd, chip, buf, false);
+}
+
+/* This is the callback that the NAND core calls to write a page without ECC.
+ raw access is similar to ECC page writes, so all the work is done in the
+ write_page() function above.
+ */
+static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ /* for raw page writes, we want to disable ECC and simply write
+ whatever data is in the buffer. */
+ write_page(mtd, chip, buf, true);
+}
+
+static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ return write_oob_data(mtd, chip->oob_poi, page);
+}
+
+static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ read_oob_data(mtd, chip->oob_poi, page);
+
+ return 0; /* read_oob_data() has already fetched the OOB, so
+  * tell the NAND core it does not need to issue
+  * another read command itself. */
+}
+
+static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct pci_dev *pci_dev = denali->dev;
+
+ dma_addr_t addr = denali->buf.dma_buf;
+ size_t size = denali->mtd.writesize + denali->mtd.oobsize;
+
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE |
+ INTR_STATUS0__ECC_ERR;
+ bool check_erased_page = false;
+
+ setup_ecc_for_xfer(denali, true, false);
+
+ denali_enable_dma(denali, true);
+ pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+
+ clear_interrupts(denali);
+ denali_setup_dma(denali, DENALI_READ);
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+
+ memcpy(buf, denali->buf.buf, mtd->writesize);
+
+ check_erased_page = handle_ecc(denali, buf, chip->oob_poi, irq_status);
+ denali_enable_dma(denali, false);
+
+ if (check_erased_page)
+ {
+ read_oob_data(&denali->mtd, chip->oob_poi, denali->page);
+
+ /* check ECC failures that may have occurred on erased pages */
+ if (!is_erased(buf, denali->mtd.writesize))
+ {
+ denali->mtd.ecc_stats.failed++;
+ }
+ if (!is_erased(buf, denali->mtd.oobsize))
+ {
+ denali->mtd.ecc_stats.failed++;
+ }
+ }
+ return 0;
+}
+
+static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct pci_dev *pci_dev = denali->dev;
+
+ dma_addr_t addr = denali->buf.dma_buf;
+ size_t size = denali->mtd.writesize + denali->mtd.oobsize;
+
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP;
+
+ setup_ecc_for_xfer(denali, false, true);
+ denali_enable_dma(denali, true);
+
+ pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+
+ clear_interrupts(denali);
+ denali_setup_dma(denali, DENALI_READ);
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+
+ denali_enable_dma(denali, false);
+
+ memcpy(buf, denali->buf.buf, mtd->writesize);
+ memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
+
+ return 0;
+}
+
+static uint8_t denali_read_byte(struct mtd_info *mtd)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint8_t result = 0xff;
+
+ if (denali->buf.head < denali->buf.tail)
+ {
+ result = denali->buf.buf[denali->buf.head++];
+ }
+
+#if DEBUG_DENALI
+ printk("read byte -> 0x%02x\n", result);
+#endif
+ return result;
+}
+
+static void denali_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+#if DEBUG_DENALI
+ printk("denali select chip %d\n", chip);
+#endif
+ spin_lock_irq(&denali->irq_lock);
+ denali->flash_bank = chip;
+ spin_unlock_irq(&denali->irq_lock);
+}
+
+static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ int status = denali->status;
+ denali->status = 0;
+
+#if DEBUG_DENALI
+ printk("waitfunc %d\n", status);
+#endif
+ return status;
+}
+
+static void denali_erase(struct mtd_info *mtd, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+ uint32_t cmd = 0x0, irq_status = 0;
+
+#if DEBUG_DENALI
+ printk("erase page: %d\n", page);
+#endif
+ /* clear interrupts */
+ clear_interrupts(denali);
+
+ /* setup page read request for access type */
+ cmd = MODE_10 | BANK(denali->flash_bank) | page;
+ index_addr(denali, (uint32_t)cmd, 0x1);
+
+ /* wait for erase to complete or failure to occur */
+ irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP |
+ INTR_STATUS0__ERASE_FAIL);
+
+ denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ? NAND_STATUS_FAIL :
+ PASS;
+}
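+
+/* The 0x1 data word written through the MODE_10 window above is the
+ * controller's erase sub-command for the addressed page's block; success or
+ * failure is then reported via the ERASE_COMP / ERASE_FAIL interrupt bits. */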
+
+static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
+ int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+#if DEBUG_DENALI
+ printk("cmdfunc: 0x%x %d %d\n", cmd, col, page);
+#endif
+ switch (cmd)
+ {
+ case NAND_CMD_PAGEPROG:
+ break;
+ case NAND_CMD_STATUS:
+ read_status(denali);
+ break;
+ case NAND_CMD_READID:
+ reset_buf(denali);
+ if (denali->flash_bank < denali->total_used_banks)
+ {
+ /* write manufacturer information into nand
+ buffer for NAND subsystem to fetch.
+ */
+ write_byte_to_buf(denali, denali->dev_info.wDeviceMaker);
+ write_byte_to_buf(denali, denali->dev_info.wDeviceID);
+ write_byte_to_buf(denali, denali->dev_info.bDeviceParam0);
+ write_byte_to_buf(denali, denali->dev_info.bDeviceParam1);
+ write_byte_to_buf(denali, denali->dev_info.bDeviceParam2);
+ }
+ else
+ {
+ int i;
+ for (i = 0; i < 5; i++)
+ write_byte_to_buf(denali, 0xff);
+ }
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_SEQIN:
+ denali->page = page;
+ break;
+ case NAND_CMD_RESET:
+ reset_bank(denali);
+ break;
+ case NAND_CMD_READOOB:
+ /* TODO: Read OOB data */
+ break;
+ default:
+ printk(KERN_ERR ": unsupported command received 0x%x\n", cmd);
+ break;
+ }
+}
+
+/* stubs for ECC functions not used by the NAND core */
+static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
+ uint8_t *ecc_code)
+{
+ printk(KERN_ERR "denali_ecc_calculate called unexpectedly\n");
+ BUG();
+ return -EIO;
+}
+
+static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ printk(KERN_ERR "denali_ecc_correct called unexpectedly\n");
+ BUG();
+ return -EIO;
+}
+
+static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
+{
+ printk(KERN_ERR "denali_ecc_hwctl called unexpectedly\n");
+ BUG();
+}
+/* end NAND core entry points */
+
+/* Initialization code to bring the device up to a known good state */
+static void denali_hw_init(struct denali_nand_info *denali)
+{
+ denali_irq_init(denali);
+ NAND_Flash_Reset(denali);
+ denali_write32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
+ denali_write32(CHIP_EN_DONT_CARE__FLAG, denali->flash_reg + CHIP_ENABLE_DONT_CARE);
+
+ denali_write32(0x0, denali->flash_reg + SPARE_AREA_SKIP_BYTES);
+ denali_write32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
+
+ /* set sane default values for these registers at init time */
+ denali_write32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
+ denali_write32(1, denali->flash_reg + ECC_ENABLE);
+}
+
+/* ECC layout for SLC devices. Denali spec indicates SLC fixed at 4 bytes */
+#define ECC_BYTES_SLC (4 * (2048 / ECC_SECTOR_SIZE))
+static struct nand_ecclayout nand_oob_slc = {
+ .eccbytes = 4,
+ .eccpos = { 0, 1, 2, 3 }, /* not used */
+ .oobfree = {{
+ .offset = ECC_BYTES_SLC,
+ .length = 64 - ECC_BYTES_SLC
+ }}
+};
+
+#define ECC_BYTES_MLC (14 * (2048 / ECC_SECTOR_SIZE))
+static struct nand_ecclayout nand_oob_mlc_14bit = {
+ .eccbytes = 14,
+ .eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, /* not used */
+ .oobfree = {{
+ .offset = ECC_BYTES_MLC,
+ .length = 64 - ECC_BYTES_MLC
+ }}
+};
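+
+/* With a 2048-byte page and 512-byte ECC sectors there are four sectors per
+ * page, so the layouts above reserve 4 * 4 = 16 spare bytes for SLC and
+ * 14 * 4 = 56 for MLC, leaving 48 and 8 free bytes respectively in a 64-byte
+ * OOB area. */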
+
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
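+
+/* The bad block table and its mirror live in the last blocks of each chip
+ * (NAND_BBT_LASTBLOCK | NAND_BBT_PERCHIP); they are recognised by the "Bbt0"
+ * and "1tbB" patterns at OOB offset 8, carry a version byte at offset 12 and
+ * are searched for in at most the last four blocks. */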
+
+/* initialize driver data structures */
+void denali_drv_init(struct denali_nand_info *denali)
+{
+ denali->idx = 0;
+
+ /* setup interrupt handler */
+ /* the completion object will be used to notify
+  * the waiter that the interrupt has occurred */
+ init_completion(&denali->complete);
+
+ /* the spinlock will be used to synchronize the ISR
+  * with any code that might access the shared
+  * interrupt status */
+ spin_lock_init(&denali->irq_lock);
+
+ /* indicate that MTD has not selected a valid bank yet */
+ denali->flash_bank = CHIP_SELECT_INVALID;
+
+ /* initialize our irq_status variable to indicate no interrupts */
+ denali->irq_status = 0;
+}
+
+/* driver entry point */
+static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int ret = -ENODEV;
+ resource_size_t csr_base, mem_base;
+ unsigned long csr_len, mem_len;
+ struct denali_nand_info *denali;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ denali = kzalloc(sizeof(*denali), GFP_KERNEL);
+ if (!denali)
+ return -ENOMEM;
+
+ ret = pci_enable_device(dev);
+ if (ret) {
+ printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
+ goto failed_enable;
+ }
+
+ if (id->driver_data == INTEL_CE4100) {
+ /* Due to a silicon limitation, we can only support
+ * ONFI timing mode 1 and below.
+ */
+ if (onfi_timing_mode < -1 || onfi_timing_mode > 1)
+ {
+ printk("Intel CE4100 only supports ONFI timing mode 1 "
+ "or below\n");
+ ret = -EINVAL;
+ goto failed_enable;
+ }
+ denali->platform = INTEL_CE4100;
+ mem_base = pci_resource_start(dev, 0);
+ mem_len = pci_resource_len(dev, 1);
+ csr_base = pci_resource_start(dev, 1);
+ csr_len = pci_resource_len(dev, 1);
+ } else {
+ denali->platform = INTEL_MRST;
+ csr_base = pci_resource_start(dev, 0);
+ csr_len = pci_resource_len(dev, 0);
+ mem_base = pci_resource_start(dev, 1);
+ mem_len = pci_resource_len(dev, 1);
+ if (!mem_len) {
+ mem_base = csr_base + csr_len;
+ mem_len = csr_len;
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: No second BAR for PCI device; assuming %08Lx\n",
+ (uint64_t)csr_base);
+ }
+ }
+
+ /* Is 32-bit DMA supported? */
+ ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+
+ if (ret)
+ {
+ printk(KERN_ERR "Spectra: no usable DMA configuration\n");
+ goto failed_enable;
+ }
+ denali->buf.dma_buf = pci_map_single(dev, denali->buf.buf, DENALI_BUF_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (pci_dma_mapping_error(dev, denali->buf.dma_buf))
+ {
+ printk(KERN_ERR "Spectra: failed to map DMA buffer\n");
+ goto failed_enable;
+ }
+
+ pci_set_master(dev);
+ denali->dev = dev;
+
+ ret = pci_request_regions(dev, DENALI_NAND_NAME);
+ if (ret) {
+ printk(KERN_ERR "Spectra: Unable to request memory regions\n");
+ goto failed_req_csr;
+ }
+
+ denali->flash_reg = ioremap_nocache(csr_base, csr_len);
+ if (!denali->flash_reg) {
+ printk(KERN_ERR "Spectra: Unable to remap memory region\n");
+ ret = -ENOMEM;
+ goto failed_remap_csr;
+ }
+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08Lx -> 0x%p (0x%lx)\n",
+ (uint64_t)csr_base, denali->flash_reg, csr_len);
+
+ denali->flash_mem = ioremap_nocache(mem_base, mem_len);
+ if (!denali->flash_mem) {
+ printk(KERN_ERR "Spectra: ioremap_nocache failed!");
+ iounmap(denali->flash_reg);
+ ret = -ENOMEM;
+ goto failed_remap_csr;
+ }
+
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: Remapped flash base address: "
+ "0x%p, len: %ld\n",
+ denali->flash_mem, mem_len);
+
+ denali_hw_init(denali);
+ denali_drv_init(denali);
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
+ if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
+ DENALI_NAND_NAME, denali)) {
+ printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
+ ret = -ENODEV;
+ goto failed_request_irq;
+ }
+
+ /* now that our ISR is registered, we can enable interrupts */
+ NAND_LLD_Enable_Disable_Interrupts(denali, true);
+
+ pci_set_drvdata(dev, denali);
+
+ NAND_Read_Device_ID(denali);
+
+ /* MTD supported page sizes vary by kernel. We validate our
+ kernel supports the device here.
+ */
+ if (denali->dev_info.wPageSize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
+ {
+ ret = -ENODEV;
+ printk(KERN_ERR "Spectra: device size not supported by this "
+ "version of MTD.");
+ goto failed_nand;
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
+ "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
+ "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
+ "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
+ ioread32(denali->flash_reg + ACC_CLKS),
+ ioread32(denali->flash_reg + RE_2_WE),
+ ioread32(denali->flash_reg + WE_2_RE),
+ ioread32(denali->flash_reg + ADDR_2_DATA),
+ ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
+ ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
+ ioread32(denali->flash_reg + CS_SETUP_CNT));
+
+ denali->mtd.name = "Denali NAND";
+ denali->mtd.owner = THIS_MODULE;
+ denali->mtd.priv = &denali->nand;
+
+ /* register the driver with the NAND core subsystem */
+ denali->nand.select_chip = denali_select_chip;
+ denali->nand.cmdfunc = denali_cmdfunc;
+ denali->nand.read_byte = denali_read_byte;
+ denali->nand.waitfunc = denali_waitfunc;
+
+ /* scan for NAND devices attached to the controller
+ * this is the first stage in a two step process to register
+ * with the nand subsystem */
+ if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL))
+ {
+ ret = -ENXIO;
+ goto failed_nand;
+ }
+
+ /* second stage of the NAND scan
+ * this stage requires information regarding ECC and
+ * bad block management. */
+
+ /* Bad block management */
+ denali->nand.bbt_td = &bbt_main_descr;
+ denali->nand.bbt_md = &bbt_mirror_descr;
+
+ /* skip the scan for now until we have OOB read and write support */
+ denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
+ denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
+
+ if (denali->dev_info.MLCDevice)
+ {
+ denali->nand.ecc.layout = &nand_oob_mlc_14bit;
+ denali->nand.ecc.bytes = ECC_BYTES_MLC;
+ }
+ else /* SLC */
+ {
+ denali->nand.ecc.layout = &nand_oob_slc;
+ denali->nand.ecc.bytes = ECC_BYTES_SLC;
+ }
+
+ /* These functions are required by the NAND core framework, otherwise,
+ the NAND core will assert. However, we don't need them, so we'll stub
+ them out. */
+ denali->nand.ecc.calculate = denali_ecc_calculate;
+ denali->nand.ecc.correct = denali_ecc_correct;
+ denali->nand.ecc.hwctl = denali_ecc_hwctl;
+
+ /* override the default read operations */
+ denali->nand.ecc.size = denali->mtd.writesize;
+ denali->nand.ecc.read_page = denali_read_page;
+ denali->nand.ecc.read_page_raw = denali_read_page_raw;
+ denali->nand.ecc.write_page = denali_write_page;
+ denali->nand.ecc.write_page_raw = denali_write_page_raw;
+ denali->nand.ecc.read_oob = denali_read_oob;
+ denali->nand.ecc.write_oob = denali_write_oob;
+ denali->nand.erase_cmd = denali_erase;
+
+ if (nand_scan_tail(&denali->mtd))
+ {
+ ret = -ENXIO;
+ goto failed_nand;
+ }
+
+ ret = add_mtd_device(&denali->mtd);
+ if (ret) {
+ printk(KERN_ERR "Spectra: Failed to register MTD device: %d\n", ret);
+ goto failed_nand;
+ }
+ return 0;
+
+ failed_nand:
+ denali_irq_cleanup(dev->irq, denali);
+ failed_request_irq:
+ iounmap(denali->flash_reg);
+ iounmap(denali->flash_mem);
+ failed_remap_csr:
+ pci_release_regions(dev);
+ failed_req_csr:
+ pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ failed_enable:
+ kfree(denali);
+ return ret;
+}
+
+/* driver exit point */
+static void denali_pci_remove(struct pci_dev *dev)
+{
+ struct denali_nand_info *denali = pci_get_drvdata(dev);
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ nand_release(&denali->mtd);
+ del_mtd_device(&denali->mtd);
+
+ denali_irq_cleanup(dev->irq, denali);
+
+ iounmap(denali->flash_reg);
+ iounmap(denali->flash_mem);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ pci_set_drvdata(dev, NULL);
+ kfree(denali);
+}
+
+MODULE_DEVICE_TABLE(pci, denali_pci_ids);
+
+static struct pci_driver denali_pci_driver = {
+ .name = DENALI_NAND_NAME,
+ .id_table = denali_pci_ids,
+ .probe = denali_pci_probe,
+ .remove = denali_pci_remove,
+};
+
+static int __init denali_init(void)
+{
+ printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__);
+ return pci_register_driver(&denali_pci_driver);
+}
+
+/* unregister the PCI driver on module exit */
+static void __exit denali_exit(void)
+{
+ pci_unregister_driver(&denali_pci_driver);
+}
+
+module_init(denali_init);
+module_exit(denali_exit);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
new file mode 100644
index 0000000..422a29a
--- /dev/null
+++ b/drivers/mtd/nand/denali.h
@@ -0,0 +1,816 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009 - 2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/mtd/nand.h>
+
+#define DEVICE_RESET 0x0
+#define DEVICE_RESET__BANK0 0x0001
+#define DEVICE_RESET__BANK1 0x0002
+#define DEVICE_RESET__BANK2 0x0004
+#define DEVICE_RESET__BANK3 0x0008
+
+#define TRANSFER_SPARE_REG 0x10
+#define TRANSFER_SPARE_REG__FLAG 0x0001
+
+#define LOAD_WAIT_CNT 0x20
+#define LOAD_WAIT_CNT__VALUE 0xffff
+
+#define PROGRAM_WAIT_CNT 0x30
+#define PROGRAM_WAIT_CNT__VALUE 0xffff
+
+#define ERASE_WAIT_CNT 0x40
+#define ERASE_WAIT_CNT__VALUE 0xffff
+
+#define INT_MON_CYCCNT 0x50
+#define INT_MON_CYCCNT__VALUE 0xffff
+
+#define RB_PIN_ENABLED 0x60
+#define RB_PIN_ENABLED__BANK0 0x0001
+#define RB_PIN_ENABLED__BANK1 0x0002
+#define RB_PIN_ENABLED__BANK2 0x0004
+#define RB_PIN_ENABLED__BANK3 0x0008
+
+#define MULTIPLANE_OPERATION 0x70
+#define MULTIPLANE_OPERATION__FLAG 0x0001
+
+#define MULTIPLANE_READ_ENABLE 0x80
+#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
+
+#define COPYBACK_DISABLE 0x90
+#define COPYBACK_DISABLE__FLAG 0x0001
+
+#define CACHE_WRITE_ENABLE 0xa0
+#define CACHE_WRITE_ENABLE__FLAG 0x0001
+
+#define CACHE_READ_ENABLE 0xb0
+#define CACHE_READ_ENABLE__FLAG 0x0001
+
+#define PREFETCH_MODE 0xc0
+#define PREFETCH_MODE__PREFETCH_EN 0x0001
+#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
+
+#define CHIP_ENABLE_DONT_CARE 0xd0
+#define CHIP_EN_DONT_CARE__FLAG 0x01
+
+#define ECC_ENABLE 0xe0
+#define ECC_ENABLE__FLAG 0x0001
+
+#define GLOBAL_INT_ENABLE 0xf0
+#define GLOBAL_INT_EN_FLAG 0x01
+
+#define WE_2_RE 0x100
+#define WE_2_RE__VALUE 0x003f
+
+#define ADDR_2_DATA 0x110
+#define ADDR_2_DATA__VALUE 0x003f
+
+#define RE_2_WE 0x120
+#define RE_2_WE__VALUE 0x003f
+
+#define ACC_CLKS 0x130
+#define ACC_CLKS__VALUE 0x000f
+
+#define NUMBER_OF_PLANES 0x140
+#define NUMBER_OF_PLANES__VALUE 0x0007
+
+#define PAGES_PER_BLOCK 0x150
+#define PAGES_PER_BLOCK__VALUE 0xffff
+
+#define DEVICE_WIDTH 0x160
+#define DEVICE_WIDTH__VALUE 0x0003
+
+#define DEVICE_MAIN_AREA_SIZE 0x170
+#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
+
+#define DEVICE_SPARE_AREA_SIZE 0x180
+#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
+
+#define TWO_ROW_ADDR_CYCLES 0x190
+#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
+
+#define MULTIPLANE_ADDR_RESTRICT 0x1a0
+#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
+
+#define ECC_CORRECTION 0x1b0
+#define ECC_CORRECTION__VALUE 0x001f
+
+#define READ_MODE 0x1c0
+#define READ_MODE__VALUE 0x000f
+
+#define WRITE_MODE 0x1d0
+#define WRITE_MODE__VALUE 0x000f
+
+#define COPYBACK_MODE 0x1e0
+#define COPYBACK_MODE__VALUE 0x000f
+
+#define RDWR_EN_LO_CNT 0x1f0
+#define RDWR_EN_LO_CNT__VALUE 0x001f
+
+#define RDWR_EN_HI_CNT 0x200
+#define RDWR_EN_HI_CNT__VALUE 0x001f
+
+#define MAX_RD_DELAY 0x210
+#define MAX_RD_DELAY__VALUE 0x000f
+
+#define CS_SETUP_CNT 0x220
+#define CS_SETUP_CNT__VALUE 0x001f
+
+#define SPARE_AREA_SKIP_BYTES 0x230
+#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
+
+#define SPARE_AREA_MARKER 0x240
+#define SPARE_AREA_MARKER__VALUE 0xffff
+
+#define DEVICES_CONNECTED 0x250
+#define DEVICES_CONNECTED__VALUE 0x0007
+
+#define DIE_MASK 0x260
+#define DIE_MASK__VALUE 0x00ff
+
+#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
+#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
+
+#define WRITE_PROTECT 0x280
+#define WRITE_PROTECT__FLAG 0x0001
+
+#define RE_2_RE 0x290
+#define RE_2_RE__VALUE 0x003f
+
+#define MANUFACTURER_ID 0x300
+#define MANUFACTURER_ID__VALUE 0x00ff
+
+#define DEVICE_ID 0x310
+#define DEVICE_ID__VALUE 0x00ff
+
+#define DEVICE_PARAM_0 0x320
+#define DEVICE_PARAM_0__VALUE 0x00ff
+
+#define DEVICE_PARAM_1 0x330
+#define DEVICE_PARAM_1__VALUE 0x00ff
+
+#define DEVICE_PARAM_2 0x340
+#define DEVICE_PARAM_2__VALUE 0x00ff
+
+#define LOGICAL_PAGE_DATA_SIZE 0x350
+#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
+
+#define LOGICAL_PAGE_SPARE_SIZE 0x360
+#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
+
+#define REVISION 0x370
+#define REVISION__VALUE 0xffff
+
+#define ONFI_DEVICE_FEATURES 0x380
+#define ONFI_DEVICE_FEATURES__VALUE 0x003f
+
+#define ONFI_OPTIONAL_COMMANDS 0x390
+#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
+
+#define ONFI_TIMING_MODE 0x3a0
+#define ONFI_TIMING_MODE__VALUE 0x003f
+
+#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
+#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
+
+#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
+#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
+#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
+
+#define FEATURES 0x3f0
+#define FEATURES__N_BANKS 0x0003
+#define FEATURES__ECC_MAX_ERR 0x003c
+#define FEATURES__DMA 0x0040
+#define FEATURES__CMD_DMA 0x0080
+#define FEATURES__PARTITION 0x0100
+#define FEATURES__XDMA_SIDEBAND 0x0200
+#define FEATURES__GPREG 0x0400
+#define FEATURES__INDEX_ADDR 0x0800
+
+#define TRANSFER_MODE 0x400
+#define TRANSFER_MODE__VALUE 0x0003
+
+#define INTR_STATUS0 0x410
+#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS0__ECC_ERR 0x0002
+#define INTR_STATUS0__DMA_CMD_COMP 0x0004
+#define INTR_STATUS0__TIME_OUT 0x0008
+#define INTR_STATUS0__PROGRAM_FAIL 0x0010
+#define INTR_STATUS0__ERASE_FAIL 0x0020
+#define INTR_STATUS0__LOAD_COMP 0x0040
+#define INTR_STATUS0__PROGRAM_COMP 0x0080
+#define INTR_STATUS0__ERASE_COMP 0x0100
+#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS0__LOCKED_BLK 0x0400
+#define INTR_STATUS0__UNSUP_CMD 0x0800
+#define INTR_STATUS0__INT_ACT 0x1000
+#define INTR_STATUS0__RST_COMP 0x2000
+#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS0__PAGE_XFER_INC 0x8000
+
+#define INTR_EN0 0x420
+#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN0__ECC_ERR 0x0002
+#define INTR_EN0__DMA_CMD_COMP 0x0004
+#define INTR_EN0__TIME_OUT 0x0008
+#define INTR_EN0__PROGRAM_FAIL 0x0010
+#define INTR_EN0__ERASE_FAIL 0x0020
+#define INTR_EN0__LOAD_COMP 0x0040
+#define INTR_EN0__PROGRAM_COMP 0x0080
+#define INTR_EN0__ERASE_COMP 0x0100
+#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN0__LOCKED_BLK 0x0400
+#define INTR_EN0__UNSUP_CMD 0x0800
+#define INTR_EN0__INT_ACT 0x1000
+#define INTR_EN0__RST_COMP 0x2000
+#define INTR_EN0__PIPE_CMD_ERR 0x4000
+#define INTR_EN0__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT0 0x430
+#define PAGE_CNT0__VALUE 0x00ff
+
+#define ERR_PAGE_ADDR0 0x440
+#define ERR_PAGE_ADDR0__VALUE 0xffff
+
+#define ERR_BLOCK_ADDR0 0x450
+#define ERR_BLOCK_ADDR0__VALUE 0xffff
+
+#define INTR_STATUS1 0x460
+#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS1__ECC_ERR 0x0002
+#define INTR_STATUS1__DMA_CMD_COMP 0x0004
+#define INTR_STATUS1__TIME_OUT 0x0008
+#define INTR_STATUS1__PROGRAM_FAIL 0x0010
+#define INTR_STATUS1__ERASE_FAIL 0x0020
+#define INTR_STATUS1__LOAD_COMP 0x0040
+#define INTR_STATUS1__PROGRAM_COMP 0x0080
+#define INTR_STATUS1__ERASE_COMP 0x0100
+#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS1__LOCKED_BLK 0x0400
+#define INTR_STATUS1__UNSUP_CMD 0x0800
+#define INTR_STATUS1__INT_ACT 0x1000
+#define INTR_STATUS1__RST_COMP 0x2000
+#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS1__PAGE_XFER_INC 0x8000
+
+#define INTR_EN1 0x470
+#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN1__ECC_ERR 0x0002
+#define INTR_EN1__DMA_CMD_COMP 0x0004
+#define INTR_EN1__TIME_OUT 0x0008
+#define INTR_EN1__PROGRAM_FAIL 0x0010
+#define INTR_EN1__ERASE_FAIL 0x0020
+#define INTR_EN1__LOAD_COMP 0x0040
+#define INTR_EN1__PROGRAM_COMP 0x0080
+#define INTR_EN1__ERASE_COMP 0x0100
+#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN1__LOCKED_BLK 0x0400
+#define INTR_EN1__UNSUP_CMD 0x0800
+#define INTR_EN1__INT_ACT 0x1000
+#define INTR_EN1__RST_COMP 0x2000
+#define INTR_EN1__PIPE_CMD_ERR 0x4000
+#define INTR_EN1__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT1 0x480
+#define PAGE_CNT1__VALUE 0x00ff
+
+#define ERR_PAGE_ADDR1 0x490
+#define ERR_PAGE_ADDR1__VALUE 0xffff
+
+#define ERR_BLOCK_ADDR1 0x4a0
+#define ERR_BLOCK_ADDR1__VALUE 0xffff
+
+#define INTR_STATUS2 0x4b0
+#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS2__ECC_ERR 0x0002
+#define INTR_STATUS2__DMA_CMD_COMP 0x0004
+#define INTR_STATUS2__TIME_OUT 0x0008
+#define INTR_STATUS2__PROGRAM_FAIL 0x0010
+#define INTR_STATUS2__ERASE_FAIL 0x0020
+#define INTR_STATUS2__LOAD_COMP 0x0040
+#define INTR_STATUS2__PROGRAM_COMP 0x0080
+#define INTR_STATUS2__ERASE_COMP 0x0100
+#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS2__LOCKED_BLK 0x0400
+#define INTR_STATUS2__UNSUP_CMD 0x0800
+#define INTR_STATUS2__INT_ACT 0x1000
+#define INTR_STATUS2__RST_COMP 0x2000
+#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS2__PAGE_XFER_INC 0x8000
+
+#define INTR_EN2 0x4c0
+#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN2__ECC_ERR 0x0002
+#define INTR_EN2__DMA_CMD_COMP 0x0004
+#define INTR_EN2__TIME_OUT 0x0008
+#define INTR_EN2__PROGRAM_FAIL 0x0010
+#define INTR_EN2__ERASE_FAIL 0x0020
+#define INTR_EN2__LOAD_COMP 0x0040
+#define INTR_EN2__PROGRAM_COMP 0x0080
+#define INTR_EN2__ERASE_COMP 0x0100
+#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN2__LOCKED_BLK 0x0400
+#define INTR_EN2__UNSUP_CMD 0x0800
+#define INTR_EN2__INT_ACT 0x1000
+#define INTR_EN2__RST_COMP 0x2000
+#define INTR_EN2__PIPE_CMD_ERR 0x4000
+#define INTR_EN2__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT2 0x4d0
+#define PAGE_CNT2__VALUE 0x00ff
+
+#define ERR_PAGE_ADDR2 0x4e0
+#define ERR_PAGE_ADDR2__VALUE 0xffff
+
+#define ERR_BLOCK_ADDR2 0x4f0
+#define ERR_BLOCK_ADDR2__VALUE 0xffff
+
+#define INTR_STATUS3 0x500
+#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS3__ECC_ERR 0x0002
+#define INTR_STATUS3__DMA_CMD_COMP 0x0004
+#define INTR_STATUS3__TIME_OUT 0x0008
+#define INTR_STATUS3__PROGRAM_FAIL 0x0010
+#define INTR_STATUS3__ERASE_FAIL 0x0020
+#define INTR_STATUS3__LOAD_COMP 0x0040
+#define INTR_STATUS3__PROGRAM_COMP 0x0080
+#define INTR_STATUS3__ERASE_COMP 0x0100
+#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS3__LOCKED_BLK 0x0400
+#define INTR_STATUS3__UNSUP_CMD 0x0800
+#define INTR_STATUS3__INT_ACT 0x1000
+#define INTR_STATUS3__RST_COMP 0x2000
+#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS3__PAGE_XFER_INC 0x8000
+
+#define INTR_EN3 0x510
+#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN3__ECC_ERR 0x0002
+#define INTR_EN3__DMA_CMD_COMP 0x0004
+#define INTR_EN3__TIME_OUT 0x0008
+#define INTR_EN3__PROGRAM_FAIL 0x0010
+#define INTR_EN3__ERASE_FAIL 0x0020
+#define INTR_EN3__LOAD_COMP 0x0040
+#define INTR_EN3__PROGRAM_COMP 0x0080
+#define INTR_EN3__ERASE_COMP 0x0100
+#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN3__LOCKED_BLK 0x0400
+#define INTR_EN3__UNSUP_CMD 0x0800
+#define INTR_EN3__INT_ACT 0x1000
+#define INTR_EN3__RST_COMP 0x2000
+#define INTR_EN3__PIPE_CMD_ERR 0x4000
+#define INTR_EN3__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT3 0x520
+#define PAGE_CNT3__VALUE 0x00ff
+
+#define ERR_PAGE_ADDR3 0x530
+#define ERR_PAGE_ADDR3__VALUE 0xffff
+
+#define ERR_BLOCK_ADDR3 0x540
+#define ERR_BLOCK_ADDR3__VALUE 0xffff
+
+#define DATA_INTR 0x550
+#define DATA_INTR__WRITE_SPACE_AV 0x0001
+#define DATA_INTR__READ_DATA_AV 0x0002
+
+#define DATA_INTR_EN 0x560
+#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
+#define DATA_INTR_EN__READ_DATA_AV 0x0002
+
+#define GPREG_0 0x570
+#define GPREG_0__VALUE 0xffff
+
+#define GPREG_1 0x580
+#define GPREG_1__VALUE 0xffff
+
+#define GPREG_2 0x590
+#define GPREG_2__VALUE 0xffff
+
+#define GPREG_3 0x5a0
+#define GPREG_3__VALUE 0xffff
+
+#define ECC_THRESHOLD 0x600
+#define ECC_THRESHOLD__VALUE 0x03ff
+
+#define ECC_ERROR_BLOCK_ADDRESS 0x610
+#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
+
+#define ECC_ERROR_PAGE_ADDRESS 0x620
+#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
+#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
+
+#define ECC_ERROR_ADDRESS 0x630
+#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
+#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
+
+#define ERR_CORRECTION_INFO 0x640
+#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
+#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
+#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
+#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
+
+#define DMA_ENABLE 0x700
+#define DMA_ENABLE__FLAG 0x0001
+
+#define IGNORE_ECC_DONE 0x710
+#define IGNORE_ECC_DONE__FLAG 0x0001
+
+#define DMA_INTR 0x720
+#define DMA_INTR__TARGET_ERROR 0x0001
+#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
+#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
+#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
+#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
+#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
+
+#define DMA_INTR_EN 0x730
+#define DMA_INTR_EN__TARGET_ERROR 0x0001
+#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
+#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
+#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
+#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
+#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
+
+#define TARGET_ERR_ADDR_LO 0x740
+#define TARGET_ERR_ADDR_LO__VALUE 0xffff
+
+#define TARGET_ERR_ADDR_HI 0x750
+#define TARGET_ERR_ADDR_HI__VALUE 0xffff
+
+#define CHNL_ACTIVE 0x760
+#define CHNL_ACTIVE__CHANNEL0 0x0001
+#define CHNL_ACTIVE__CHANNEL1 0x0002
+#define CHNL_ACTIVE__CHANNEL2 0x0004
+#define CHNL_ACTIVE__CHANNEL3 0x0008
+
+#define ACTIVE_SRC_ID 0x800
+#define ACTIVE_SRC_ID__VALUE 0x00ff
+
+#define PTN_INTR 0x810
+#define PTN_INTR__CONFIG_ERROR 0x0001
+#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
+#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
+#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
+#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
+#define PTN_INTR__REG_ACCESS_ERROR 0x0020
+
+#define PTN_INTR_EN 0x820
+#define PTN_INTR_EN__CONFIG_ERROR 0x0001
+#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
+#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
+#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
+#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
+#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
+
+#define PERM_SRC_ID_0 0x830
+#define PERM_SRC_ID_0__SRCID 0x00ff
+#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_0 0x840
+#define MIN_BLK_ADDR_0__VALUE 0xffff
+
+#define MAX_BLK_ADDR_0 0x850
+#define MAX_BLK_ADDR_0__VALUE 0xffff
+
+#define MIN_MAX_BANK_0 0x860
+#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_1 0x870
+#define PERM_SRC_ID_1__SRCID 0x00ff
+#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_1 0x880
+#define MIN_BLK_ADDR_1__VALUE 0xffff
+
+#define MAX_BLK_ADDR_1 0x890
+#define MAX_BLK_ADDR_1__VALUE 0xffff
+
+#define MIN_MAX_BANK_1 0x8a0
+#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_2 0x8b0
+#define PERM_SRC_ID_2__SRCID 0x00ff
+#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_2 0x8c0
+#define MIN_BLK_ADDR_2__VALUE 0xffff
+
+#define MAX_BLK_ADDR_2 0x8d0
+#define MAX_BLK_ADDR_2__VALUE 0xffff
+
+#define MIN_MAX_BANK_2 0x8e0
+#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_3 0x8f0
+#define PERM_SRC_ID_3__SRCID 0x00ff
+#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_3 0x900
+#define MIN_BLK_ADDR_3__VALUE 0xffff
+
+#define MAX_BLK_ADDR_3 0x910
+#define MAX_BLK_ADDR_3__VALUE 0xffff
+
+#define MIN_MAX_BANK_3 0x920
+#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_4 0x930
+#define PERM_SRC_ID_4__SRCID 0x00ff
+#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_4 0x940
+#define MIN_BLK_ADDR_4__VALUE 0xffff
+
+#define MAX_BLK_ADDR_4 0x950
+#define MAX_BLK_ADDR_4__VALUE 0xffff
+
+#define MIN_MAX_BANK_4 0x960
+#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_5 0x970
+#define PERM_SRC_ID_5__SRCID 0x00ff
+#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_5 0x980
+#define MIN_BLK_ADDR_5__VALUE 0xffff
+
+#define MAX_BLK_ADDR_5 0x990
+#define MAX_BLK_ADDR_5__VALUE 0xffff
+
+#define MIN_MAX_BANK_5 0x9a0
+#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_6 0x9b0
+#define PERM_SRC_ID_6__SRCID 0x00ff
+#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_6 0x9c0
+#define MIN_BLK_ADDR_6__VALUE 0xffff
+
+#define MAX_BLK_ADDR_6 0x9d0
+#define MAX_BLK_ADDR_6__VALUE 0xffff
+
+#define MIN_MAX_BANK_6 0x9e0
+#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_7 0x9f0
+#define PERM_SRC_ID_7__SRCID 0x00ff
+#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_7 0xa00
+#define MIN_BLK_ADDR_7__VALUE 0xffff
+
+#define MAX_BLK_ADDR_7 0xa10
+#define MAX_BLK_ADDR_7__VALUE 0xffff
+
+#define MIN_MAX_BANK_7 0xa20
+#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
+
+/* flash.h */
+struct device_info_tag {
+ uint16_t wDeviceMaker;
+ uint16_t wDeviceID;
+ uint8_t bDeviceParam0;
+ uint8_t bDeviceParam1;
+ uint8_t bDeviceParam2;
+ uint32_t wDeviceType;
+ uint32_t wSpectraStartBlock;
+ uint32_t wSpectraEndBlock;
+ uint32_t wTotalBlocks;
+ uint16_t wPagesPerBlock;
+ uint16_t wPageSize;
+ uint16_t wPageDataSize;
+ uint16_t wPageSpareSize;
+ uint16_t wNumPageSpareFlag;
+ uint16_t wECCBytesPerSector;
+ uint32_t wBlockSize;
+ uint32_t wBlockDataSize;
+ uint32_t wDataBlockNum;
+ uint8_t bPlaneNum;
+ uint16_t wDeviceMainAreaSize;
+ uint16_t wDeviceSpareAreaSize;
+ uint16_t wDevicesConnected;
+ uint16_t wDeviceWidth;
+ uint16_t wHWRevision;
+ uint16_t wHWFeatures;
+
+ uint16_t wONFIDevFeatures;
+ uint16_t wONFIOptCommands;
+ uint16_t wONFITimingMode;
+ uint16_t wONFIPgmCacheTimingMode;
+
+ uint16_t MLCDevice;
+ uint16_t wSpareSkipBytes;
+
+ uint8_t nBitsInPageNumber;
+ uint8_t nBitsInPageDataSize;
+ uint8_t nBitsInBlockDataSize;
+};
+
+/* ffsdefs.h */
+#define CLEAR 0 /*use this to clear a field instead of "fail"*/
+#define SET 1 /*use this to set a field instead of "pass"*/
+#define FAIL 1 /*failed flag*/
+#define PASS 0 /*success flag*/
+#define ERR -1 /*error flag*/
+
+/* lld.h */
+#define GOOD_BLOCK 0
+#define DEFECTIVE_BLOCK 1
+#define READ_ERROR 2
+
+#define CLK_X 5
+#define CLK_MULTI 4
+
+/* ffsport.h */
+#define VERBOSE 1
+
+#define NAND_DBG_WARN 1
+#define NAND_DBG_DEBUG 2
+#define NAND_DBG_TRACE 3
+
+#ifdef VERBOSE
+#define nand_dbg_print(level, args...) \
+ do { \
+ if (level <= nand_debug_level) \
+ printk(KERN_ALERT args); \
+ } while (0)
+#else
+#define nand_dbg_print(level, args...)
+#endif
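+
+/* Example: nand_dbg_print(NAND_DBG_WARN, "Spectra: IRQ %d\n", irq) is only
+ * emitted when the driver's nand_debug_level (defined elsewhere in the
+ * driver) is at least NAND_DBG_WARN; without VERBOSE the call compiles away
+ * entirely. */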
+
+
+/* spectraswconfig.h */
+#define CMD_DMA 0
+
+#define SPECTRA_PARTITION_ID 0
+/**** Block Table and Reserved Block Parameters *****/
+#define SPECTRA_START_BLOCK 3
+#define NUM_FREE_BLOCKS_GATE 30
+
+/* KBV - Updated to LNW scratch register address */
+#define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR
+#define SCRATCH_REG_SIZE 64
+
+#define GLOB_HWCTL_DEFAULT_BLKS 2048
+
+#define SUPPORT_15BITECC 1
+#define SUPPORT_8BITECC 1
+
+#define CUSTOM_CONF_PARAMS 0
+
+#define ONFI_BLOOM_TIME 1
+#define MODE5_WORKAROUND 0
+
+/* lld_nand.h */
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef _LLD_NAND_
+#define _LLD_NAND_
+
+#define MODE_00 0x00000000
+#define MODE_01 0x04000000
+#define MODE_10 0x08000000
+#define MODE_11 0x0C000000
+
+
+#define DATA_TRANSFER_MODE 0
+#define PROTECTION_PER_BLOCK 1
+#define LOAD_WAIT_COUNT 2
+#define PROGRAM_WAIT_COUNT 3
+#define ERASE_WAIT_COUNT 4
+#define INT_MONITOR_CYCLE_COUNT 5
+#define READ_BUSY_PIN_ENABLED 6
+#define MULTIPLANE_OPERATION_SUPPORT 7
+#define PRE_FETCH_MODE 8
+#define CE_DONT_CARE_SUPPORT 9
+#define COPYBACK_SUPPORT 10
+#define CACHE_WRITE_SUPPORT 11
+#define CACHE_READ_SUPPORT 12
+#define NUM_PAGES_IN_BLOCK 13
+#define ECC_ENABLE_SELECT 14
+#define WRITE_ENABLE_2_READ_ENABLE 15
+#define ADDRESS_2_DATA 16
+#define READ_ENABLE_2_WRITE_ENABLE 17
+#define TWO_ROW_ADDRESS_CYCLES 18
+#define MULTIPLANE_ADDRESS_RESTRICT 19
+#define ACC_CLOCKS 20
+#define READ_WRITE_ENABLE_LOW_COUNT 21
+#define READ_WRITE_ENABLE_HIGH_COUNT 22
+
+#define ECC_SECTOR_SIZE 512
+#define LLD_MAX_FLASH_BANKS 4
+
+#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
+
+struct nand_buf
+{
+ int head;
+ int tail;
+ uint8_t buf[DENALI_BUF_SIZE];
+ dma_addr_t dma_buf;
+};
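+
+/* head and tail delimit the bytes queued for the NAND core: denali.c's
+ * denali_read_byte() returns buf[head++] while head < tail. */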
+
+#define INTEL_CE4100 1
+#define INTEL_MRST 2
+
+struct denali_nand_info {
+ struct mtd_info mtd;
+ struct nand_chip nand;
+ struct device_info_tag dev_info;
+ int flash_bank; /* currently selected chip */
+ int status;
+ int platform;
+ struct nand_buf buf;
+ struct pci_dev *dev;
+ int total_used_banks;
+ uint32_t block; /* stored for future use */
+ uint16_t page;
+ void __iomem *flash_reg; /* Mapped io reg base address */
+ void __iomem *flash_mem; /* Mapped command/data io base address */
+
+ /* elements used by ISR */
+ struct completion complete;
+ spinlock_t irq_lock;
+ uint32_t irq_status;
+ int irq_debug_array[32];
+ int idx;
+};
+
+static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali);
+static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali);
+static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali, uint16_t INT_ENABLE);
+
+#endif /*_LLD_NAND_*/
+
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index ae30fb6..3f38fb8 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -874,7 +874,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
priv->ctrl = ctrl;
priv->dev = ctrl->dev;
- priv->vbase = ioremap(res.start, res.end - res.start + 1);
+ priv->vbase = ioremap(res.start, resource_size(&res));
if (!priv->vbase) {
dev_err(ctrl->dev, "failed to map chip region\n");
ret = -ENOMEM;
@@ -891,7 +891,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
if (ret)
goto err;
- ret = nand_scan_ident(&priv->mtd, 1);
+ ret = nand_scan_ident(&priv->mtd, 1, NULL);
if (ret)
goto err;
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 4b96296..2d215cc 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -49,7 +49,10 @@ struct fsl_upm_nand {
uint32_t wait_flags;
};
-#define to_fsl_upm_nand(mtd) container_of(mtd, struct fsl_upm_nand, mtd)
+static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
+{
+ return container_of(mtdinfo, struct fsl_upm_nand, mtd);
+}
static int fun_chip_ready(struct mtd_info *mtd)
{
@@ -303,7 +306,7 @@ static int __devinit fun_probe(struct of_device *ofdev,
FSL_UPM_WAIT_WRITE_BYTE;
fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
- io_res.end - io_res.start + 1);
+ resource_size(&io_res));
if (!fun->io_base) {
ret = -ENOMEM;
goto err2;
@@ -350,7 +353,7 @@ static int __devexit fun_remove(struct of_device *ofdev)
return 0;
}
-static struct of_device_id of_fun_match[] = {
+static const struct of_device_id of_fun_match[] = {
{ .compatible = "fsl,upm-nand" },
{},
};
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 8f902e7..0cde618 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -181,11 +181,11 @@ static int __devexit gpio_nand_remove(struct platform_device *dev)
res = platform_get_resource(dev, IORESOURCE_MEM, 1);
iounmap(gpiomtd->io_sync);
if (res)
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
iounmap(gpiomtd->nand_chip.IO_ADDR_R);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
@@ -208,14 +208,14 @@ static void __iomem *request_and_remap(struct resource *res, size_t size,
{
void __iomem *ptr;
- if (!request_mem_region(res->start, res->end - res->start + 1, name)) {
+ if (!request_mem_region(res->start, resource_size(res), name)) {
*err = -EBUSY;
return NULL;
}
ptr = ioremap(res->start, size);
if (!ptr) {
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
*err = -ENOMEM;
}
return ptr;
@@ -338,10 +338,10 @@ err_nwp:
err_nce:
iounmap(gpiomtd->io_sync);
if (res1)
- release_mem_region(res1->start, res1->end - res1->start + 1);
+ release_mem_region(res1->start, resource_size(res1));
err_sync:
iounmap(gpiomtd->nand_chip.IO_ADDR_R);
- release_mem_region(res0->start, res0->end - res0->start + 1);
+ release_mem_region(res0->start, resource_size(res0));
err_map:
kfree(gpiomtd);
return ret;
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
new file mode 100644
index 0000000..3d0867d
--- /dev/null
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -0,0 +1,917 @@
+/*
+ * Copyright 2004-2008 Freescale Semiconductor, Inc.
+ * Copyright 2009 Semihalf.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009; for details see www.osadl.org.
+ *
+ * Based on original driver from Freescale Semiconductor
+ * written by John Rigby <jrigby@freescale.com> on basis
+ * of drivers/mtd/nand/mxc_nand.c. Reworked and extended
+ * Piotr Ziecik <kosmo@semihalf.com>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+#include <asm/mpc5121.h>
+
+/* Addresses for NFC MAIN RAM BUFFER areas */
+#define NFC_MAIN_AREA(n) ((n) * 0x200)
+
+/* Addresses for NFC SPARE BUFFER areas */
+#define NFC_SPARE_BUFFERS 8
+#define NFC_SPARE_LEN 0x40
+#define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN))
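+/* For example, NFC_SPARE_AREA(2) = 0x1000 + 2 * 0x40 = 0x1080. */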
+
+/* MPC5121 NFC registers */
+#define NFC_BUF_ADDR 0x1E04
+#define NFC_FLASH_ADDR 0x1E06
+#define NFC_FLASH_CMD 0x1E08
+#define NFC_CONFIG 0x1E0A
+#define NFC_ECC_STATUS1 0x1E0C
+#define NFC_ECC_STATUS2 0x1E0E
+#define NFC_SPAS 0x1E10
+#define NFC_WRPROT 0x1E12
+#define NFC_NF_WRPRST 0x1E18
+#define NFC_CONFIG1 0x1E1A
+#define NFC_CONFIG2 0x1E1C
+#define NFC_UNLOCKSTART_BLK0 0x1E20
+#define NFC_UNLOCKEND_BLK0 0x1E22
+#define NFC_UNLOCKSTART_BLK1 0x1E24
+#define NFC_UNLOCKEND_BLK1 0x1E26
+#define NFC_UNLOCKSTART_BLK2 0x1E28
+#define NFC_UNLOCKEND_BLK2 0x1E2A
+#define NFC_UNLOCKSTART_BLK3 0x1E2C
+#define NFC_UNLOCKEND_BLK3 0x1E2E
+
+/* Bit Definitions: NFC_BUF_ADDR */
+#define NFC_RBA_MASK (7 << 0)
+#define NFC_ACTIVE_CS_SHIFT 5
+#define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT)
+
+/* Bit Definitions: NFC_CONFIG */
+#define NFC_BLS_UNLOCKED (1 << 1)
+
+/* Bit Definitions: NFC_CONFIG1 */
+#define NFC_ECC_4BIT (1 << 0)
+#define NFC_FULL_PAGE_DMA (1 << 1)
+#define NFC_SPARE_ONLY (1 << 2)
+#define NFC_ECC_ENABLE (1 << 3)
+#define NFC_INT_MASK (1 << 4)
+#define NFC_BIG_ENDIAN (1 << 5)
+#define NFC_RESET (1 << 6)
+#define NFC_CE (1 << 7)
+#define NFC_ONE_CYCLE (1 << 8)
+#define NFC_PPB_32 (0 << 9)
+#define NFC_PPB_64 (1 << 9)
+#define NFC_PPB_128 (2 << 9)
+#define NFC_PPB_256 (3 << 9)
+#define NFC_PPB_MASK (3 << 9)
+#define NFC_FULL_PAGE_INT (1 << 11)
+
+/* Bit Definitions: NFC_CONFIG2 */
+#define NFC_COMMAND (1 << 0)
+#define NFC_ADDRESS (1 << 1)
+#define NFC_INPUT (1 << 2)
+#define NFC_OUTPUT (1 << 3)
+#define NFC_ID (1 << 4)
+#define NFC_STATUS (1 << 5)
+#define NFC_CMD_FAIL (1 << 15)
+#define NFC_INT (1 << 15)
+
+/* Bit Definitions: NFC_WRPROT */
+#define NFC_WPC_LOCK_TIGHT (1 << 0)
+#define NFC_WPC_LOCK (1 << 1)
+#define NFC_WPC_UNLOCK (1 << 2)
+
+#define DRV_NAME "mpc5121_nfc"
+
+/* Timeouts */
+#define NFC_RESET_TIMEOUT 1000 /* 1 ms */
+#define NFC_TIMEOUT (HZ / 10) /* 1/10 s */
+
+struct mpc5121_nfc_prv {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ int irq;
+ void __iomem *regs;
+ struct clk *clk;
+ wait_queue_head_t irq_waitq;
+ uint column;
+ int spareonly;
+ void __iomem *csreg;
+ struct device *dev;
+};
+
+static void mpc5121_nfc_done(struct mtd_info *mtd);
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
+#endif
+
+/* Read NFC register */
+static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ return in_be16(prv->regs + reg);
+}
+
+/* Write NFC register */
+static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ out_be16(prv->regs + reg, val);
+}
+
+/* Set bits in NFC register */
+static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) | bits);
+}
+
+/* Clear bits in NFC register */
+static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits);
+}
+
+/* Invoke address cycle */
+static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
+{
+ nfc_write(mtd, NFC_FLASH_ADDR, addr);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Invoke command cycle */
+static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
+{
+ nfc_write(mtd, NFC_FLASH_CMD, cmd);
+ nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Send data from NFC buffers to NAND flash */
+static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive data from NAND flash */
+static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive ID from NAND flash */
+static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ID);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive status from NAND flash */
+static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* NFC interrupt handler */
+static irqreturn_t mpc5121_nfc_irq(int irq, void *data)
+{
+ struct mtd_info *mtd = data;
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK);
+ wake_up(&prv->irq_waitq);
+
+ return IRQ_HANDLED;
+}
+
+/* Wait for operation complete */
+static void mpc5121_nfc_done(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ int rv;
+
+ if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK);
+ rv = wait_event_timeout(prv->irq_waitq,
+ (nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT);
+
+ if (!rv)
+ dev_warn(prv->dev,
+ "Timeout while waiting for interrupt.\n");
+ }
+
+ nfc_clear(mtd, NFC_CONFIG2, NFC_INT);
+}
+
+/* Do address cycle(s) */
+static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ u32 pagemask = chip->pagemask;
+
+ if (column != -1) {
+ mpc5121_nfc_send_addr(mtd, column);
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_addr(mtd, column >> 8);
+ }
+
+ if (page != -1) {
+ do {
+ mpc5121_nfc_send_addr(mtd, page & 0xFF);
+ page >>= 8;
+ pagemask >>= 8;
+ } while (pagemask);
+ }
+}
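+
+/* For a large-page (> 512 byte) chip the column is issued in two address
+ * cycles (column, then column >> 8), and the page number is issued one byte
+ * at a time, least significant byte first, until the chip's pagemask is
+ * exhausted. */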
+
+/* Control chip select signals */
+static void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ if (chip < 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
+ return;
+ }
+
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) &
+ NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_CONFIG1, NFC_CE);
+}
+
+/* Init external chip select logic on ADS5121 board */
+static int ads5121_chipselect_init(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ struct device_node *dn;
+
+ dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld");
+ if (dn) {
+ prv->csreg = of_iomap(dn, 0);
+ of_node_put(dn);
+ if (!prv->csreg)
+ return -ENOMEM;
+
+ /* CPLD Register 9 controls NAND /CE Lines */
+ prv->csreg += 9;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Control chips select signal on ADS5121 board */
+static void ads5121_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct mpc5121_nfc_prv *prv = nand->priv;
+ u8 v;
+
+ v = in_8(prv->csreg);
+ v |= 0x0F;
+
+ if (chip >= 0) {
+ mpc5121_nfc_select_chip(mtd, 0);
+ v &= ~(1 << chip);
+ } else
+ mpc5121_nfc_select_chip(mtd, -1);
+
+ out_8(prv->csreg, v);
+}
+
+/* Read NAND Ready/Busy signal */
+static int mpc5121_nfc_dev_ready(struct mtd_info *mtd)
+{
+ /*
+ * NFC handles ready/busy signal internally. Therefore, this function
+ * always returns status as ready.
+ */
+ return 1;
+}
+
+/* Write command to NAND flash */
+static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command,
+ int column, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ prv->column = (column >= 0) ? column : 0;
+ prv->spareonly = 0;
+
+ switch (command) {
+ case NAND_CMD_PAGEPROG:
+ mpc5121_nfc_send_prog_page(mtd);
+ break;
+ /*
+ * NFC does not support sub-page reads and writes,
+ * so emulate them using full page transfers.
+ */
+ case NAND_CMD_READ0:
+ column = 0;
+ break;
+
+ case NAND_CMD_READ1:
+ prv->column += 256;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_READOOB:
+ prv->spareonly = 1;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_SEQIN:
+ mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page);
+ column = 0;
+ break;
+
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_READID:
+ case NAND_CMD_STATUS:
+ break;
+
+ default:
+ return;
+ }
+
+ mpc5121_nfc_send_cmd(mtd, command);
+ mpc5121_nfc_addr_cycle(mtd, column, page);
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
+ mpc5121_nfc_send_read_page(mtd);
+ break;
+
+ case NAND_CMD_READID:
+ mpc5121_nfc_send_read_id(mtd);
+ break;
+
+ case NAND_CMD_STATUS:
+ mpc5121_nfc_send_read_status(mtd);
+ if (chip->options & NAND_BUSWIDTH_16)
+ prv->column = 1;
+ else
+ prv->column = 0;
+ break;
+ }
+}
+
+/* Copy data from/to NFC spare buffers. */
+static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
+ u8 *buffer, uint size, int wr)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct mpc5121_nfc_prv *prv = nand->priv;
+ uint o, s, sbsize, blksize;
+
+ /*
+ * NAND spare area is available through NFC spare buffers.
+ * The NFC divides spare area into (page_size / 512) chunks.
+ * Each chunk is placed into separate spare memory area, using
+ * first (spare_size / num_of_chunks) bytes of the buffer.
+ *
+ * For NAND devices in which the spare area is not evenly divisible
+ * by the number of chunks, the number of used bytes in each spare
+ * buffer is rounded down to the nearest even number of bytes,
+ * and all remaining bytes are added to the last used spare area.
+ *
+ * For more information read section 26.6.10 of MPC5121e
+ * Microcontroller Reference Manual, Rev. 3.
+ */
+
+ /* Calculate number of valid bytes in each spare buffer */
+ sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1;
+
+ while (size) {
+ /* Calculate spare buffer number */
+ s = offset / sbsize;
+ if (s > NFC_SPARE_BUFFERS - 1)
+ s = NFC_SPARE_BUFFERS - 1;
+
+ /*
+ * Calculate offset to requested data block in selected spare
+ * buffer and its size.
+ */
+ o = offset - (s * sbsize);
+ blksize = min(sbsize - o, size);
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o,
+ buffer, blksize);
+ else
+ memcpy_fromio(buffer,
+ prv->regs + NFC_SPARE_AREA(s) + o, blksize);
+
+ buffer += blksize;
+ offset += blksize;
+ size -= blksize;
+ }
+}
+
+/* Copy data from/to NFC main and spare buffers */
+static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
+ int wr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ uint c = prv->column;
+ uint l;
+
+ /* Handle spare area access */
+ if (prv->spareonly || c >= mtd->writesize) {
+ /* Calculate offset from beginning of spare area */
+ if (c >= mtd->writesize)
+ c -= mtd->writesize;
+
+ prv->column += len;
+ mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
+ return;
+ }
+
+ /*
+ * Handle main area access - limit copy length to prevent
+ * crossing main/spare boundary.
+ */
+ l = min((uint)len, mtd->writesize - c);
+ prv->column += l;
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
+ else
+ memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);
+
+ /* Handle crossing main/spare boundary */
+ if (l != len) {
+ buf += l;
+ len -= l;
+ mpc5121_nfc_buf_copy(mtd, buf, len, wr);
+ }
+}
+
+/* Read data from NFC buffers */
+static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ mpc5121_nfc_buf_copy(mtd, buf, len, 0);
+}
+
+/* Write data to NFC buffers */
+static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
+}
+
+/* Compare buffer with NAND flash */
+static int mpc5121_nfc_verify_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ u_char tmp[256];
+ uint bsize;
+
+ while (len) {
+ bsize = min(len, 256);
+ mpc5121_nfc_read_buf(mtd, tmp, bsize);
+
+ if (memcmp(buf, tmp, bsize))
+ return 1;
+
+ buf += bsize;
+ len -= bsize;
+ }
+
+ return 0;
+}
+
+/* Read byte from NFC buffers */
+static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
+{
+ u8 tmp;
+
+ mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/* Read word from NFC buffers */
+static u16 mpc5121_nfc_read_word(struct mtd_info *mtd)
+{
+ u16 tmp;
+
+ mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/*
+ * Read NFC configuration from Reset Config Word
+ *
+ * NFC is configured during reset on the basis of information stored
+ * in Reset Config Word. There is no other way to set NAND block
+ * size, spare size and bus width.
+ */
+static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ struct mpc512x_reset_module *rm;
+ struct device_node *rmnode;
+ uint rcw_pagesize = 0;
+ uint rcw_sparesize = 0;
+ uint rcw_width;
+ uint rcwh;
+ uint romloc, ps;
+
+ rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
+ if (!rmnode) {
+ dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' "
+ "node in device tree!\n");
+ return -ENODEV;
+ }
+
+ rm = of_iomap(rmnode, 0);
+ if (!rm) {
+ dev_err(prv->dev, "Error mapping reset module node!\n");
+ return -EBUSY;
+ }
+
+ rcwh = in_be32(&rm->rcwhr);
+
+ /* Bit 6: NFC bus width */
+ rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1;
+
+ /* Bit 7: NFC Page/Spare size */
+ ps = (rcwh >> 7) & 0x1;
+
+ /* Bits [22:21]: ROM Location */
+ romloc = (rcwh >> 21) & 0x3;
+
+ /* Decode RCW bits */
+ switch ((ps << 2) | romloc) {
+ case 0x00:
+ case 0x01:
+ rcw_pagesize = 512;
+ rcw_sparesize = 16;
+ break;
+ case 0x02:
+ case 0x03:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 128;
+ break;
+ case 0x04:
+ case 0x05:
+ rcw_pagesize = 2048;
+ rcw_sparesize = 64;
+ break;
+ case 0x06:
+ case 0x07:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 218;
+ break;
+ }
+
+ mtd->writesize = rcw_pagesize;
+ mtd->oobsize = rcw_sparesize;
+ if (rcw_width == 2)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ dev_notice(prv->dev, "Configured for "
+ "%u-bit NAND, page size %u "
+ "with %u spare.\n",
+ rcw_width * 8, rcw_pagesize,
+ rcw_sparesize);
+ iounmap(rm);
+ of_node_put(rmnode);
+ return 0;
+}
+
+/* Free driver resources */
+static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ if (prv->clk) {
+ clk_disable(prv->clk);
+ clk_put(prv->clk);
+ }
+
+ if (prv->csreg)
+ iounmap(prv->csreg);
+}
+
+static int __devinit mpc5121_nfc_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ struct device_node *rootnode, *dn = op->node;
+ struct device *dev = &op->dev;
+ struct mpc5121_nfc_prv *prv;
+ struct resource res;
+ struct mtd_info *mtd;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *parts;
+#endif
+ struct nand_chip *chip;
+ unsigned long regs_paddr, regs_size;
+ const uint *chips_no;
+ int resettime = 0;
+ int retval = 0;
+ int rev, len;
+
+ /*
+ * Check SoC revision. This driver supports only NFC
+ * in MPC5121 revision 2 and MPC5123 revision 3.
+ */
+ rev = (mfspr(SPRN_SVR) >> 4) & 0xF;
+ if ((rev != 2) && (rev != 3)) {
+ dev_err(dev, "SoC revision %u is not supported!\n", rev);
+ return -ENXIO;
+ }
+
+ prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
+ if (!prv) {
+ dev_err(dev, "Memory exhausted!\n");
+ return -ENOMEM;
+ }
+
+ mtd = &prv->mtd;
+ chip = &prv->chip;
+
+ mtd->priv = chip;
+ chip->priv = prv;
+ prv->dev = dev;
+
+ /* Read NFC configuration from Reset Config Word */
+ retval = mpc5121_nfc_read_hw_config(mtd);
+ if (retval) {
+ dev_err(dev, "Unable to read NFC config!\n");
+ return retval;
+ }
+
+ prv->irq = irq_of_parse_and_map(dn, 0);
+ if (prv->irq == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ return -EINVAL;
+ }
+
+ retval = of_address_to_resource(dn, 0, &res);
+ if (retval) {
+ dev_err(dev, "Error parsing memory region!\n");
+ return retval;
+ }
+
+ chips_no = of_get_property(dn, "chips", &len);
+ if (!chips_no || len != sizeof(*chips_no)) {
+ dev_err(dev, "Invalid/missing 'chips' property!\n");
+ return -EINVAL;
+ }
+
+ regs_paddr = res.start;
+ regs_size = res.end - res.start + 1;
+
+ if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) {
+ dev_err(dev, "Error requesting memory region!\n");
+ return -EBUSY;
+ }
+
+ prv->regs = devm_ioremap(dev, regs_paddr, regs_size);
+ if (!prv->regs) {
+ dev_err(dev, "Error mapping memory region!\n");
+ return -ENOMEM;
+ }
+
+ mtd->name = "MPC5121 NAND";
+ chip->dev_ready = mpc5121_nfc_dev_ready;
+ chip->cmdfunc = mpc5121_nfc_command;
+ chip->read_byte = mpc5121_nfc_read_byte;
+ chip->read_word = mpc5121_nfc_read_word;
+ chip->read_buf = mpc5121_nfc_read_buf;
+ chip->write_buf = mpc5121_nfc_write_buf;
+ chip->verify_buf = mpc5121_nfc_verify_buf;
+ chip->select_chip = mpc5121_nfc_select_chip;
+ chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT;
+ chip->ecc.mode = NAND_ECC_SOFT;
+
+ /* Support external chip-select logic on ADS5121 board */
+ rootnode = of_find_node_by_path("/");
+ if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) {
+ retval = ads5121_chipselect_init(mtd);
+ if (retval) {
+ dev_err(dev, "Chipselect init error!\n");
+ of_node_put(rootnode);
+ return retval;
+ }
+
+ chip->select_chip = ads5121_select_chip;
+ }
+ of_node_put(rootnode);
+
+ /* Enable NFC clock */
+ prv->clk = clk_get(dev, "nfc_clk");
+ if (!prv->clk) {
+ dev_err(dev, "Unable to acquire NFC clock!\n");
+ retval = -ENODEV;
+ goto error;
+ }
+
+ clk_enable(prv->clk);
+
+ /* Reset NAND Flash controller */
+ nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
+ while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) {
+ if (resettime++ >= NFC_RESET_TIMEOUT) {
+ dev_err(dev, "Timeout while resetting NFC!\n");
+ retval = -EINVAL;
+ goto error;
+ }
+
+ udelay(1);
+ }
+
+ /* Enable write to NFC memory */
+ nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED);
+
+ /* Enable write to all NAND pages */
+ nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000);
+ nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF);
+ nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK);
+
+ /*
+ * Setup NFC:
+ * - Big Endian transfers,
+ * - Interrupt after full page read/write.
+ */
+ nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK |
+ NFC_FULL_PAGE_INT);
+
+ /* Set spare area size */
+ nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1);
+
+ init_waitqueue_head(&prv->irq_waitq);
+ retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME,
+ mtd);
+ if (retval) {
+ dev_err(dev, "Error requesting IRQ!\n");
+ goto error;
+ }
+
+ /* Detect NAND chips */
+ if (nand_scan(mtd, *chips_no)) {
+ dev_err(dev, "NAND Flash not found !\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ retval = -ENXIO;
+ goto error;
+ }
+
+ /* Set erase block size */
+ switch (mtd->erasesize / mtd->writesize) {
+ case 32:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32);
+ break;
+
+ case 64:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64);
+ break;
+
+ case 128:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128);
+ break;
+
+ case 256:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256);
+ break;
+
+ default:
+ dev_err(dev, "Unsupported NAND flash!\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ retval = -ENXIO;
+ goto error;
+ }
+
+ dev_set_drvdata(dev, mtd);
+
+ /* Register device in MTD */
+#ifdef CONFIG_MTD_PARTITIONS
+ retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0);
+#ifdef CONFIG_MTD_OF_PARTS
+ if (retval == 0)
+ retval = of_mtd_parse_partitions(dev, dn, &parts);
+#endif
+ if (retval < 0) {
+ dev_err(dev, "Error parsing MTD partitions!\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (retval > 0)
+ retval = add_mtd_partitions(mtd, parts, retval);
+ else
+#endif
+ retval = add_mtd_device(mtd);
+
+ if (retval) {
+ dev_err(dev, "Error adding MTD device!\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ goto error;
+ }
+
+ return 0;
+error:
+ mpc5121_nfc_free(dev, mtd);
+ return retval;
+}
+
+static int __devexit mpc5121_nfc_remove(struct of_device *op)
+{
+ struct device *dev = &op->dev;
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ nand_release(mtd);
+ devm_free_irq(dev, prv->irq, mtd);
+ mpc5121_nfc_free(dev, mtd);
+
+ return 0;
+}
+
+static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
+ { .compatible = "fsl,mpc5121-nfc", },
+ {},
+};
+
+static struct of_platform_driver mpc5121_nfc_driver = {
+ .match_table = mpc5121_nfc_match,
+ .probe = mpc5121_nfc_probe,
+ .remove = __devexit_p(mpc5121_nfc_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mpc5121_nfc_init(void)
+{
+ return of_register_platform_driver(&mpc5121_nfc_driver);
+}
+
+module_init(mpc5121_nfc_init);
+
+static void __exit mpc5121_nfc_cleanup(void)
+{
+ of_unregister_platform_driver(&mpc5121_nfc_driver);
+}
+
+module_exit(mpc5121_nfc_cleanup);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index b2900d8..82e9438 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -38,7 +38,7 @@
#define DRIVER_NAME "mxc_nand"
#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
-#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27())
+#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
/* Addresses for NFC registers */
#define NFC_BUF_SIZE 0xE00
@@ -168,11 +168,7 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
{
struct mxc_nand_host *host = dev_id;
- uint16_t tmp;
-
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp |= NFC_INT_MSK; /* Disable interrupt */
- writew(tmp, host->regs + NFC_CONFIG1);
+ disable_irq_nosync(irq);
wake_up(&host->irq_waitq);
@@ -184,15 +180,13 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
*/
static void wait_op_done(struct mxc_nand_host *host, int useirq)
{
- uint32_t tmp;
- int max_retries = 2000;
+ uint16_t tmp;
+ int max_retries = 8000;
if (useirq) {
if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) {
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp &= ~NFC_INT_MSK; /* Enable interrupt */
- writew(tmp, host->regs + NFC_CONFIG1);
+ enable_irq(host->irq);
wait_event(host->irq_waitq,
readw(host->regs + NFC_CONFIG2) & NFC_INT);
@@ -226,8 +220,23 @@ static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq)
writew(cmd, host->regs + NFC_FLASH_CMD);
writew(NFC_CMD, host->regs + NFC_CONFIG2);
- /* Wait for operation to complete */
- wait_op_done(host, useirq);
+ if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) {
+ int max_retries = 100;
+ /* Reset completion is indicated by NFC_CONFIG2 being set to 0 */
+ while (max_retries-- > 0) {
+ if (readw(host->regs + NFC_CONFIG2) == 0) {
+ break;
+ }
+ udelay(1);
+ }
+ if (max_retries < 0)
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n",
+ __func__);
+ } else {
+ /* Wait for operation to complete */
+ wait_op_done(host, useirq);
+ }
}
/* This function sends an address (or partial address) to the
@@ -542,6 +551,41 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
}
}
+static void preset(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint16_t tmp;
+
+ /* enable interrupt, disable spare enable */
+ tmp = readw(host->regs + NFC_CONFIG1);
+ tmp &= ~NFC_INT_MSK;
+ tmp &= ~NFC_SP_EN;
+ if (nand_chip->ecc.mode == NAND_ECC_HW) {
+ tmp |= NFC_ECC_EN;
+ } else {
+ tmp &= ~NFC_ECC_EN;
+ }
+ writew(tmp, host->regs + NFC_CONFIG1);
+ /* preset operation */
+
+ /* Unlock the internal RAM Buffer */
+ writew(0x2, host->regs + NFC_CONFIG);
+
+ /* Blocks to be unlocked */
+ if (nfc_is_v21()) {
+ writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR);
+ writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR);
+ } else if (nfc_is_v1()) {
+ writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR);
+ writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR);
+ } else
+ BUG();
+
+ /* Unlock Block Command for given address range */
+ writew(0x4, host->regs + NFC_WRPROT);
+}
+
/* Used by the upper layer to write command to NAND Flash for
* different operations to be carried out on NAND Flash */
static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
@@ -559,6 +603,10 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
/* Command pre-processing step */
switch (command) {
+ case NAND_CMD_RESET:
+ send_cmd(host, command, false);
+ preset(mtd);
+ break;
case NAND_CMD_STATUS:
host->buf_start = 0;
@@ -679,7 +727,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
struct mxc_nand_host *host;
struct resource *res;
- uint16_t tmp;
int err = 0, nr_parts = 0;
struct nand_ecclayout *oob_smallpage, *oob_largepage;
@@ -743,51 +790,17 @@ static int __init mxcnd_probe(struct platform_device *pdev)
host->spare_len = 64;
oob_smallpage = &nandv2_hw_eccoob_smallpage;
oob_largepage = &nandv2_hw_eccoob_largepage;
+ this->ecc.bytes = 9;
} else if (nfc_is_v1()) {
host->regs = host->base;
host->spare0 = host->base + 0x800;
host->spare_len = 16;
oob_smallpage = &nandv1_hw_eccoob_smallpage;
oob_largepage = &nandv1_hw_eccoob_largepage;
- } else
- BUG();
-
- /* disable interrupt and spare enable */
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp |= NFC_INT_MSK;
- tmp &= ~NFC_SP_EN;
- writew(tmp, host->regs + NFC_CONFIG1);
-
- init_waitqueue_head(&host->irq_waitq);
-
- host->irq = platform_get_irq(pdev, 0);
-
- err = request_irq(host->irq, mxc_nfc_irq, 0, DRIVER_NAME, host);
- if (err)
- goto eirq;
-
- /* Reset NAND */
- this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
-
- /* preset operation */
- /* Unlock the internal RAM Buffer */
- writew(0x2, host->regs + NFC_CONFIG);
-
- /* Blocks to be unlocked */
- if (nfc_is_v21()) {
- writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR);
- writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR);
- this->ecc.bytes = 9;
- } else if (nfc_is_v1()) {
- writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR);
- writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR);
this->ecc.bytes = 3;
} else
BUG();
- /* Unlock Block Command for given address range */
- writew(0x4, host->regs + NFC_WRPROT);
-
this->ecc.size = 512;
this->ecc.layout = oob_smallpage;
@@ -796,14 +809,8 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->ecc.hwctl = mxc_nand_enable_hwecc;
this->ecc.correct = mxc_nand_correct_data;
this->ecc.mode = NAND_ECC_HW;
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp |= NFC_ECC_EN;
- writew(tmp, host->regs + NFC_CONFIG1);
} else {
this->ecc.mode = NAND_ECC_SOFT;
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp &= ~NFC_ECC_EN;
- writew(tmp, host->regs + NFC_CONFIG1);
}
/* NAND bus width determines access funtions used by upper layer */
@@ -817,8 +824,16 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->options |= NAND_USE_FLASH_BBT;
}
+ init_waitqueue_head(&host->irq_waitq);
+
+ host->irq = platform_get_irq(pdev, 0);
+
+ err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
+ if (err)
+ goto eirq;
+
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1)) {
+ if (nand_scan_ident(mtd, 1, NULL)) {
err = -ENXIO;
goto escan;
}
@@ -886,11 +901,14 @@ static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state)
int ret = 0;
DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n");
- if (mtd) {
- ret = mtd->suspend(mtd);
- /* Disable the NFC clock */
- clk_disable(host->clk);
- }
+
+ ret = mtd->suspend(mtd);
+
+ /*
+ * nand_suspend locks the device for exclusive access, so
+ * the clock must already be off.
+ */
+ BUG_ON(!ret && host->clk_act);
return ret;
}
@@ -904,11 +922,7 @@ static int mxcnd_resume(struct platform_device *pdev)
DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n");
- if (mtd) {
- /* Enable the NFC clock */
- clk_enable(host->clk);
- mtd->resume(mtd);
- }
+ mtd->resume(mtd);
return ret;
}
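
A condensed, kernel-style sketch (not part of the patch) of the interrupt
gating scheme the mxc_nand changes above introduce: the handler masks its
own line with disable_irq_nosync() instead of setting NFC_INT_MSK in
NFC_CONFIG1, and the wait routine re-enables the line just before sleeping.
All "my_*" and "MY_*" names are hypothetical placeholders:

#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/io.h>

#define MY_NFC_CONFIG2	0x0e	/* hypothetical register offset */
#define MY_NFC_INT	0x8000	/* hypothetical "operation done" bit */

struct my_nfc_host {
	void __iomem *regs;
	int irq;
	wait_queue_head_t irq_waitq;
};

static irqreturn_t my_nfc_irq(int irq, void *dev_id)
{
	struct my_nfc_host *host = dev_id;

	disable_irq_nosync(irq);	/* stays masked until the next wait */
	wake_up(&host->irq_waitq);
	return IRQ_HANDLED;
}

static void my_wait_op_done(struct my_nfc_host *host)
{
	if (readw(host->regs + MY_NFC_CONFIG2) & MY_NFC_INT)
		return;			/* operation already completed */

	enable_irq(host->irq);		/* unmask, then sleep until done */
	wait_event(host->irq_waitq,
		   readw(host->regs + MY_NFC_CONFIG2) & MY_NFC_INT);
}
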
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 8f2958f..4a7b864 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -108,6 +108,35 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
*/
DEFINE_LED_TRIGGER(nand_led_trigger);
+static int check_offs_len(struct mtd_info *mtd,
+ loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret = 0;
+
+ /* Start address must align on block boundary */
+ if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
+ ret = -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (len & ((1 << chip->phys_erase_shift) - 1)) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
+ __func__);
+ ret = -EINVAL;
+ }
+
+ /* Do not allow past end of device */
+ if (ofs + len > mtd->size) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n",
+ __func__);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
/**
* nand_release_device - [GENERIC] release chip
* @mtd: MTD device structure
@@ -318,6 +347,9 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
struct nand_chip *chip = mtd->priv;
u16 bad;
+ if (chip->options & NAND_BB_LAST_PAGE)
+ ofs += mtd->erasesize - mtd->writesize;
+
page = (int)(ofs >> chip->page_shift) & chip->pagemask;
if (getchip) {
@@ -335,14 +367,18 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
bad = cpu_to_le16(chip->read_word(mtd));
if (chip->badblockpos & 0x1)
bad >>= 8;
- if ((bad & 0xFF) != 0xff)
- res = 1;
+ else
+ bad &= 0xFF;
} else {
chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
- if (chip->read_byte(mtd) != 0xff)
- res = 1;
+ bad = chip->read_byte(mtd);
}
+ if (likely(chip->badblockbits == 8))
+ res = bad != 0xFF;
+ else
+ res = hweight8(bad) < chip->badblockbits;
+
if (getchip)
nand_release_device(mtd);
@@ -363,6 +399,9 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
uint8_t buf[2] = { 0, 0 };
int block, ret;
+ if (chip->options & NAND_BB_LAST_PAGE)
+ ofs += mtd->erasesize - mtd->writesize;
+
/* Get block number */
block = (int)(ofs >> chip->bbt_erase_shift);
if (chip->bbt)
@@ -401,6 +440,11 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
static int nand_check_wp(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
+
+ /* broken xD cards report WP despite being writable */
+ if (chip->options & NAND_BROKEN_XD)
+ return 0;
+
/* Check the WP bit */
chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
@@ -744,9 +788,6 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
chip->state = FL_PM_SUSPENDED;
spin_unlock(lock);
return 0;
- } else {
- spin_unlock(lock);
- return -EAGAIN;
}
}
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -835,6 +876,168 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
}
/**
+ * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
+ *
+ * @param mtd - mtd info
+ * @param ofs - offset to start unlock from
+ * @param len - length to unlock
+ * @param invert - when = 0, unlock the range of blocks within the lower and
+ * upper boundary address
+ * when = 1, unlock the range of blocks outside the boundaries
+ * of the lower and upper boundary address
+ *
+ * @return - unlock status
+ */
+static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len, int invert)
+{
+ int ret = 0;
+ int status, page;
+ struct nand_chip *chip = mtd->priv;
+
+ /* Submit address of first page to unlock */
+ page = ofs >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
+
+ /* Submit address of last page to unlock */
+ page = (ofs + len) >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
+ (page | invert) & chip->pagemask);
+
+ /* Call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+ udelay(1000);
+ /* See if device thinks it succeeded */
+ if (status & 0x01) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
+ __func__, status);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/**
+ * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
+ *
+ * @param mtd - mtd info
+ * @param ofs - offset to start unlock from
+ * @param len - length to unlock
+ *
+ * @return - unlock status
+ */
+int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+ int chipnr;
+ struct nand_chip *chip = mtd->priv;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)ofs, len);
+
+ if (check_offs_len(mtd, ofs, len))
+ ret = -EINVAL;
+
+ /* Align to last block address if the range ends at the device size */
+ if (ofs + len == mtd->size)
+ len -= mtd->erasesize;
+
+ nand_get_device(chip, mtd, FL_UNLOCKING);
+
+ /* Shift to get chip number */
+ chipnr = ofs >> chip->chip_shift;
+
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
+ __func__);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = __nand_unlock(mtd, ofs, len, 0);
+
+out:
+ /* de-select the NAND device */
+ chip->select_chip(mtd, -1);
+
+ nand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * nand_lock - [REPLACEABLE] locks all blocks present in the device
+ *
+ * @param mtd - mtd info
+ * @param ofs - offset to start lock from
+ * @param len - length to lock
+ *
+ * @return - lock status
+ *
+ * This feature is not supported in many NAND parts. 'Micron' NAND parts
+ * do have this feature, but it allows locking all blocks only, not a
+ * specified range of blocks.
+ *
+ * The 'lock' feature is implemented by making use of 'unlock', for now.
+ */
+int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+ int chipnr, status, page;
+ struct nand_chip *chip = mtd->priv;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)ofs, len);
+
+ if (check_offs_len(mtd, ofs, len))
+ ret = -EINVAL;
+
+ nand_get_device(chip, mtd, FL_LOCKING);
+
+ /* Shift to get chip number */
+ chipnr = ofs >> chip->chip_shift;
+
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
+ __func__);
+ status = MTD_ERASE_FAILED;
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Submit address of first page to lock */
+ page = ofs >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
+
+ /* Call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+ udelay(1000);
+ /* See if device thinks it succeeded */
+ if (status & 0x01) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
+ __func__, status);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = __nand_unlock(mtd, ofs, len, 0x1);
+
+out:
+ /* de-select the NAND device */
+ chip->select_chip(mtd, -1);
+
+ nand_release_device(mtd);
+
+ return ret;
+}
+
+/**
* nand_read_page_raw - [Intern] read raw page data without ecc
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -1232,6 +1435,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
+ uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ?
+ mtd->oobavail : mtd->oobsize;
+
uint8_t *bufpoi, *oob, *buf;
stats = mtd->ecc_stats;
@@ -1282,18 +1488,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
buf += bytes;
if (unlikely(oob)) {
- /* Raw mode does data:oob:data:oob */
- if (ops->mode != MTD_OOB_RAW) {
- int toread = min(oobreadlen,
- chip->ecc.layout->oobavail);
- if (toread) {
- oob = nand_transfer_oob(chip,
- oob, ops, toread);
- oobreadlen -= toread;
- }
- } else
- buf = nand_transfer_oob(chip,
- buf, ops, mtd->oobsize);
+
+ int toread = min(oobreadlen, max_oobsize);
+
+ if (toread) {
+ oob = nand_transfer_oob(chip,
+ oob, ops, toread);
+ oobreadlen -= toread;
+ }
}
if (!(chip->options & NAND_NO_READRDY)) {
@@ -1880,11 +2082,9 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
* @oob: oob data buffer
* @ops: oob ops structure
*/
-static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob,
- struct mtd_oob_ops *ops)
+static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
+ struct mtd_oob_ops *ops)
{
- size_t len = ops->ooblen;
-
switch(ops->mode) {
case MTD_OOB_PLACE:
@@ -1939,6 +2139,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
int chipnr, realpage, page, blockmask, column;
struct nand_chip *chip = mtd->priv;
uint32_t writelen = ops->len;
+
+ uint32_t oobwritelen = ops->ooblen;
+ uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ?
+ mtd->oobavail : mtd->oobsize;
+
uint8_t *oob = ops->oobbuf;
uint8_t *buf = ops->datbuf;
int ret, subpage;
@@ -1980,6 +2185,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (likely(!oob))
memset(chip->oob_poi, 0xff, mtd->oobsize);
+ /* Don't allow multipage oob writes with offset */
+ if (ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
+ return -EINVAL;
+
while(1) {
int bytes = mtd->writesize;
int cached = writelen > bytes && page != blockmask;
@@ -1995,8 +2204,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
wbuf = chip->buffers->databuf;
}
- if (unlikely(oob))
- oob = nand_fill_oob(chip, oob, ops);
+ if (unlikely(oob)) {
+ size_t len = min(oobwritelen, oobmaxlen);
+ oob = nand_fill_oob(chip, oob, len, ops);
+ oobwritelen -= len;
+ }
ret = chip->write_page(mtd, chip, wbuf, page, cached,
(ops->mode == MTD_OOB_RAW));
@@ -2170,7 +2382,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
chip->pagebuf = -1;
memset(chip->oob_poi, 0xff, mtd->oobsize);
- nand_fill_oob(chip, ops->oobbuf, ops);
+ nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
memset(chip->oob_poi, 0xff, mtd->oobsize);
@@ -2293,25 +2505,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
__func__, (unsigned long long)instr->addr,
(unsigned long long)instr->len);
- /* Start address must align on block boundary */
- if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
+ if (check_offs_len(mtd, instr->addr, instr->len))
return -EINVAL;
- }
-
- /* Length must align on block boundary */
- if (instr->len & ((1 << chip->phys_erase_shift) - 1)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
- __func__);
- return -EINVAL;
- }
-
- /* Do not allow erase past end of device */
- if ((instr->len + instr->addr) > mtd->size) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Erase past end of device\n",
- __func__);
- return -EINVAL;
- }
instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
@@ -2582,11 +2777,11 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
*/
static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
struct nand_chip *chip,
- int busw, int *maf_id)
+ int busw, int *maf_id,
+ struct nand_flash_dev *type)
{
- struct nand_flash_dev *type = NULL;
int i, dev_id, maf_idx;
- int tmp_id, tmp_manf;
+ u8 id_data[8];
/* Select the device */
chip->select_chip(mtd, 0);
@@ -2612,27 +2807,26 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
- /* Read manufacturer and device IDs */
+ /* Read entire ID string */
- tmp_manf = chip->read_byte(mtd);
- tmp_id = chip->read_byte(mtd);
+ for (i = 0; i < 8; i++)
+ id_data[i] = chip->read_byte(mtd);
- if (tmp_manf != *maf_id || tmp_id != dev_id) {
+ if (id_data[0] != *maf_id || id_data[1] != dev_id) {
printk(KERN_INFO "%s: second ID read did not match "
"%02x,%02x against %02x,%02x\n", __func__,
- *maf_id, dev_id, tmp_manf, tmp_id);
+ *maf_id, dev_id, id_data[0], id_data[1]);
return ERR_PTR(-ENODEV);
}
- /* Lookup the flash id */
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (dev_id == nand_flash_ids[i].id) {
- type = &nand_flash_ids[i];
- break;
- }
- }
-
if (!type)
+ type = nand_flash_ids;
+
+ for (; type->name != NULL; type++)
+ if (dev_id == type->id)
+ break;
+
+ if (!type->name)
return ERR_PTR(-ENODEV);
if (!mtd->name)
@@ -2644,21 +2838,45 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
if (!type->pagesize) {
int extid;
/* The 3rd id byte holds MLC / multichip data */
- chip->cellinfo = chip->read_byte(mtd);
+ chip->cellinfo = id_data[2];
/* The 4th id byte is the important one */
- extid = chip->read_byte(mtd);
- /* Calc pagesize */
- mtd->writesize = 1024 << (extid & 0x3);
- extid >>= 2;
- /* Calc oobsize */
- mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
- extid >>= 2;
- /* Calc blocksize. Blocksize is multiples of 64KiB */
- mtd->erasesize = (64 * 1024) << (extid & 0x03);
- extid >>= 2;
- /* Get buswidth information */
- busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+ extid = id_data[3];
+ /*
+ * Field definitions are in the following datasheets:
+ * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
+ * New style (6 byte ID): Samsung K9GAG08U0D (p.40)
+ *
+ * Check for wraparound + Samsung ID + nonzero 6th byte
+ * to decide what to do.
+ */
+ if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
+ id_data[0] == NAND_MFR_SAMSUNG &&
+ id_data[5] != 0x00) {
+ /* Calc pagesize */
+ mtd->writesize = 2048 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ mtd->oobsize = (extid & 0x03) == 0x01 ? 128 : 218;
+ extid >>= 2;
+ /* Calc blocksize */
+ mtd->erasesize = (128 * 1024) <<
+ (((extid >> 1) & 0x04) | (extid & 0x03));
+ busw = 0;
+ } else {
+ /* Calc pagesize */
+ mtd->writesize = 1024 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ mtd->oobsize = (8 << (extid & 0x01)) *
+ (mtd->writesize >> 9);
+ extid >>= 2;
+ /* Calc blocksize. Blocksize is multiples of 64KiB */
+ mtd->erasesize = (64 * 1024) << (extid & 0x03);
+ extid >>= 2;
+ /* Get buswidth information */
+ busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+ }
} else {
/*
* Old devices have chip data hardcoded in the device id table
@@ -2704,6 +2922,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
/* Set the bad block position */
chip->badblockpos = mtd->writesize > 512 ?
NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
+ chip->badblockbits = 8;
/* Get chip options, preserve non chip based options */
chip->options &= ~NAND_CHIPOPTIONS_MSK;
@@ -2720,6 +2939,15 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
+ /*
+ * Bad block marker is stored in the last page of each block
+ * on Samsung and Hynix MLC devices
+ */
+ if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ (*maf_id == NAND_MFR_SAMSUNG ||
+ *maf_id == NAND_MFR_HYNIX))
+ chip->options |= NAND_BB_LAST_PAGE;
+
/* Check for AND chips with 4 page planes */
if (chip->options & NAND_4PAGE_ARRAY)
chip->erase_cmd = multi_erase_cmd;
@@ -2741,13 +2969,15 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
* nand_scan_ident - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
* @maxchips: Number of chips to scan for
+ * @table: Alternative NAND ID table
*
* This is the first phase of the normal nand_scan() function. It
* reads the flash ID and sets up MTD fields accordingly.
*
* The mtd->owner field must be set to the module of the caller.
*/
-int nand_scan_ident(struct mtd_info *mtd, int maxchips)
+int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+ struct nand_flash_dev *table)
{
int i, busw, nand_maf_id;
struct nand_chip *chip = mtd->priv;
@@ -2759,7 +2989,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips)
nand_set_defaults(chip, busw);
/* Read the flash type */
- type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id);
+ type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id, table);
if (IS_ERR(type)) {
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
@@ -2989,7 +3219,8 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Fill in remaining MTD driver data */
mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
+ MTD_CAP_NANDFLASH;
mtd->erase = nand_erase;
mtd->point = NULL;
mtd->unpoint = NULL;
@@ -3050,7 +3281,7 @@ int nand_scan(struct mtd_info *mtd, int maxchips)
BUG();
}
- ret = nand_scan_ident(mtd, maxchips);
+ ret = nand_scan_ident(mtd, maxchips, NULL);
if (!ret)
ret = nand_scan_tail(mtd);
return ret;
@@ -3077,6 +3308,8 @@ void nand_release(struct mtd_info *mtd)
kfree(chip->buffers);
}
+EXPORT_SYMBOL_GPL(nand_lock);
+EXPORT_SYMBOL_GPL(nand_unlock);
EXPORT_SYMBOL_GPL(nand_scan);
EXPORT_SYMBOL_GPL(nand_scan_ident);
EXPORT_SYMBOL_GPL(nand_scan_tail);
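
A standalone sketch (not part of the patch) of the extended-ID decoding
added to nand_get_flash_type() above; the 8-byte ID string below is a
hypothetical example, not a real part's ID:

#include <stdio.h>

#define NAND_MFR_SAMSUNG	0xec

int main(void)
{
	/* hypothetical ID string as returned by NAND_CMD_READID */
	unsigned char id[8] = { 0xec, 0xd5, 0x94, 0x29, 0x34, 0x41, 0xec, 0xd5 };
	unsigned int extid = id[3];
	unsigned int writesize, oobsize, erasesize;

	if (id[0] == id[6] && id[1] == id[7] &&
	    id[0] == NAND_MFR_SAMSUNG && id[5] != 0x00) {
		/* new-style 6-byte Samsung ID */
		writesize = 2048 << (extid & 0x03);
		extid >>= 2;
		oobsize = (extid & 0x03) == 0x01 ? 128 : 218;
		extid >>= 2;
		erasesize = (128 * 1024) <<
			    (((extid >> 1) & 0x04) | (extid & 0x03));
	} else {
		/* old-style 4/5-byte ID */
		writesize = 1024 << (extid & 0x03);
		extid >>= 2;
		oobsize = (8 << (extid & 0x01)) * (writesize >> 9);
		extid >>= 2;
		erasesize = (64 * 1024) << (extid & 0x03);
	}

	printf("page %u, oob %u, block %u bytes\n",
	       writesize, oobsize, erasesize);
	return 0;
}
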
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 55c23e5..ad97c0c 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -237,15 +237,33 @@ static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
size_t len)
{
struct mtd_oob_ops ops;
+ int res;
ops.mode = MTD_OOB_RAW;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
- ops.oobbuf = buf;
- ops.datbuf = buf;
- ops.len = len;
- return mtd->read_oob(mtd, offs, &ops);
+
+ while (len > 0) {
+ if (len <= mtd->writesize) {
+ ops.oobbuf = buf + len;
+ ops.datbuf = buf;
+ ops.len = len;
+ return mtd->read_oob(mtd, offs, &ops);
+ } else {
+ ops.oobbuf = buf + mtd->writesize;
+ ops.datbuf = buf;
+ ops.len = mtd->writesize;
+ res = mtd->read_oob(mtd, offs, &ops);
+
+ if (res)
+ return res;
+ }
+
+ buf += mtd->oobsize + mtd->writesize;
+ len -= mtd->writesize;
+ }
+ return 0;
}
/*
@@ -414,6 +432,9 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
from = (loff_t)startblock << (this->bbt_erase_shift - 1);
}
+ if (this->options & NAND_BB_LAST_PAGE)
+ from += mtd->erasesize - (mtd->writesize * len);
+
for (i = startblock; i < numblocks;) {
int ret;
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h
index 7cec2cd..198b304 100644
--- a/drivers/mtd/nand/nand_bcm_umi.h
+++ b/drivers/mtd/nand/nand_bcm_umi.h
@@ -167,18 +167,27 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
int numToRead = 16; /* There are 16 bytes per sector in the OOB */
/* ECC is already paused when this function is called */
+ if (pageSize != NAND_DATA_ACCESS_SIZE) {
+ /* skip BI */
+#if defined(__KERNEL__) && !defined(STANDALONE)
+ *oobp++ = REG_NAND_DATA8;
+#else
+ REG_NAND_DATA8;
+#endif
+ numToRead--;
+ }
- if (pageSize == NAND_DATA_ACCESS_SIZE) {
- while (numToRead > numEccBytes) {
- /* skip free oob region */
+ while (numToRead > numEccBytes) {
+ /* skip free oob region */
#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
+ *oobp++ = REG_NAND_DATA8;
#else
- REG_NAND_DATA8;
+ REG_NAND_DATA8;
#endif
- numToRead--;
- }
+ numToRead--;
+ }
+ if (pageSize == NAND_DATA_ACCESS_SIZE) {
/* read ECC bytes before BI */
nand_bcm_umi_bch_resume_read_ecc_calc();
@@ -190,6 +199,7 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
#else
eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
+ numToRead--;
}
nand_bcm_umi_bch_pause_read_ecc_calc();
@@ -204,49 +214,18 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
numToRead--;
}
- /* read ECC bytes */
- nand_bcm_umi_bch_resume_read_ecc_calc();
- while (numToRead) {
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp = REG_NAND_DATA8;
- eccCalc[eccPos++] = *oobp;
- oobp++;
-#else
- eccCalc[eccPos++] = REG_NAND_DATA8;
-#endif
- numToRead--;
- }
- } else {
- /* skip BI */
+ }
+ /* read ECC bytes */
+ nand_bcm_umi_bch_resume_read_ecc_calc();
+ while (numToRead) {
#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
+ *oobp = REG_NAND_DATA8;
+ eccCalc[eccPos++] = *oobp;
+ oobp++;
#else
- REG_NAND_DATA8;
+ eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
numToRead--;
-
- while (numToRead > numEccBytes) {
- /* skip free oob region */
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
-#else
- REG_NAND_DATA8;
-#endif
- numToRead--;
- }
-
- /* read ECC bytes */
- nand_bcm_umi_bch_resume_read_ecc_calc();
- while (numToRead) {
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp = REG_NAND_DATA8;
- eccCalc[eccPos++] = *oobp;
- oobp++;
-#else
- eccCalc[eccPos++] = REG_NAND_DATA8;
-#endif
- numToRead--;
- }
}
}
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 69ee2c9..89907ed9 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -82,6 +82,7 @@ struct nand_flash_dev nand_flash_ids[] = {
/* 1 Gigabit */
{"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS},
{"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS},
+ {"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS},
{"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16},
{"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16},
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 7281000..261337e 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -80,6 +80,9 @@
#ifndef CONFIG_NANDSIM_DBG
#define CONFIG_NANDSIM_DBG 0
#endif
+#ifndef CONFIG_NANDSIM_MAX_PARTS
+#define CONFIG_NANDSIM_MAX_PARTS 32
+#endif
static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE;
static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
@@ -94,7 +97,7 @@ static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
static uint log = CONFIG_NANDSIM_LOG;
static uint dbg = CONFIG_NANDSIM_DBG;
-static unsigned long parts[MAX_MTD_DEVICES];
+static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
static unsigned int parts_num;
static char *badblocks = NULL;
static char *weakblocks = NULL;
@@ -135,8 +138,8 @@ MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read I
MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds");
MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
-MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanodeconds)");
-MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanodeconds)");
+MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
+MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
MODULE_PARM_DESC(log, "Perform logging if not zero");
@@ -288,7 +291,7 @@ union ns_mem {
* The structure which describes all the internal simulator data.
*/
struct nandsim {
- struct mtd_partition partitions[MAX_MTD_DEVICES];
+ struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
unsigned int nbparts;
uint busw; /* flash chip bus width (8 or 16) */
@@ -312,7 +315,7 @@ struct nandsim {
union ns_mem buf;
/* NAND flash "geometry" */
- struct nandsin_geometry {
+ struct {
uint64_t totsz; /* total flash size, bytes */
uint32_t secsz; /* flash sector (erase block) size, bytes */
uint pgsz; /* NAND flash page size, bytes */
@@ -331,7 +334,7 @@ struct nandsim {
} geom;
/* NAND flash internal registers */
- struct nandsim_regs {
+ struct {
unsigned command; /* the command register */
u_char status; /* the status register */
uint row; /* the page number */
@@ -342,7 +345,7 @@ struct nandsim {
} regs;
/* NAND flash lines state */
- struct ns_lines_status {
+ struct {
int ce; /* chip Enable */
int cle; /* command Latch Enable */
int ale; /* address Latch Enable */
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index 1f6f741..8c0b693 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -105,21 +105,21 @@ static int nomadik_nand_probe(struct platform_device *pdev)
ret = -EIO;
goto err_unmap;
}
- host->addr_va = ioremap(res->start, res->end - res->start + 1);
+ host->addr_va = ioremap(res->start, resource_size(res));
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
if (!res) {
ret = -EIO;
goto err_unmap;
}
- host->data_va = ioremap(res->start, res->end - res->start + 1);
+ host->data_va = ioremap(res->start, resource_size(res));
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
if (!res) {
ret = -EIO;
goto err_unmap;
}
- host->cmd_va = ioremap(res->start, res->end - res->start + 1);
+ host->cmd_va = ioremap(res->start, resource_size(res));
if (!host->addr_va || !host->data_va || !host->cmd_va) {
ret = -ENOMEM;
diff --git a/drivers/mtd/nand/w90p910_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 7680e73..6eddf73 100644
--- a/drivers/mtd/nand/w90p910_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009 Nuvoton technology corporation.
+ * Copyright © 2009 Nuvoton technology corporation.
*
* Wan ZongShun <mcuos.com@gmail.com>
*
@@ -55,7 +55,7 @@
#define write_addr_reg(dev, val) \
__raw_writel((val), (dev)->reg + REG_SMADDR)
-struct w90p910_nand {
+struct nuc900_nand {
struct mtd_info mtd;
struct nand_chip chip;
void __iomem *reg;
@@ -76,49 +76,49 @@ static const struct mtd_partition partitions[] = {
}
};
-static unsigned char w90p910_nand_read_byte(struct mtd_info *mtd)
+static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd)
{
unsigned char ret;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
ret = (unsigned char)read_data_reg(nand);
return ret;
}
-static void w90p910_nand_read_buf(struct mtd_info *mtd,
- unsigned char *buf, int len)
+static void nuc900_nand_read_buf(struct mtd_info *mtd,
+ unsigned char *buf, int len)
{
int i;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++)
buf[i] = (unsigned char)read_data_reg(nand);
}
-static void w90p910_nand_write_buf(struct mtd_info *mtd,
- const unsigned char *buf, int len)
+static void nuc900_nand_write_buf(struct mtd_info *mtd,
+ const unsigned char *buf, int len)
{
int i;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++)
write_data_reg(nand, buf[i]);
}
-static int w90p910_verify_buf(struct mtd_info *mtd,
- const unsigned char *buf, int len)
+static int nuc900_verify_buf(struct mtd_info *mtd,
+ const unsigned char *buf, int len)
{
int i;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++) {
if (buf[i] != (unsigned char)read_data_reg(nand))
@@ -128,7 +128,7 @@ static int w90p910_verify_buf(struct mtd_info *mtd,
return 0;
}
-static int w90p910_check_rb(struct w90p910_nand *nand)
+static int nuc900_check_rb(struct nuc900_nand *nand)
{
unsigned int val;
spin_lock(&nand->lock);
@@ -139,24 +139,24 @@ static int w90p910_check_rb(struct w90p910_nand *nand)
return val;
}
-static int w90p910_nand_devready(struct mtd_info *mtd)
+static int nuc900_nand_devready(struct mtd_info *mtd)
{
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
int ready;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
- ready = (w90p910_check_rb(nand)) ? 1 : 0;
+ ready = (nuc900_check_rb(nand)) ? 1 : 0;
return ready;
}
-static void w90p910_nand_command_lp(struct mtd_info *mtd,
- unsigned int command, int column, int page_addr)
+static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
{
register struct nand_chip *chip = mtd->priv;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
if (command == NAND_CMD_READOOB) {
column += mtd->writesize;
@@ -212,7 +212,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd,
write_cmd_reg(nand, NAND_CMD_STATUS);
write_cmd_reg(nand, command);
- while (!w90p910_check_rb(nand))
+ while (!nuc900_check_rb(nand))
;
return;
@@ -241,7 +241,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd,
}
-static void w90p910_nand_enable(struct w90p910_nand *nand)
+static void nuc900_nand_enable(struct nuc900_nand *nand)
{
unsigned int val;
spin_lock(&nand->lock);
@@ -262,37 +262,37 @@ static void w90p910_nand_enable(struct w90p910_nand *nand)
spin_unlock(&nand->lock);
}
-static int __devinit w90p910_nand_probe(struct platform_device *pdev)
+static int __devinit nuc900_nand_probe(struct platform_device *pdev)
{
- struct w90p910_nand *w90p910_nand;
+ struct nuc900_nand *nuc900_nand;
struct nand_chip *chip;
int retval;
struct resource *res;
retval = 0;
- w90p910_nand = kzalloc(sizeof(struct w90p910_nand), GFP_KERNEL);
- if (!w90p910_nand)
+ nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL);
+ if (!nuc900_nand)
return -ENOMEM;
- chip = &(w90p910_nand->chip);
+ chip = &(nuc900_nand->chip);
- w90p910_nand->mtd.priv = chip;
- w90p910_nand->mtd.owner = THIS_MODULE;
- spin_lock_init(&w90p910_nand->lock);
+ nuc900_nand->mtd.priv = chip;
+ nuc900_nand->mtd.owner = THIS_MODULE;
+ spin_lock_init(&nuc900_nand->lock);
- w90p910_nand->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(w90p910_nand->clk)) {
+ nuc900_nand->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nuc900_nand->clk)) {
retval = -ENOENT;
goto fail1;
}
- clk_enable(w90p910_nand->clk);
-
- chip->cmdfunc = w90p910_nand_command_lp;
- chip->dev_ready = w90p910_nand_devready;
- chip->read_byte = w90p910_nand_read_byte;
- chip->write_buf = w90p910_nand_write_buf;
- chip->read_buf = w90p910_nand_read_buf;
- chip->verify_buf = w90p910_verify_buf;
+ clk_enable(nuc900_nand->clk);
+
+ chip->cmdfunc = nuc900_nand_command_lp;
+ chip->dev_ready = nuc900_nand_devready;
+ chip->read_byte = nuc900_nand_read_byte;
+ chip->write_buf = nuc900_nand_write_buf;
+ chip->read_buf = nuc900_nand_read_buf;
+ chip->verify_buf = nuc900_verify_buf;
chip->chip_delay = 50;
chip->options = 0;
chip->ecc.mode = NAND_ECC_SOFT;
@@ -308,75 +308,75 @@ static int __devinit w90p910_nand_probe(struct platform_device *pdev)
goto fail1;
}
- w90p910_nand->reg = ioremap(res->start, resource_size(res));
- if (!w90p910_nand->reg) {
+ nuc900_nand->reg = ioremap(res->start, resource_size(res));
+ if (!nuc900_nand->reg) {
retval = -ENOMEM;
goto fail2;
}
- w90p910_nand_enable(w90p910_nand);
+ nuc900_nand_enable(nuc900_nand);
- if (nand_scan(&(w90p910_nand->mtd), 1)) {
+ if (nand_scan(&(nuc900_nand->mtd), 1)) {
retval = -ENXIO;
goto fail3;
}
- add_mtd_partitions(&(w90p910_nand->mtd), partitions,
+ add_mtd_partitions(&(nuc900_nand->mtd), partitions,
ARRAY_SIZE(partitions));
- platform_set_drvdata(pdev, w90p910_nand);
+ platform_set_drvdata(pdev, nuc900_nand);
return retval;
-fail3: iounmap(w90p910_nand->reg);
+fail3: iounmap(nuc900_nand->reg);
fail2: release_mem_region(res->start, resource_size(res));
-fail1: kfree(w90p910_nand);
+fail1: kfree(nuc900_nand);
return retval;
}
-static int __devexit w90p910_nand_remove(struct platform_device *pdev)
+static int __devexit nuc900_nand_remove(struct platform_device *pdev)
{
- struct w90p910_nand *w90p910_nand = platform_get_drvdata(pdev);
+ struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
struct resource *res;
- iounmap(w90p910_nand->reg);
+ iounmap(nuc900_nand->reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
- clk_disable(w90p910_nand->clk);
- clk_put(w90p910_nand->clk);
+ clk_disable(nuc900_nand->clk);
+ clk_put(nuc900_nand->clk);
- kfree(w90p910_nand);
+ kfree(nuc900_nand);
platform_set_drvdata(pdev, NULL);
return 0;
}
-static struct platform_driver w90p910_nand_driver = {
- .probe = w90p910_nand_probe,
- .remove = __devexit_p(w90p910_nand_remove),
+static struct platform_driver nuc900_nand_driver = {
+ .probe = nuc900_nand_probe,
+ .remove = __devexit_p(nuc900_nand_remove),
.driver = {
- .name = "w90p910-fmi",
+ .name = "nuc900-fmi",
.owner = THIS_MODULE,
},
};
-static int __init w90p910_nand_init(void)
+static int __init nuc900_nand_init(void)
{
- return platform_driver_register(&w90p910_nand_driver);
+ return platform_driver_register(&nuc900_nand_driver);
}
-static void __exit w90p910_nand_exit(void)
+static void __exit nuc900_nand_exit(void)
{
- platform_driver_unregister(&w90p910_nand_driver);
+ platform_driver_unregister(&nuc900_nand_driver);
}
-module_init(w90p910_nand_init);
-module_exit(w90p910_nand_exit);
+module_init(nuc900_nand_init);
+module_exit(nuc900_nand_exit);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
-MODULE_DESCRIPTION("w90p910 nand driver!");
+MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:w90p910-fmi");
+MODULE_ALIAS("platform:nuc900-fmi");
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 7545568..ee87325 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -292,11 +292,14 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
u32 *p = (u32 *)buf;
/* take care of subpage reads */
- for (; len % 4 != 0; ) {
- *buf++ = __raw_readb(info->nand.IO_ADDR_R);
- len--;
+ if (len % 4) {
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, buf, len % 4);
+ else
+ omap_read_buf8(mtd, buf, len % 4);
+ p = (u32 *) (buf + len % 4);
+ len -= len % 4;
}
- p = (u32 *) buf;
/* configure and start prefetch transfer */
ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
@@ -502,7 +505,7 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
omap_write_buf_pref(mtd, buf, len);
else
/* start transfer in DMA mode */
- omap_nand_dma_transfer(mtd, buf, len, 0x1);
+ omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
}
/**
@@ -1028,7 +1031,8 @@ out_free_info:
static int omap_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
- struct omap_nand_info *info = mtd->priv;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
platform_set_drvdata(pdev, NULL);
if (use_dma)
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index d60fc57..da6e753 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -80,6 +80,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
struct mtd_info *mtd;
struct nand_chip *nc;
struct orion_nand_data *board;
+ struct resource *res;
void __iomem *io_base;
int ret = 0;
#ifdef CONFIG_MTD_PARTITIONS
@@ -95,8 +96,13 @@ static int __init orion_nand_probe(struct platform_device *pdev)
}
mtd = (struct mtd_info *)(nc + 1);
- io_base = ioremap(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto no_res;
+ }
+
+ io_base = ioremap(res->start, resource_size(res));
if (!io_base) {
printk(KERN_ERR "orion_nand: ioremap failed\n");
ret = -EIO;
@@ -120,6 +126,9 @@ static int __init orion_nand_probe(struct platform_device *pdev)
if (board->width == 16)
nc->options |= NAND_BUSWIDTH_16;
+ if (board->dev_ready)
+ nc->dev_ready = board->dev_ready;
+
platform_set_drvdata(pdev, mtd);
if (nand_scan(mtd, 1)) {
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index a8b9376..090a05c 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -209,7 +209,7 @@ static int __devexit pasemi_nand_remove(struct of_device *ofdev)
return 0;
}
-static struct of_device_id pasemi_nand_match[] =
+static const struct of_device_id pasemi_nand_match[] =
{
{
.compatible = "pasemi,localbus-nand",
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 5d55152..e02fa4f 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1320,6 +1320,17 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
goto fail_free_irq;
}
+ if (mtd_has_cmdlinepart()) {
+ static const char *probes[] = { "cmdlinepart", NULL };
+ struct mtd_partition *parts;
+ int nr_parts;
+
+ nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0);
+
+ if (nr_parts)
+ return add_mtd_partitions(mtd, parts, nr_parts);
+ }
+
return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
fail_free_irq:
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
new file mode 100644
index 0000000..78a4232
--- /dev/null
+++ b/drivers/mtd/nand/r852.c
@@ -0,0 +1,1140 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <linux/sched.h>
+#include "sm_common.h"
+#include "r852.h"
+
+
+static int r852_enable_dma = 1;
+module_param(r852_enable_dma, bool, S_IRUGO);
+MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
+
+static int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+/* read register */
+static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
+{
+ uint8_t reg = readb(dev->mmio + address);
+ return reg;
+}
+
+/* write register */
+static inline void r852_write_reg(struct r852_device *dev,
+ int address, uint8_t value)
+{
+ writeb(value, dev->mmio + address);
+ mmiowb();
+}
+
+
+/* read dword sized register */
+static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
+{
+ uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
+ return reg;
+}
+
+/* write dword sized register */
+static inline void r852_write_reg_dword(struct r852_device *dev,
+ int address, uint32_t value)
+{
+ writel(cpu_to_le32(value), dev->mmio + address);
+ mmiowb();
+}
+
+/* returns pointer to our private structure */
+static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ return (struct r852_device *)chip->priv;
+}
+
+
+/* check if controller supports dma */
+static void r852_dma_test(struct r852_device *dev)
+{
+ dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
+ (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);
+
+ if (!dev->dma_usable)
+ message("Non dma capable device detected, dma disabled");
+
+ if (!r852_enable_dma) {
+ message("disabling dma on user request");
+ dev->dma_usable = 0;
+ }
+}
+
+/*
+ * Enable dma. Enables either the first or the second stage of the DMA;
+ * expects dev->dma_dir and dev->dma_state to be set
+ */
+static void r852_dma_enable(struct r852_device *dev)
+{
+ uint8_t dma_reg, dma_irq_reg;
+
+ /* Set up dma settings */
+ dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
+ dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);
+
+ if (dev->dma_dir)
+ dma_reg |= R852_DMA_READ;
+
+ if (dev->dma_state == DMA_INTERNAL) {
+ dma_reg |= R852_DMA_INTERNAL;
+ /* Precaution to make sure HW doesn't write */
+ /* to random kernel memory */
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_bounce_buffer));
+ } else {
+ dma_reg |= R852_DMA_MEMORY;
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_dma_addr));
+ }
+
+ /* Precaution: make sure write reached the device */
+ r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+ r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);
+
+ /* Set dma irq */
+ dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+ dma_irq_reg |
+ R852_DMA_IRQ_INTERNAL |
+ R852_DMA_IRQ_ERROR |
+ R852_DMA_IRQ_MEMORY);
+}
+
+/*
+ * Disable dma, called from the interrupt handler, which specifies
+ * success of the operation via 'error' argument
+ */
+static void r852_dma_done(struct r852_device *dev, int error)
+{
+ WARN_ON(dev->dma_stage == 0);
+
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
+ r852_read_reg_dword(dev, R852_DMA_IRQ_STA));
+
+ r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);
+
+ /* Precaution to make sure HW doesn't write to random kernel memory */
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_bounce_buffer));
+ r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+ dev->dma_error = error;
+ dev->dma_stage = 0;
+
+ if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
+ pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN,
+ dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+ complete(&dev->dma_done);
+}
+
+/*
+ * Wait until dma is done, which includes both phases of it
+ */
+static int r852_dma_wait(struct r852_device *dev)
+{
+ long timeout = wait_for_completion_timeout(&dev->dma_done,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ dbg("timeout waiting for DMA interrupt");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Read/Write one page using dma. Only whole pages (512 bytes) can be read.
+ */
+static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
+{
+ int bounce = 0;
+ unsigned long flags;
+ int error;
+
+ dev->dma_error = 0;
+
+ /* Set dma direction */
+ dev->dma_dir = do_read;
+ dev->dma_stage = 1;
+
+ dbg_verbose("doing dma %s ", do_read ? "read" : "write");
+
+ /* Set initial dma state: for reads, first fill the on-board buffer
+ from the device; for writes, first fill the buffer from memory */
+ dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;
+
+ /* if incoming buffer is not page aligned, we should do bounce */
+ if ((unsigned long)buf & (R852_DMA_LEN-1))
+ bounce = 1;
+
+ if (!bounce) {
+ dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf,
+ R852_DMA_LEN,
+ (do_read ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+
+ if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr))
+ bounce = 1;
+ }
+
+ if (bounce) {
+ dbg_verbose("dma: using bounce buffer");
+ dev->phys_dma_addr = dev->phys_bounce_buffer;
+ if (!do_read)
+ memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
+ }
+
+ /* Enable DMA */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ r852_dma_enable(dev);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ /* Wait till complete */
+ error = r852_dma_wait(dev);
+
+ if (error) {
+ r852_dma_done(dev, error);
+ return;
+ }
+
+ if (do_read && bounce)
+ memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
+}
+
+/*
+ * Program data lines of the nand chip to send data to it
+ */
+void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ uint32_t reg;
+
+ /* Don't allow any access to hardware if we suspect card removal */
+ if (dev->card_unstable)
+ return;
+
+ /* Special case for whole sector write */
+ if (len == R852_DMA_LEN && dev->dma_usable) {
+ r852_do_dma(dev, (uint8_t *)buf, 0);
+ return;
+ }
+
+ /* write DWORD chunks - faster */
+ while (len >= 4) {
+ reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
+ r852_write_reg_dword(dev, R852_DATALINE, reg);
+ buf += 4;
+ len -= 4;
+
+ }
+
+ /* write rest */
+ while (len)
+ r852_write_reg(dev, R852_DATALINE, *buf++);
+}
+
+/*
+ * Read data lines of the nand chip to retrieve data
+ */
+void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ uint32_t reg;
+
+ if (dev->card_unstable) {
+ /* since we can't signal an error here, at least return
+ a predictable buffer */
+ memset(buf, 0, len);
+ return;
+ }
+
+ /* special case for whole sector read */
+ if (len == R852_DMA_LEN && dev->dma_usable) {
+ r852_do_dma(dev, buf, 1);
+ return;
+ }
+
+ /* read in dword sized chunks */
+ while (len >= 4) {
+
+ reg = r852_read_reg_dword(dev, R852_DATALINE);
+ *buf++ = reg & 0xFF;
+ *buf++ = (reg >> 8) & 0xFF;
+ *buf++ = (reg >> 16) & 0xFF;
+ *buf++ = (reg >> 24) & 0xFF;
+ len -= 4;
+ }
+
+ /* read the rest byte by byte */
+ while (len--)
+ *buf++ = r852_read_reg(dev, R852_DATALINE);
+}
+
+/*
+ * Read one byte from nand chip
+ */
+static uint8_t r852_read_byte(struct mtd_info *mtd)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ /* Same problem as in r852_read_buf.... */
+ if (dev->card_unstable)
+ return 0;
+
+ return r852_read_reg(dev, R852_DATALINE);
+}
+
+
+/*
+ * Readback the buffer to verify it
+ */
+int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ /* We can't be sure about anything here... */
+ if (dev->card_unstable)
+ return -1;
+
+ /* This will never happen, unless you wired up a nand chip
+ with > 512 bytes page size to the reader */
+ if (len > SM_SECTOR_SIZE)
+ return 0;
+
+ r852_read_buf(mtd, dev->tmp_buffer, len);
+ return memcmp(buf, dev->tmp_buffer, len);
+}
+
+/*
+ * Control several chip lines & send commands
+ */
+void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+
+ dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
+ R852_CTL_ON | R852_CTL_CARDENABLE);
+
+ if (ctrl & NAND_ALE)
+ dev->ctlreg |= R852_CTL_DATA;
+
+ if (ctrl & NAND_CLE)
+ dev->ctlreg |= R852_CTL_COMMAND;
+
+ if (ctrl & NAND_NCE)
+ dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
+ else
+ dev->ctlreg &= ~R852_CTL_WRITE;
+
+ /* when a write is started, enable write access */
+ if (dat == NAND_CMD_ERASE1)
+ dev->ctlreg |= R852_CTL_WRITE;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+
+ /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need
+ to set write mode */
+ if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
+ dev->ctlreg |= R852_CTL_WRITE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+
+ if (dat != NAND_CMD_NONE)
+ r852_write_reg(dev, R852_DATALINE, dat);
+}
+
+/*
+ * Wait till card is ready.
+ * based on nand_wait, but returns errors on DMA error
+ */
+int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct r852_device *dev = (struct r852_device *)chip->priv;
+
+ unsigned long timeout;
+ int status;
+
+ timeout = jiffies + (chip->state == FL_ERASING ?
+ msecs_to_jiffies(400) : msecs_to_jiffies(20));
+
+ while (time_before(jiffies, timeout))
+ if (chip->dev_ready(mtd))
+ break;
+
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ status = (int)chip->read_byte(mtd);
+
+ /* Unfortunately, there is no way to send detailed error status... */
+ if (dev->dma_error) {
+ status |= NAND_STATUS_FAIL;
+ dev->dma_error = 0;
+ }
+ return status;
+}
+
+/*
+ * Check if card is ready
+ */
+
+int r852_ready(struct mtd_info *mtd)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
+}
+
+
+/*
+ * Set ECC engine mode
+ */
+
+void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return;
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ case NAND_ECC_WRITE:
+ /* enable ecc generation/check*/
+ dev->ctlreg |= R852_CTL_ECC_ENABLE;
+
+ /* flush ecc buffer */
+ r852_write_reg(dev, R852_CTL,
+ dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+ r852_read_reg_dword(dev, R852_DATALINE);
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ return;
+
+ case NAND_ECC_READSYN:
+ /* disable ecc generation */
+ dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+}
+
+/*
+ * Calculate ECC, only used for writes
+ */
+
+int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ struct sm_oob *oob = (struct sm_oob *)ecc_code;
+ uint32_t ecc1, ecc2;
+
+ if (dev->card_unstable)
+ return 0;
+
+ dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+ ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
+ ecc2 = r852_read_reg_dword(dev, R852_DATALINE);
+
+ oob->ecc1[0] = (ecc1) & 0xFF;
+ oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
+ oob->ecc1[2] = (ecc1 >> 16) & 0xFF;
+
+ oob->ecc2[0] = (ecc2) & 0xFF;
+ oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
+ oob->ecc2[2] = (ecc2 >> 16) & 0xFF;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ return 0;
+}
+
+/*
+ * Correct the data using ECC, hw did almost everything for us
+ */
+
+int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ uint16_t ecc_reg;
+ uint8_t ecc_status, err_byte;
+ int i, error = 0;
+
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return 0;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+ ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+
+ for (i = 0 ; i <= 1 ; i++) {
+
+ ecc_status = (ecc_reg >> 8) & 0xFF;
+
+ /* ecc uncorrectable error */
+ if (ecc_status & R852_ECC_FAIL) {
+ dbg("ecc: unrecoverable error, in half %d", i);
+ error = -1;
+ goto exit;
+ }
+
+ /* correctable error */
+ if (ecc_status & R852_ECC_CORRECTABLE) {
+
+ err_byte = ecc_reg & 0xFF;
+ dbg("ecc: recoverable error, "
+ "in half %d, byte %d, bit %d", i,
+ err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);
+
+ dat[err_byte] ^=
+ 1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
+ error++;
+ }
+
+ dat += 256;
+ ecc_reg >>= 16;
+ }
+exit:
+ return error;
+}
+
+/*
+ * This is a copy of nand_read_oob_std;
+ * nand_read_oob_syndrome assumes we can send column address - we can't
+ */
+static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ if (sndcmd) {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ sndcmd = 0;
+ }
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return sndcmd;
+}
+
+/*
+ * Start the nand engine
+ */
+
+void r852_engine_enable(struct r852_device *dev)
+{
+ if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+ r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+ } else {
+ r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+ }
+ msleep(300);
+ r852_write_reg(dev, R852_CTL, 0);
+}
+
+
+/*
+ * Stop the nand engine
+ */
+
+void r852_engine_disable(struct r852_device *dev)
+{
+ r852_write_reg_dword(dev, R852_HW, 0);
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
+}
+
+/*
+ * Test if card is present
+ */
+
+void r852_card_update_present(struct r852_device *dev)
+{
+ unsigned long flags;
+ uint8_t reg;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ reg = r852_read_reg(dev, R852_CARD_STA);
+ dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Update card detection IRQ state according to current card state
+ * which is read in r852_card_update_present
+ */
+void r852_update_card_detect(struct r852_device *dev)
+{
+ int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+ dev->card_unstable = 0;
+
+ card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
+ card_detect_reg |= R852_CARD_IRQ_GENABLE;
+
+ card_detect_reg |= dev->card_detected ?
+ R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;
+
+ r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
+}
+
+ssize_t r852_media_type_show(struct device *sys_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
+ struct r852_device *dev = r852_get_dev(mtd);
+ char *data = dev->sm ? "smartmedia" : "xd";
+
+ strcpy(buf, data);
+ return strlen(data);
+}
+
+DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
+
+
+/* Detect properties of card in slot */
+void r852_update_media_status(struct r852_device *dev)
+{
+ uint8_t reg;
+ unsigned long flags;
+ int readonly;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (!dev->card_detected) {
+ message("card removed");
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return ;
+ }
+
+ readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
+ reg = r852_read_reg(dev, R852_DMA_CAP);
+ dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);
+
+ message("detected %s %s card in slot",
+ dev->sm ? "SmartMedia" : "xD",
+ readonly ? "readonly" : "writeable");
+
+ dev->readonly = readonly;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Register the nand device
+ * Called when the card is detected
+ */
+int r852_register_nand_device(struct r852_device *dev)
+{
+ dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+
+ if (!dev->mtd)
+ goto error1;
+
+ WARN_ON(dev->card_registred);
+
+ dev->mtd->owner = THIS_MODULE;
+ dev->mtd->priv = dev->chip;
+ dev->mtd->dev.parent = &dev->pci_dev->dev;
+
+ if (dev->readonly)
+ dev->chip->options |= NAND_ROM;
+
+ r852_engine_enable(dev);
+
+ if (sm_register_device(dev->mtd, dev->sm))
+ goto error2;
+
+ if (device_create_file(&dev->mtd->dev, &dev_attr_media_type))
+ message("can't create media type sysfs attribute");
+
+ dev->card_registred = 1;
+ return 0;
+error2:
+ kfree(dev->mtd);
+error1:
+ /* Force card redetect */
+ dev->card_detected = 0;
+ return -1;
+}
+
+/*
+ * Unregister the card
+ */
+
+void r852_unregister_nand_device(struct r852_device *dev)
+{
+ if (!dev->card_registred)
+ return;
+
+ device_remove_file(&dev->mtd->dev, &dev_attr_media_type);
+ nand_release(dev->mtd);
+ r852_engine_disable(dev);
+ dev->card_registred = 0;
+ kfree(dev->mtd);
+ dev->mtd = NULL;
+}
+
+/* Card state updater */
+void r852_card_detect_work(struct work_struct *work)
+{
+ struct r852_device *dev =
+ container_of(work, struct r852_device, card_detect_work.work);
+
+ r852_card_update_present(dev);
+ dev->card_unstable = 0;
+
+ /* False alarm */
+ if (dev->card_detected == dev->card_registred)
+ goto exit;
+
+ /* Read media properties */
+ r852_update_media_status(dev);
+
+ /* Register the card */
+ if (dev->card_detected)
+ r852_register_nand_device(dev);
+ else
+ r852_unregister_nand_device(dev);
+exit:
+ /* Update detection logic */
+ r852_update_card_detect(dev);
+}
+
+/* Ack + disable IRQ generation */
+static void r852_disable_irqs(struct r852_device *dev)
+{
+ uint8_t reg;
+ reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+ r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);
+
+ reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+ reg & ~R852_DMA_IRQ_MASK);
+
+ r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
+}
+
+/* Interrupt handler */
+static irqreturn_t r852_irq(int irq, void *data)
+{
+ struct r852_device *dev = (struct r852_device *)data;
+
+ uint8_t card_status, dma_status;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+
+ /* We can receive a shared interrupt while pci is suspended;
+ in that case reads will return 0xFFFFFFFF.... */
+ if (dev->insuspend)
+ goto out;
+
+ /* handle card detection interrupts first */
+ card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
+ r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);
+
+ if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {
+
+ ret = IRQ_HANDLED;
+ dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);
+
+ /* we shouldn't receive any interrupts while we wait for the card
+ to settle */
+ WARN_ON(dev->card_unstable);
+
+ /* disable irqs while card is unstable */
+ /* this will time out the DMA if active, but better than garbage */
+ r852_disable_irqs(dev);
+
+ if (dev->card_unstable)
+ goto out;
+
+ /* let the card state settle a bit, and then do the work */
+ dev->card_unstable = 1;
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, msecs_to_jiffies(100));
+ goto out;
+ }
+
+
+ /* Handle dma interrupts */
+ dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);
+
+ if (dma_status & R852_DMA_IRQ_MASK) {
+
+ ret = IRQ_HANDLED;
+
+ if (dma_status & R852_DMA_IRQ_ERROR) {
+ dbg("recieved dma error IRQ");
+ r852_dma_done(dev, -EIO);
+ goto out;
+ }
+
+ /* received DMA interrupt out of nowhere? */
+ WARN_ON_ONCE(dev->dma_stage == 0);
+
+ if (dev->dma_stage == 0)
+ goto out;
+
+ /* done device access */
+ if (dev->dma_state == DMA_INTERNAL &&
+ (dma_status & R852_DMA_IRQ_INTERNAL)) {
+
+ dev->dma_state = DMA_MEMORY;
+ dev->dma_stage++;
+ }
+
+ /* done memory DMA */
+ if (dev->dma_state == DMA_MEMORY &&
+ (dma_status & R852_DMA_IRQ_MEMORY)) {
+ dev->dma_state = DMA_INTERNAL;
+ dev->dma_stage++;
+ }
+
+ /* Enable 2nd half of dma dance */
+ if (dev->dma_stage == 2)
+ r852_dma_enable(dev);
+
+ /* Operation done */
+ if (dev->dma_stage == 3)
+ r852_dma_done(dev, 0);
+ goto out;
+ }
+
+ /* Handle unknown interrupts */
+ if (dma_status)
+ dbg("bad dma IRQ status = %x", dma_status);
+
+ if (card_status & ~R852_CARD_STA_CD)
+ dbg("strange card status = %x", card_status);
+
+out:
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return ret;
+}
+
+int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+{
+ int error;
+ struct nand_chip *chip;
+ struct r852_device *dev;
+
+ /* pci initialization */
+ error = pci_enable_device(pci_dev);
+
+ if (error)
+ goto error1;
+
+ pci_set_master(pci_dev);
+
+ error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ if (error)
+ goto error2;
+
+ error = pci_request_regions(pci_dev, DRV_NAME);
+
+ if (error)
+ goto error3;
+
+ error = -ENOMEM;
+
+ /* init nand chip, but register it only on card insert */
+ chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
+
+ if (!chip)
+ goto error4;
+
+ /* commands */
+ chip->cmd_ctrl = r852_cmdctl;
+ chip->waitfunc = r852_wait;
+ chip->dev_ready = r852_ready;
+
+ /* I/O */
+ chip->read_byte = r852_read_byte;
+ chip->read_buf = r852_read_buf;
+ chip->write_buf = r852_write_buf;
+ chip->verify_buf = r852_verify_buf;
+
+ /* ecc */
+ chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+ chip->ecc.size = R852_DMA_LEN;
+ chip->ecc.bytes = SM_OOB_SIZE;
+ chip->ecc.hwctl = r852_ecc_hwctl;
+ chip->ecc.calculate = r852_ecc_calculate;
+ chip->ecc.correct = r852_ecc_correct;
+
+ /* TODO: hack */
+ chip->ecc.read_oob = r852_read_oob;
+
+ /* init our device structure */
+ dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
+
+ if (!dev)
+ goto error5;
+
+ chip->priv = dev;
+ dev->chip = chip;
+ dev->pci_dev = pci_dev;
+ pci_set_drvdata(pci_dev, dev);
+
+ dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN,
+ &dev->phys_bounce_buffer);
+
+ if (!dev->bounce_buffer)
+ goto error6;
+
+
+ error = -ENODEV;
+ dev->mmio = pci_ioremap_bar(pci_dev, 0);
+
+ if (!dev->mmio)
+ goto error7;
+
+ error = -ENOMEM;
+ dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
+
+ if (!dev->tmp_buffer)
+ goto error8;
+
+ init_completion(&dev->dma_done);
+
+ dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+
+ if (!dev->card_workqueue)
+ goto error9;
+
+ INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);
+
+ /* shut down everything - precaution */
+ r852_engine_disable(dev);
+ r852_disable_irqs(dev);
+
+ r852_dma_test(dev);
+
+ /*register irq handler*/
+ error = -ENODEV;
+ if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
+ DRV_NAME, dev))
+ goto error10;
+
+ dev->irq = pci_dev->irq;
+ spin_lock_init(&dev->irqlock);
+
+ /* kick initial present test */
+ dev->card_detected = 0;
+ r852_card_update_present(dev);
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, 0);
+
+
+ printk(KERN_NOTICE DRV_NAME ": driver loaded successfully\n");
+ return 0;
+
+error10:
+ destroy_workqueue(dev->card_workqueue);
+error9:
+ kfree(dev->tmp_buffer);
+error8:
+ pci_iounmap(pci_dev, dev->mmio);
+error7:
+ pci_free_consistent(pci_dev, R852_DMA_LEN,
+ dev->bounce_buffer, dev->phys_bounce_buffer);
+error6:
+ kfree(dev);
+error5:
+ kfree(chip);
+error4:
+ pci_release_regions(pci_dev);
+error3:
+error2:
+ pci_disable_device(pci_dev);
+error1:
+ return error;
+}
+
+void r852_remove(struct pci_dev *pci_dev)
+{
+ struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+ /* Stop detect workqueue -
+ we are going to unregister the device anyway*/
+ cancel_delayed_work_sync(&dev->card_detect_work);
+ destroy_workqueue(dev->card_workqueue);
+
+ /* Unregister the device, this might make more IO */
+ r852_unregister_nand_device(dev);
+
+ /* Stop interrupts */
+ r852_disable_irqs(dev);
+ synchronize_irq(dev->irq);
+ free_irq(dev->irq, dev);
+
+ /* Cleanup */
+ kfree(dev->tmp_buffer);
+ pci_iounmap(pci_dev, dev->mmio);
+ pci_free_consistent(pci_dev, R852_DMA_LEN,
+ dev->bounce_buffer, dev->phys_bounce_buffer);
+
+ kfree(dev->chip);
+ kfree(dev);
+
+ /* Shutdown the PCI device */
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
+}
+
+void r852_shutdown(struct pci_dev *pci_dev)
+{
+ struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+ cancel_delayed_work_sync(&dev->card_detect_work);
+ r852_disable_irqs(dev);
+ synchronize_irq(dev->irq);
+ pci_disable_device(pci_dev);
+}
+
+#ifdef CONFIG_PM
+int r852_suspend(struct device *device)
+{
+ struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
+ unsigned long flags;
+
+ if (dev->ctlreg & R852_CTL_CARDENABLE)
+ return -EBUSY;
+
+ /* First make sure the detect work is gone */
+ cancel_delayed_work_sync(&dev->card_detect_work);
+
+ /* Turn off the interrupts and stop the device */
+ r852_disable_irqs(dev);
+ r852_engine_disable(dev);
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dev->insuspend = 1;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ /* At that point, even if interrupt handler is running, it will quit */
+ /* So wait for this to happen explicitly */
+ synchronize_irq(dev->irq);
+
+ /* If the card was pulled out just during the suspend, which is very
+ unlikely, we will remove it on resume; it is too late now
+ anyway... */
+ dev->card_unstable = 0;
+
+ pci_save_state(to_pci_dev(device));
+ return pci_prepare_to_sleep(to_pci_dev(device));
+}
+
+int r852_resume(struct device *device)
+{
+ struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
+ unsigned long flags;
+
+ /* Turn on the hardware */
+ pci_back_from_sleep(to_pci_dev(device));
+ pci_restore_state(to_pci_dev(device));
+
+ r852_disable_irqs(dev);
+ r852_card_update_present(dev);
+ r852_engine_disable(dev);
+
+
+ /* Now it's safe for the IRQ to run */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dev->insuspend = 0;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+
+ /* If card status changed, just do the work */
+ if (dev->card_detected != dev->card_registred) {
+ dbg("card was %s during low power state",
+ dev->card_detected ? "added" : "removed");
+
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, 1000);
+ return 0;
+ }
+
+ /* Otherwise, initialize the card */
+ if (dev->card_registred) {
+ r852_engine_enable(dev);
+ dev->chip->select_chip(dev->mtd, 0);
+ dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1);
+ dev->chip->select_chip(dev->mtd, -1);
+ }
+
+ /* Program card detection IRQ */
+ r852_update_card_detect(dev);
+ return 0;
+}
+#else
+#define r852_suspend NULL
+#define r852_resume NULL
+#endif
+
+static const struct pci_device_id r852_pci_id_tbl[] = {
+
+ { PCI_VDEVICE(RICOH, 0x0852), },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
+
+SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
+
+
+static struct pci_driver r852_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = r852_pci_id_tbl,
+ .probe = r852_probe,
+ .remove = r852_remove,
+ .shutdown = r852_shutdown,
+ .driver.pm = &r852_pm_ops,
+};
+
+static __init int r852_module_init(void)
+{
+ return pci_register_driver(&r852_pci_driver);
+}
+
+static void __exit r852_module_exit(void)
+{
+ pci_unregister_driver(&r852_pci_driver);
+}
+
+module_init(r852_module_init);
+module_exit(r852_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
diff --git a/drivers/mtd/nand/r852.h b/drivers/mtd/nand/r852.h
new file mode 100644
index 0000000..8096cc2
--- /dev/null
+++ b/drivers/mtd/nand/r852.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mtd/nand.h>
+#include <linux/spinlock.h>
+
+
+/* nand interface + ecc
+ byte write/read does one cycle on nand data lines.
+ dword write/read does 4 cycles
+ if R852_CTL_ECC_ACCESS is set in R852_CTL, then dword read reads
+ results of ecc correction, if DMA read was done before.
+ If a write was done, two dword reads return the generated ecc checksums
+*/
+#define R852_DATALINE 0x00
+
+/* control register */
+#define R852_CTL 0x04
+#define R852_CTL_COMMAND 0x01 /* send command (#CLE)*/
+#define R852_CTL_DATA 0x02 /* read/write data (#ALE)*/
+#define R852_CTL_ON 0x04 /* only seems to control the hd led, */
+ /* but has to be set on start...*/
+#define R852_CTL_RESET 0x08 /* unknown, set only on start once*/
+#define R852_CTL_CARDENABLE 0x10 /* probably (#CE) - always set*/
+#define R852_CTL_ECC_ENABLE 0x20 /* enable ecc engine */
+#define R852_CTL_ECC_ACCESS 0x40 /* read/write ecc via reg #0*/
+#define R852_CTL_WRITE 0x80 /* set when performing writes (#WP) */
+
+/* card detection status */
+#define R852_CARD_STA 0x05
+
+#define R852_CARD_STA_CD 0x01 /* state of #CD line, same as 0x04 */
+#define R852_CARD_STA_RO 0x02 /* card is readonly */
+#define R852_CARD_STA_PRESENT 0x04 /* card is present (#CD) */
+#define R852_CARD_STA_ABSENT 0x08 /* card is absent */
+#define R852_CARD_STA_BUSY 0x80 /* card is busy - (#R/B) */
+
+/* card detection irq status & enable*/
+#define R852_CARD_IRQ_STA 0x06 /* IRQ status */
+#define R852_CARD_IRQ_ENABLE 0x07 /* IRQ enable */
+
+#define R852_CARD_IRQ_CD 0x01 /* fire when #CD lights, same as 0x04*/
+#define R852_CARD_IRQ_REMOVE 0x04 /* detect card removal */
+#define R852_CARD_IRQ_INSERT 0x08 /* detect card insert */
+#define R852_CARD_IRQ_UNK1 0x10 /* unknown */
+#define R852_CARD_IRQ_GENABLE 0x80 /* general enable */
+#define R852_CARD_IRQ_MASK 0x1D
+
+
+
+/* hardware enable */
+#define R852_HW 0x08
+#define R852_HW_ENABLED 0x01 /* hw enabled */
+#define R852_HW_UNKNOWN 0x80
+
+
+/* dma capabilities */
+#define R852_DMA_CAP 0x09
+#define R852_SMBIT 0x20 /* if set with bit #6 or bit #7, then */
+ /* hw is smartmedia */
+#define R852_DMA1 0x40 /* if set w/bit #7, dma is supported */
+#define R852_DMA2 0x80 /* if set w/bit #6, dma is supported */
+
+
+/* physical DMA address - 32 bit value*/
+#define R852_DMA_ADDR 0x0C
+
+
+/* dma settings */
+#define R852_DMA_SETTINGS 0x10
+#define R852_DMA_MEMORY 0x01 /* (memory <-> internal hw buffer) */
+#define R852_DMA_READ 0x02 /* 0 = write, 1 = read */
+#define R852_DMA_INTERNAL 0x04 /* (internal hw buffer <-> card) */
+
+/* dma IRQ status */
+#define R852_DMA_IRQ_STA 0x14
+
+/* dma IRQ enable */
+#define R852_DMA_IRQ_ENABLE 0x18
+
+#define R852_DMA_IRQ_MEMORY 0x01 /* (memory <-> internal hw buffer) */
+#define R852_DMA_IRQ_ERROR 0x02 /* error did happen */
+#define R852_DMA_IRQ_INTERNAL 0x04 /* (internal hw buffer <-> card) */
+#define R852_DMA_IRQ_MASK 0x07 /* mask of all IRQ bits */
+
+
+/* ECC syndrome format - read from reg #0 will return two copies of these for
+ each half of the page.
+ first byte is error byte location, and second, bit location + flags */
+#define R852_ECC_ERR_BIT_MSK 0x07 /* error bit location */
+#define R852_ECC_CORRECT 0x10 /* no errors - (guessed) */
+#define R852_ECC_CORRECTABLE 0x20 /* correctable error exists */
+#define R852_ECC_FAIL 0x40 /* non correctable error detected */
+
+#define R852_DMA_LEN 512
+
+#define DMA_INTERNAL 0
+#define DMA_MEMORY 1
+
+struct r852_device {
+ void __iomem *mmio; /* mmio */
+ struct mtd_info *mtd; /* mtd backpointer */
+ struct nand_chip *chip; /* nand chip backpointer */
+ struct pci_dev *pci_dev; /* pci backpointer */
+
+ /* dma area */
+ dma_addr_t phys_dma_addr; /* bus address of buffer*/
+ struct completion dma_done; /* data transfer done */
+
+ dma_addr_t phys_bounce_buffer; /* bus address of bounce buffer */
+ uint8_t *bounce_buffer; /* virtual address of bounce buffer */
+
+ int dma_dir; /* 1 = read, 0 = write */
+ int dma_stage; /* 0 - idle, 1 - first step,
+ 2 - second step */
+
+ int dma_state; /* 0 = internal, 1 = memory */
+ int dma_error; /* dma errors */
+ int dma_usable; /* is it possible to use dma */
+
+ /* card status area */
+ struct delayed_work card_detect_work;
+ struct workqueue_struct *card_workqueue;
+ int card_registred; /* card registered with mtd */
+ int card_detected; /* card detected in slot */
+ int card_unstable; /* whether the card is inserted
+ is not yet known */
+ int readonly; /* card is readonly */
+ int sm; /* Is card smartmedia */
+
+ /* interrupt handling */
+ spinlock_t irqlock; /* IRQ protecting lock */
+ int irq; /* irq num */
+ int insuspend; /* device is suspended */
+
+ /* misc */
+ void *tmp_buffer; /* temporary buffer */
+ uint8_t ctlreg; /* cached contents of control reg */
+};
+
+#define DRV_NAME "r852"
+
+
+#define dbg(format, ...) \
+ if (debug) \
+ printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
+
+#define dbg_verbose(format, ...) \
+ if (debug > 1) \
+ printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
+
+
+#define message(format, ...) \
+ printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__)
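As the register comments above describe, R852_DMA_CAP both advertises DMA capability (DMA1/DMA2) and, together with SMBIT, distinguishes a SmartMedia slot from an xD slot. A small sketch of how those bits are decoded, mirroring r852_dma_test() and r852_update_media_status(); the helper name is made up:

static inline void example_decode_caps(uint8_t cap, int *dma_usable, int *is_sm)
{
	/* DMA works only when both capability bits are present */
	*dma_usable = (cap & (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);

	/* SMBIT together with a DMA bit marks a SmartMedia (not xD) slot */
	*is_sm = (cap & (R852_DMA1 | R852_DMA2)) && (cap & R852_SMBIT);
}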
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index fa6e9c7..239aadf 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -929,14 +929,13 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
pr_debug("s3c2410_nand_probe(%p)\n", pdev);
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL) {
dev_err(&pdev->dev, "no memory for flash info\n");
err = -ENOMEM;
goto exit_error;
}
- memset(info, 0, sizeof(*info));
platform_set_drvdata(pdev, info);
spin_lock_init(&info->controller.lock);
@@ -957,7 +956,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
/* currently we assume we have the one resource */
res = pdev->resource;
- size = res->end - res->start + 1;
+ size = resource_size(res);
info->area = request_mem_region(res->start, size, pdev->name);
@@ -994,15 +993,13 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
/* allocate our information */
size = nr_sets * sizeof(*info->mtds);
- info->mtds = kmalloc(size, GFP_KERNEL);
+ info->mtds = kzalloc(size, GFP_KERNEL);
if (info->mtds == NULL) {
dev_err(&pdev->dev, "failed to allocate mtd storage\n");
err = -ENOMEM;
goto exit_error;
}
- memset(info->mtds, 0, size);
-
/* initialise all possible chips */
nmtd = info->mtds;
@@ -1013,7 +1010,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
s3c2410_nand_init_chip(info, nmtd, sets);
nmtd->scan_res = nand_scan_ident(&nmtd->mtd,
- (sets) ? sets->nr_chips : 1);
+ (sets) ? sets->nr_chips : 1,
+ NULL);
if (nmtd->scan_res == 0) {
s3c2410_nand_update_chip(info, nmtd);
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 34752fc..546c2f0 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -855,7 +855,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
nand->read_word = flctl_read_word;
}
- ret = nand_scan_ident(flctl_mtd, 1);
+ ret = nand_scan_ident(flctl_mtd, 1, NULL);
if (ret)
goto err;
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
new file mode 100644
index 0000000..ac80fb362
--- /dev/null
+++ b/drivers/mtd/nand/sm_common.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for xD format
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/mtd/nand.h>
+#include "sm_common.h"
+
+static struct nand_ecclayout nand_oob_sm = {
+ .eccbytes = 6,
+ .eccpos = {8, 9, 10, 13, 14, 15},
+ .oobfree = {
+ {.offset = 0 , .length = 4}, /* reserved */
+ {.offset = 6 , .length = 2}, /* LBA1 */
+ {.offset = 11, .length = 2} /* LBA2 */
+ }
+};
+
+/* NOTE: This layout is not compatible with SmartMedia, */
+/* because the 256 byte devices have a page dependent oob layout */
+/* However it does preserve the bad block markers */
+/* If you use smftl, it will bypass this and work correctly */
+/* If you don't, then you break SmartMedia compliance anyway */
+
+static struct nand_ecclayout nand_oob_sm_small = {
+ .eccbytes = 3,
+ .eccpos = {0, 1, 2},
+ .oobfree = {
+ {.offset = 3 , .length = 2}, /* reserved */
+ {.offset = 6 , .length = 2}, /* LBA1 */
+ }
+};
+
+
+static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_oob_ops ops;
+ struct sm_oob oob;
+ int ret, error = 0;
+
+ memset(&oob, -1, SM_OOB_SIZE);
+ oob.block_status = 0x0F;
+
+ /* As long as this function is called on erase block boundaries
+ it will work correctly for 256 byte nand */
+ ops.mode = MTD_OOB_PLACE;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = (void *)&oob;
+ ops.datbuf = NULL;
+
+
+ ret = mtd->write_oob(mtd, ofs, &ops);
+ if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
+ printk(KERN_NOTICE
+ "sm_common: can't mark sector at %i as bad\n",
+ (int)ofs);
+ error = -EIO;
+ } else
+ mtd->ecc_stats.badblocks++;
+
+ return error;
+}
+
+
+static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
+ {"SmartMedia 1MiB 5V", 0x6e, 256, 1, 0x1000, 0},
+ {"SmartMedia 1MiB 3,3V", 0xe8, 256, 1, 0x1000, 0},
+ {"SmartMedia 1MiB 3,3V", 0xec, 256, 1, 0x1000, 0},
+ {"SmartMedia 2MiB 3,3V", 0xea, 256, 2, 0x1000, 0},
+ {"SmartMedia 2MiB 5V", 0x64, 256, 2, 0x1000, 0},
+ {"SmartMedia 2MiB 3,3V ROM", 0x5d, 512, 2, 0x2000, NAND_ROM},
+ {"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0},
+ {"SmartMedia 4MiB 3,3/5V", 0xe5, 512, 4, 0x2000, 0},
+ {"SmartMedia 4MiB 5V", 0x6b, 512, 4, 0x2000, 0},
+ {"SmartMedia 4MiB 3,3V ROM", 0xd5, 512, 4, 0x2000, NAND_ROM},
+ {"SmartMedia 8MiB 3,3V", 0xe6, 512, 8, 0x2000, 0},
+ {"SmartMedia 8MiB 3,3V ROM", 0xd6, 512, 8, 0x2000, NAND_ROM},
+ {"SmartMedia 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
+ {"SmartMedia 16MiB 3,3V ROM", 0x57, 512, 16, 0x4000, NAND_ROM},
+ {"SmartMedia 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
+ {"SmartMedia 32MiB 3,3V ROM", 0x58, 512, 32, 0x4000, NAND_ROM},
+ {"SmartMedia 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
+ {"SmartMedia 64MiB 3,3V ROM", 0xd9, 512, 64, 0x4000, NAND_ROM},
+ {"SmartMedia 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
+ {"SmartMedia 128MiB 3,3V ROM", 0xda, 512, 128, 0x4000, NAND_ROM},
+ {"SmartMedia 256MiB 3,3V", 0x71, 512, 256, 0x4000 },
+ {"SmartMedia 256MiB 3,3V ROM", 0x5b, 512, 256, 0x4000, NAND_ROM},
+ {NULL,}
+};
+
+#define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
+static struct nand_flash_dev nand_xd_flash_ids[] = {
+
+ {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
+ {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
+ {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
+ {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
+ {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM},
+ {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, XD_TYPEM},
+ {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, XD_TYPEM},
+ {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, XD_TYPEM},
+ {NULL,}
+};
+
+int sm_register_device(struct mtd_info *mtd, int smartmedia)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ int ret;
+
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ /* Scan for card properties */
+ ret = nand_scan_ident(mtd, 1, smartmedia ?
+ nand_smartmedia_flash_ids : nand_xd_flash_ids);
+
+ if (ret)
+ return ret;
+
+ /* Bad block marker position */
+ chip->badblockpos = 0x05;
+ chip->badblockbits = 7;
+ chip->block_markbad = sm_block_markbad;
+
+ /* ECC layout */
+ if (mtd->writesize == SM_SECTOR_SIZE)
+ chip->ecc.layout = &nand_oob_sm;
+ else if (mtd->writesize == SM_SMALL_PAGE)
+ chip->ecc.layout = &nand_oob_sm_small;
+ else
+ return -ENODEV;
+
+ ret = nand_scan_tail(mtd);
+
+ if (ret)
+ return ret;
+
+ return add_mtd_device(mtd);
+}
+EXPORT_SYMBOL_GPL(sm_register_device);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Common SmartMedia/xD functions");
diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h
new file mode 100644
index 0000000..00f4a83
--- /dev/null
+++ b/drivers/mtd/nand/sm_common.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for SmartMedia/xD format
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+
+/* Full oob structure as written on the flash */
+struct sm_oob {
+ uint32_t reserved;
+ uint8_t data_status;
+ uint8_t block_status;
+ uint8_t lba_copy1[2];
+ uint8_t ecc2[3];
+ uint8_t lba_copy2[2];
+ uint8_t ecc1[3];
+} __attribute__((packed));
+
+
+/* one sector is always 512 bytes, but it can consist of two nand pages */
+#define SM_SECTOR_SIZE 512
+
+/* oob area is also 16 bytes, but might be from two pages */
+#define SM_OOB_SIZE 16
+
+/* This is the maximum zone size, and all devices that have more than one zone
+ have this size */
+#define SM_MAX_ZONE_SIZE 1024
+
+/* support for small page nand */
+#define SM_SMALL_PAGE 256
+#define SM_SMALL_OOB_SIZE 8
+
+
+extern int sm_register_device(struct mtd_info *mtd, int smartmedia);
+
+
+static inline int sm_sector_valid(struct sm_oob *oob)
+{
+ return hweight16(oob->data_status) >= 5;
+}
+
+static inline int sm_block_valid(struct sm_oob *oob)
+{
+ return hweight16(oob->block_status) >= 7;
+}
+
+static inline int sm_block_erased(struct sm_oob *oob)
+{
+ static const uint32_t erased_pattern[4] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ /* First test for erased block */
+ if (!memcmp(oob, erased_pattern, sizeof(*oob)))
+ return 1;
+ return 0;
+}
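The inline helpers above classify a sector purely from the status bytes in its OOB area; counting ones with hweight16() makes the markers tolerant of a single flipped bit. An illustrative sketch of how a caller might use them after reading the OOB (example_classify_sector is hypothetical and not part of this patch):

/* returns 1 for good data, 0 for a free (erased) sector, negative on problems */
static int example_classify_sector(struct sm_oob *oob)
{
	if (sm_block_erased(oob))
		return 0;		/* erased, nothing stored yet */

	if (!sm_block_valid(oob))
		return -EBADMSG;	/* whole block marked bad */

	if (!sm_sector_valid(oob))
		return -EIO;		/* sector data marked invalid */

	return 1;
}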
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index a4519a7..b37cbde 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -220,7 +220,7 @@ static int __devinit socrates_nand_probe(struct of_device *ofdev,
dev_set_drvdata(&ofdev->dev, host);
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1)) {
+ if (nand_scan_ident(mtd, 1, NULL)) {
res = -ENXIO;
goto out;
}
@@ -290,7 +290,7 @@ static int __devexit socrates_nand_remove(struct of_device *ofdev)
return 0;
}
-static struct of_device_id socrates_nand_match[] =
+static const struct of_device_id socrates_nand_match[] =
{
{
.compatible = "abb,socrates-nand",
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index fa28f01..3041d1f 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -319,7 +319,7 @@ static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
int ret;
if (cell->enable) {
@@ -363,7 +363,7 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
if (cell->disable)
@@ -372,7 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
static int tmio_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
struct tmio_nand_data *data = cell->driver_data;
struct resource *fcr = platform_get_resource(dev,
IORESOURCE_MEM, 0);
@@ -405,14 +405,14 @@ static int tmio_probe(struct platform_device *dev)
mtd->priv = nand_chip;
mtd->name = "tmio-nand";
- tmio->ccr = ioremap(ccr->start, ccr->end - ccr->start + 1);
+ tmio->ccr = ioremap(ccr->start, resource_size(ccr));
if (!tmio->ccr) {
retval = -EIO;
goto err_iomap_ccr;
}
tmio->fcr_base = fcr->start & 0xfffff;
- tmio->fcr = ioremap(fcr->start, fcr->end - fcr->start + 1);
+ tmio->fcr = ioremap(fcr->start, resource_size(fcr));
if (!tmio->fcr) {
retval = -EIO;
goto err_iomap_fcr;
@@ -516,7 +516,7 @@ static int tmio_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int tmio_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
if (cell->suspend)
cell->suspend(dev);
@@ -527,7 +527,7 @@ static int tmio_suspend(struct platform_device *dev, pm_message_t state)
static int tmio_resume(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
/* FIXME - is this required or merely another attack of the broken
* SHARP platform? Looks suspicious.
diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c
deleted file mode 100644
index 0f5562a..0000000
--- a/drivers/mtd/nand/ts7250.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * drivers/mtd/nand/ts7250.c
- *
- * Copyright (C) 2004 Technologic Systems (support@embeddedARM.com)
- *
- * Derived from drivers/mtd/nand/edb7312.c
- * Copyright (C) 2004 Marius Gröger (mag@sysgo.de)
- *
- * Derived from drivers/mtd/nand/autcpu12.c
- * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * TS-7250 board which utilizes a Samsung 32 Mbyte part.
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <mach/ts72xx.h>
-
-#include <asm/sizes.h>
-#include <asm/mach-types.h>
-
-/*
- * MTD structure for TS7250 board
- */
-static struct mtd_info *ts7250_mtd = NULL;
-
-#ifdef CONFIG_MTD_PARTITIONS
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
-#define NUM_PARTITIONS 3
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info32[] = {
- {
- .name = "TS-BOOTROM",
- .offset = 0x00000000,
- .size = 0x00004000,
- }, {
- .name = "Linux",
- .offset = 0x00004000,
- .size = 0x01d00000,
- }, {
- .name = "RedBoot",
- .offset = 0x01d04000,
- .size = 0x002fc000,
- },
-};
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info128[] = {
- {
- .name = "TS-BOOTROM",
- .offset = 0x00000000,
- .size = 0x00004000,
- }, {
- .name = "Linux",
- .offset = 0x00004000,
- .size = 0x07d00000,
- }, {
- .name = "RedBoot",
- .offset = 0x07d04000,
- .size = 0x002fc000,
- },
-};
-#endif
-
-
-/*
- * hardware specific access to control-lines
- *
- * ctrl:
- * NAND_NCE: bit 0 -> bit 2
- * NAND_CLE: bit 1 -> bit 1
- * NAND_ALE: bit 2 -> bit 0
- */
-static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
- unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE;
- unsigned char bits;
-
- bits = (ctrl & NAND_NCE) << 2;
- bits |= ctrl & NAND_CLE;
- bits |= (ctrl & NAND_ALE) >> 2;
-
- __raw_writeb((__raw_readb(addr) & ~0x7) | bits, addr);
- }
-
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-/*
- * read device ready pin
- */
-static int ts7250_device_ready(struct mtd_info *mtd)
-{
- return __raw_readb(TS72XX_NAND_BUSY_VIRT_BASE) & 0x20;
-}
-
-/*
- * Main initialization routine
- */
-static int __init ts7250_init(void)
-{
- struct nand_chip *this;
- const char *part_type = 0;
- int mtd_parts_nb = 0;
- struct mtd_partition *mtd_parts = 0;
-
- if (!machine_is_ts72xx() || board_is_ts7200())
- return -ENXIO;
-
- /* Allocate memory for MTD device structure and private data */
- ts7250_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!ts7250_mtd) {
- printk("Unable to allocate TS7250 NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&ts7250_mtd[1]);
-
- /* Initialize structures */
- memset(ts7250_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- ts7250_mtd->priv = this;
- ts7250_mtd->owner = THIS_MODULE;
-
- /* insert callbacks */
- this->IO_ADDR_R = (void *)TS72XX_NAND_DATA_VIRT_BASE;
- this->IO_ADDR_W = (void *)TS72XX_NAND_DATA_VIRT_BASE;
- this->cmd_ctrl = ts7250_hwcontrol;
- this->dev_ready = ts7250_device_ready;
- this->chip_delay = 15;
- this->ecc.mode = NAND_ECC_SOFT;
-
- printk("Searching for NAND flash...\n");
- /* Scan to find existence of the device */
- if (nand_scan(ts7250_mtd, 1)) {
- kfree(ts7250_mtd);
- return -ENXIO;
- }
-#ifdef CONFIG_MTD_PARTITIONS
- ts7250_mtd->name = "ts7250-nand";
- mtd_parts_nb = parse_mtd_partitions(ts7250_mtd, part_probes, &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
-#endif
- if (mtd_parts_nb == 0) {
- mtd_parts = partition_info32;
- if (ts7250_mtd->size >= (128 * 0x100000))
- mtd_parts = partition_info128;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
-
- /* Register the partitions */
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(ts7250_mtd, mtd_parts, mtd_parts_nb);
-
- /* Return happy */
- return 0;
-}
-
-module_init(ts7250_init);
-
-/*
- * Clean up routine
- */
-static void __exit ts7250_cleanup(void)
-{
- /* Unregister the device */
- del_mtd_device(ts7250_mtd);
-
- /* Free the MTD device structure */
- kfree(ts7250_mtd);
-}
-
-module_exit(ts7250_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jesse Off <joff@embeddedARM.com>");
-MODULE_DESCRIPTION("MTD map driver for Technologic Systems TS-7250 board");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 863513c..054a41c 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -274,7 +274,7 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
struct nand_chip *chip = mtd->priv;
int ret;
- ret = nand_scan_ident(mtd, 1);
+ ret = nand_scan_ident(mtd, 1, NULL);
if (!ret) {
if (mtd->writesize >= 512) {
chip->ecc.size = mtd->writesize;
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 1002e18..a4578bf 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -126,7 +126,6 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
del_mtd_blktrans_dev(dev);
kfree(nftl->ReplUnitTable);
kfree(nftl->EUNtable);
- kfree(nftl);
}
/*
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 3a9f157..9a49d68 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -30,6 +30,13 @@ config MTD_ONENAND_OMAP2
Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU
via the GPMC memory controller.
+config MTD_ONENAND_SAMSUNG
+ tristate "OneNAND on Samsung SOC controller support"
+ depends on MTD_ONENAND && (ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210)
+ help
+ Support for a OneNAND flash device connected to a Samsung SOC
+ S3C64XX/S5PC1XX controller.
+
config MTD_ONENAND_OTP
bool "OneNAND OTP Support"
select HAVE_MTD_OTP
diff --git a/drivers/mtd/onenand/Makefile b/drivers/mtd/onenand/Makefile
index 64b6cc6..2b7884c 100644
--- a/drivers/mtd/onenand/Makefile
+++ b/drivers/mtd/onenand/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_MTD_ONENAND) += onenand.o
# Board specific.
obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o
obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o
+obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung.o
# Simulator
obj-$(CONFIG_MTD_ONENAND_SIM) += onenand_sim.o
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index fd40634..9f322f1 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -309,7 +309,7 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
goto out_copy;
/* panic_write() may be in an interrupt context */
- if (in_interrupt())
+ if (in_interrupt() || oops_in_progress)
goto out_copy;
if (buf >= high_memory) {
@@ -386,7 +386,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
goto out_copy;
/* panic_write() may be in an interrupt context */
- if (in_interrupt())
+ if (in_interrupt() || oops_in_progress)
goto out_copy;
if (buf >= high_memory) {
@@ -403,7 +403,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
dma_dst = c->phys_base + bram_offset;
- if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+ if (dma_mapping_error(&c->pdev->dev, dma_src)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
@@ -426,7 +426,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
if (*done)
break;
- dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
+ dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
if (!*done) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
@@ -521,7 +521,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
DMA_TO_DEVICE);
dma_dst = c->phys_base + bram_offset;
- if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+ if (dma_mapping_error(&c->pdev->dev, dma_src)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
@@ -539,7 +539,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
omap_start_dma(c->dma_channel);
wait_for_completion(&c->dma_done);
- dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
+ dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
return 0;
}
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 32f0ed3..26caf25 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -397,7 +397,8 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
- if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this) ||
+ ONENAND_IS_4KB_PAGE(this))
/* It is always BufferRAM0 */
ONENAND_SET_BUFFERRAM0(this);
else
@@ -426,7 +427,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
case FLEXONENAND_CMD_RECOVER_LSB:
case ONENAND_CMD_READ:
case ONENAND_CMD_READOOB:
- if (ONENAND_IS_MLC(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
/* It is always BufferRAM0 */
dataram = ONENAND_SET_BUFFERRAM0(this);
else
@@ -466,11 +467,11 @@ static inline int onenand_read_ecc(struct onenand_chip *this)
{
int ecc, i, result = 0;
- if (!FLEXONENAND(this))
+ if (!FLEXONENAND(this) && !ONENAND_IS_4KB_PAGE(this))
return this->read_word(this->base + ONENAND_REG_ECC_STATUS);
for (i = 0; i < 4; i++) {
- ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i);
+ ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i*2);
if (likely(!ecc))
continue;
if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR)
@@ -1425,7 +1426,7 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
int ret;
onenand_get_device(mtd, FL_READING);
- ret = ONENAND_IS_MLC(this) ?
+ ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, &ops) :
onenand_read_ops_nolock(mtd, from, &ops);
onenand_release_device(mtd);
@@ -1460,7 +1461,7 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
onenand_get_device(mtd, FL_READING);
if (ops->datbuf)
- ret = ONENAND_IS_MLC(this) ?
+ ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, ops) :
onenand_read_ops_nolock(mtd, from, ops);
else
@@ -1634,7 +1635,6 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len)
{
struct onenand_chip *this = mtd->priv;
- void __iomem *dataram;
int ret = 0;
int thislen, column;
@@ -1654,10 +1654,9 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr,
onenand_update_bufferram(mtd, addr, 1);
- dataram = this->base + ONENAND_DATARAM;
- dataram += onenand_bufferram_offset(mtd, ONENAND_DATARAM);
+ this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize);
- if (memcmp(buf, dataram + column, thislen))
+ if (memcmp(buf, this->verify_buf, thislen))
return -EBADMSG;
len -= thislen;
@@ -1926,7 +1925,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
* 2 PLANE, MLC, and Flex-OneNAND do not support
* write-while-program feature.
*/
- if (!ONENAND_IS_2PLANE(this) && !first) {
+ if (!ONENAND_IS_2PLANE(this) && !ONENAND_IS_4KB_PAGE(this) && !first) {
ONENAND_SET_PREV_BUFFERRAM(this);
ret = this->wait(mtd, FL_WRITING);
@@ -1957,7 +1956,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
/*
* 2 PLANE, MLC, and Flex-OneNAND wait here
*/
- if (ONENAND_IS_2PLANE(this)) {
+ if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this)) {
ret = this->wait(mtd, FL_WRITING);
/* In partial page write we don't update bufferram */
@@ -2084,7 +2083,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
memcpy(oobbuf + column, buf, thislen);
this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
- if (ONENAND_IS_MLC(this)) {
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) {
/* Set main area of DataRAM to 0xff*/
memset(this->page_buf, 0xff, mtd->writesize);
this->write_bufferram(mtd, ONENAND_DATARAM,
@@ -3027,7 +3026,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
- ret = ONENAND_IS_MLC(this) ?
+ ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, &ops) :
onenand_read_ops_nolock(mtd, from, &ops);
@@ -3372,7 +3371,10 @@ static void onenand_check_features(struct mtd_info *mtd)
/* Lock scheme */
switch (density) {
case ONENAND_DEVICE_DENSITY_4Gb:
- this->options |= ONENAND_HAS_2PLANE;
+ if (ONENAND_IS_DDP(this))
+ this->options |= ONENAND_HAS_2PLANE;
+ else
+ this->options |= ONENAND_HAS_4KB_PAGE;
case ONENAND_DEVICE_DENSITY_2Gb:
/* 2Gb DDP does not have 2 plane */
@@ -3393,7 +3395,7 @@ static void onenand_check_features(struct mtd_info *mtd)
break;
}
- if (ONENAND_IS_MLC(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
this->options &= ~ONENAND_HAS_2PLANE;
if (FLEXONENAND(this)) {
@@ -3407,6 +3409,8 @@ static void onenand_check_features(struct mtd_info *mtd)
printk(KERN_DEBUG "Chip support all block unlock\n");
if (this->options & ONENAND_HAS_2PLANE)
printk(KERN_DEBUG "Chip has 2 plane\n");
+ if (this->options & ONENAND_HAS_4KB_PAGE)
+ printk(KERN_DEBUG "Chip has 4KiB pagesize\n");
}
/**
@@ -3759,6 +3763,12 @@ static int onenand_probe(struct mtd_info *mtd)
/* Restore system configuration 1 */
this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+ /* Workaround */
+ if (syscfg & ONENAND_SYS_CFG1_SYNC_WRITE) {
+ bram_maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
+ bram_dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
+ }
+
/* Check manufacturer ID */
if (onenand_check_maf(bram_maf_id))
return -ENXIO;
@@ -3778,6 +3788,9 @@ static int onenand_probe(struct mtd_info *mtd)
this->device_id = dev_id;
this->version_id = ver_id;
+ /* Check OneNAND features */
+ onenand_check_features(mtd);
+
density = onenand_get_density(dev_id);
if (FLEXONENAND(this)) {
this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
@@ -3799,7 +3812,7 @@ static int onenand_probe(struct mtd_info *mtd)
/* The data buffer size is equal to page size */
mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
/* We use the full BufferRAM */
- if (ONENAND_IS_MLC(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
mtd->writesize <<= 1;
mtd->oobsize = mtd->writesize >> 5;
@@ -3829,9 +3842,6 @@ static int onenand_probe(struct mtd_info *mtd)
else
mtd->size = this->chipsize;
- /* Check OneNAND features */
- onenand_check_features(mtd);
-
/*
* We emulate the 4KiB page and 256KiB erase block size
* But oobsize is still 64 bytes.
@@ -3926,6 +3936,13 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
__func__);
return -ENOMEM;
}
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+ this->verify_buf = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!this->verify_buf) {
+ kfree(this->page_buf);
+ return -ENOMEM;
+ }
+#endif
this->options |= ONENAND_PAGEBUF_ALLOC;
}
if (!this->oob_buf) {
@@ -4053,8 +4070,12 @@ void onenand_release(struct mtd_info *mtd)
kfree(this->bbm);
}
/* Buffers allocated by onenand_scan */
- if (this->options & ONENAND_PAGEBUF_ALLOC)
+ if (this->options & ONENAND_PAGEBUF_ALLOC) {
kfree(this->page_buf);
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+ kfree(this->verify_buf);
+#endif
+ }
if (this->options & ONENAND_OOBBUF_ALLOC)
kfree(this->oob_buf);
kfree(mtd->eraseregions);
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
new file mode 100644
index 0000000..2750317
--- /dev/null
+++ b/drivers/mtd/onenand/samsung.c
@@ -0,0 +1,1071 @@
+/*
+ * Samsung S3C64XX/S5PC1XX OneNAND driver
+ *
+ * Copyright © 2008-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Implementation:
+ * S3C64XX and S5PC100: emulate the pseudo BufferRAM
+ * S5PC110: use DMA
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/mach/flash.h>
+#include <plat/regs-onenand.h>
+
+#include <linux/io.h>
+
+enum soc_type {
+ TYPE_S3C6400,
+ TYPE_S3C6410,
+ TYPE_S5PC100,
+ TYPE_S5PC110,
+};
+
+#define ONENAND_ERASE_STATUS 0x00
+#define ONENAND_MULTI_ERASE_SET 0x01
+#define ONENAND_ERASE_START 0x03
+#define ONENAND_UNLOCK_START 0x08
+#define ONENAND_UNLOCK_END 0x09
+#define ONENAND_LOCK_START 0x0A
+#define ONENAND_LOCK_END 0x0B
+#define ONENAND_LOCK_TIGHT_START 0x0C
+#define ONENAND_LOCK_TIGHT_END 0x0D
+#define ONENAND_UNLOCK_ALL 0x0E
+#define ONENAND_OTP_ACCESS 0x12
+#define ONENAND_SPARE_ACCESS_ONLY 0x13
+#define ONENAND_MAIN_ACCESS_ONLY 0x14
+#define ONENAND_ERASE_VERIFY 0x15
+#define ONENAND_MAIN_SPARE_ACCESS 0x16
+#define ONENAND_PIPELINE_READ 0x4000
+
+#define MAP_00 (0x0)
+#define MAP_01 (0x1)
+#define MAP_10 (0x2)
+#define MAP_11 (0x3)
+
+#define S3C64XX_CMD_MAP_SHIFT 24
+#define S5PC1XX_CMD_MAP_SHIFT 26
+
+#define S3C6400_FBA_SHIFT 10
+#define S3C6400_FPA_SHIFT 4
+#define S3C6400_FSA_SHIFT 2
+
+#define S3C6410_FBA_SHIFT 12
+#define S3C6410_FPA_SHIFT 6
+#define S3C6410_FSA_SHIFT 4
+
+#define S5PC100_FBA_SHIFT 13
+#define S5PC100_FPA_SHIFT 7
+#define S5PC100_FSA_SHIFT 5
+
+/* S5PC110 specific definitions */
+#define S5PC110_DMA_SRC_ADDR 0x400
+#define S5PC110_DMA_SRC_CFG 0x404
+#define S5PC110_DMA_DST_ADDR 0x408
+#define S5PC110_DMA_DST_CFG 0x40C
+#define S5PC110_DMA_TRANS_SIZE 0x414
+#define S5PC110_DMA_TRANS_CMD 0x418
+#define S5PC110_DMA_TRANS_STATUS 0x41C
+#define S5PC110_DMA_TRANS_DIR 0x420
+
+#define S5PC110_DMA_CFG_SINGLE (0x0 << 16)
+#define S5PC110_DMA_CFG_4BURST (0x2 << 16)
+#define S5PC110_DMA_CFG_8BURST (0x3 << 16)
+#define S5PC110_DMA_CFG_16BURST (0x4 << 16)
+
+#define S5PC110_DMA_CFG_INC (0x0 << 8)
+#define S5PC110_DMA_CFG_CNT (0x1 << 8)
+
+#define S5PC110_DMA_CFG_8BIT (0x0 << 0)
+#define S5PC110_DMA_CFG_16BIT (0x1 << 0)
+#define S5PC110_DMA_CFG_32BIT (0x2 << 0)
+
+#define S5PC110_DMA_SRC_CFG_READ (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_16BIT)
+#define S5PC110_DMA_DST_CFG_READ (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_32BIT)
+#define S5PC110_DMA_SRC_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_32BIT)
+#define S5PC110_DMA_DST_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_16BIT)
+
+#define S5PC110_DMA_TRANS_CMD_TDC (0x1 << 18)
+#define S5PC110_DMA_TRANS_CMD_TEC (0x1 << 16)
+#define S5PC110_DMA_TRANS_CMD_TR (0x1 << 0)
+
+#define S5PC110_DMA_TRANS_STATUS_TD (0x1 << 18)
+#define S5PC110_DMA_TRANS_STATUS_TB (0x1 << 17)
+#define S5PC110_DMA_TRANS_STATUS_TE (0x1 << 16)
+
+#define S5PC110_DMA_DIR_READ 0x0
+#define S5PC110_DMA_DIR_WRITE 0x1
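For reference, expanding the read-direction configuration words defined above is simple arithmetic over the definitions themselves (no extra assumptions):

/*
 * S5PC110_DMA_SRC_CFG_READ = 16BURST | INC | 16BIT = 0x40000 | 0x0 | 0x1 = 0x40001
 * S5PC110_DMA_DST_CFG_READ = 16BURST | INC | 32BIT = 0x40000 | 0x0 | 0x2 = 0x40002
 * i.e. a read is a 16-beat incrementing burst, 16 bits wide on the source
 * (the OneNAND side, as programmed by s5pc110_dma_ops() further down) and
 * 32 bits wide on the destination (memory) side; the write-direction words
 * simply swap the two widths.
 */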
+
+struct s3c_onenand {
+ struct mtd_info *mtd;
+ struct platform_device *pdev;
+ enum soc_type type;
+ void __iomem *base;
+ struct resource *base_res;
+ void __iomem *ahb_addr;
+ struct resource *ahb_res;
+ int bootram_command;
+ void __iomem *page_buf;
+ void __iomem *oob_buf;
+ unsigned int (*mem_addr)(int fba, int fpa, int fsa);
+ unsigned int (*cmd_map)(unsigned int type, unsigned int val);
+ void __iomem *dma_addr;
+ struct resource *dma_res;
+ unsigned long phys_base;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *parts;
+#endif
+};
+
+#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1)))
+#define CMD_MAP_01(dev, mem_addr) (dev->cmd_map(MAP_01, (mem_addr)))
+#define CMD_MAP_10(dev, mem_addr) (dev->cmd_map(MAP_10, (mem_addr)))
+#define CMD_MAP_11(dev, addr) (dev->cmd_map(MAP_11, ((addr) << 2)))
+
+static struct s3c_onenand *onenand;
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL, };
+#endif
+
+static inline int s3c_read_reg(int offset)
+{
+ return readl(onenand->base + offset);
+}
+
+static inline void s3c_write_reg(int value, int offset)
+{
+ writel(value, onenand->base + offset);
+}
+
+static inline int s3c_read_cmd(unsigned int cmd)
+{
+ return readl(onenand->ahb_addr + cmd);
+}
+
+static inline void s3c_write_cmd(int value, unsigned int cmd)
+{
+ writel(value, onenand->ahb_addr + cmd);
+}
+
+#ifdef SAMSUNG_DEBUG
+static void s3c_dump_reg(void)
+{
+ int i;
+
+ for (i = 0; i < 0x400; i += 0x40) {
+ printk(KERN_INFO "0x%08X: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ (unsigned int) onenand->base + i,
+ s3c_read_reg(i), s3c_read_reg(i + 0x10),
+ s3c_read_reg(i + 0x20), s3c_read_reg(i + 0x30));
+ }
+}
+#endif
+
+static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val)
+{
+ return (type << S3C64XX_CMD_MAP_SHIFT) | val;
+}
+
+static unsigned int s5pc1xx_cmd_map(unsigned type, unsigned val)
+{
+ return (type << S5PC1XX_CMD_MAP_SHIFT) | val;
+}
+
+static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa)
+{
+ return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) |
+ (fsa << S3C6400_FSA_SHIFT);
+}
+
+static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa)
+{
+ return (fba << S3C6410_FBA_SHIFT) | (fpa << S3C6410_FPA_SHIFT) |
+ (fsa << S3C6410_FSA_SHIFT);
+}
+
+static unsigned int s5pc100_mem_addr(int fba, int fpa, int fsa)
+{
+ return (fba << S5PC100_FBA_SHIFT) | (fpa << S5PC100_FPA_SHIFT) |
+ (fsa << S5PC100_FSA_SHIFT);
+}
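As a hedged illustration of how the shift macros, the mem_addr helpers and the CMD_MAP_* macros combine, here is a worked example for an S3C6410-class controller; the block/page/sector numbers are made up purely for the example:

/* Illustrative only: main-area access to block 5, page 3, sector 0 */
unsigned int mem_addr = s3c6410_mem_addr(5, 3, 0);
	/* = (5 << 12) | (3 << 6) | (0 << 4) = 0x50C0 */
unsigned int data_cmd = s3c64xx_cmd_map(MAP_01, mem_addr);
	/* = (MAP_01 << 24) | 0x50C0; this is the address s3c_read_cmd() and
	 * s3c_write_cmd() then use to stream BufferRAM words inside
	 * s3c_onenand_command() */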
+
+static void s3c_onenand_reset(void)
+{
+ unsigned long timeout = 0x10000;
+ int stat;
+
+ s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
+	while (timeout--) {
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ if (stat & RST_CMP)
+ break;
+ }
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+ /* Clear interrupt */
+ s3c_write_reg(0x0, INT_ERR_ACK_OFFSET);
+ /* Clear the ECC status */
+ s3c_write_reg(0x0, ECC_ERR_STAT_OFFSET);
+}
+
+static unsigned short s3c_onenand_readw(void __iomem *addr)
+{
+ struct onenand_chip *this = onenand->mtd->priv;
+ struct device *dev = &onenand->pdev->dev;
+ int reg = addr - this->base;
+ int word_addr = reg >> 1;
+ int value;
+
+	/* Used at probe time */
+ switch (reg) {
+ case ONENAND_REG_MANUFACTURER_ID:
+ return s3c_read_reg(MANUFACT_ID_OFFSET);
+ case ONENAND_REG_DEVICE_ID:
+ return s3c_read_reg(DEVICE_ID_OFFSET);
+ case ONENAND_REG_VERSION_ID:
+ return s3c_read_reg(FLASH_VER_ID_OFFSET);
+ case ONENAND_REG_DATA_BUFFER_SIZE:
+ return s3c_read_reg(DATA_BUF_SIZE_OFFSET);
+ case ONENAND_REG_TECHNOLOGY:
+ return s3c_read_reg(TECH_OFFSET);
+ case ONENAND_REG_SYS_CFG1:
+ return s3c_read_reg(MEM_CFG_OFFSET);
+
+ /* Used at unlock all status */
+ case ONENAND_REG_CTRL_STATUS:
+ return 0;
+
+ case ONENAND_REG_WP_STATUS:
+ return ONENAND_WP_US;
+
+ default:
+ break;
+ }
+
+ /* BootRAM access control */
+ if ((unsigned int) addr < ONENAND_DATARAM && onenand->bootram_command) {
+ if (word_addr == 0)
+ return s3c_read_reg(MANUFACT_ID_OFFSET);
+ if (word_addr == 1)
+ return s3c_read_reg(DEVICE_ID_OFFSET);
+ if (word_addr == 2)
+ return s3c_read_reg(FLASH_VER_ID_OFFSET);
+ }
+
+ value = s3c_read_cmd(CMD_MAP_11(onenand, word_addr)) & 0xffff;
+ dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
+ word_addr, value);
+ return value;
+}
+
+static void s3c_onenand_writew(unsigned short value, void __iomem *addr)
+{
+ struct onenand_chip *this = onenand->mtd->priv;
+ struct device *dev = &onenand->pdev->dev;
+ unsigned int reg = addr - this->base;
+ unsigned int word_addr = reg >> 1;
+
+	/* Used at probe time */
+ switch (reg) {
+ case ONENAND_REG_SYS_CFG1:
+ s3c_write_reg(value, MEM_CFG_OFFSET);
+ return;
+
+ case ONENAND_REG_START_ADDRESS1:
+ case ONENAND_REG_START_ADDRESS2:
+ return;
+
+ /* Lock/lock-tight/unlock/unlock_all */
+ case ONENAND_REG_START_BLOCK_ADDRESS:
+ return;
+
+ default:
+ break;
+ }
+
+ /* BootRAM access control */
+ if ((unsigned int)addr < ONENAND_DATARAM) {
+ if (value == ONENAND_CMD_READID) {
+ onenand->bootram_command = 1;
+ return;
+ }
+ if (value == ONENAND_CMD_RESET) {
+ s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
+ onenand->bootram_command = 0;
+ return;
+ }
+ }
+
+ dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
+ word_addr, value);
+
+ s3c_write_cmd(value, CMD_MAP_11(onenand, word_addr));
+}
+
+static int s3c_onenand_wait(struct mtd_info *mtd, int state)
+{
+ struct device *dev = &onenand->pdev->dev;
+ unsigned int flags = INT_ACT;
+ unsigned int stat, ecc;
+ unsigned long timeout;
+
+ switch (state) {
+ case FL_READING:
+ flags |= BLK_RW_CMP | LOAD_CMP;
+ break;
+ case FL_WRITING:
+ flags |= BLK_RW_CMP | PGM_CMP;
+ break;
+ case FL_ERASING:
+ flags |= BLK_RW_CMP | ERS_CMP;
+ break;
+ case FL_LOCKING:
+ flags |= BLK_RW_CMP;
+ break;
+ default:
+ break;
+ }
+
+	/* 20 msec should be enough */
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (time_before(jiffies, timeout)) {
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ if (stat & flags)
+ break;
+
+ if (state != FL_READING)
+ cond_resched();
+ }
+ /* To get correct interrupt status in timeout case */
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+	/*
+	 * The spec says to check the controller status first.
+	 * However, to get correct information in the power-off
+	 * recovery (POR) test case, the ECC status must be read first.
+	 */
+ if (stat & LOAD_CMP) {
+ ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
+ if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
+ dev_info(dev, "%s: ECC error = 0x%04x\n", __func__,
+ ecc);
+ mtd->ecc_stats.failed++;
+ return -EBADMSG;
+ }
+ }
+
+ if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) {
+ dev_info(dev, "%s: controller error = 0x%04x\n", __func__,
+ stat);
+ if (stat & LOCKED_BLK)
+ dev_info(dev, "%s: it's locked error = 0x%04x\n",
+ __func__, stat);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
+ size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int *m, *s;
+ int fba, fpa, fsa = 0;
+ unsigned int mem_addr, cmd_map_01, cmd_map_10;
+ int i, mcount, scount;
+ int index;
+
+ fba = (int) (addr >> this->erase_shift);
+ fpa = (int) (addr >> this->page_shift);
+ fpa &= this->page_mask;
+
+ mem_addr = onenand->mem_addr(fba, fpa, fsa);
+ cmd_map_01 = CMD_MAP_01(onenand, mem_addr);
+ cmd_map_10 = CMD_MAP_10(onenand, mem_addr);
+
+ switch (cmd) {
+ case ONENAND_CMD_READ:
+ case ONENAND_CMD_READOOB:
+ case ONENAND_CMD_BUFFERRAM:
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ default:
+ break;
+ }
+
+ index = ONENAND_CURRENT_BUFFERRAM(this);
+
+ /*
+ * Emulate Two BufferRAMs and access with 4 bytes pointer
+ */
+ m = (unsigned int *) onenand->page_buf;
+ s = (unsigned int *) onenand->oob_buf;
+
+ if (index) {
+ m += (this->writesize >> 2);
+ s += (mtd->oobsize >> 2);
+ }
+
+ mcount = mtd->writesize >> 2;
+ scount = mtd->oobsize >> 2;
+
+ switch (cmd) {
+ case ONENAND_CMD_READ:
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ *m++ = s3c_read_cmd(cmd_map_01);
+ return 0;
+
+ case ONENAND_CMD_READOOB:
+ s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ *m++ = s3c_read_cmd(cmd_map_01);
+
+ /* Spare */
+ for (i = 0; i < scount; i++)
+ *s++ = s3c_read_cmd(cmd_map_01);
+
+ s3c_write_reg(0, TRANS_SPARE_OFFSET);
+ return 0;
+
+ case ONENAND_CMD_PROG:
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ s3c_write_cmd(*m++, cmd_map_01);
+ return 0;
+
+ case ONENAND_CMD_PROGOOB:
+ s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
+
+ /* Main - dummy write */
+ for (i = 0; i < mcount; i++)
+ s3c_write_cmd(0xffffffff, cmd_map_01);
+
+ /* Spare */
+ for (i = 0; i < scount; i++)
+ s3c_write_cmd(*s++, cmd_map_01);
+
+ s3c_write_reg(0, TRANS_SPARE_OFFSET);
+ return 0;
+
+ case ONENAND_CMD_UNLOCK_ALL:
+ s3c_write_cmd(ONENAND_UNLOCK_ALL, cmd_map_10);
+ return 0;
+
+ case ONENAND_CMD_ERASE:
+ s3c_write_cmd(ONENAND_ERASE_START, cmd_map_10);
+ return 0;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
+{
+ struct onenand_chip *this = mtd->priv;
+ int index = ONENAND_CURRENT_BUFFERRAM(this);
+ unsigned char *p;
+
+ if (area == ONENAND_DATARAM) {
+ p = (unsigned char *) onenand->page_buf;
+ if (index == 1)
+ p += this->writesize;
+ } else {
+ p = (unsigned char *) onenand->oob_buf;
+ if (index == 1)
+ p += mtd->oobsize;
+ }
+
+ return p;
+}
+
+static int onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ unsigned char *p;
+
+ p = s3c_get_bufferram(mtd, area);
+ memcpy(buffer, p + offset, count);
+ return 0;
+}
+
+static int onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer, int offset,
+ size_t count)
+{
+ unsigned char *p;
+
+ p = s3c_get_bufferram(mtd, area);
+ memcpy(p + offset, buffer, count);
+ return 0;
+}
+
+static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction)
+{
+ void __iomem *base = onenand->dma_addr;
+ int status;
+
+ writel(src, base + S5PC110_DMA_SRC_ADDR);
+ writel(dst, base + S5PC110_DMA_DST_ADDR);
+
+ if (direction == S5PC110_DMA_DIR_READ) {
+ writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
+ writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
+ } else {
+ writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
+ writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
+ }
+
+ writel(count, base + S5PC110_DMA_TRANS_SIZE);
+ writel(direction, base + S5PC110_DMA_TRANS_DIR);
+
+ writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
+
+ do {
+ status = readl(base + S5PC110_DMA_TRANS_STATUS);
+ } while (!(status & S5PC110_DMA_TRANS_STATUS_TD));
+
+ if (status & S5PC110_DMA_TRANS_STATUS_TE) {
+ writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD);
+ writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
+ return -EIO;
+ }
+
+ writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
+
+ return 0;
+}
+
+static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset, size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *bufferram;
+ void __iomem *p;
+ void *buf = (void *) buffer;
+ dma_addr_t dma_src, dma_dst;
+ int err;
+
+ p = bufferram = this->base + area;
+ if (ONENAND_CURRENT_BUFFERRAM(this)) {
+ if (area == ONENAND_DATARAM)
+ p += this->writesize;
+ else
+ p += mtd->oobsize;
+ }
+
+ if (offset & 3 || (size_t) buf & 3 ||
+ !onenand->dma_addr || count != mtd->writesize)
+ goto normal;
+
+ /* Handle vmalloc address */
+ if (buf >= high_memory) {
+ struct page *page;
+
+ if (((size_t) buf & PAGE_MASK) !=
+ ((size_t) (buf + count - 1) & PAGE_MASK))
+ goto normal;
+ page = vmalloc_to_page(buf);
+ if (!page)
+ goto normal;
+ buf = page_address(page) + ((size_t) buf & ~PAGE_MASK);
+ }
+
+ /* DMA routine */
+ dma_src = onenand->phys_base + (p - this->base);
+ dma_dst = dma_map_single(&onenand->pdev->dev,
+ buf, count, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&onenand->pdev->dev, dma_dst)) {
+ dev_err(&onenand->pdev->dev,
+ "Couldn't map a %d byte buffer for DMA\n", count);
+ goto normal;
+ }
+ err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src,
+ count, S5PC110_DMA_DIR_READ);
+ dma_unmap_single(&onenand->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
+
+ if (!err)
+ return 0;
+
+normal:
+ if (count != mtd->writesize) {
+ /* Copy the bufferram to memory to prevent unaligned access */
+ memcpy(this->page_buf, bufferram, mtd->writesize);
+ p = this->page_buf + offset;
+ }
+
+ memcpy(buffer, p, count);
+
+ return 0;
+}
+
+static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state)
+{
+ unsigned int flags = INT_ACT | LOAD_CMP;
+ unsigned int stat;
+ unsigned long timeout;
+
+	/* 20 msec should be enough */
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (time_before(jiffies, timeout)) {
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ if (stat & flags)
+ break;
+ }
+ /* To get correct interrupt status in timeout case */
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+ if (stat & LD_FAIL_ECC_ERR) {
+ s3c_onenand_reset();
+ return ONENAND_BBT_READ_ERROR;
+ }
+
+ if (stat & LOAD_CMP) {
+ int ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
+ if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
+ s3c_onenand_reset();
+ return ONENAND_BBT_READ_ERROR;
+ }
+ }
+
+ return 0;
+}
+
+static void s3c_onenand_check_lock_status(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct device *dev = &onenand->pdev->dev;
+ unsigned int block, end;
+ int tmp;
+
+ end = this->chipsize >> this->erase_shift;
+
+ for (block = 0; block < end; block++) {
+ unsigned int mem_addr = onenand->mem_addr(block, 0, 0);
+ tmp = s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));
+
+ if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) {
+ dev_err(dev, "block %d is write-protected!\n", block);
+ s3c_write_reg(LOCKED_BLK, INT_ERR_ACK_OFFSET);
+ }
+ }
+}
+
+static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs,
+ size_t len, int cmd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int start, end, start_mem_addr, end_mem_addr;
+
+ start = ofs >> this->erase_shift;
+ start_mem_addr = onenand->mem_addr(start, 0, 0);
+ end = start + (len >> this->erase_shift) - 1;
+ end_mem_addr = onenand->mem_addr(end, 0, 0);
+
+ if (cmd == ONENAND_CMD_LOCK) {
+ s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(onenand,
+ start_mem_addr));
+ s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(onenand,
+ end_mem_addr));
+ } else {
+ s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(onenand,
+ start_mem_addr));
+ s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(onenand,
+ end_mem_addr));
+ }
+
+ this->wait(mtd, FL_LOCKING);
+}
+
+static void s3c_unlock_all(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t ofs = 0;
+ size_t len = this->chipsize;
+
+ if (this->options & ONENAND_HAS_UNLOCK_ALL) {
+ /* Write unlock command */
+ this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);
+
+ /* No need to check return value */
+ this->wait(mtd, FL_LOCKING);
+
+ /* Workaround for all block unlock in DDP */
+ if (!ONENAND_IS_DDP(this)) {
+ s3c_onenand_check_lock_status(mtd);
+ return;
+ }
+
+ /* All blocks on another chip */
+ ofs = this->chipsize >> 1;
+ len = this->chipsize >> 1;
+ }
+
+ s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+
+ s3c_onenand_check_lock_status(mtd);
+}
+
+static void s3c_onenand_setup(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ onenand->mtd = mtd;
+
+ if (onenand->type == TYPE_S3C6400) {
+ onenand->mem_addr = s3c6400_mem_addr;
+ onenand->cmd_map = s3c64xx_cmd_map;
+ } else if (onenand->type == TYPE_S3C6410) {
+ onenand->mem_addr = s3c6410_mem_addr;
+ onenand->cmd_map = s3c64xx_cmd_map;
+ } else if (onenand->type == TYPE_S5PC100) {
+ onenand->mem_addr = s5pc100_mem_addr;
+ onenand->cmd_map = s5pc1xx_cmd_map;
+ } else if (onenand->type == TYPE_S5PC110) {
+ /* Use generic onenand functions */
+ onenand->cmd_map = s5pc1xx_cmd_map;
+ this->read_bufferram = s5pc110_read_bufferram;
+ return;
+ } else {
+ BUG();
+ }
+
+ this->read_word = s3c_onenand_readw;
+ this->write_word = s3c_onenand_writew;
+
+ this->wait = s3c_onenand_wait;
+ this->bbt_wait = s3c_onenand_bbt_wait;
+ this->unlock_all = s3c_unlock_all;
+ this->command = s3c_onenand_command;
+
+ this->read_bufferram = onenand_read_bufferram;
+ this->write_bufferram = onenand_write_bufferram;
+}
+
+static int s3c_onenand_probe(struct platform_device *pdev)
+{
+ struct onenand_platform_data *pdata;
+ struct onenand_chip *this;
+ struct mtd_info *mtd;
+ struct resource *r;
+ int size, err;
+ unsigned long onenand_ctrl_cfg = 0;
+
+ pdata = pdev->dev.platform_data;
+	/* No need to check pdata; the platform data is optional */
+
+ size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
+ mtd = kzalloc(size, GFP_KERNEL);
+ if (!mtd) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL);
+ if (!onenand) {
+ err = -ENOMEM;
+ goto onenand_fail;
+ }
+
+ this = (struct onenand_chip *) &mtd[1];
+ mtd->priv = this;
+ mtd->dev.parent = &pdev->dev;
+ mtd->owner = THIS_MODULE;
+ onenand->pdev = pdev;
+ onenand->type = platform_get_device_id(pdev)->driver_data;
+
+ s3c_onenand_setup(mtd);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+		err = -ENOENT;
+		goto resource_failed;
+ }
+
+ onenand->base_res = request_mem_region(r->start, resource_size(r),
+ pdev->name);
+ if (!onenand->base_res) {
+ dev_err(&pdev->dev, "failed to request memory resource\n");
+ err = -EBUSY;
+ goto resource_failed;
+ }
+
+ onenand->base = ioremap(r->start, resource_size(r));
+ if (!onenand->base) {
+ dev_err(&pdev->dev, "failed to map memory resource\n");
+ err = -EFAULT;
+ goto ioremap_failed;
+ }
+ /* Set onenand_chip also */
+ this->base = onenand->base;
+
+ /* Use runtime badblock check */
+ this->options |= ONENAND_SKIP_UNLOCK_CHECK;
+
+ if (onenand->type != TYPE_S5PC110) {
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!r) {
+ dev_err(&pdev->dev, "no buffer memory resource defined\n");
+			err = -ENOENT;
+			goto ahb_resource_failed;
+ }
+
+ onenand->ahb_res = request_mem_region(r->start, resource_size(r),
+ pdev->name);
+ if (!onenand->ahb_res) {
+ dev_err(&pdev->dev, "failed to request buffer memory resource\n");
+ err = -EBUSY;
+ goto ahb_resource_failed;
+ }
+
+ onenand->ahb_addr = ioremap(r->start, resource_size(r));
+ if (!onenand->ahb_addr) {
+ dev_err(&pdev->dev, "failed to map buffer memory resource\n");
+ err = -EINVAL;
+ goto ahb_ioremap_failed;
+ }
+
+ /* Allocate 4KiB BufferRAM */
+ onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!onenand->page_buf) {
+ err = -ENOMEM;
+ goto page_buf_fail;
+ }
+
+		/* Allocate 128 bytes for SpareRAM */
+ onenand->oob_buf = kzalloc(128, GFP_KERNEL);
+ if (!onenand->oob_buf) {
+ err = -ENOMEM;
+ goto oob_buf_fail;
+ }
+
+ /* S3C doesn't handle subpage write */
+ mtd->subpage_sft = 0;
+ this->subpagesize = mtd->writesize;
+
+ } else { /* S5PC110 */
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!r) {
+ dev_err(&pdev->dev, "no dma memory resource defined\n");
+			err = -ENOENT;
+			goto dma_resource_failed;
+ }
+
+ onenand->dma_res = request_mem_region(r->start, resource_size(r),
+ pdev->name);
+ if (!onenand->dma_res) {
+ dev_err(&pdev->dev, "failed to request dma memory resource\n");
+ err = -EBUSY;
+ goto dma_resource_failed;
+ }
+
+ onenand->dma_addr = ioremap(r->start, resource_size(r));
+ if (!onenand->dma_addr) {
+ dev_err(&pdev->dev, "failed to map dma memory resource\n");
+ err = -EINVAL;
+ goto dma_ioremap_failed;
+ }
+
+ onenand->phys_base = onenand->base_res->start;
+
+ onenand_ctrl_cfg = readl(onenand->dma_addr + 0x100);
+ if ((onenand_ctrl_cfg & ONENAND_SYS_CFG1_SYNC_WRITE) &&
+ onenand->dma_addr)
+ writel(onenand_ctrl_cfg & ~ONENAND_SYS_CFG1_SYNC_WRITE,
+ onenand->dma_addr + 0x100);
+ else
+ onenand_ctrl_cfg = 0;
+ }
+
+ if (onenand_scan(mtd, 1)) {
+ err = -EFAULT;
+ goto scan_failed;
+ }
+
+ if (onenand->type == TYPE_S5PC110) {
+ if (onenand_ctrl_cfg && onenand->dma_addr)
+ writel(onenand_ctrl_cfg, onenand->dma_addr + 0x100);
+ } else {
+ /* S3C doesn't handle subpage write */
+ mtd->subpage_sft = 0;
+ this->subpagesize = mtd->writesize;
+ }
+
+ if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
+ dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
+
+#ifdef CONFIG_MTD_PARTITIONS
+ err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0);
+ if (err > 0)
+ add_mtd_partitions(mtd, onenand->parts, err);
+ else if (err <= 0 && pdata && pdata->parts)
+ add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
+ else
+#endif
+ err = add_mtd_device(mtd);
+
+ platform_set_drvdata(pdev, mtd);
+
+ return 0;
+
+scan_failed:
+ if (onenand->dma_addr)
+ iounmap(onenand->dma_addr);
+dma_ioremap_failed:
+ if (onenand->dma_res)
+ release_mem_region(onenand->dma_res->start,
+ resource_size(onenand->dma_res));
+ kfree(onenand->oob_buf);
+oob_buf_fail:
+ kfree(onenand->page_buf);
+page_buf_fail:
+ if (onenand->ahb_addr)
+ iounmap(onenand->ahb_addr);
+ahb_ioremap_failed:
+ if (onenand->ahb_res)
+ release_mem_region(onenand->ahb_res->start,
+ resource_size(onenand->ahb_res));
+dma_resource_failed:
+ahb_resource_failed:
+ iounmap(onenand->base);
+ioremap_failed:
+ if (onenand->base_res)
+ release_mem_region(onenand->base_res->start,
+ resource_size(onenand->base_res));
+resource_failed:
+ kfree(onenand);
+onenand_fail:
+ kfree(mtd);
+ return err;
+}
+
+static int __devexit s3c_onenand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+
+ onenand_release(mtd);
+ if (onenand->ahb_addr)
+ iounmap(onenand->ahb_addr);
+ if (onenand->ahb_res)
+ release_mem_region(onenand->ahb_res->start,
+ resource_size(onenand->ahb_res));
+ if (onenand->dma_addr)
+ iounmap(onenand->dma_addr);
+ if (onenand->dma_res)
+ release_mem_region(onenand->dma_res->start,
+ resource_size(onenand->dma_res));
+
+ iounmap(onenand->base);
+ release_mem_region(onenand->base_res->start,
+ resource_size(onenand->base_res));
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(onenand->oob_buf);
+ kfree(onenand->page_buf);
+ kfree(onenand);
+ kfree(mtd);
+ return 0;
+}
+
+static int s3c_pm_ops_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct onenand_chip *this = mtd->priv;
+
+ this->wait(mtd, FL_PM_SUSPENDED);
+ return mtd->suspend(mtd);
+}
+
+static int s3c_pm_ops_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct onenand_chip *this = mtd->priv;
+
+ mtd->resume(mtd);
+ this->unlock_all(mtd);
+ return 0;
+}
+
+static const struct dev_pm_ops s3c_pm_ops = {
+ .suspend = s3c_pm_ops_suspend,
+ .resume = s3c_pm_ops_resume,
+};
+
+static struct platform_device_id s3c_onenand_driver_ids[] = {
+ {
+ .name = "s3c6400-onenand",
+ .driver_data = TYPE_S3C6400,
+ }, {
+ .name = "s3c6410-onenand",
+ .driver_data = TYPE_S3C6410,
+ }, {
+ .name = "s5pc100-onenand",
+ .driver_data = TYPE_S5PC100,
+ }, {
+ .name = "s5pc110-onenand",
+ .driver_data = TYPE_S5PC110,
+ }, { },
+};
+MODULE_DEVICE_TABLE(platform, s3c_onenand_driver_ids);
+
+static struct platform_driver s3c_onenand_driver = {
+ .driver = {
+ .name = "samsung-onenand",
+ .pm = &s3c_pm_ops,
+ },
+ .id_table = s3c_onenand_driver_ids,
+ .probe = s3c_onenand_probe,
+ .remove = __devexit_p(s3c_onenand_remove),
+};
+
+static int __init s3c_onenand_init(void)
+{
+ return platform_driver_register(&s3c_onenand_driver);
+}
+
+static void __exit s3c_onenand_exit(void)
+{
+ platform_driver_unregister(&s3c_onenand_driver);
+}
+
+module_init(s3c_onenand_init);
+module_exit(s3c_onenand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+MODULE_DESCRIPTION("Samsung OneNAND controller support");
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index d2aa9c46..63b83c0 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -817,7 +817,6 @@ static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
vfree(part->sector_map);
kfree(part->header_cache);
kfree(part->blocks);
- kfree(part);
}
static struct mtd_blktrans_ops rfd_ftl_tr = {
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
new file mode 100644
index 0000000..67822cf
--- /dev/null
+++ b/drivers/mtd/sm_ftl.c
@@ -0,0 +1,1284 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * SmartMedia/xD translation layer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/hdreg.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/sysfs.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand_ecc.h>
+#include "nand/sm_common.h"
+#include "sm_ftl.h"
+
+
+
+struct workqueue_struct *cache_flush_workqueue;
+
+static int cache_timeout = 1000;
+module_param(cache_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(cache_timeout,
+	"Timeout (in ms) for cache flush (1000 ms default)");
+
+static int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+
+/* ------------------- sysfs attributes ----------------------------------- */
+struct sm_sysfs_attribute {
+ struct device_attribute dev_attr;
+ char *data;
+ int len;
+};
+
+ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sm_sysfs_attribute *sm_attr =
+ container_of(attr, struct sm_sysfs_attribute, dev_attr);
+
+ strncpy(buf, sm_attr->data, sm_attr->len);
+ return sm_attr->len;
+}
+
+
+#define NUM_ATTRIBUTES 1
+#define SM_CIS_VENDOR_OFFSET 0x59
+struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
+{
+ struct attribute_group *attr_group;
+ struct attribute **attributes;
+ struct sm_sysfs_attribute *vendor_attribute;
+
+ int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
+ SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET);
+
+	char *vendor = kmalloc(vendor_len + 1, GFP_KERNEL);
+ memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len);
+ vendor[vendor_len] = 0;
+
+ /* Initialize sysfs attributes */
+ vendor_attribute =
+ kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
+
+ sysfs_attr_init(&vendor_attribute->dev_attr.attr);
+
+ vendor_attribute->data = vendor;
+ vendor_attribute->len = vendor_len;
+ vendor_attribute->dev_attr.attr.name = "vendor";
+ vendor_attribute->dev_attr.attr.mode = S_IRUGO;
+ vendor_attribute->dev_attr.show = sm_attr_show;
+
+
+ /* Create array of pointers to the attributes */
+ attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1),
+ GFP_KERNEL);
+ attributes[0] = &vendor_attribute->dev_attr.attr;
+
+ /* Finally create the attribute group */
+ attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ attr_group->attrs = attributes;
+ return attr_group;
+}
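The group returned above still has to be attached to a kobject; the attach point is not in this hunk, so the following is only a sketch under the assumption that some struct device *dev (for example the translation layer's disk device) is available:

/* Sketch only: 'dev' and sm_attach_sysfs() are placeholders, not part of this patch. */
static int sm_attach_sysfs(struct device *dev, struct sm_ftl *ftl)
{
	ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
	if (!ftl->disk_attributes)
		return -ENOMEM;
	return sysfs_create_group(&dev->kobj, ftl->disk_attributes);
}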
+
+void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
+{
+ struct attribute **attributes = ftl->disk_attributes->attrs;
+ int i;
+
+ for (i = 0; attributes[i] ; i++) {
+
+ struct device_attribute *dev_attr = container_of(attributes[i],
+ struct device_attribute, attr);
+
+ struct sm_sysfs_attribute *sm_attr =
+ container_of(dev_attr,
+ struct sm_sysfs_attribute, dev_attr);
+
+ kfree(sm_attr->data);
+ kfree(sm_attr);
+ }
+
+ kfree(ftl->disk_attributes->attrs);
+ kfree(ftl->disk_attributes);
+}
+
+
+/* ----------------------- oob helpers -------------------------------------- */
+
+static int sm_get_lba(uint8_t *lba)
+{
+ /* check fixed bits */
+ if ((lba[0] & 0xF8) != 0x10)
+ return -2;
+
+	/* check parity - endianness doesn't matter */
+ if (hweight16(*(uint16_t *)lba) & 1)
+ return -2;
+
+ return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
+}
+
+
+/*
+ * Read the LBA associated with a block
+ * returns -1 if the block is erased
+ * returns -2 if an error happens
+ */
+static int sm_read_lba(struct sm_oob *oob)
+{
+ static const uint32_t erased_pattern[4] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ uint16_t lba_test;
+ int lba;
+
+ /* First test for erased block */
+ if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
+ return -1;
+
+	/* Now check whether the two copies of the LBA differ by more than one bit */
+ lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2;
+ if (lba_test && !is_power_of_2(lba_test))
+ return -2;
+
+ /* And read it */
+ lba = sm_get_lba(oob->lba_copy1);
+
+ if (lba == -2)
+ lba = sm_get_lba(oob->lba_copy2);
+
+ return lba;
+}
+
+static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
+{
+ uint8_t tmp[2];
+
+ WARN_ON(lba >= 1000);
+
+ tmp[0] = 0x10 | ((lba >> 7) & 0x07);
+ tmp[1] = (lba << 1) & 0xFF;
+
+ if (hweight16(*(uint16_t *)tmp) & 0x01)
+ tmp[1] |= 1;
+
+ oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
+ oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
+}
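A worked example of the LBA encoding implemented by sm_write_lba() and decoded by sm_get_lba(), for an arbitrarily chosen LBA of 5:

/*
 * Encode LBA 5:
 *   tmp[0] = 0x10 | ((5 >> 7) & 0x07) = 0x10
 *   tmp[1] = (5 << 1) & 0xFF          = 0x0A
 *   the pair 0x10/0x0A has 3 bits set (odd), so the parity bit is added:
 *   tmp[1] |= 1  ->  0x0B  (4 bits set in total, i.e. even parity)
 *
 * Decode 0x10/0x0B with sm_get_lba():
 *   (0x10 & 0xF8) == 0x10                   -> fixed bits OK
 *   hweight16() of the pair is 4 (even)     -> parity OK
 *   (0x0B >> 1) | ((0x10 & 0x07) << 7) = 5  -> original LBA recovered
 */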
+
+
+/* Make offset from parts */
+static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
+{
+ WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
+ WARN_ON(zone < 0 || zone >= ftl->zone_count);
+ WARN_ON(block >= ftl->zone_size);
+ WARN_ON(boffset >= ftl->block_size);
+
+ if (block == -1)
+ return -1;
+
+ return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
+}
+
+/* Breaks offset into parts */
+static void sm_break_offset(struct sm_ftl *ftl, loff_t offset,
+ int *zone, int *block, int *boffset)
+{
+ *boffset = do_div(offset, ftl->block_size);
+ *block = do_div(offset, ftl->max_lba);
+ *zone = offset >= ftl->zone_count ? -1 : offset;
+}
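For a concrete feel of sm_break_offset(), here is the arithmetic for a hypothetical logical sector 70000, assuming the standard xD parameters picked later by sm_get_media_info() (block_size = 32 * 512 = 16384, max_lba = 1000) and a card with at least three zones:

/*
 * offset  = 70000 << 9        = 35840000
 * boffset = 35840000 % 16384  = 8192   (offset becomes 2187 blocks)
 * block   = 2187 % 1000       = 187    (offset becomes 2 zones)
 * zone    = 2                           (valid, since 2 < zone_count)
 */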
+
+/* ---------------------- low level IO ------------------------------------- */
+
+static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
+{
+ uint8_t ecc[3];
+
+ __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
+ if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE) < 0)
+ return -EIO;
+
+ buffer += SM_SMALL_PAGE;
+
+ __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
+ if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE) < 0)
+ return -EIO;
+ return 0;
+}
+
+/* Reads a sector + oob*/
+static int sm_read_sector(struct sm_ftl *ftl,
+ int zone, int block, int boffset,
+ uint8_t *buffer, struct sm_oob *oob)
+{
+ struct mtd_info *mtd = ftl->trans->mtd;
+ struct mtd_oob_ops ops;
+ struct sm_oob tmp_oob;
+ int ret = -EIO;
+ int try = 0;
+
+	/* The FTL table can contain -1 (unmapped) entries; these read back as all 0xFF */
+ if (block == -1) {
+ memset(buffer, 0xFF, SM_SECTOR_SIZE);
+ return 0;
+ }
+
+	/* User might not need the oob, but we do for data verification */
+ if (!oob)
+ oob = &tmp_oob;
+
+ ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
+ ops.ooboffs = 0;
+ ops.ooblen = SM_OOB_SIZE;
+ ops.oobbuf = (void *)oob;
+ ops.len = SM_SECTOR_SIZE;
+ ops.datbuf = buffer;
+
+again:
+ if (try++) {
+ /* Avoid infinite recursion on CIS reads, sm_recheck_media
+ won't help anyway */
+ if (zone == 0 && block == ftl->cis_block && boffset ==
+ ftl->cis_boffset)
+ return ret;
+
+ /* Test if media is stable */
+ if (try == 3 || sm_recheck_media(ftl))
+ return ret;
+ }
+
+	/* Unfortunately, the oob read will _always_ succeed,
+	   even after card removal */
+ ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+
+ /* Test for unknown errors */
+ if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) {
+ dbg("read of block %d at zone %d, failed due to error (%d)",
+ block, zone, ret);
+ goto again;
+ }
+
+ /* Do a basic test on the oob, to guard against returned garbage */
+ if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
+ goto again;
+
+ /* This should never happen, unless there is a bug in the mtd driver */
+ WARN_ON(ops.oobretlen != SM_OOB_SIZE);
+ WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
+
+ if (!buffer)
+ return 0;
+
+ /* Test if sector marked as bad */
+ if (!sm_sector_valid(oob)) {
+ dbg("read of block %d at zone %d, failed because it is marked"
+ " as bad" , block, zone);
+ goto again;
+ }
+
+ /* Test ECC*/
+ if (ret == -EBADMSG ||
+ (ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
+
+ dbg("read of block %d at zone %d, failed due to ECC error",
+ block, zone);
+ goto again;
+ }
+
+ return 0;
+}
+
+/* Writes a sector to media */
+static int sm_write_sector(struct sm_ftl *ftl,
+ int zone, int block, int boffset,
+ uint8_t *buffer, struct sm_oob *oob)
+{
+ struct mtd_oob_ops ops;
+ struct mtd_info *mtd = ftl->trans->mtd;
+ int ret;
+
+ BUG_ON(ftl->readonly);
+
+ if (zone == 0 && (block == ftl->cis_block || block == 0)) {
+ dbg("attempted to write the CIS!");
+ return -EIO;
+ }
+
+ if (ftl->unstable)
+ return -EIO;
+
+ ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
+ ops.len = SM_SECTOR_SIZE;
+ ops.datbuf = buffer;
+ ops.ooboffs = 0;
+ ops.ooblen = SM_OOB_SIZE;
+ ops.oobbuf = (void *)oob;
+
+ ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+
+ /* Now we assume that hardware will catch write bitflip errors */
+ /* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */
+
+ if (ret) {
+ dbg("write to block %d at zone %d, failed with error %d",
+ block, zone, ret);
+
+ sm_recheck_media(ftl);
+ return ret;
+ }
+
+ /* This should never happen, unless there is a bug in the driver */
+ WARN_ON(ops.oobretlen != SM_OOB_SIZE);
+ WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
+
+ return 0;
+}
+
+/* ------------------------ block IO ------------------------------------- */
+
+/* Write a block using data and lba, and invalid sector bitmap */
+static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
+ int zone, int block, int lba,
+ unsigned long invalid_bitmap)
+{
+ struct sm_oob oob;
+ int boffset;
+ int retry = 0;
+
+ /* Initialize the oob with requested values */
+ memset(&oob, 0xFF, SM_OOB_SIZE);
+ sm_write_lba(&oob, lba);
+restart:
+ if (ftl->unstable)
+ return -EIO;
+
+ for (boffset = 0; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+ oob.data_status = 0xFF;
+
+ if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {
+
+ sm_printk("sector %d of block at LBA %d of zone %d"
+ " coudn't be read, marking it as invalid",
+ boffset / SM_SECTOR_SIZE, lba, zone);
+
+ oob.data_status = 0;
+ }
+
+ if (ftl->smallpagenand) {
+ __nand_calculate_ecc(buf + boffset,
+ SM_SMALL_PAGE, oob.ecc1);
+
+ __nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
+ SM_SMALL_PAGE, oob.ecc2);
+ }
+ if (!sm_write_sector(ftl, zone, block, boffset,
+ buf + boffset, &oob))
+ continue;
+
+ if (!retry) {
+
+			/* If the write fails, try to erase the block */
+			/* This is safe, because we never write to blocks
+			   that contain valuable data.
+			   It is intended to repair blocks that are marked
+			   as erased but are not fully erased */
+
+ if (sm_erase_block(ftl, zone, block, 0))
+ return -EIO;
+
+ retry = 1;
+ goto restart;
+ } else {
+ sm_mark_block_bad(ftl, zone, block);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+
+/* Mark the given block of the given zone as bad. */
+static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
+{
+ struct sm_oob oob;
+ int boffset;
+
+ memset(&oob, 0xFF, SM_OOB_SIZE);
+ oob.block_status = 0xF0;
+
+ if (ftl->unstable)
+ return;
+
+ if (sm_recheck_media(ftl))
+ return;
+
+ sm_printk("marking block %d of zone %d as bad", block, zone);
+
+ /* We aren't checking the return value, because we don't care */
+	/* This also fails on fake xD cards, but I guess these won't expose
+	   any bad blocks until they fail completely */
+ for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
+ sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
+}
+
+/*
+ * Erase a block within a zone
+ * If the erase succeeds, it updates the free-block fifo, otherwise it marks the block as bad
+ */
+static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
+ int put_free)
+{
+ struct ftl_zone *zone = &ftl->zones[zone_num];
+ struct mtd_info *mtd = ftl->trans->mtd;
+ struct erase_info erase;
+
+ erase.mtd = mtd;
+ erase.callback = sm_erase_callback;
+ erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
+ erase.len = ftl->block_size;
+ erase.priv = (u_long)ftl;
+
+ if (ftl->unstable)
+ return -EIO;
+
+ BUG_ON(ftl->readonly);
+
+ if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
+ sm_printk("attempted to erase the CIS!");
+ return -EIO;
+ }
+
+ if (mtd->erase(mtd, &erase)) {
+ sm_printk("erase of block %d in zone %d failed",
+ block, zone_num);
+ goto error;
+ }
+
+ if (erase.state == MTD_ERASE_PENDING)
+ wait_for_completion(&ftl->erase_completion);
+
+ if (erase.state != MTD_ERASE_DONE) {
+ sm_printk("erase of block %d in zone %d failed after wait",
+ block, zone_num);
+ goto error;
+ }
+
+ if (put_free)
+ kfifo_in(&zone->free_sectors,
+ (const unsigned char *)&block, sizeof(block));
+
+ return 0;
+error:
+ sm_mark_block_bad(ftl, zone_num, block);
+ return -EIO;
+}
+
+static void sm_erase_callback(struct erase_info *self)
+{
+ struct sm_ftl *ftl = (struct sm_ftl *)self->priv;
+ complete(&ftl->erase_completion);
+}
+
+/* Thoroughly test that the block is valid. */
+static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
+{
+ int boffset;
+ struct sm_oob oob;
+ int lbas[] = { -3, 0, 0, 0 };
+ int i = 0;
+ int test_lba;
+
+
+	/* First just check that the block doesn't look fishy */
+	/* Only blocks that are valid, or are sliced into two parts,
+	   are accepted */
+ for (boffset = 0; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+		/* This shouldn't happen anyway */
+ if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
+ return -2;
+
+ test_lba = sm_read_lba(&oob);
+
+ if (lbas[i] != test_lba)
+ lbas[++i] = test_lba;
+
+ /* If we found three different LBAs, something is fishy */
+ if (i == 3)
+ return -EIO;
+ }
+
+	/* If the block is sliced (usually partially erased), erase it */
+ if (i == 2) {
+ sm_erase_block(ftl, zone, block, 1);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* ----------------- media scanning --------------------------------- */
+static const struct chs_entry chs_table[] = {
+ { 1, 125, 4, 4 },
+ { 2, 125, 4, 8 },
+ { 4, 250, 4, 8 },
+ { 8, 250, 4, 16 },
+ { 16, 500, 4, 16 },
+ { 32, 500, 8, 16 },
+ { 64, 500, 8, 32 },
+ { 128, 500, 16, 32 },
+ { 256, 1000, 16, 32 },
+ { 512, 1015, 32, 63 },
+ { 1024, 985, 33, 63 },
+ { 2048, 985, 33, 63 },
+ { 0 },
+};
+
+
+static const uint8_t cis_signature[] = {
+ 0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
+};
+/* Find out the media parameters.
+ * This should ideally be based on the NAND id, but for now the device size is enough */
+int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
+{
+ int i;
+ int size_in_megs = mtd->size / (1024 * 1024);
+
+ ftl->readonly = mtd->type == MTD_ROM;
+
+ /* Manual settings for very old devices */
+ ftl->zone_count = 1;
+ ftl->smallpagenand = 0;
+
+ switch (size_in_megs) {
+ case 1:
+ /* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
+ ftl->zone_size = 256;
+ ftl->max_lba = 250;
+ ftl->block_size = 8 * SM_SECTOR_SIZE;
+ ftl->smallpagenand = 1;
+
+ break;
+ case 2:
+ /* 2 MiB flash SmartMedia (256 byte pages)*/
+ if (mtd->writesize == SM_SMALL_PAGE) {
+ ftl->zone_size = 512;
+ ftl->max_lba = 500;
+ ftl->block_size = 8 * SM_SECTOR_SIZE;
+ ftl->smallpagenand = 1;
+ /* 2 MiB rom SmartMedia */
+ } else {
+
+ if (!ftl->readonly)
+ return -ENODEV;
+
+ ftl->zone_size = 256;
+ ftl->max_lba = 250;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ }
+ break;
+ case 4:
+ /* 4 MiB flash/rom SmartMedia device */
+ ftl->zone_size = 512;
+ ftl->max_lba = 500;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ break;
+ case 8:
+ /* 8 MiB flash/rom SmartMedia device */
+ ftl->zone_size = 1024;
+ ftl->max_lba = 1000;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ }
+
+	/* Minimum xD size is 16 MiB. Also, all xD cards have standard zone
+	   sizes. SmartMedia cards exist up to 128 MiB and have the same layout */
+ if (size_in_megs >= 16) {
+ ftl->zone_count = size_in_megs / 16;
+ ftl->zone_size = 1024;
+ ftl->max_lba = 1000;
+ ftl->block_size = 32 * SM_SECTOR_SIZE;
+ }
+
+	/* Test for proper write, erase and oob sizes */
+ if (mtd->erasesize > ftl->block_size)
+ return -ENODEV;
+
+ if (mtd->writesize > SM_SECTOR_SIZE)
+ return -ENODEV;
+
+ if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
+ return -ENODEV;
+
+ if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
+ return -ENODEV;
+
+ /* We use these functions for IO */
+ if (!mtd->read_oob || !mtd->write_oob)
+ return -ENODEV;
+
+ /* Find geometry information */
+ for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
+ if (chs_table[i].size == size_in_megs) {
+ ftl->cylinders = chs_table[i].cyl;
+ ftl->heads = chs_table[i].head;
+ ftl->sectors = chs_table[i].sec;
+ return 0;
+ }
+ }
+
+ sm_printk("media has unknown size : %dMiB", size_in_megs);
+ ftl->cylinders = 985;
+ ftl->heads = 33;
+ ftl->sectors = 63;
+ return 0;
+}
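As a sanity check of the geometry table and the zone parameters chosen above, both describe the same logical capacity; for a 16 MiB card, for example:

/*
 * FTL parameters:  zone_count * max_lba * block_size
 *                  = 1 * 1000 * (32 * 512)   = 16384000 bytes
 * CHS entry {16, 500, 4, 16}: cyl * head * sec * 512
 *                  = 500 * 4 * 16 * 512      = 16384000 bytes
 * The same equality holds for the other xD sizes up to 256 MiB, so the
 * geometry reported by sm_getgeo() is consistent with the capacity the
 * FTL can map.
 */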
+
+/* Validate the CIS */
+static int sm_read_cis(struct sm_ftl *ftl)
+{
+ struct sm_oob oob;
+
+ if (sm_read_sector(ftl,
+ 0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
+ return -EIO;
+
+ if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
+ return -EIO;
+
+ if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
+ cis_signature, sizeof(cis_signature))) {
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/* Scan the media for the CIS */
+static int sm_find_cis(struct sm_ftl *ftl)
+{
+ struct sm_oob oob;
+ int block, boffset;
+ int block_found = 0;
+ int cis_found = 0;
+
+ /* Search for first valid block */
+ for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {
+
+ if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
+ continue;
+
+ if (!sm_block_valid(&oob))
+ continue;
+ block_found = 1;
+ break;
+ }
+
+ if (!block_found)
+ return -EIO;
+
+ /* Search for first valid sector in this block */
+ for (boffset = 0 ; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+ if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
+ continue;
+
+ if (!sm_sector_valid(&oob))
+ continue;
+ break;
+ }
+
+ if (boffset == ftl->block_size)
+ return -EIO;
+
+ ftl->cis_block = block;
+ ftl->cis_boffset = boffset;
+ ftl->cis_page_offset = 0;
+
+ cis_found = !sm_read_cis(ftl);
+
+ if (!cis_found) {
+ ftl->cis_page_offset = SM_SMALL_PAGE;
+ cis_found = !sm_read_cis(ftl);
+ }
+
+ if (cis_found) {
+ dbg("CIS block found at offset %x",
+ block * ftl->block_size +
+ boffset + ftl->cis_page_offset);
+ return 0;
+ }
+ return -EIO;
+}
+
+/* Basic test to determine if the underlying mtd device is functional */
+static int sm_recheck_media(struct sm_ftl *ftl)
+{
+ if (sm_read_cis(ftl)) {
+
+ if (!ftl->unstable) {
+ sm_printk("media unstable, not allowing writes");
+ ftl->unstable = 1;
+ }
+ return -EIO;
+ }
+ return 0;
+}
+
+/* Initialize a FTL zone */
+static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
+{
+ struct ftl_zone *zone = &ftl->zones[zone_num];
+ struct sm_oob oob;
+ uint16_t block;
+ int lba;
+ int i = 0;
+ int len;
+
+ dbg("initializing zone %d", zone_num);
+
+ /* Allocate memory for FTL table */
+ zone->lba_to_phys_table = kmalloc(ftl->max_lba * 2, GFP_KERNEL);
+
+ if (!zone->lba_to_phys_table)
+ return -ENOMEM;
+ memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);
+
+
+ /* Allocate memory for free sectors FIFO */
+ if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
+ kfree(zone->lba_to_phys_table);
+ return -ENOMEM;
+ }
+
+ /* Now scan the zone */
+ for (block = 0 ; block < ftl->zone_size ; block++) {
+
+ /* Skip blocks till the CIS (including) */
+ if (zone_num == 0 && block <= ftl->cis_block)
+ continue;
+
+ /* Read the oob of first sector */
+ if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob))
+ return -EIO;
+
+ /* Test to see if block is erased. It is enough to test
+ first sector, because erase happens in one shot */
+ if (sm_block_erased(&oob)) {
+ kfifo_in(&zone->free_sectors,
+ (unsigned char *)&block, 2);
+ continue;
+ }
+
+		/* If the block is marked as bad, skip it */
+		/* This assumes we can trust the first sector */
+		/* However, the way the block valid status is defined ensures
+		   a very low probability of failure here */
+ if (!sm_block_valid(&oob)) {
+ dbg("PH %04d <-> <marked bad>", block);
+ continue;
+ }
+
+
+ lba = sm_read_lba(&oob);
+
+		/* An invalid LBA means that the block is damaged. */
+		/* We could try to erase it, or mark it as bad, but
+		   let's leave that to a recovery application */
+ if (lba == -2 || lba >= ftl->max_lba) {
+ dbg("PH %04d <-> LBA %04d(bad)", block, lba);
+ continue;
+ }
+
+
+ /* If there is no collision,
+ just put the sector in the FTL table */
+ if (zone->lba_to_phys_table[lba] < 0) {
+ dbg_verbose("PH %04d <-> LBA %04d", block, lba);
+ zone->lba_to_phys_table[lba] = block;
+ continue;
+ }
+
+ sm_printk("collision"
+ " of LBA %d between blocks %d and %d in zone %d",
+ lba, zone->lba_to_phys_table[lba], block, zone_num);
+
+ /* Test that this block is valid*/
+ if (sm_check_block(ftl, zone_num, block))
+ continue;
+
+ /* Test now the old block */
+ if (sm_check_block(ftl, zone_num,
+ zone->lba_to_phys_table[lba])) {
+ zone->lba_to_phys_table[lba] = block;
+ continue;
+ }
+
+		/* If both blocks are valid and share the same LBA, it means that
+		   they hold different versions of the same data. It is not
+		   known which is more recent, so just erase one of them
+		*/
+		sm_printk("both blocks are valid, erasing the latter");
+ sm_erase_block(ftl, zone_num, block, 1);
+ }
+
+ dbg("zone initialized");
+ zone->initialized = 1;
+
+	/* No free sectors means that the zone is heavily damaged; writes won't
+	   work, but it can still be (partially) read */
+ if (!kfifo_len(&zone->free_sectors)) {
+ sm_printk("no free blocks in zone %d", zone_num);
+ return 0;
+ }
+
+ /* Randomize first block we write to */
+ get_random_bytes(&i, 2);
+ i %= (kfifo_len(&zone->free_sectors) / 2);
+
+ while (i--) {
+ len = kfifo_out(&zone->free_sectors,
+ (unsigned char *)&block, 2);
+ WARN_ON(len != 2);
+ kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
+ }
+ return 0;
+}
+
+/* Get and automatically initialize an FTL mapping for one zone */
+struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
+{
+ struct ftl_zone *zone;
+ int error;
+
+ BUG_ON(zone_num >= ftl->zone_count);
+ zone = &ftl->zones[zone_num];
+
+ if (!zone->initialized) {
+ error = sm_init_zone(ftl, zone_num);
+
+ if (error)
+ return ERR_PTR(error);
+ }
+ return zone;
+}
+
+
+/* ----------------- cache handling ------------------------------------------*/
+
+/* Initialize the one block cache */
+void sm_cache_init(struct sm_ftl *ftl)
+{
+ ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
+ ftl->cache_clean = 1;
+ ftl->cache_zone = -1;
+ ftl->cache_block = -1;
+ /*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
+}
+
+/* Put sector in one block cache */
+void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
+{
+ memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
+ clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
+ ftl->cache_clean = 0;
+}
+
+/* Read a sector from the cache */
+int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
+{
+ if (test_bit(boffset / SM_SECTOR_SIZE,
+ &ftl->cache_data_invalid_bitmap))
+ return -1;
+
+ memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
+ return 0;
+}
+
+/* Write the cache to hardware */
+int sm_cache_flush(struct sm_ftl *ftl)
+{
+ struct ftl_zone *zone;
+
+ int sector_num;
+ uint16_t write_sector;
+ int zone_num = ftl->cache_zone;
+ int block_num;
+
+ if (ftl->cache_clean)
+ return 0;
+
+ if (ftl->unstable)
+ return -EIO;
+
+ BUG_ON(zone_num < 0);
+ zone = &ftl->zones[zone_num];
+ block_num = zone->lba_to_phys_table[ftl->cache_block];
+
+
+ /* Try to read all unread areas of the cache block*/
+ for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
+ ftl->block_size / SM_SECTOR_SIZE) {
+
+ if (!sm_read_sector(ftl,
+ zone_num, block_num, sector_num * SM_SECTOR_SIZE,
+ ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
+ clear_bit(sector_num,
+ &ftl->cache_data_invalid_bitmap);
+ }
+restart:
+
+ if (ftl->unstable)
+ return -EIO;
+
+	/* If there are no spare blocks,
+	   we could still continue by erasing/writing the current block,
+	   but for such worn-out media it isn't worth the trouble
+	   and the danger */
+ if (kfifo_out(&zone->free_sectors,
+ (unsigned char *)&write_sector, 2) != 2) {
+ dbg("no free sectors for write!");
+ return -EIO;
+ }
+
+
+ if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
+ ftl->cache_block, ftl->cache_data_invalid_bitmap))
+ goto restart;
+
+ /* Update the FTL table */
+ zone->lba_to_phys_table[ftl->cache_block] = write_sector;
+
+	/* Write successful, so erase and free the old block */
+ if (block_num > 0)
+ sm_erase_block(ftl, zone_num, block_num, 1);
+
+ sm_cache_init(ftl);
+ return 0;
+}
+
+
+/* flush timer, runs a second after last write */
+static void sm_cache_flush_timer(unsigned long data)
+{
+ struct sm_ftl *ftl = (struct sm_ftl *)data;
+ queue_work(cache_flush_workqueue, &ftl->flush_work);
+}
+
+/* cache flush work, kicked by timer */
+static void sm_cache_flush_work(struct work_struct *work)
+{
+ struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
+ mutex_lock(&ftl->mutex);
+ sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+ return;
+}
+
+/* ---------------- outside interface -------------------------------------- */
+
+/* outside interface: read a sector */
+static int sm_read(struct mtd_blktrans_dev *dev,
+ unsigned long sect_no, char *buf)
+{
+ struct sm_ftl *ftl = dev->priv;
+ struct ftl_zone *zone;
+ int error = 0, in_cache = 0;
+ int zone_num, block, boffset;
+
+ sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
+ mutex_lock(&ftl->mutex);
+
+
+ zone = sm_get_zone(ftl, zone_num);
+ if (IS_ERR(zone)) {
+ error = PTR_ERR(zone);
+ goto unlock;
+ }
+
+ /* Have to look at cache first */
+ if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
+ in_cache = 1;
+ if (!sm_cache_get(ftl, buf, boffset))
+ goto unlock;
+ }
+
+	/* Translate the block and return if it doesn't exist in the table */
+ block = zone->lba_to_phys_table[block];
+
+ if (block == -1) {
+ memset(buf, 0xFF, SM_SECTOR_SIZE);
+ goto unlock;
+ }
+
+ if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
+ error = -EIO;
+ goto unlock;
+ }
+
+ if (in_cache)
+ sm_cache_put(ftl, buf, boffset);
+unlock:
+ mutex_unlock(&ftl->mutex);
+ return error;
+}
+
+/* outside interface: write a sector */
+static int sm_write(struct mtd_blktrans_dev *dev,
+ unsigned long sec_no, char *buf)
+{
+ struct sm_ftl *ftl = dev->priv;
+ struct ftl_zone *zone;
+	int error = 0, zone_num, block, boffset;
+
+ BUG_ON(ftl->readonly);
+ sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);
+
+	/* No need for the cache flush timer to run right now */
+ del_timer(&ftl->timer);
+ mutex_lock(&ftl->mutex);
+
+ zone = sm_get_zone(ftl, zone_num);
+ if (IS_ERR(zone)) {
+ error = PTR_ERR(zone);
+ goto unlock;
+ }
+
+	/* If the cache holds a different block, flush it first */
+ if (ftl->cache_block != block || ftl->cache_zone != zone_num) {
+
+ error = sm_cache_flush(ftl);
+ if (error)
+ goto unlock;
+
+ ftl->cache_block = block;
+ ftl->cache_zone = zone_num;
+ }
+
+ sm_cache_put(ftl, buf, boffset);
+unlock:
+ mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
+ mutex_unlock(&ftl->mutex);
+ return error;
+}
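+
sm_write() never touches the flash directly: it only folds the sector into the cache, and every write pushes the flush deadline forward by cache_timeout milliseconds (del_timer on entry, mod_timer on exit). Once writes stop, the timer fires, queues flush_work on the dedicated workqueue, and sm_cache_flush() runs under the mutex. A minimal sketch of that deferred-flush idiom using the same kernel APIs; the names and the one-second value are illustrative, not the driver's:

/* Minimal sketch of the timer -> workqueue -> flush-under-mutex idiom. */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;
static struct timer_list demo_timer;
static DEFINE_MUTEX(demo_mutex);

static void demo_flush_work(struct work_struct *work)
{
	mutex_lock(&demo_mutex);
	pr_info("demo: flushing, one second after the last write\n");
	mutex_unlock(&demo_mutex);
}

static void demo_flush_timer(unsigned long data)
{
	queue_work(demo_wq, &demo_work);	/* can't sleep here, so defer */
}

static void demo_note_write(void)
{
	mod_timer(&demo_timer, jiffies + msecs_to_jiffies(1000));
}

static int __init demo_init(void)
{
	demo_wq = create_freezeable_workqueue("demo_flush");
	if (!demo_wq)
		return -ENOMEM;
	INIT_WORK(&demo_work, demo_flush_work);
	setup_timer(&demo_timer, demo_flush_timer, 0);
	demo_note_write();			/* pretend a write just happened */
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&demo_timer);
	cancel_work_sync(&demo_work);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");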
+
+/* outside interface: flush everything */
+static int sm_flush(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+ int retval;
+
+ mutex_lock(&ftl->mutex);
+ retval = sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+ return retval;
+}
+
+/* outside interface: device is released */
+static int sm_release(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+
+ mutex_lock(&ftl->mutex);
+ del_timer_sync(&ftl->timer);
+ cancel_work_sync(&ftl->flush_work);
+ sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+ return 0;
+}
+
+/* outside interface: get geometry */
+static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct sm_ftl *ftl = dev->priv;
+ geo->heads = ftl->heads;
+ geo->sectors = ftl->sectors;
+ geo->cylinders = ftl->cylinders;
+ return 0;
+}
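+
The geometry reported here is purely synthetic: it only has to satisfy heads * sectors * cylinders * 512 ≈ device capacity so that partitioning tools are happy. A quick check with assumed values (not the driver's actual choices):

/* Sketch: an invented CHS triple covering a 16 MiB card. */
#include <stdio.h>

int main(void)
{
	int heads = 4, sectors = 16, cylinders = 512;
	long long bytes = (long long)heads * sectors * cylinders * 512;

	printf("%d/%d/%d -> %lld MiB\n", heads, sectors, cylinders, bytes >> 20);
	return 0;
}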
+
+/* external interface: main initialization function */
+static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct mtd_blktrans_dev *trans;
+ struct sm_ftl *ftl;
+
+ /* Allocate & initialize our private structure */
+ ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
+ if (!ftl)
+ goto error1;
+
+
+ mutex_init(&ftl->mutex);
+ setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
+ INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
+ init_completion(&ftl->erase_completion);
+
+ /* Read media information */
+ if (sm_get_media_info(ftl, mtd)) {
+ dbg("found unsupported mtd device, aborting");
+ goto error2;
+ }
+
+
+ /* Allocate temporary CIS buffer for read retry support */
+ ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
+ if (!ftl->cis_buffer)
+ goto error2;
+
+ /* Allocate zone array, it will be initialized on demand */
+	ftl->zones = kcalloc(ftl->zone_count, sizeof(struct ftl_zone),
+								GFP_KERNEL);
+ if (!ftl->zones)
+ goto error3;
+
+	/* Allocate the cache */
+ ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);
+
+ if (!ftl->cache_data)
+ goto error4;
+
+ sm_cache_init(ftl);
+
+
+ /* Allocate upper layer structure and initialize it */
+ trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
+ if (!trans)
+ goto error5;
+
+ ftl->trans = trans;
+ trans->priv = ftl;
+
+ trans->tr = tr;
+ trans->mtd = mtd;
+ trans->devnum = -1;
+ trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
+ trans->readonly = ftl->readonly;
+
+ if (sm_find_cis(ftl)) {
+ dbg("CIS not found on mtd device, aborting");
+ goto error6;
+ }
+
+ ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
+ trans->disk_attributes = ftl->disk_attributes;
+
+ sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
+ (int)(mtd->size / (1024 * 1024)), mtd->index);
+
+ dbg("FTL layout:");
+ dbg("%d zone(s), each consists of %d blocks (+%d spares)",
+ ftl->zone_count, ftl->max_lba,
+ ftl->zone_size - ftl->max_lba);
+ dbg("each block consists of %d bytes",
+ ftl->block_size);
+
+
+	/* Register device */
+ if (add_mtd_blktrans_dev(trans)) {
+ dbg("error in mtdblktrans layer");
+ goto error6;
+ }
+ return;
+error6:
+ kfree(trans);
+error5:
+ kfree(ftl->cache_data);
+error4:
+ kfree(ftl->zones);
+error3:
+ kfree(ftl->cis_buffer);
+error2:
+ kfree(ftl);
+error1:
+ return;
+}
+
+/* main interface: device {surprise,} removal */
+static void sm_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+ int i;
+
+ del_mtd_blktrans_dev(dev);
+ ftl->trans = NULL;
+
+ for (i = 0 ; i < ftl->zone_count; i++) {
+
+ if (!ftl->zones[i].initialized)
+ continue;
+
+ kfree(ftl->zones[i].lba_to_phys_table);
+ kfifo_free(&ftl->zones[i].free_sectors);
+ }
+
+ sm_delete_sysfs_attributes(ftl);
+ kfree(ftl->cis_buffer);
+ kfree(ftl->zones);
+ kfree(ftl->cache_data);
+ kfree(ftl);
+}
+
+static struct mtd_blktrans_ops sm_ftl_ops = {
+ .name = "smblk",
+ .major = -1,
+ .part_bits = SM_FTL_PARTN_BITS,
+ .blksize = SM_SECTOR_SIZE,
+ .getgeo = sm_getgeo,
+
+ .add_mtd = sm_add_mtd,
+ .remove_dev = sm_remove_dev,
+
+ .readsect = sm_read,
+ .writesect = sm_write,
+
+ .flush = sm_flush,
+ .release = sm_release,
+
+ .owner = THIS_MODULE,
+};
+
+static __init int sm_module_init(void)
+{
+ int error = 0;
+ cache_flush_workqueue = create_freezeable_workqueue("smflush");
+
+	/* create_freezeable_workqueue() returns NULL on failure */
+	if (!cache_flush_workqueue)
+		return -ENOMEM;
+
+ error = register_mtd_blktrans(&sm_ftl_ops);
+ if (error)
+ destroy_workqueue(cache_flush_workqueue);
+ return error;
+
+}
+
+static void __exit sm_module_exit(void)
+{
+ destroy_workqueue(cache_flush_workqueue);
+ deregister_mtd_blktrans(&sm_ftl_ops);
+}
+
+module_init(sm_module_init);
+module_exit(sm_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");
diff --git a/drivers/mtd/sm_ftl.h b/drivers/mtd/sm_ftl.h
new file mode 100644
index 0000000..e30e48e
--- /dev/null
+++ b/drivers/mtd/sm_ftl.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * SmartMedia/xD translation layer
+ *
+ * Based loosely on ssfdc.c which is
+ * © 2005 Eptar srl
+ * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mtd/blktrans.h>
+#include <linux/kfifo.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/mtd/mtd.h>
+
+
+
+struct ftl_zone {
+ int initialized;
+ int16_t *lba_to_phys_table; /* LBA to physical table */
+ struct kfifo free_sectors; /* queue of free sectors */
+};
+
+struct sm_ftl {
+ struct mtd_blktrans_dev *trans;
+
+ struct mutex mutex; /* protects the structure */
+ struct ftl_zone *zones; /* FTL tables for each zone */
+
+ /* Media information */
+ int block_size; /* block size in bytes */
+ int zone_size; /* zone size in blocks */
+ int zone_count; /* number of zones */
+ int max_lba; /* maximum lba in a zone */
+ int smallpagenand; /* 256 bytes/page nand */
+ int readonly; /* is FS readonly */
+ int unstable;
+ int cis_block; /* CIS block location */
+ int cis_boffset; /* CIS offset in the block */
+ int cis_page_offset; /* CIS offset in the page */
+ void *cis_buffer; /* tmp buffer for cis reads */
+
+ /* Cache */
+ int cache_block; /* block number of cached block */
+ int cache_zone; /* zone of cached block */
+ unsigned char *cache_data; /* cached block data */
+	unsigned long cache_data_invalid_bitmap; /* set bit = sector not cached */
+ int cache_clean;
+ struct work_struct flush_work;
+ struct timer_list timer;
+
+ /* Async erase stuff */
+ struct completion erase_completion;
+
+ /* Geometry stuff */
+ int heads;
+ int sectors;
+ int cylinders;
+
+ struct attribute_group *disk_attributes;
+};
+
+struct chs_entry {
+ unsigned long size;
+ unsigned short cyl;
+ unsigned char head;
+ unsigned char sec;
+};
+
+
+#define SM_FTL_PARTN_BITS 3
+
+#define sm_printk(format, ...) \
+ printk(KERN_WARNING "sm_ftl" ": " format "\n", ## __VA_ARGS__)
+
+#define dbg(format, ...) \
+ if (debug) \
+ printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__)
+
+#define dbg_verbose(format, ...) \
+ if (debug > 1) \
+ printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__)
+
+
+static void sm_erase_callback(struct erase_info *self);
+static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
+ int put_free);
+static void sm_mark_block_bad(struct sm_ftl *ftl, int zone_num, int block);
+
+static int sm_recheck_media(struct sm_ftl *ftl);
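+
Given the fields above, a linear request offset decomposes into (zone, logical block, offset-in-block) purely arithmetically: offset within the block first, then the logical block within the zone, then the zone. The driver's own sm_break_offset() helper, used by sm_read()/sm_write(), performs this split; the sketch below shows the same arithmetic with assumed geometry and plain C division:

/* Sketch of the offset split; the geometry values are assumed. */
#include <stdio.h>

struct geom {
	int block_size;		/* bytes per block */
	int max_lba;		/* logical blocks per zone */
};

static void break_offset(const struct geom *g, long long offset,
			 int *zone, int *block, int *boffset)
{
	*boffset = offset % g->block_size;
	offset /= g->block_size;
	*block = offset % g->max_lba;
	*zone = offset / g->max_lba;
}

int main(void)
{
	struct geom g = { .block_size = 16 * 512, .max_lba = 1000 };
	int zone, block, boffset;

	break_offset(&g, 12345678LL, &zone, &block, &boffset);
	printf("zone %d, block %d, boffset %d\n", zone, block, boffset);
	return 0;
}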
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 3f67e00..81c4ecd 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -375,7 +375,6 @@ static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
del_mtd_blktrans_dev(dev);
kfree(ssfdc->logic_block_map);
- kfree(ssfdc);
}
static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index 921a85d..6bc1b82 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -480,12 +480,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
- bbt = kmalloc(ebcnt, GFP_KERNEL);
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0 , ebcnt);
printk(PRINT_PREF "scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index 7107fcc..afe71aa 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -141,12 +141,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
- bbt = kmalloc(ebcnt, GFP_KERNEL);
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0 , ebcnt);
/* NOR flash does not implement block_isbad */
if (mtd->block_isbad == NULL)
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 56ca62b..161feeb 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -295,12 +295,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
- bbt = kmalloc(ebcnt, GFP_KERNEL);
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0 , ebcnt);
/* NOR flash does not implement block_isbad */
if (mtd->block_isbad == NULL)
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 3854afe..531625f 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -221,12 +221,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
- bbt = kmalloc(ebcnt, GFP_KERNEL);
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0 , ebcnt);
/* NOR flash does not implement block_isbad */
if (mtd->block_isbad == NULL)
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index 700237a..11204e8 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -354,12 +354,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
- bbt = kmalloc(ebcnt, GFP_KERNEL);
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0 , ebcnt);
printk(PRINT_PREF "scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 0a8c7ea..f702a16 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -27,7 +27,7 @@ config MTD_UBI_WL_THRESHOLD
The default value should be OK for SLC NAND flashes, NOR flashes and
other flashes which have eraseblock life-cycle 100000 or more.
However, in case of MLC NAND flashes which typically have eraseblock
- life-cycle less then 10000, the threshold should be lessened (e.g.,
+ life-cycle less than 10000, the threshold should be lessened (e.g.,
to 128 or 256, although it does not have to be power of 2).
config MTD_UBI_BEB_RESERVE
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 55c726d..13b05cb 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -42,7 +42,6 @@
#include <linux/miscdevice.h>
#include <linux/log2.h>
#include <linux/kthread.h>
-#include <linux/reboot.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "ubi.h"
@@ -50,6 +49,12 @@
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64
+#ifdef CONFIG_MTD_UBI_MODULE
+#define ubi_is_module() 1
+#else
+#define ubi_is_module() 0
+#endif
+
/**
* struct mtd_dev_param - MTD device parameter description data structure.
* @name: MTD character device node path, MTD device name, or MTD device number
@@ -832,34 +837,6 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
}
/**
- * ubi_reboot_notifier - halt UBI transactions immediately prior to a reboot.
- * @n: reboot notifier object
- * @state: SYS_RESTART, SYS_HALT, or SYS_POWER_OFF
- * @cmd: pointer to command string for RESTART2
- *
- * This function stops the UBI background thread so that the flash device
- * remains quiescent when Linux restarts the system. Any queued work will be
- * discarded, but this function will block until do_work() finishes if an
- * operation is already in progress.
- *
- * This function solves a real-life problem observed on NOR flashes when an
- * PEB erase operation starts, then the system is rebooted before the erase is
- * finishes, and the boot loader gets confused and dies. So we prefer to finish
- * the ongoing operation before rebooting.
- */
-static int ubi_reboot_notifier(struct notifier_block *n, unsigned long state,
- void *cmd)
-{
- struct ubi_device *ubi;
-
- ubi = container_of(n, struct ubi_device, reboot_notifier);
- if (ubi->bgt_thread)
- kthread_stop(ubi->bgt_thread);
- ubi_sync(ubi->ubi_num);
- return NOTIFY_DONE;
-}
-
-/**
* ubi_attach_mtd_dev - attach an MTD device.
* @mtd: MTD device description object
* @ubi_num: number to assign to the new UBI device
@@ -1016,11 +993,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
- /* Flash device priority is 0 - UBI needs to shut down first */
- ubi->reboot_notifier.priority = 1;
- ubi->reboot_notifier.notifier_call = ubi_reboot_notifier;
- register_reboot_notifier(&ubi->reboot_notifier);
-
ubi_devices[ubi_num] = ubi;
ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
@@ -1091,7 +1063,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
* Before freeing anything, we have to stop the background thread to
* prevent it from doing anything on this device while we are freeing.
*/
- unregister_reboot_notifier(&ubi->reboot_notifier);
if (ubi->bgt_thread)
kthread_stop(ubi->bgt_thread);
@@ -1241,9 +1212,24 @@ static int __init ubi_init(void)
p->vid_hdr_offs);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
- put_mtd_device(mtd);
ubi_err("cannot attach mtd%d", mtd->index);
- goto out_detach;
+ put_mtd_device(mtd);
+
+ /*
+ * Originally UBI stopped initializing on any error.
+ * However, later on it was found out that this
+ * behavior is not very good when UBI is compiled into
+ * the kernel and the MTD devices to attach are passed
+ * through the command line. Indeed, UBI failure
+			 * stopped the whole boot sequence.
+ *
+ * To fix this, we changed the behavior for the
+ * non-module case, but preserved the old behavior for
+ * the module case, just for compatibility. This is a
+ * little inconsistent, though.
+ */
+ if (ubi_is_module())
+ goto out_detach;
}
}
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 533b1a4..4b979e3 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -64,9 +64,9 @@
* device, e.g., make @ubi->min_io_size = 512 in the example above?
*
* A: because when writing a sub-page, MTD still writes a full 2K page but the
- * bytes which are no relevant to the sub-page are 0xFF. So, basically, writing
- * 4x512 sub-pages is 4 times slower then writing one 2KiB NAND page. Thus, we
- * prefer to use sub-pages only for EV and VID headers.
+ * bytes which are not relevant to the sub-page are 0xFF. So, basically,
+ * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page.
+ * Thus, we prefer to use sub-pages only for EC and VID headers.
*
* As it was noted above, the VID header may start at a non-aligned offset.
* For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page,
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 17f287d..69fa4ef0 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -488,7 +488,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_write);
*
* This function changes the contents of a logical eraseblock atomically. @buf
* has to contain new logical eraseblock data, and @len - the length of the
- * data, which has to be aligned. The length may be shorter then the logical
+ * data, which has to be aligned. The length may be shorter than the logical
* eraseblock size, ant the logical eraseblock may be appended to more times
* later on. This function guarantees that in case of an unclean reboot the old
* contents is preserved. Returns zero in case of success and a negative error
@@ -571,7 +571,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_erase);
*
* This function un-maps logical eraseblock @lnum and schedules the
* corresponding physical eraseblock for erasure, so that it will eventually be
- * physically erased in background. This operation is much faster then the
+ * physically erased in background. This operation is much faster than the
* erase operation.
*
* Unlike erase, the un-map operation does not guarantee that the logical
@@ -590,7 +590,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_erase);
*
* The main and obvious use-case of this function is when the contents of a
* logical eraseblock has to be re-written. Then it is much more efficient to
- * first un-map it, then write new data, rather then first erase it, then write
+ * first un-map it, then write new data, rather than first erase it, then write
* new data. Note, once new data has been written to the logical eraseblock,
* UBI guarantees that the old contents has gone forever. In other words, if an
* unclean reboot happens after the logical eraseblock has been un-mapped and
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index dc5f688..aed19f3 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -231,7 +231,7 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
* case of success this function returns a positive value, in case of failure, a
* negative error code is returned. The success return codes use the following
* bits:
- * o bit 0 is cleared: the first PEB (described by @seb) is newer then the
+ * o bit 0 is cleared: the first PEB (described by @seb) is newer than the
* second PEB (described by @pnum and @vid_hdr);
* o bit 0 is set: the second PEB is newer;
* o bit 1 is cleared: no bit-flips were detected in the newer LEB;
@@ -452,7 +452,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
if (cmp_res & 1) {
/*
- * This logical eraseblock is newer then the one
+ * This logical eraseblock is newer than the one
* found earlier.
*/
err = validate_vid_hdr(vid_hdr, sv, pnum);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 5176d48..a637f02 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -350,7 +350,6 @@ struct ubi_wl_entry;
* @bgt_thread: background thread description object
* @thread_enabled: if the background thread is enabled
* @bgt_name: background thread name
- * @reboot_notifier: notifier to terminate background thread before rebooting
*
* @flash_size: underlying MTD device size (in bytes)
* @peb_count: count of physical eraseblocks on the MTD device
@@ -436,7 +435,6 @@ struct ubi_device {
struct task_struct *bgt_thread;
int thread_enabled;
char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
- struct notifier_block reboot_notifier;
/* I/O sub-system's stuff */
long long flash_size;
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index cd90ff3..14c10be 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -414,7 +414,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
* 0 contains more recent information.
*
* So the plan is to first check LEB 0. Then
- * a. if LEB 0 is OK, it must be containing the most resent data; then
+ * a. if LEB 0 is OK, it must be containing the most recent data; then
* we compare it with LEB 1, and if they are different, we copy LEB
* 0 to LEB 1;
* b. if LEB 0 is corrupted, but LEB 1 has to be OK, and we copy LEB 1
@@ -848,7 +848,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
goto out_free;
/*
- * Get sure that the scanning information is consistent to the
+ * Make sure that the scanning information is consistent to the
* information stored in the volume table.
*/
err = check_scanning_info(ubi, si);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index f64ddab..ee7b1d8 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -350,7 +350,7 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
* @max: highest possible erase counter
*
* This function looks for a wear leveling entry with erase counter closest to
- * @max and less then @max.
+ * @max and less than @max.
*/
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
{
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 3ea42ff1..1776ab6 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -480,7 +480,6 @@ static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* fire ... Trigger xmit. */
outb(AX_XMIT, AX_CMD);
lp->loading = 0;
- dev->trans_start = jiffies;
if (el_debug > 2)
pr_debug(" queued xmit.\n");
dev_kfree_skb(skb);
@@ -727,7 +726,6 @@ static void el_receive(struct net_device *dev)
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
}
- return;
}
/**
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 66e0323..baac246 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -380,6 +380,12 @@ out:
return retval;
}
+static irqreturn_t el2_probe_interrupt(int irq, void *seen)
+{
+ *(bool *)seen = true;
+ return IRQ_HANDLED;
+}
+
static int
el2_open(struct net_device *dev)
{
@@ -391,23 +397,35 @@ el2_open(struct net_device *dev)
outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
do {
- retval = request_irq(*irqp, NULL, 0, "bogus", dev);
- if (retval >= 0) {
+ bool seen;
+
+ retval = request_irq(*irqp, el2_probe_interrupt, 0,
+ dev->name, &seen);
+ if (retval == -EBUSY)
+ continue;
+ if (retval < 0)
+ goto err_disable;
+
/* Twinkle the interrupt, and check if it's seen. */
- unsigned long cookie = probe_irq_on();
+ seen = false;
+ smp_wmb();
outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
outb_p(0x00, E33G_IDCFR);
- if (*irqp == probe_irq_off(cookie) && /* It's a good IRQ line! */
- ((retval = request_irq(dev->irq = *irqp,
- eip_interrupt, 0,
- dev->name, dev)) == 0))
- break;
- } else {
- if (retval != -EBUSY)
- return retval;
- }
+ msleep(1);
+ free_irq(*irqp, el2_probe_interrupt);
+ if (!seen)
+ continue;
+
+ retval = request_irq(dev->irq = *irqp, eip_interrupt, 0,
+ dev->name, dev);
+ if (retval == -EBUSY)
+ continue;
+ if (retval < 0)
+ goto err_disable;
} while (*++irqp);
+
if (*irqp == 0) {
+ err_disable:
outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
return -EAGAIN;
}
@@ -555,7 +573,6 @@ el2_block_output(struct net_device *dev, int count,
}
blocked:;
outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
- return;
}
/* Read the 4 byte, page aligned 8390 specific header. */
@@ -671,7 +688,6 @@ el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring
}
blocked:;
outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
- return;
}
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 29b8d1d..88d766e 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -1055,7 +1055,7 @@ static void elp_timeout(struct net_device *dev)
(stat & ACRF) ? "interrupt" : "command");
if (elp_debug >= 1)
pr_debug("%s: status %#02x\n", dev->name, stat);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_dropped++;
netif_wake_queue(dev);
}
@@ -1093,11 +1093,6 @@ static netdev_tx_t elp_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (elp_debug >= 3)
pr_debug("%s: packet of length %d sent\n", dev->name, (int) skb->len);
- /*
- * start the transmit timeout
- */
- dev->trans_start = jiffies;
-
prime_rx(dev);
spin_unlock_irqrestore(&adapter->lock, flags);
netif_start_queue(dev);
@@ -1216,7 +1211,7 @@ static int elp_close(struct net_device *dev)
static void elp_set_mc_list(struct net_device *dev)
{
elp_device *adapter = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
unsigned long flags;
@@ -1231,8 +1226,9 @@ static void elp_set_mc_list(struct net_device *dev)
adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
adapter->tx_pcb.length = 6 * netdev_mc_count(dev);
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
- memcpy(adapter->tx_pcb.data.multicast[i++], dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(adapter->tx_pcb.data.multicast[i++],
+ ha->addr, 6);
adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
if (!send_pcb(dev, &adapter->tx_pcb))
pr_err("%s: couldn't send set_multicast command\n", dev->name);
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index b32b7a1..82eaf65 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -449,7 +449,6 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
pr_debug("%s", version);
lp = netdev_priv(dev);
- memset(lp, 0, sizeof(*lp));
spin_lock_init(&lp->lock);
lp->base = ioremap(dev->mem_start, RX_BUF_END);
if (!lp->base) {
@@ -505,7 +504,7 @@ static void el16_tx_timeout (struct net_device *dev)
outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -529,7 +528,6 @@ static netdev_tx_t el16_send_packet (struct sk_buff *skb,
hardware_send_packet (dev, buf, skb->len, length - skb->len);
- dev->trans_start = jiffies;
/* Enable the 82586 interrupt input. */
outb (0x84, ioaddr + MISC_CTRL);
@@ -766,7 +764,6 @@ static void init_82586_mem(struct net_device *dev)
if (net_debug > 4)
pr_debug("%s: Initialized 82586, status %04x.\n", dev->name,
readw(shmem+iSCB_STATUS));
- return;
}
static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad)
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index ab9bb3c..91abb96 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -807,7 +807,7 @@ el3_tx_timeout (struct net_device *dev)
dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
inw(ioaddr + TX_FREE));
dev->stats.tx_errors++;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
outw(TxReset, ioaddr + EL3_CMD);
outw(TxEnable, ioaddr + EL3_CMD);
@@ -868,7 +868,6 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
- dev->trans_start = jiffies;
if (inw(ioaddr + TX_FREE) > 1536)
netif_start_queue(dev);
else
@@ -1038,7 +1037,6 @@ static void update_stats(struct net_device *dev)
/* Back to window 1, and turn statistics back on. */
EL3WINDOW(1);
outw(StatsEnable, ioaddr + EL3_CMD);
- return;
}
static int
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 2e17837..3bba835 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -958,7 +958,6 @@ static void corkscrew_timer(unsigned long data)
dev->name, media_tbl[dev->if_port].name);
#endif /* AUTOMEDIA */
- return;
}
static void corkscrew_timeout(struct net_device *dev)
@@ -992,7 +991,7 @@ static void corkscrew_timeout(struct net_device *dev)
if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
break;
outw(TxEnable, ioaddr + EL3_CMD);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
netif_wake_queue(dev);
@@ -1055,7 +1054,6 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
prev_entry->status &= ~0x80000000;
netif_wake_queue(dev);
}
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
/* Put out the doubleword header... */
@@ -1091,7 +1089,6 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
outw(SetTxThreshold + (1536 >> 2), ioaddr + EL3_CMD);
#endif /* bus master */
- dev->trans_start = jiffies;
/* Clear the Tx status stack. */
{
@@ -1518,7 +1515,6 @@ static void update_stats(int ioaddr, struct net_device *dev)
/* We change back to window 7 (not 1) with the Vortex. */
EL3WINDOW(7);
- return;
}
/* This new version of set_rx_mode() supports v1.4 kernels.
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 1719079..a7b0e5e 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -503,7 +503,6 @@ static int __init do_elmc_probe(struct net_device *dev)
break;
}
- memset(pr, 0, sizeof(struct priv));
pr->slot = slot;
pr_info("%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision,
@@ -624,7 +623,7 @@ static int init586(struct net_device *dev)
volatile struct iasetup_cmd_struct *ias_cmd;
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int num_addrs = netdev_mc_count(dev);
ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
@@ -787,8 +786,9 @@ static int init586(struct net_device *dev)
mc_cmd->cmd_link = 0xffff;
mc_cmd->mc_cnt = num_addrs * 6;
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
- memcpy((char *) mc_cmd->mc_list[i++], dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy((char *) mc_cmd->mc_list[i++],
+ ha->addr, 6);
p->scb->cbl_offset = make16(mc_cmd);
p->scb->cmd = CUC_START;
elmc_id_attn586();
@@ -1152,7 +1152,6 @@ static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
p->scb->cmd = CUC_START;
p->xmit_cmds[0]->cmd_status = 0;
elmc_attn586();
- dev->trans_start = jiffies;
if (!i) {
dev_kfree_skb(skb);
}
@@ -1176,7 +1175,6 @@ static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
- dev->trans_start = jiffies;
p->nop_point = next_nop;
dev_kfree_skb(skb);
#endif
@@ -1190,7 +1188,6 @@ static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
= make16((p->nop_cmds[next_nop]));
p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
- dev->trans_start = jiffies;
p->xmit_count = next_nop;
if (p->xmit_count != p->xmit_last)
netif_wake_queue(dev);
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 5c07b14..38395df 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1533,7 +1533,7 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
{
unsigned char block[62];
unsigned char *bp;
- struct dev_mc_list *dmc;
+ struct netdev_hw_addr *ha;
if(retry==0)
lp->mc_list_valid = 0;
@@ -1543,8 +1543,8 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
block[0]=netdev_mc_count(dev);
bp=block+2;
- netdev_for_each_mc_addr(dmc, dev) {
- memcpy(bp, dmc->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(bp, ha->addr, 6);
bp+=6;
}
if(mc32_command_nowait(dev, 2, block,
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 5f92fdb..d75803e 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1855,7 +1855,6 @@ leave_media_alone:
mod_timer(&vp->timer, RUN_AT(next_tick));
if (vp->deferred)
iowrite16(FakeIntr, ioaddr + EL3_CMD);
- return;
}
static void vortex_tx_timeout(struct net_device *dev)
@@ -1917,7 +1916,7 @@ static void vortex_tx_timeout(struct net_device *dev)
/* Issue Tx Enable */
iowrite16(TxEnable, ioaddr + EL3_CMD);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Switch to register set 7 for normal use. */
EL3WINDOW(7);
@@ -2063,7 +2062,6 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- dev->trans_start = jiffies;
/* Clear the Tx status stack. */
{
@@ -2129,8 +2127,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i;
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
- skb->len-skb->data_len, PCI_DMA_TODEVICE));
- vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len);
+ skb_headlen(skb), PCI_DMA_TODEVICE));
+ vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2174,7 +2172,6 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
iowrite16(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -2800,7 +2797,6 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev)
}
EL3WINDOW(old_window >> 13);
- return;
}
static int vortex_nway_reset(struct net_device *dev)
@@ -3122,7 +3118,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
mdio_delay();
}
- return;
}
/* ACPI: Advanced Configuration and Power Interface. */
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index 500e135..903bcb3 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -262,7 +262,7 @@ static int lance_reset (struct net_device *dev)
load_csrs (lp);
lance_init_ring (dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
status = init_restart_lance (lp);
#ifdef DEBUG_DRIVER
printk ("Lance restart=%d\n", status);
@@ -526,7 +526,7 @@ void lance_tx_timeout(struct net_device *dev)
{
printk("lance_tx_timeout\n");
lance_reset(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
@@ -574,7 +574,6 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
outs++;
/* Kick the lance: transmit now */
WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
spin_lock_irqsave (&lp->devlock, flags);
@@ -594,7 +593,7 @@ static void lance_load_multicast (struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
volatile u16 *mcast_table = (u16 *)&ib->filter;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
@@ -609,8 +608,8 @@ static void lance_load_multicast (struct net_device *dev)
ib->filter [1] = 0;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
@@ -620,7 +619,6 @@ static void lance_load_multicast (struct net_device *dev)
crc = crc >> 26;
mcast_table [crc >> 4] |= 1 << (crc & 0xf);
}
- return;
}
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index a09e6ce..9c14975 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -882,7 +882,6 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
spin_unlock_irqrestore(&cp->lock, intr_flags);
cpw8(TxPoll, NormalTxPoll);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -910,11 +909,11 @@ static void __cp_set_rx_mode (struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
@@ -1225,8 +1224,6 @@ static void cp_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
spin_unlock_irqrestore(&cp->lock, flags);
-
- return;
}
#ifdef BROKEN
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index f0d23de..4ba7293 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1716,8 +1716,6 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
- dev->trans_start = jiffies;
-
tp->cur_tx++;
if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
@@ -2503,11 +2501,11 @@ static void __set_rx_mode (struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
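
The conversions above all end in the same multicast idiom: the top six bits of the Ethernet CRC of each address pick one bit out of a 64-bit hash filter, kept as two 32-bit words that are later written to the hardware's filter registers. The snippet below just shows the indexing step, with the CRC result assumed rather than recomputed:

/* Sketch: mapping a 6-bit hash (ether_crc(...) >> 26) into mc_filter[2]. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mc_filter[2] = { 0, 0 };
	int bit_nr = 43;	/* pretend ether_crc(ETH_ALEN, addr) >> 26 == 43 */

	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);

	printf("mc_filter = { 0x%08x, 0x%08x }\n", mc_filter[0], mc_filter[1]);
	return 0;
}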
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 56e68db..dd8dc15 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -1050,7 +1050,7 @@ static void i596_tx_timeout (struct net_device *dev)
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -1060,7 +1060,6 @@ static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tx_cmd *tx_cmd;
struct i596_tbd *tbd;
short length = skb->len;
- dev->trans_start = jiffies;
DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n",
dev->name, skb->len, skb->data));
@@ -1542,7 +1541,7 @@ static void set_multicast_list(struct net_device *dev)
}
if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char *cp;
struct mc_cmd *cmd;
@@ -1552,10 +1551,10 @@ static void set_multicast_list(struct net_device *dev)
cmd->cmd.command = CmdMulticastList;
cmd->mc_cnt = cnt * ETH_ALEN;
cp = cmd->mc_addrs;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (!cnt--)
break;
- memcpy(cp, dmi->dmi_addr, ETH_ALEN);
+ memcpy(cp, ha->addr, ETH_ALEN);
if (i596_debug > 1)
DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
dev->name, cp));
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 7b832c7..2decc59 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -483,7 +483,7 @@ config XTENSA_XT2000_SONIC
This is the driver for the onboard card of the Xtensa XT2000 board.
config MIPS_AU1X00_ENET
- bool "MIPS AU1000 Ethernet support"
+ tristate "MIPS AU1000 Ethernet support"
depends on SOC_AU1X00
select PHYLIB
select CRC32
@@ -887,6 +887,13 @@ config BFIN_MAC_RMII
help
Use Reduced PHY MII Interface
+config BFIN_MAC_USE_HWSTAMP
+ bool "Use IEEE 1588 hwstamp"
+ depends on BFIN_MAC && BF518
+ default y
+ help
+ To support the IEEE 1588 Precision Time Protocol (PTP), select y here
+
config SMC9194
tristate "SMC 9194 support"
depends on NET_VENDOR_SMC && (ISA || MAC && BROKEN)
@@ -1453,20 +1460,6 @@ config FORCEDETH
To compile this driver as a module, choose M here. The module
will be called forcedeth.
-config FORCEDETH_NAPI
- bool "Use Rx Polling (NAPI) (EXPERIMENTAL)"
- depends on FORCEDETH && EXPERIMENTAL
- help
- NAPI is a new driver API designed to reduce CPU and interrupt load
- when the driver is receiving lots of packets from the card. It is
- still somewhat experimental and thus not yet enabled by default.
-
- If your estimated Rx load is 10kpps or more, or if the card will be
- deployed on potentially unfriendly networks (e.g. in a firewall),
- then say Y here.
-
- If in doubt, say N.
-
config CS89x0
tristate "CS89x0 support"
depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \
@@ -1916,6 +1909,7 @@ config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
+ select PHYLIB
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -2434,8 +2428,8 @@ config MV643XX_ETH
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
+ depends on PPC || MICROBLAZE
select PHYLIB
- depends on PPC_DCR_NATIVE
help
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
core used in Xilinx Spartan and Virtex FPGAs
@@ -2618,11 +2612,11 @@ config EHEA
will be called ehea.
config ENIC
- tristate "Cisco 10G Ethernet NIC support"
+ tristate "Cisco VIC Ethernet NIC Support"
depends on PCI && INET
select INET_LRO
help
- This enables the support for the Cisco 10G Ethernet card.
+ This enables the support for the Cisco VIC Ethernet card.
config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
@@ -2862,6 +2856,8 @@ source "drivers/ieee802154/Kconfig"
source "drivers/s390/net/Kconfig"
+source "drivers/net/caif/Kconfig"
+
config XEN_NETDEV_FRONTEND
tristate "Xen network device frontend driver"
depends on XEN
@@ -3180,17 +3176,12 @@ config PPPOATM
config PPPOL2TP
tristate "PPP over L2TP (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PPP && INET
+ depends on EXPERIMENTAL && L2TP && PPP
help
Support for PPP-over-L2TP socket family. L2TP is a protocol
used by ISPs and enterprises to tunnel PPP traffic over UDP
tunnels. L2TP is replacing PPTP for VPN uses.
- This kernel component handles only L2TP data packets: a
- userland daemon handles L2TP the control protocol (tunnel
- and session setup). One such daemon is OpenL2TP
- (http://openl2tp.sourceforge.net/).
-
config SLIP
tristate "SLIP (serial line) support"
---help---
@@ -3277,15 +3268,14 @@ config NET_FC
"SCSI generic support".
config NETCONSOLE
- tristate "Network console logging support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "Network console logging support"
---help---
If you want to log kernel messages over the network, enable this.
See <file:Documentation/networking/netconsole.txt> for details.
config NETCONSOLE_DYNAMIC
- bool "Dynamic reconfiguration of logging targets (EXPERIMENTAL)"
- depends on NETCONSOLE && SYSFS && EXPERIMENTAL
+ bool "Dynamic reconfiguration of logging targets"
+ depends on NETCONSOLE && SYSFS
select CONFIGFS_FS
help
This option enables the ability to dynamically reconfigure target
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 12b280a..0a0512a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -161,7 +161,7 @@ obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
-obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o
+obj-$(CONFIG_PPPOL2TP) += pppox.o
obj-$(CONFIG_SLIP) += slip.o
obj-$(CONFIG_SLHC) += slhc.o
@@ -292,5 +292,6 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
obj-$(CONFIG_SFC) += sfc/
obj-$(CONFIG_WIMAX) += wimax/
+obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index a8f0512..f142cc2 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -525,7 +525,7 @@ static inline int lance_reset (struct net_device *dev)
load_csrs (lp);
lance_init_ring (dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_start_queue(dev);
status = init_restart_lance (lp);
@@ -588,7 +588,6 @@ static netdev_tx_t lance_start_xmit (struct sk_buff *skb,
/* Kick the lance: transmit now */
ll->rdp = LE_C0_INEA | LE_C0_TDMD;
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
local_irq_restore(flags);
@@ -602,7 +601,7 @@ static void lance_load_multicast (struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
volatile u16 *mcast_table = (u16 *)&ib->filter;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
@@ -617,8 +616,8 @@ static void lance_load_multicast (struct net_device *dev)
ib->filter [1] = 0;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
@@ -628,7 +627,6 @@ static void lance_load_multicast (struct net_device *dev)
crc = crc >> 26;
mcast_table [crc >> 4] |= 1 << (crc & 0xf);
}
- return;
}
static void lance_set_multicast (struct net_device *dev)
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index eac7338..b9115a7 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -307,8 +307,6 @@ static void ac_reset_8390(struct net_device *dev)
ei_status.txing = 0;
outb(AC_ENABLE, ioaddr + AC_RESET_PORT);
if (ei_debug > 1) printk("reset done\n");
-
- return;
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 97a3dfd..b9a59160 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -661,7 +661,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
dma_addr_t mapping;
ringp = &ap->skb->rx_std_skbuff[i];
- mapping = pci_unmap_addr(ringp, mapping);
+ mapping = dma_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_STD_BUFSIZE,
PCI_DMA_FROMDEVICE);
@@ -681,7 +681,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
dma_addr_t mapping;
ringp = &ap->skb->rx_mini_skbuff[i];
- mapping = pci_unmap_addr(ringp,mapping);
+ mapping = dma_unmap_addr(ringp,mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_MINI_BUFSIZE,
PCI_DMA_FROMDEVICE);
@@ -700,7 +700,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
dma_addr_t mapping;
ringp = &ap->skb->rx_jumbo_skbuff[i];
- mapping = pci_unmap_addr(ringp, mapping);
+ mapping = dma_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_JUMBO_BUFSIZE,
PCI_DMA_FROMDEVICE);
@@ -1683,7 +1683,7 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
ACE_STD_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_std_skbuff[idx].skb = skb;
- pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
+ dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
mapping, mapping);
rd = &ap->rx_std_ring[idx];
@@ -1744,7 +1744,7 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
ACE_MINI_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_mini_skbuff[idx].skb = skb;
- pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
+ dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
mapping, mapping);
rd = &ap->rx_mini_ring[idx];
@@ -1800,7 +1800,7 @@ static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
ACE_JUMBO_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_jumbo_skbuff[idx].skb = skb;
- pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
+ dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
mapping, mapping);
rd = &ap->rx_jumbo_ring[idx];
@@ -2013,7 +2013,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
skb = rip->skb;
rip->skb = NULL;
pci_unmap_page(ap->pdev,
- pci_unmap_addr(rip, mapping),
+ dma_unmap_addr(rip, mapping),
mapsize,
PCI_DMA_FROMDEVICE);
skb_put(skb, retdesc->size);
@@ -2078,18 +2078,16 @@ static inline void ace_tx_int(struct net_device *dev,
do {
struct sk_buff *skb;
- dma_addr_t mapping;
struct tx_ring_info *info;
info = ap->skb->tx_skbuff + idx;
skb = info->skb;
- mapping = pci_unmap_addr(info, mapping);
- if (mapping) {
- pci_unmap_page(ap->pdev, mapping,
- pci_unmap_len(info, maplen),
+ if (dma_unmap_len(info, maplen)) {
+ pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
+ dma_unmap_len(info, maplen),
PCI_DMA_TODEVICE);
- pci_unmap_addr_set(info, mapping, 0);
+ dma_unmap_len_set(info, maplen, 0);
}
if (skb) {
@@ -2377,14 +2375,12 @@ static int ace_close(struct net_device *dev)
for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
struct sk_buff *skb;
- dma_addr_t mapping;
struct tx_ring_info *info;
info = ap->skb->tx_skbuff + i;
skb = info->skb;
- mapping = pci_unmap_addr(info, mapping);
- if (mapping) {
+ if (dma_unmap_len(info, maplen)) {
if (ACE_IS_TIGON_I(ap)) {
/* NB: TIGON_1 is special, tx_ring is in io space */
struct tx_desc __iomem *tx;
@@ -2395,10 +2391,10 @@ static int ace_close(struct net_device *dev)
} else
memset(ap->tx_ring + i, 0,
sizeof(struct tx_desc));
- pci_unmap_page(ap->pdev, mapping,
- pci_unmap_len(info, maplen),
+ pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
+ dma_unmap_len(info, maplen),
PCI_DMA_TODEVICE);
- pci_unmap_addr_set(info, mapping, 0);
+ dma_unmap_len_set(info, maplen, 0);
}
if (skb) {
dev_kfree_skb(skb);
@@ -2433,8 +2429,8 @@ ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
info = ap->skb->tx_skbuff + idx;
info->skb = tail;
- pci_unmap_addr_set(info, mapping, mapping);
- pci_unmap_len_set(info, maplen, skb->len);
+ dma_unmap_addr_set(info, mapping, mapping);
+ dma_unmap_len_set(info, maplen, skb->len);
return mapping;
}
@@ -2553,8 +2549,8 @@ restart:
} else {
info->skb = NULL;
}
- pci_unmap_addr_set(info, mapping, mapping);
- pci_unmap_len_set(info, maplen, frag->size);
+ dma_unmap_addr_set(info, mapping, mapping);
+ dma_unmap_len_set(info, maplen, frag->size);
ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
}
}
@@ -2923,8 +2919,6 @@ static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int siz
dest += tsize;
size -= tsize;
}
-
- return;
}
diff --git a/drivers/net/acenic.h b/drivers/net/acenic.h
index 17079b9..0681da7 100644
--- a/drivers/net/acenic.h
+++ b/drivers/net/acenic.h
@@ -589,7 +589,7 @@ struct ace_info {
struct ring_info {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
@@ -600,8 +600,8 @@ struct ring_info {
*/
struct tx_ring_info {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
- DECLARE_PCI_UNMAP_LEN(maplen)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
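
The acenic conversion above replaces the old pci_unmap_addr/len state macros with the generic dma_unmap_* ones: they declare optional bookkeeping fields that compile away on platforms that do not need unmap state. A small sketch of the usual pattern, with struct and function names invented for illustration:

/* Sketch: remembering DMA mapping state with the dma_unmap_* helpers. */
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

struct demo_tx_info {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(maplen);
};

static void demo_map(struct pci_dev *pdev, struct demo_tx_info *info,
		     struct sk_buff *skb)
{
	dma_addr_t mapping = pci_map_single(pdev, skb->data, skb->len,
					    PCI_DMA_TODEVICE);

	info->skb = skb;
	dma_unmap_addr_set(info, mapping, mapping);
	dma_unmap_len_set(info, maplen, skb->len);
}

static void demo_unmap(struct pci_dev *pdev, struct demo_tx_info *info)
{
	if (dma_unmap_len(info, maplen)) {
		pci_unmap_single(pdev, dma_unmap_addr(info, mapping),
				 dma_unmap_len(info, maplen),
				 PCI_DMA_TODEVICE);
		dma_unmap_len_set(info, maplen, 0);
	}
}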
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 8d58f0a..585c25f 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1339,8 +1339,6 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
writel( VAL1 | TDMD0, lp->mmio + CMD0);
writel( VAL2 | RDMD0,lp->mmio + CMD0);
- dev->trans_start = jiffies;
-
if(amd8111e_tx_queue_avail(lp) < 0){
netif_stop_queue(dev);
}
@@ -1376,7 +1374,7 @@ list to the device.
*/
static void amd8111e_set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
struct amd8111e_priv *lp = netdev_priv(dev);
u32 mc_filter[2] ;
int bit_num;
@@ -1407,8 +1405,8 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
/* load all the multicast addresses in the logic filter */
lp->options |= OPTION_MULTICAST_ENABLE;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mc_ptr, dev) {
- bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
}
amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
diff --git a/drivers/net/apne.c b/drivers/net/apne.c
index 1437f5d..2fe60f1 100644
--- a/drivers/net/apne.c
+++ b/drivers/net/apne.c
@@ -521,7 +521,6 @@ apne_block_output(struct net_device *dev, int count,
outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
static irqreturn_t apne_interrupt(int irq, void *dev_id)
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 6f8d620..748c9f5 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -593,8 +593,6 @@ static void cops_load (struct net_device *dev)
tangent_wait_reset(ioaddr);
inb(ioaddr); /* Clear initial ready signal. */
}
-
- return;
}
/*
@@ -701,8 +699,6 @@ static void cops_poll(unsigned long ltdev)
/* poll 20 times per second */
cops_timer.expires = jiffies + HZ/20;
add_timer(&cops_timer);
-
- return;
}
/*
@@ -866,7 +862,7 @@ static void cops_timeout(struct net_device *dev)
}
printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name);
cops_jumpstart(dev); /* Restart the card. */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -919,9 +915,8 @@ static netdev_tx_t cops_send_packet(struct sk_buff *skb,
/* Done sending packet, update counters and cleanup. */
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
/*
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 6af65b6..adc0755 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -641,7 +641,6 @@ done:
inb_p(base+7);
inb_p(base+7);
}
- return;
}
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index d8f0293..a746ba2 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -654,7 +654,6 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
}
}
retval = NETDEV_TX_OK;
- dev->trans_start = jiffies;
lp->next_tx = txbuf;
} else {
retval = NETDEV_TX_BUSY;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 2c712af..48a1dbf 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -164,8 +164,8 @@ static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = {
{ 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
- { 0x10B5, 0x9030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
- { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
+ { 0x10B5, 0x9030, 0x10B5, 0x2978, 0, 0, ARC_CAN_10MBIT },
+ { 0x10B5, 0x9050, 0x10B5, 0x2273, 0, 0, ARC_CAN_10MBIT },
{ 0x14BA, 0x6000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x10B5, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{0,}
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index 4b30a46..39214e5 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -677,8 +677,6 @@ static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb,
lance->RAP = CSR0; /* PCnet-ISA Controller Status */
lance->RDP = INEA|TDMD;
- dev->trans_start = jiffies;
-
if (lowb(priv->tx_ring[(entry+1) % TX_RING_SIZE]->TMD1) != 0) {
netif_stop_queue(dev);
priv->tx_full = 1;
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index f1f58c5..8c496fb 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -383,12 +383,12 @@ static void am79c961_setmulticastlist (struct net_device *dev)
} else if (dev->flags & IFF_ALLMULTI) {
memset(multi_hash, 0xff, sizeof(multi_hash));
} else {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
memset(multi_hash, 0x00, sizeof(multi_hash));
- netdev_for_each_mc_addr(dmi, dev)
- am79c961_mc_hash(dmi->dmi_addr, multi_hash);
+ netdev_for_each_mc_addr(ha, dev)
+ am79c961_mc_hash(ha->addr, multi_hash);
}
spin_lock_irqsave(&priv->chip_lock, flags);
@@ -469,7 +469,6 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&priv->chip_lock, flags);
write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&priv->chip_lock, flags);
/*
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index aed5b54..e07b314 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -557,14 +557,14 @@ static int hash_get_index(__u8 *addr)
*/
static void at91ether_sethashtable(struct net_device *dev)
{
- struct dev_mc_list *curr;
+ struct netdev_hw_addr *ha;
unsigned long mc_filter[2];
unsigned int bitnr;
mc_filter[0] = mc_filter[1] = 0;
- netdev_for_each_mc_addr(curr, dev) {
- bitnr = hash_get_index(curr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = hash_get_index(ha->addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
@@ -824,7 +824,6 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Set length of the packet in the Transmit Control register */
at91_emac_write(AT91_EMAC_TCR, skb->len);
- dev->trans_start = jiffies;
} else {
printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
return NETDEV_TX_BUSY; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index cd17d09..4a5ec94 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -374,8 +374,6 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
-
spin_lock_irq(&ep->tx_pending_lock);
ep->tx_pending++;
if (ep->tx_pending == TX_QUEUE_ENTRIES)
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index e47c0d9..b17ab51 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -736,7 +736,6 @@ ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
local_irq_restore(flags);
/* handle transmit */
- dev->trans_start = jiffies;
/* check to see if we have room for a full sized ether frame */
tmp = priv(dev)->tx_head;
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index d9de9bc..1361b73 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -529,7 +529,6 @@ ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY; /* unable to queue */
}
- dev->trans_start = jiffies;
ptr = 0x600 * priv(dev)->tx_head;
priv(dev)->tx_head = next_ptr;
next_ptr *= 0x600;
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 6be8b09..24df032 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -708,7 +708,6 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
/* NPE firmware pads short frames with zeros internally */
wmb();
queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
- dev->trans_start = jiffies;
if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
@@ -736,7 +735,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
static void eth_set_mcast_list(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u8 diffs[ETH_ALEN], *addr;
int i;
@@ -749,11 +748,11 @@ static void eth_set_mcast_list(struct net_device *dev)
memset(diffs, 0, ETH_ALEN);
addr = NULL;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (!addr)
- addr = mclist->dmi_addr; /* first MAC address */
+ addr = ha->addr; /* first MAC address */
for (i = 0; i < ETH_ALEN; i++)
- diffs[i] |= addr[i] ^ mclist->dmi_addr[i];
+ diffs[i] |= addr[i] ^ ha->addr[i];
}
for (i = 0; i < ETH_ALEN; i++) {
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 84f8a8f..54c6d84 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -332,16 +332,16 @@ ks8695_init_partial_multicast(struct ks8695_priv *ksp,
{
u32 low, high;
int i;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
i = 0;
- netdev_for_each_mc_addr(dmi, ndev) {
+ netdev_for_each_mc_addr(ha, ndev) {
/* Ran out of space in chip? */
BUG_ON(i == KS8695_NR_ADDRESSES);
- low = (dmi->dmi_addr[2] << 24) | (dmi->dmi_addr[3] << 16) |
- (dmi->dmi_addr[4] << 8) | (dmi->dmi_addr[5]);
- high = (dmi->dmi_addr[0] << 8) | (dmi->dmi_addr[1]);
+ low = (ha->addr[2] << 24) | (ha->addr[3] << 16) |
+ (ha->addr[4] << 8) | (ha->addr[5]);
+ high = (ha->addr[0] << 8) | (ha->addr[1]);
ks8695_writereg(ksp, KS8695_AAL_(i), low);
ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
@@ -1302,8 +1302,6 @@ ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (++ksp->tx_ring_used == MAX_TX_DESC)
netif_stop_queue(ndev);
- ndev->trans_start = jiffies;
-
/* Kick the TX DMA in case it decided to go IDLE */
ks8695_writereg(ksp, KS8695_DTSC, 0);
@@ -1472,7 +1470,6 @@ ks8695_probe(struct platform_device *pdev)
/* Configure our private structure a little */
ksp = netdev_priv(ndev);
- memset(ksp, 0, sizeof(struct ks8695_priv));
ksp->dev = &pdev->dev;
ksp->ndev = ndev;
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index f7c9ca1..2e85246 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -483,7 +483,7 @@ static void w90p910_reset_mac(struct net_device *dev)
w90p910_init_desc(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
ether->cur_tx = 0x0;
ether->finish_tx = 0x0;
ether->cur_rx = 0x0;
@@ -497,7 +497,7 @@ static void w90p910_reset_mac(struct net_device *dev)
w90p910_trigger_tx(dev);
w90p910_trigger_rx(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
@@ -634,8 +634,6 @@ static int w90p910_send_frame(struct net_device *dev,
txbd = &ether->tdesc->desclist[ether->cur_tx];
- dev->trans_start = jiffies;
-
if (txbd->mode & TX_OWEN_DMA)
netif_stop_queue(dev);
@@ -744,7 +742,6 @@ static void netdev_rx(struct net_device *dev)
return;
}
- skb->dev = dev;
skb_reserve(skb, 2);
skb_put(skb, length);
skb_copy_to_linear_data(skb, data, length);
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 10a20fb..93185f5 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -583,7 +583,7 @@ static void net_tx_timeout (struct net_device *dev)
outb (0x00, ioaddr + TX_START);
outb (0x03, ioaddr + COL16CNTL);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
lp->tx_started = 0;
lp->tx_queue_ready = 1;
@@ -636,7 +636,6 @@ static netdev_tx_t net_send_packet (struct sk_buff *skb,
outb (0x80 | lp->tx_queue, ioaddr + TX_START);
lp->tx_queue = 0;
lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
lp->tx_started = 1;
netif_start_queue (dev);
} else if (lp->tx_queue_len < 4096 - 1502)
@@ -796,7 +795,6 @@ net_rx(struct net_device *dev)
printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
dev->name, inb(ioaddr + RX_MODE), i);
}
- return;
}
/* The inverse routine to net_open(). */
@@ -847,12 +845,12 @@ set_rx_mode(struct net_device *dev)
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
unsigned int bit =
- ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
+ ether_crc_le(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit >> 3] |= (1 << bit);
}
outb(0x02, ioaddr + RX_MODE); /* Use normal mode. */
@@ -870,7 +868,6 @@ set_rx_mode(struct net_device *dev)
outw(saved_bank, ioaddr + CONFIG_0);
}
spin_unlock_irqrestore (&lp->lock, flags);
- return;
}
#ifdef MODULE
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index a8686bf..b57d7de 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -767,8 +767,8 @@ static void lance_tx_timeout (struct net_device *dev)
/* lance_restart, essentially */
lance_init_ring(dev);
REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
- dev->trans_start = jiffies;
- netif_wake_queue (dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
}
/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
@@ -836,7 +836,6 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
/* Trigger an immediate send poll. */
DREG = CSR0_INEA | CSR0_TDMD;
- dev->trans_start = jiffies;
if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
TMD1_OWN_HOST)
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index 3233924..7c52150 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -263,8 +263,6 @@ static void atl1c_get_wol(struct net_device *netdev,
wol->wolopts |= WAKE_MAGIC;
if (adapter->wol & AT_WUFC_LNKC)
wol->wolopts |= WAKE_PHY;
-
- return;
}
static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 50dc531..1c3c046 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -317,8 +317,6 @@ static void atl1c_common_task(struct work_struct *work)
if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE)
atl1c_check_link_status(adapter);
-
- return;
}
@@ -354,7 +352,7 @@ static void atl1c_set_multi(struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 mac_ctrl_data;
u32 hash_value;
@@ -377,8 +375,8 @@ static void atl1c_set_multi(struct net_device *netdev)
AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* comoute mc addresses' hash value ,and put it into hash table */
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- hash_value = atl1c_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl1c_hash_mc_addr(hw, ha->addr);
atl1c_hash_set(hw, hash_value);
}
}
@@ -1817,7 +1815,6 @@ rrs_checked:
atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
skb_put(skb, length - ETH_FCS_LEN);
skb->protocol = eth_type_trans(skb, netdev);
- skb->dev = netdev;
atl1c_rx_checksum(adapter, skb, rrs);
if (unlikely(adapter->vlgrp) && rrs->word3 & RRS_VLAN_INS) {
u16 vlan;
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index ffd696e..6943a6c 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -338,8 +338,6 @@ static void atl1e_get_wol(struct net_device *netdev,
wol->wolopts |= WAKE_MAGIC;
if (adapter->wol & AT_WUFC_LNKC)
wol->wolopts |= WAKE_PHY;
-
- return;
}
static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 73302ae..1acea57 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -284,7 +284,7 @@ static void atl1e_set_multi(struct net_device *netdev)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
struct atl1e_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 mac_ctrl_data = 0;
u32 hash_value;
@@ -307,8 +307,8 @@ static void atl1e_set_multi(struct net_device *netdev)
AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* comoute mc addresses' hash value ,and put it into hash table */
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- hash_value = atl1e_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl1e_hash_mc_addr(hw, ha->addr);
atl1e_hash_set(hw, hash_value);
}
}
@@ -707,8 +707,6 @@ static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
adapter->ring_vir_addr = NULL;
adapter->rx_ring.desc = NULL;
rwlock_init(&adapter->tx_ring.tx_lock);
-
- return;
}
/*
@@ -905,8 +903,6 @@ static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);
/* Load all of base address above */
AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
-
- return;
}
static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
@@ -950,7 +946,6 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
(((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK)
<< TXQ_CTRL_NUM_TPD_BURST_SHIFT)
| TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN);
- return;
}
static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
@@ -1004,7 +999,6 @@ static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;
AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
- return;
}
static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
@@ -1024,7 +1018,6 @@ static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
<< DMA_CTRL_DMAW_DLY_CNT_SHIFT;
AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
- return;
}
static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
@@ -1428,7 +1421,6 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
"Memory squeeze, deferring packet\n");
goto skip_pkt;
}
- skb->dev = netdev;
memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
skb_put(skb, packet_size);
skb->protocol = eth_type_trans(skb, netdev);
@@ -1680,7 +1672,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
{
struct atl1e_tpd_desc *use_tpd = NULL;
struct atl1e_tx_buffer *tx_buffer = NULL;
- u16 buf_len = skb->len - skb->data_len;
+ u16 buf_len = skb_headlen(skb);
u16 map_len = 0;
u16 mapped_len = 0;
u16 hdr_len = 0;
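[Note] skb->len - skb->data_len is exactly the length of the skb's linear (non-paged) head, and skb_headlen() is the canonical helper for it; the atl1e/atl1/benet hunks switch to it when mapping the head before walking the paged fragments. A short sketch of the idiom:

#include <linux/skbuff.h>
#include <linux/pci.h>

/* Sketch: map only the linear head of an skb for DMA. */
static dma_addr_t example_map_head(struct pci_dev *pdev, struct sk_buff *skb)
{
	unsigned int head = skb_headlen(skb);	/* == skb->len - skb->data_len */

	/* paged fragments (skb_shinfo(skb)->frags[]) are mapped separately */
	return pci_map_single(pdev, skb->data, head, PCI_DMA_TODEVICE);
}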
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 0ebd820..63b9ba0 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1830,8 +1830,6 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
adapter->hw_csum_good++;
return;
}
-
- return;
}
/*
@@ -2347,7 +2345,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
- int len = skb->len;
+ int len;
int tso;
int count = 1;
int ret_val;
@@ -2359,7 +2357,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
unsigned int f;
unsigned int proto_hdr_len;
- len -= skb->data_len;
+ len = skb_headlen(skb);
if (unlikely(skb->len <= 0)) {
dev_kfree_skb_any(skb);
@@ -3390,7 +3388,6 @@ static void atl1_get_wol(struct net_device *netdev,
wol->wolopts = 0;
if (adapter->wol & ATLX_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
- return;
}
static int atl1_set_wol(struct net_device *netdev,
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 54662f2..8da8738 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -136,7 +136,7 @@ static void atl2_set_multi(struct net_device *netdev)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
struct atl2_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 rctl;
u32 hash_value;
@@ -158,8 +158,8 @@ static void atl2_set_multi(struct net_device *netdev)
ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* comoute mc addresses' hash value ,and put it into hash table */
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl2_hash_mc_addr(hw, ha->addr);
atl2_hash_set(hw, hash_value);
}
}
@@ -422,7 +422,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
netdev->stats.rx_dropped++;
break;
}
- skb->dev = netdev;
memcpy(skb->data, rxd->packet, rx_size);
skb_put(skb, rx_size);
skb->protocol = eth_type_trans(skb, netdev);
@@ -893,7 +892,6 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
(adapter->txd_write_ptr >> 2));
mmiowb();
- netdev->trans_start = jiffies;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
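[Note] Several receive paths in these hunks drop an explicit skb->dev = netdev assignment: eth_type_trans() already records the receiving device in the skb (besides pulling the Ethernet header and returning the protocol), so the assignment was redundant. Typical rx hand-off, as a sketch:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Sketch of an rx completion handler passing a frame to the stack. */
static void example_rx_one(struct net_device *dev, struct sk_buff *skb, int len)
{
	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev = dev */
	netif_receive_skb(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}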
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index 72f3306..f979ea2 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -123,7 +123,7 @@ static void atlx_set_multi(struct net_device *netdev)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
struct atlx_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 rctl;
u32 hash_value;
@@ -144,8 +144,8 @@ static void atlx_set_multi(struct net_device *netdev)
iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
/* compute mc addresses' hash value ,and put it into hash table */
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atlx_hash_mc_addr(hw, ha->addr);
atlx_hash_set(hw, hash_value);
}
}
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 55039d4..bd2f9d3 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -547,7 +547,7 @@ static void tx_timeout(struct net_device *dev)
dev->stats.tx_errors++;
/* Try to restart the adapter. */
hardware_init(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
dev->stats.tx_errors++;
}
@@ -586,7 +586,6 @@ static netdev_tx_t atp_send_packet(struct sk_buff *skb,
write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
write_reg_high(ioaddr, IMR, ISRh_RxErr);
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
return NETDEV_TX_OK;
}
@@ -803,7 +802,6 @@ static void net_rx(struct net_device *dev)
done:
write_reg(ioaddr, CMR1, CMR1_NextPkt);
lp->last_rx_time = jiffies;
- return;
}
static void read_block(long ioaddr, int length, unsigned char *p, int data_mode)
@@ -882,11 +880,11 @@ static void set_rx_mode_8012(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
new_mode = CMR2h_Normal;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
}
new_mode = CMR2h_Normal;
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 4da191b..ece6128 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -75,14 +75,19 @@ static int au1000_debug = 5;
static int au1000_debug = 3;
#endif
+#define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
#define DRV_NAME "au1000_eth"
-#define DRV_VERSION "1.6"
+#define DRV_VERSION "1.7"
#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
#define DRV_DESC "Au1xxx on-chip Ethernet driver"
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
/*
* Theory of operation
@@ -148,7 +153,7 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
* specific irq-map
*/
-static void enable_mac(struct net_device *dev, int force_reset)
+static void au1000_enable_mac(struct net_device *dev, int force_reset)
{
unsigned long flags;
struct au1000_private *aup = netdev_priv(dev);
@@ -182,8 +187,7 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
while (*mii_control_reg & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
- printk(KERN_ERR "%s: read_MII busy timeout!!\n",
- dev->name);
+ netdev_err(dev, "read_MII busy timeout!!\n");
return -1;
}
}
@@ -197,8 +201,7 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
while (*mii_control_reg & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
- printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
- dev->name);
+ netdev_err(dev, "mdio_read busy timeout!!\n");
return -1;
}
}
@@ -217,8 +220,7 @@ static void au1000_mdio_write(struct net_device *dev, int phy_addr,
while (*mii_control_reg & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
- printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
- dev->name);
+ netdev_err(dev, "mdio_write busy timeout!!\n");
return;
}
}
@@ -236,7 +238,7 @@ static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
* _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) */
struct net_device *const dev = bus->priv;
- enable_mac(dev, 0); /* make sure the MAC associated with this
+ au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
* mii_bus is enabled */
return au1000_mdio_read(dev, phy_addr, regnum);
}
@@ -246,7 +248,7 @@ static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
{
struct net_device *const dev = bus->priv;
- enable_mac(dev, 0); /* make sure the MAC associated with this
+ au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
* mii_bus is enabled */
au1000_mdio_write(dev, phy_addr, regnum, value);
return 0;
@@ -256,28 +258,26 @@ static int au1000_mdiobus_reset(struct mii_bus *bus)
{
struct net_device *const dev = bus->priv;
- enable_mac(dev, 0); /* make sure the MAC associated with this
+ au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
* mii_bus is enabled */
return 0;
}
-static void hard_stop(struct net_device *dev)
+static void au1000_hard_stop(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk(KERN_INFO "%s: hard stop\n", dev->name);
+ netif_dbg(aup, drv, dev, "hard stop\n");
aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
au_sync_delay(10);
}
-static void enable_rx_tx(struct net_device *dev)
+static void au1000_enable_rx_tx(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
+ netif_dbg(aup, hw, dev, "enable_rx_tx\n");
aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
au_sync_delay(10);
@@ -297,16 +297,15 @@ au1000_adjust_link(struct net_device *dev)
spin_lock_irqsave(&aup->lock, flags);
if (phydev->link && (aup->old_speed != phydev->speed)) {
- // speed changed
+ /* speed changed */
- switch(phydev->speed) {
+ switch (phydev->speed) {
case SPEED_10:
case SPEED_100:
break;
default:
- printk(KERN_WARNING
- "%s: Speed (%d) is not 10/100 ???\n",
- dev->name, phydev->speed);
+ netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
+ phydev->speed);
break;
}
@@ -316,10 +315,10 @@ au1000_adjust_link(struct net_device *dev)
}
if (phydev->link && (aup->old_duplex != phydev->duplex)) {
- // duplex mode changed
+ /* duplex mode changed */
/* switching duplex mode requires to disable rx and tx! */
- hard_stop(dev);
+ au1000_hard_stop(dev);
if (DUPLEX_FULL == phydev->duplex)
aup->mac->control = ((aup->mac->control
@@ -331,14 +330,14 @@ au1000_adjust_link(struct net_device *dev)
| MAC_DISABLE_RX_OWN);
au_sync_delay(1);
- enable_rx_tx(dev);
+ au1000_enable_rx_tx(dev);
aup->old_duplex = phydev->duplex;
status_change = 1;
}
- if(phydev->link != aup->old_link) {
- // link state changed
+ if (phydev->link != aup->old_link) {
+ /* link state changed */
if (!phydev->link) {
/* link went down */
@@ -354,15 +353,15 @@ au1000_adjust_link(struct net_device *dev)
if (status_change) {
if (phydev->link)
- printk(KERN_INFO "%s: link up (%d/%s)\n",
- dev->name, phydev->speed,
+ netdev_info(dev, "link up (%d/%s)\n",
+ phydev->speed,
DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
else
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
}
}
-static int mii_probe (struct net_device *dev)
+static int au1000_mii_probe (struct net_device *dev)
{
struct au1000_private *const aup = netdev_priv(dev);
struct phy_device *phydev = NULL;
@@ -373,8 +372,7 @@ static int mii_probe (struct net_device *dev)
if (aup->phy_addr)
phydev = aup->mii_bus->phy_map[aup->phy_addr];
else
- printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
- dev->name);
+ netdev_info(dev, "using PHY-less setup\n");
return 0;
} else {
int phy_addr;
@@ -391,7 +389,7 @@ static int mii_probe (struct net_device *dev)
/* try harder to find a PHY */
if (!phydev && (aup->mac_id == 1)) {
/* no PHY found, maybe we have a dual PHY? */
- printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, "
+ dev_info(&dev->dev, ": no PHY found on MAC1, "
"let's see if it's attached to MAC0...\n");
/* find the first (lowest address) non-attached PHY on
@@ -417,7 +415,7 @@ static int mii_probe (struct net_device *dev)
}
if (!phydev) {
- printk (KERN_ERR DRV_NAME ":%s: no PHY found\n", dev->name);
+ netdev_err(dev, "no PHY found\n");
return -1;
}
@@ -428,7 +426,7 @@ static int mii_probe (struct net_device *dev)
0, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ netdev_err(dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
}
@@ -449,8 +447,8 @@ static int mii_probe (struct net_device *dev)
aup->old_duplex = -1;
aup->phy_dev = phydev;
- printk(KERN_INFO "%s: attached PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
+ netdev_info(dev, "attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d)\n",
phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
return 0;
@@ -462,7 +460,7 @@ static int mii_probe (struct net_device *dev)
* has the virtual and dma address of a buffer suitable for
* both, receive and transmit operations.
*/
-static db_dest_t *GetFreeDB(struct au1000_private *aup)
+static db_dest_t *au1000_GetFreeDB(struct au1000_private *aup)
{
db_dest_t *pDB;
pDB = aup->pDBfree;
@@ -473,7 +471,7 @@ static db_dest_t *GetFreeDB(struct au1000_private *aup)
return pDB;
}
-void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
+void au1000_ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
{
db_dest_t *pDBfree = aup->pDBfree;
if (pDBfree)
@@ -481,12 +479,12 @@ void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
aup->pDBfree = pDB;
}
-static void reset_mac_unlocked(struct net_device *dev)
+static void au1000_reset_mac_unlocked(struct net_device *dev)
{
struct au1000_private *const aup = netdev_priv(dev);
int i;
- hard_stop(dev);
+ au1000_hard_stop(dev);
*aup->enable = MAC_EN_CLOCK_ENABLE;
au_sync_delay(2);
@@ -507,18 +505,17 @@ static void reset_mac_unlocked(struct net_device *dev)
}
-static void reset_mac(struct net_device *dev)
+static void au1000_reset_mac(struct net_device *dev)
{
struct au1000_private *const aup = netdev_priv(dev);
unsigned long flags;
- if (au1000_debug > 4)
- printk(KERN_INFO "%s: reset mac, aup %x\n",
- dev->name, (unsigned)aup);
+ netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
+ (unsigned)aup);
spin_lock_irqsave(&aup->lock, flags);
- reset_mac_unlocked (dev);
+ au1000_reset_mac_unlocked (dev);
spin_unlock_irqrestore(&aup->lock, flags);
}
@@ -529,7 +526,7 @@ static void reset_mac(struct net_device *dev)
* these are not descriptors sitting in memory.
*/
static void
-setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
+au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
{
int i;
@@ -582,11 +579,25 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
info->regdump_len = 0;
}
+static void au1000_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ aup->msg_enable = value;
+}
+
+static u32 au1000_get_msglevel(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ return aup->msg_enable;
+}
+
static const struct ethtool_ops au1000_ethtool_ops = {
.get_settings = au1000_get_settings,
.set_settings = au1000_set_settings,
.get_drvinfo = au1000_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_msglevel = au1000_get_msglevel,
+ .set_msglevel = au1000_set_msglevel,
};
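[Note] The au1000 conversion replaces its private "au1000_debug > N" checks with the standard msg_enable bitmask: a default mask built from NETIF_MSG_* flags, ethtool get/set_msglevel to expose it, and netif_dbg()/netif_info() to test it per message class (runtime adjustable via ethtool's msglvl option). A small sketch of how such a mask is usually initialized and used; the "debug" module parameter name is only illustrative:

#include <linux/netdevice.h>
#include <linux/module.h>

static int debug = -1;			/* -1 means "use the default mask" */
module_param(debug, int, 0);

#define EXAMPLE_DEF_MSG (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

struct example_priv {
	u32 msg_enable;
};

static void example_init_msg(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	priv->msg_enable = netif_msg_init(debug, EXAMPLE_DEF_MSG);
	netif_dbg(priv, probe, dev, "message level set to 0x%x\n",
		  priv->msg_enable);
}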
@@ -606,11 +617,10 @@ static int au1000_init(struct net_device *dev)
int i;
u32 control;
- if (au1000_debug > 4)
- printk("%s: au1000_init\n", dev->name);
+ netif_dbg(aup, hw, dev, "au1000_init\n");
/* bring the device out of reset */
- enable_mac(dev, 1);
+ au1000_enable_mac(dev, 1);
spin_lock_irqsave(&aup->lock, flags);
@@ -649,7 +659,7 @@ static int au1000_init(struct net_device *dev)
return 0;
}
-static inline void update_rx_stats(struct net_device *dev, u32 status)
+static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
{
struct net_device_stats *ps = &dev->stats;
@@ -667,8 +677,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status)
ps->rx_crc_errors++;
if (status & RX_COLL)
ps->collisions++;
- }
- else
+ } else
ps->rx_bytes += status & RX_FRAME_LEN_MASK;
}
@@ -685,15 +694,14 @@ static int au1000_rx(struct net_device *dev)
db_dest_t *pDB;
u32 frmlen;
- if (au1000_debug > 5)
- printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
+ netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
prxd = aup->rx_dma_ring[aup->rx_head];
buff_stat = prxd->buff_stat;
while (buff_stat & RX_T_DONE) {
status = prxd->status;
pDB = aup->rx_db_inuse[aup->rx_head];
- update_rx_stats(dev, status);
+ au1000_update_rx_stats(dev, status);
if (!(status & RX_ERROR)) {
/* good frame */
@@ -701,9 +709,7 @@ static int au1000_rx(struct net_device *dev)
frmlen -= 4; /* Remove FCS */
skb = dev_alloc_skb(frmlen + 2);
if (skb == NULL) {
- printk(KERN_ERR
- "%s: Memory squeeze, dropping packet.\n",
- dev->name);
+ netdev_err(dev, "Memory squeeze, dropping packet.\n");
dev->stats.rx_dropped++;
continue;
}
@@ -713,8 +719,7 @@ static int au1000_rx(struct net_device *dev)
skb_put(skb, frmlen);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb); /* pass the packet to upper layers */
- }
- else {
+ } else {
if (au1000_debug > 4) {
if (status & RX_MISSED_FRAME)
printk("rx miss\n");
@@ -747,7 +752,7 @@ static int au1000_rx(struct net_device *dev)
return 0;
}
-static void update_tx_stats(struct net_device *dev, u32 status)
+static void au1000_update_tx_stats(struct net_device *dev, u32 status)
{
struct au1000_private *aup = netdev_priv(dev);
struct net_device_stats *ps = &dev->stats;
@@ -760,8 +765,7 @@ static void update_tx_stats(struct net_device *dev, u32 status)
ps->tx_errors++;
ps->tx_aborted_errors++;
}
- }
- else {
+ } else {
ps->tx_errors++;
ps->tx_aborted_errors++;
if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
@@ -783,7 +787,7 @@ static void au1000_tx_ack(struct net_device *dev)
ptxd = aup->tx_dma_ring[aup->tx_tail];
while (ptxd->buff_stat & TX_T_DONE) {
- update_tx_stats(dev, ptxd->status);
+ au1000_update_tx_stats(dev, ptxd->status);
ptxd->buff_stat &= ~TX_T_DONE;
ptxd->len = 0;
au_sync();
@@ -817,18 +821,18 @@ static int au1000_open(struct net_device *dev)
int retval;
struct au1000_private *aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk("%s: open: dev=%p\n", dev->name, dev);
+ netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);
- if ((retval = request_irq(dev->irq, au1000_interrupt, 0,
- dev->name, dev))) {
- printk(KERN_ERR "%s: unable to get IRQ %d\n",
- dev->name, dev->irq);
+ retval = request_irq(dev->irq, au1000_interrupt, 0,
+ dev->name, dev);
+ if (retval) {
+ netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
return retval;
}
- if ((retval = au1000_init(dev))) {
- printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
+ retval = au1000_init(dev);
+ if (retval) {
+ netdev_err(dev, "error in au1000_init\n");
free_irq(dev->irq, dev);
return retval;
}
@@ -841,8 +845,7 @@ static int au1000_open(struct net_device *dev)
netif_start_queue(dev);
- if (au1000_debug > 4)
- printk("%s: open: Initialization done.\n", dev->name);
+ netif_dbg(aup, drv, dev, "open: Initialization done.\n");
return 0;
}
@@ -852,15 +855,14 @@ static int au1000_close(struct net_device *dev)
unsigned long flags;
struct au1000_private *const aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk("%s: close: dev=%p\n", dev->name, dev);
+ netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
if (aup->phy_dev)
phy_stop(aup->phy_dev);
spin_lock_irqsave(&aup->lock, flags);
- reset_mac_unlocked (dev);
+ au1000_reset_mac_unlocked (dev);
/* stop the device */
netif_stop_queue(dev);
@@ -884,9 +886,8 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
db_dest_t *pDB;
int i;
- if (au1000_debug > 5)
- printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
- dev->name, (unsigned)aup, skb->len,
+ netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
+ (unsigned)aup, skb->len,
skb->data, aup->tx_head);
ptxd = aup->tx_dma_ring[aup->tx_head];
@@ -896,9 +897,8 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
aup->tx_full = 1;
return NETDEV_TX_BUSY;
- }
- else if (buff_stat & TX_T_DONE) {
- update_tx_stats(dev, ptxd->status);
+ } else if (buff_stat & TX_T_DONE) {
+ au1000_update_tx_stats(dev, ptxd->status);
ptxd->len = 0;
}
@@ -910,12 +910,11 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
pDB = aup->tx_db_inuse[aup->tx_head];
skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
if (skb->len < ETH_ZLEN) {
- for (i=skb->len; i<ETH_ZLEN; i++) {
+ for (i = skb->len; i < ETH_ZLEN; i++) {
((char *)pDB->vaddr)[i] = 0;
}
ptxd->len = ETH_ZLEN;
- }
- else
+ } else
ptxd->len = skb->len;
ps->tx_packets++;
@@ -925,7 +924,6 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
au_sync();
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -935,10 +933,10 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
*/
static void au1000_tx_timeout(struct net_device *dev)
{
- printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
- reset_mac(dev);
+ netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
+ au1000_reset_mac(dev);
au1000_init(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -946,8 +944,7 @@ static void au1000_multicast_list(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk("%s: au1000_multicast_list: flags=%x\n", dev->name, dev->flags);
+ netif_dbg(aup, drv, dev, "au1000_multicast_list: flags=%x\n", dev->flags);
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
aup->mac->control |= MAC_PROMISCUOUS;
@@ -955,14 +952,14 @@ static void au1000_multicast_list(struct net_device *dev)
netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
aup->mac->control |= MAC_PASS_ALL_MULTI;
aup->mac->control &= ~MAC_PROMISCUOUS;
- printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
+ netdev_info(dev, "Pass all multicast\n");
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u32 mc_filter[2]; /* Multicast hash filter */
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev)
- set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
+ netdev_for_each_mc_addr(ha, dev)
+ set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
(long *)mc_filter);
aup->mac->multi_hash_high = mc_filter[1];
aup->mac->multi_hash_low = mc_filter[0];
@@ -975,9 +972,11 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct au1000_private *aup = netdev_priv(dev);
- if (!netif_running(dev)) return -EINVAL;
+ if (!netif_running(dev))
+ return -EINVAL;
- if (!aup->phy_dev) return -EINVAL; // PHY not controllable
+ if (!aup->phy_dev)
+ return -EINVAL; /* PHY not controllable */
return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd);
}
@@ -996,7 +995,7 @@ static const struct net_device_ops au1000_netdev_ops = {
static int __devinit au1000_probe(struct platform_device *pdev)
{
- static unsigned version_printed = 0;
+ static unsigned version_printed;
struct au1000_private *aup = NULL;
struct au1000_eth_platform_data *pd;
struct net_device *dev = NULL;
@@ -1007,40 +1006,40 @@ static int __devinit au1000_probe(struct platform_device *pdev)
base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!base) {
- printk(KERN_ERR DRV_NAME ": failed to retrieve base register\n");
+ dev_err(&pdev->dev, "failed to retrieve base register\n");
err = -ENODEV;
goto out;
}
macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!macen) {
- printk(KERN_ERR DRV_NAME ": failed to retrieve MAC Enable register\n");
+ dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
err = -ENODEV;
goto out;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- printk(KERN_ERR DRV_NAME ": failed to retrieve IRQ\n");
+ dev_err(&pdev->dev, "failed to retrieve IRQ\n");
err = -ENODEV;
goto out;
}
if (!request_mem_region(base->start, resource_size(base), pdev->name)) {
- printk(KERN_ERR DRV_NAME ": failed to request memory region for base registers\n");
+ dev_err(&pdev->dev, "failed to request memory region for base registers\n");
err = -ENXIO;
goto out;
}
if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) {
- printk(KERN_ERR DRV_NAME ": failed to request memory region for MAC enable register\n");
+ dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
err = -ENXIO;
goto err_request;
}
dev = alloc_etherdev(sizeof(struct au1000_private));
if (!dev) {
- printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
+ dev_err(&pdev->dev, "alloc_etherdev failed\n");
err = -ENOMEM;
goto err_alloc;
}
@@ -1050,6 +1049,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup = netdev_priv(dev);
spin_lock_init(&aup->lock);
+ aup->msg_enable = (au1000_debug < 4 ? AU1000_DEF_MSG_ENABLE : au1000_debug);
/* Allocate the data buffers */
/* Snooping works fine with eth on all au1xxx */
@@ -1057,7 +1057,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
(NUM_TX_BUFFS + NUM_RX_BUFFS),
&aup->dma_addr, 0);
if (!aup->vaddr) {
- printk(KERN_ERR DRV_NAME ": failed to allocate data buffers\n");
+ dev_err(&pdev->dev, "failed to allocate data buffers\n");
err = -ENOMEM;
goto err_vaddr;
}
@@ -1065,7 +1065,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
/* aup->mac is the base address of the MAC's registers */
aup->mac = (volatile mac_reg_t *)ioremap_nocache(base->start, resource_size(base));
if (!aup->mac) {
- printk(KERN_ERR DRV_NAME ": failed to ioremap MAC registers\n");
+ dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
err = -ENXIO;
goto err_remap1;
}
@@ -1073,7 +1073,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
/* Setup some variables for quick register address access */
aup->enable = (volatile u32 *)ioremap_nocache(macen->start, resource_size(macen));
if (!aup->enable) {
- printk(KERN_ERR DRV_NAME ": failed to ioremap MAC enable register\n");
+ dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
err = -ENXIO;
goto err_remap2;
}
@@ -1083,14 +1083,13 @@ static int __devinit au1000_probe(struct platform_device *pdev)
if (prom_get_ethernet_addr(ethaddr) == 0)
memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
else {
- printk(KERN_INFO "%s: No MAC address found\n",
- dev->name);
+ netdev_info(dev, "No MAC address found\n");
/* Use the hard coded MAC addresses */
}
- setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
+ au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
} else if (pdev->id == 1)
- setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
+ au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
/*
* Assign to the Ethernet ports two consecutive MAC addresses
@@ -1104,7 +1103,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
pd = pdev->dev.platform_data;
if (!pd) {
- printk(KERN_INFO DRV_NAME ": no platform_data passed, PHY search on MAC0\n");
+ dev_info(&pdev->dev, "no platform_data passed, PHY search on MAC0\n");
aup->phy1_search_mac0 = 1;
} else {
aup->phy_static_config = pd->phy_static_config;
@@ -1116,7 +1115,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
}
if (aup->phy_busid && aup->phy_busid > 0) {
- printk(KERN_ERR DRV_NAME ": MAC0-associated PHY attached 2nd MACs MII"
+ dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII"
"bus not supported yet\n");
err = -ENODEV;
goto err_mdiobus_alloc;
@@ -1124,7 +1123,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup->mii_bus = mdiobus_alloc();
if (aup->mii_bus == NULL) {
- printk(KERN_ERR DRV_NAME ": failed to allocate mdiobus structure\n");
+ dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
err = -ENOMEM;
goto err_mdiobus_alloc;
}
@@ -1139,7 +1138,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
if (aup->mii_bus->irq == NULL)
goto err_out;
- for(i = 0; i < PHY_MAX_ADDR; ++i)
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
aup->mii_bus->irq[i] = PHY_POLL;
/* if known, set corresponding PHY IRQs */
if (aup->phy_static_config)
@@ -1148,11 +1147,11 @@ static int __devinit au1000_probe(struct platform_device *pdev)
err = mdiobus_register(aup->mii_bus);
if (err) {
- printk(KERN_ERR DRV_NAME " failed to register MDIO bus\n");
+ dev_err(&pdev->dev, "failed to register MDIO bus\n");
goto err_mdiobus_reg;
}
- if (mii_probe(dev) != 0)
+ if (au1000_mii_probe(dev) != 0)
goto err_out;
pDBfree = NULL;
@@ -1168,7 +1167,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup->pDBfree = pDBfree;
for (i = 0; i < NUM_RX_DMA; i++) {
- pDB = GetFreeDB(aup);
+ pDB = au1000_GetFreeDB(aup);
if (!pDB) {
goto err_out;
}
@@ -1176,7 +1175,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup->rx_db_inuse[i] = pDB;
}
for (i = 0; i < NUM_TX_DMA; i++) {
- pDB = GetFreeDB(aup);
+ pDB = au1000_GetFreeDB(aup);
if (!pDB) {
goto err_out;
}
@@ -1195,17 +1194,16 @@ static int __devinit au1000_probe(struct platform_device *pdev)
* The boot code uses the ethernet controller, so reset it to start
* fresh. au1000_init() expects that the device is in reset state.
*/
- reset_mac(dev);
+ au1000_reset_mac(dev);
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR DRV_NAME "%s: Cannot register net device, aborting.\n",
- dev->name);
+ netdev_err(dev, "Cannot register net device, aborting.\n");
goto err_out;
}
- printk("%s: Au1xx0 Ethernet found at 0x%lx, irq %d\n",
- dev->name, (unsigned long)base->start, irq);
+ netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
+ (unsigned long)base->start, irq);
if (version_printed++ == 0)
printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
@@ -1217,15 +1215,15 @@ err_out:
/* here we should have a valid dev plus aup-> register addresses
* so we can reset the mac properly.*/
- reset_mac(dev);
+ au1000_reset_mac(dev);
for (i = 0; i < NUM_RX_DMA; i++) {
if (aup->rx_db_inuse[i])
- ReleaseDB(aup, aup->rx_db_inuse[i]);
+ au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
}
for (i = 0; i < NUM_TX_DMA; i++) {
if (aup->tx_db_inuse[i])
- ReleaseDB(aup, aup->tx_db_inuse[i]);
+ au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
}
err_mdiobus_reg:
mdiobus_free(aup->mii_bus);
@@ -1261,11 +1259,11 @@ static int __devexit au1000_remove(struct platform_device *pdev)
for (i = 0; i < NUM_RX_DMA; i++)
if (aup->rx_db_inuse[i])
- ReleaseDB(aup, aup->rx_db_inuse[i]);
+ au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
for (i = 0; i < NUM_TX_DMA; i++)
if (aup->tx_db_inuse[i])
- ReleaseDB(aup, aup->tx_db_inuse[i]);
+ au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
dma_free_noncoherent(NULL, MAX_BUF_SIZE *
(NUM_TX_BUFFS + NUM_RX_BUFFS),
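[Note] The logging conversion in au1000_eth.c follows the usual split: before register_netdev() the interface has no stable name, so probe-time failures use dev_err(&pdev->dev, ...); once the netdev is registered and named, netdev_err()/netdev_info() prefix messages with the interface name automatically. A condensed sketch:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct net_device *ndev = alloc_etherdev(0);

	if (!ndev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");	/* no ifname yet */
		return -ENOMEM;
	}
	if (register_netdev(ndev)) {
		dev_err(&pdev->dev, "cannot register net device\n");
		free_netdev(ndev);
		return -ENODEV;
	}
	netdev_info(ndev, "ready\n");	/* now prints "ethX: ready" */
	return 0;
}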
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h
index f9d29a2..d06ec00 100644
--- a/drivers/net/au1000_eth.h
+++ b/drivers/net/au1000_eth.h
@@ -35,7 +35,7 @@
#define NUM_TX_BUFFS 4
#define MAX_BUF_SIZE 2048
-#define ETH_TX_TIMEOUT HZ/4
+#define ETH_TX_TIMEOUT (HZ/4)
#define MAC_MIN_PKT_SIZE 64
#define MULTICAST_FILTER_LIMIT 64
@@ -125,4 +125,6 @@ struct au1000_private {
dma_addr_t dma_addr; /* dma address of rx/tx buffers */
spinlock_t lock; /* Serialise access to device */
+
+ u32 msg_enable;
};
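[Note] The header tweak above wraps HZ/4 in parentheses. Without them the macro expands to an unparenthesized expression, and a call site that embeds it in a larger expression can silently change meaning, for example (illustration only, assuming HZ == 100):

#define ETH_TX_TIMEOUT_BAD	HZ/4
#define ETH_TX_TIMEOUT_GOOD	(HZ/4)

/* With HZ == 100:
 *   1000 / ETH_TX_TIMEOUT_BAD  -> 1000 / 100 / 4   == 2    (wrong)
 *   1000 / ETH_TX_TIMEOUT_GOOD -> 1000 / (100 / 4) == 40   (intended)
 */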
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index b718dc6..55c9958 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -303,7 +303,6 @@ static void ax_block_output(struct net_device *dev, int count,
ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
/* definitions for accessing MII/EEPROM interface */
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 69d9f3d..293f9c1 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1014,8 +1014,6 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (TX_BUFFS_AVAIL(bp) < 1)
netif_stop_queue(dev);
- dev->trans_start = jiffies;
-
out_unlock:
spin_unlock_irqrestore(&bp->lock, flags);
@@ -1681,15 +1679,15 @@ static struct net_device_stats *b44_get_stats(struct net_device *dev)
static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i, num_ents;
num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (i == num_ents)
break;
- __b44_cam_write(bp, mclist->dmi_addr, i++ + 1);
+ __b44_cam_write(bp, ha->addr, i++ + 1);
}
return i+1;
}
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 17460ab..faf5add 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -341,11 +341,9 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
}
skb_put(skb, len);
- skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
priv->stats.rx_packets++;
priv->stats.rx_bytes += len;
- dev->last_rx = jiffies;
netif_receive_skb(skb);
} while (--budget > 0);
@@ -567,7 +565,6 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
priv->stats.tx_bytes += skb->len;
priv->stats.tx_packets++;
- dev->trans_start = jiffies;
ret = NETDEV_TX_OK;
out_unlock:
@@ -605,7 +602,7 @@ static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
struct bcm_enet_priv *priv;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 val;
int i;
@@ -633,14 +630,14 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
}
i = 0;
- netdev_for_each_mc_addr(mc_list, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
u8 *dmi_addr;
u32 tmp;
if (i == 3)
break;
/* update perfect match registers */
- dmi_addr = mc_list->dmi_addr;
+ dmi_addr = ha->addr;
tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
(dmi_addr[4] << 8) | dmi_addr[5];
enet_writel(priv, tmp, ENET_PML_REG(i + 1));
@@ -960,7 +957,9 @@ static int bcm_enet_open(struct net_device *dev)
/* all set, enable mac and interrupts, start dma engine and
* kick rx dma channel */
wmb();
- enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
+ val = enet_readl(priv, ENET_CTL_REG);
+ val |= ENET_CTL_ENABLE_MASK;
+ enet_writel(priv, val, ENET_CTL_REG);
enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
ENETDMA_CHANCFG_REG(priv->rx_chan));
@@ -1647,7 +1646,6 @@ static int __devinit bcm_enet_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
priv = netdev_priv(dev);
- memset(priv, 0, sizeof(*priv));
ret = compute_hw_mtu(priv, dev->mtu);
if (ret)
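[Note] Two small fixes in bcm63xx_enet: the controller-enable write becomes a read-modify-write so bits already set in ENET_CTL_REG are preserved, and the memset() of the private area is dropped because alloc_etherdev() hands back an already-zeroed priv region. The read-modify-write idiom, sketched against a hypothetical memory-mapped register:

#include <linux/io.h>

#define EXAMPLE_CTL_REG		0x00	/* hypothetical register offset */
#define EXAMPLE_CTL_ENABLE	0x01	/* hypothetical enable bit */

static void example_enable_block(void __iomem *base)
{
	u32 val;

	val = readl(base + EXAMPLE_CTL_REG);	/* keep bits set elsewhere */
	val |= EXAMPLE_CTL_ENABLE;
	writel(val, base + EXAMPLE_CTL_REG);
}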
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 56387b1..373c1a5 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -84,6 +84,8 @@ static inline char *nic_name(struct pci_dev *pdev)
#define FW_VER_LEN 32
+#define BE_MAX_VF 32
+
struct be_dma_mem {
void *va;
dma_addr_t dma;
@@ -207,7 +209,7 @@ struct be_tx_obj {
/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
struct page *page;
- dma_addr_t bus;
+ DEFINE_DMA_UNMAP_ADDR(bus);
u16 page_offset;
bool last_page_user;
};
@@ -281,8 +283,15 @@ struct be_adapter {
u8 port_type;
u8 transceiver;
u8 generation; /* BladeEngine ASIC generation */
+
+ bool sriov_enabled;
+ u32 vf_if_handle[BE_MAX_VF];
+ u32 vf_pmac_id[BE_MAX_VF];
+ u8 base_eq_id;
};
+#define be_physfn(adapter) (!adapter->pdev->is_virtfn)
+
/* BladeEngine Generation numbers */
#define BE_GEN2 2
#define BE_GEN3 3
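[Note] The be_rx_page_info change swaps a raw dma_addr_t for DEFINE_DMA_UNMAP_ADDR(): the macro only reserves storage on configurations that actually need the address at unmap time, and the dma_unmap_addr()/dma_unmap_addr_set() accessors used later in this patch compile away otherwise. A sketch of the pattern:

#include <linux/dma-mapping.h>

struct example_rx_buf {
	struct page *page;
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* may occupy zero bytes */
};

static void example_store_then_unmap(struct device *dev,
				     struct example_rx_buf *buf,
				     dma_addr_t dma, size_t len)
{
	dma_unmap_addr_set(buf, mapping, dma);
	/* ... later, on completion: */
	dma_unmap_page(dev, dma_unmap_addr(buf, mapping), len, DMA_FROM_DEVICE);
}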
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index d0ef4ac..e79bf8b 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -843,7 +843,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
* Uses mbox
*/
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
- u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
+ u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
+ u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_create *req;
@@ -860,6 +861,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
+ req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
req->pmac_invalid = pmac_invalid;
@@ -1111,6 +1113,10 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
OPCODE_ETH_PROMISCUOUS, sizeof(*req));
+ /* In FW versions X.102.149/X.101.487 and later,
+ * the port setting associated only with the
+ * issuing pci function will take effect
+ */
if (port_num)
req->port1_promiscuous = en;
else
@@ -1157,13 +1163,13 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
req->interface_id = if_id;
if (netdev) {
int i;
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
req->num_mac = cpu_to_le16(netdev_mc_count(netdev));
i = 0;
- netdev_for_each_mc_addr(mc, netdev)
- memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
} else {
req->promiscuous = 1;
}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index cce61f9..763dc19 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -878,7 +878,7 @@ extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
u32 en_flags, u8 *mac, bool pmac_invalid,
- u32 *if_handle, u32 *pmac_id);
+ u32 *if_handle, u32 *pmac_id, u32 domain);
extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
extern int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 51e1065..200e985 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -276,8 +276,6 @@ be_get_ethtool_stats(struct net_device *netdev,
data[i] = (et_stats[i].size == sizeof(u64)) ?
*(u64 *)p: *(u32 *)p;
}
-
- return;
}
static void
@@ -466,7 +464,6 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
else
wol->wolopts = 0;
memset(&wol->sopass, 0, sizeof(wol->sopass));
- return;
}
static int
@@ -496,7 +493,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
&ddrdma_cmd.dma);
if (!ddrdma_cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory allocation failure \n");
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
return -ENOMEM;
}
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 2d4a4b8..063026d 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -99,6 +99,9 @@
/* Number of entries posted */
#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+/********** SRIOV VF PCICFG OFFSET ********/
+#define SRIOV_VF_PCICFG_OFFSET (4096)
+
/* Flashrom related descriptors */
#define IMAGE_TYPE_FIRMWARE 160
#define IMAGE_TYPE_BOOTCODE 224
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index ec6ace8..058d7f9 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -26,8 +26,11 @@ MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
static unsigned int rx_frag_size = 2048;
+static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
+module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -138,12 +141,19 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ /* MAC addr configuration will be done in hardware for VFs
+ * by their corresponding PFs. Just copy to netdev addr here
+ */
+ if (!be_physfn(adapter))
+ goto netdev_addr;
+
status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
if (status)
return status;
status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
adapter->if_handle, &adapter->pmac_id);
+netdev_addr:
if (!status)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -386,26 +396,48 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
+static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
+ bool unmap_single)
+{
+ dma_addr_t dma;
+
+ be_dws_le_to_cpu(wrb, sizeof(*wrb));
+
+ dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
+ if (wrb->frag_len) {
+ if (unmap_single)
+ pci_unmap_single(pdev, dma, wrb->frag_len,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(pdev, dma, wrb->frag_len,
+ PCI_DMA_TODEVICE);
+ }
+}
static int make_tx_wrbs(struct be_adapter *adapter,
struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
- u64 busaddr;
- u32 i, copied = 0;
+ dma_addr_t busaddr;
+ int i, copied = 0;
struct pci_dev *pdev = adapter->pdev;
struct sk_buff *first_skb = skb;
struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_eth_wrb *wrb;
struct be_eth_hdr_wrb *hdr;
+ bool map_single = false;
+ u16 map_head;
hdr = queue_head_node(txq);
- atomic_add(wrb_cnt, &txq->used);
queue_head_inc(txq);
+ map_head = txq->head;
if (skb->len > skb->data_len) {
- int len = skb->len - skb->data_len;
+ int len = skb_headlen(skb);
busaddr = pci_map_single(pdev, skb->data, len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, busaddr))
+ goto dma_err;
+ map_single = true;
wrb = queue_head_node(txq);
wrb_fill(wrb, busaddr, len);
be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -419,6 +451,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
busaddr = pci_map_page(pdev, frag->page,
frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, busaddr))
+ goto dma_err;
wrb = queue_head_node(txq);
wrb_fill(wrb, busaddr, frag->size);
be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -438,6 +472,16 @@ static int make_tx_wrbs(struct be_adapter *adapter,
be_dws_cpu_to_le(hdr, sizeof(*hdr));
return copied;
+dma_err:
+ txq->head = map_head;
+ while (copied) {
+ wrb = queue_head_node(txq);
+ unmap_tx_frag(pdev, wrb, map_single);
+ map_single = false;
+ copied -= wrb->frag_len;
+ queue_head_inc(txq);
+ }
+ return 0;
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
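[Note] make_tx_wrbs() now checks every pci_map_single()/pci_map_page() result with pci_dma_mapping_error() and, on failure, rewinds the queue head and unmaps whatever was already mapped. A simplified, generic sketch of that "map all or unwind" shape (descriptor bookkeeping omitted):

#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

/* Sketch: map the paged fragments of an skb, unwinding on any failure. */
static int example_map_frags(struct pci_dev *pdev, struct sk_buff *skb,
			     dma_addr_t *dma, int max)
{
	int i, mapped = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags && i < max; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma[i] = pci_map_page(pdev, frag->page, frag->page_offset,
				      frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, dma[i]))
			goto unwind;
		mapped++;
	}
	return mapped;

unwind:
	while (--mapped >= 0)
		pci_unmap_page(pdev, dma[mapped],
			       skb_shinfo(skb)->frags[mapped].size,
			       PCI_DMA_TODEVICE);
	return -ENOMEM;
}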
@@ -462,6 +506,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
* *BEFORE* ringing the tx doorbell, so that we serialze the
* tx compls of the current transmit which'll wake up the queue
*/
+ atomic_add(wrb_cnt, &txq->used);
if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
txq->len) {
netif_stop_queue(netdev);
@@ -541,6 +586,9 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ if (!be_physfn(adapter))
+ return;
+
adapter->vlan_tag[vid] = 1;
adapter->vlans_added++;
if (adapter->vlans_added <= (adapter->max_vlans + 1))
@@ -551,6 +599,9 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ if (!be_physfn(adapter))
+ return;
+
adapter->vlan_tag[vid] = 0;
vlan_group_set_device(adapter->vlan_grp, vid, NULL);
adapter->vlans_added--;
@@ -588,6 +639,28 @@ done:
return;
}
+static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+
+ if (!adapter->sriov_enabled)
+ return -EPERM;
+
+ if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+ return -EINVAL;
+
+ status = be_cmd_pmac_del(adapter, adapter->vf_if_handle[vf],
+ adapter->vf_pmac_id[vf]);
+
+ status = be_cmd_pmac_add(adapter, mac, adapter->vf_if_handle[vf],
+ &adapter->vf_pmac_id[vf]);
+ if (!status)
+ dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
+ mac, vf);
+ return status;
+}
+
static void be_rx_rate_update(struct be_adapter *adapter)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
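[Note] be_set_vf_mac() implements the per-VF MAC assignment that a PF driver typically exposes through the ndo_set_vf_mac net_device op, driven from userspace with "ip link set <pf> vf <n> mac <addr>". The hookup itself is not shown in this hunk; a sketch of how such a handler would be wired up (handler body elided):

#include <linux/netdevice.h>

static int example_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);

static const struct net_device_ops example_netdev_ops = {
	/* ... ndo_open, ndo_stop, ndo_start_xmit, etc. ... */
	.ndo_set_vf_mac	= example_set_vf_mac,
};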
@@ -647,7 +720,7 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
BUG_ON(!rx_page_info->page);
if (rx_page_info->last_page_user) {
- pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
+ pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
adapter->big_page_size, PCI_DMA_FROMDEVICE);
rx_page_info->last_page_user = false;
}
@@ -757,7 +830,6 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
done:
be_rx_stats_update(adapter, pktsize, num_rcvd);
- return;
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
@@ -791,7 +863,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, adapter->netdev);
- skb->dev = adapter->netdev;
vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
@@ -812,8 +883,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
} else {
netif_receive_skb(skb);
}
-
- return;
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
@@ -893,7 +962,6 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
}
be_rx_stats_update(adapter, pkt_size, num_rcvd);
- return;
}
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
@@ -959,7 +1027,7 @@ static void be_post_rx_frags(struct be_adapter *adapter)
}
page_offset = page_info->page_offset;
page_info->page = pagep;
- pci_unmap_addr_set(page_info, bus, page_dmaaddr);
+ dma_unmap_addr_set(page_info, bus, page_dmaaddr);
frag_dmaaddr = page_dmaaddr + page_info->page_offset;
rxd = queue_head_node(rxq);
@@ -987,8 +1055,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
/* Let be_worker replenish when memory is available */
adapter->rx_post_starved = true;
}
-
- return;
}
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
@@ -1012,35 +1078,26 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
struct be_eth_wrb *wrb;
struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
struct sk_buff *sent_skb;
- u64 busaddr;
- u16 cur_index, num_wrbs = 0;
+ u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
+ bool unmap_skb_hdr = true;
- cur_index = txq->tail;
- sent_skb = sent_skbs[cur_index];
+ sent_skb = sent_skbs[txq->tail];
BUG_ON(!sent_skb);
- sent_skbs[cur_index] = NULL;
- wrb = queue_tail_node(txq);
- be_dws_le_to_cpu(wrb, sizeof(*wrb));
- busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
- if (busaddr != 0) {
- pci_unmap_single(adapter->pdev, busaddr,
- wrb->frag_len, PCI_DMA_TODEVICE);
- }
- num_wrbs++;
+ sent_skbs[txq->tail] = NULL;
+
+ /* skip header wrb */
queue_tail_inc(txq);
- while (cur_index != last_index) {
+ do {
cur_index = txq->tail;
wrb = queue_tail_node(txq);
- be_dws_le_to_cpu(wrb, sizeof(*wrb));
- busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
- if (busaddr != 0) {
- pci_unmap_page(adapter->pdev, busaddr,
- wrb->frag_len, PCI_DMA_TODEVICE);
- }
+ unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
+ skb_headlen(sent_skb)));
+ unmap_skb_hdr = false;
+
num_wrbs++;
queue_tail_inc(txq);
- }
+ } while (cur_index != last_index);
atomic_sub(num_wrbs, &txq->used);
@@ -1255,6 +1312,8 @@ static int be_tx_queues_create(struct be_adapter *adapter)
/* Ask BE to create Tx Event queue */
if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
goto tx_eq_free;
+ adapter->base_eq_id = adapter->tx_eq.q.id;
+
/* Alloc TX eth compl queue */
cq = &adapter->tx_obj.cq;
if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
@@ -1382,7 +1441,7 @@ rx_eq_free:
/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
- return eq_id % 8;
+ return eq_id - adapter->base_eq_id;
}
static irqreturn_t be_intx(int irq, void *dev)
@@ -1557,7 +1616,27 @@ static void be_msix_enable(struct be_adapter *adapter)
BE_NUM_MSIX_VECTORS);
if (status == 0)
adapter->msix_enabled = true;
- return;
+}
+
+static void be_sriov_enable(struct be_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+ int status;
+ if (be_physfn(adapter) && num_vfs) {
+ status = pci_enable_sriov(adapter->pdev, num_vfs);
+ adapter->sriov_enabled = status ? false : true;
+ }
+#endif
+}
+
+static void be_sriov_disable(struct be_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+ if (adapter->sriov_enabled) {
+ pci_disable_sriov(adapter->pdev);
+ adapter->sriov_enabled = false;
+ }
+#endif
}
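be_sriov_enable()/be_sriov_disable() simply gate the generic PCI SR-IOV calls behind CONFIG_PCI_IOV, a physical-function check and the num_vfs parameter. A hedged sketch of the same pattern for a generic driver is below; struct my_adapter and my_num_vfs are placeholders, while pci_enable_sriov()/pci_disable_sriov() and pdev->is_physfn are the real kernel interfaces.

#include <linux/pci.h>

struct my_adapter {
	struct pci_dev *pdev;
	bool sriov_enabled;
};

static unsigned int my_num_vfs;	/* would normally be a module parameter */

static void my_sriov_enable(struct my_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	int status;

	/* only the physical function may create virtual functions */
	if (adapter->pdev->is_physfn && my_num_vfs) {
		status = pci_enable_sriov(adapter->pdev, my_num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void my_sriov_disable(struct my_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}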
static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
@@ -1617,6 +1696,9 @@ static int be_irq_register(struct be_adapter *adapter)
status = be_msix_register(adapter);
if (status == 0)
goto done;
+ /* INTx is not supported for VF */
+ if (!be_physfn(adapter))
+ return status;
}
/* INTx */
@@ -1651,7 +1733,6 @@ static void be_irq_unregister(struct be_adapter *adapter)
be_free_irq(adapter, &adapter->rx_eq);
done:
adapter->isr_registered = false;
- return;
}
static int be_open(struct net_device *netdev)
@@ -1690,14 +1771,17 @@ static int be_open(struct net_device *netdev)
goto ret_sts;
be_link_status_update(adapter, link_up);
- status = be_vid_config(adapter);
+ if (be_physfn(adapter))
+ status = be_vid_config(adapter);
if (status)
goto ret_sts;
- status = be_cmd_set_flow_control(adapter,
- adapter->tx_fc, adapter->rx_fc);
- if (status)
- goto ret_sts;
+ if (be_physfn(adapter)) {
+ status = be_cmd_set_flow_control(adapter,
+ adapter->tx_fc, adapter->rx_fc);
+ if (status)
+ goto ret_sts;
+ }
schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
ret_sts:
@@ -1723,7 +1807,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
if (status) {
dev_err(&adapter->pdev->dev,
- "Could not enable Wake-on-lan \n");
+ "Could not enable Wake-on-lan\n");
pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
cmd.dma);
return status;
@@ -1745,22 +1829,48 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
static int be_setup(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- u32 cap_flags, en_flags;
+ u32 cap_flags, en_flags, vf = 0;
int status;
+ u8 mac[ETH_ALEN];
+
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
- cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MCAST_PROMISCUOUS |
- BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_PASS_L3L4_ERRORS;
- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ if (be_physfn(adapter)) {
+ cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
+ BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ }
status = be_cmd_if_create(adapter, cap_flags, en_flags,
netdev->dev_addr, false/* pmac_invalid */,
- &adapter->if_handle, &adapter->pmac_id);
+ &adapter->if_handle, &adapter->pmac_id, 0);
if (status != 0)
goto do_none;
+ if (be_physfn(adapter)) {
+ while (vf < num_vfs) {
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
+ | BE_IF_FLAGS_BROADCAST;
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ mac, true, &adapter->vf_if_handle[vf],
+ NULL, vf+1);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Interface Create failed for VF %d\n", vf);
+ goto if_destroy;
+ }
+ vf++;
+ }
+ } else if (!be_physfn(adapter)) {
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+ if (!status) {
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ }
+ }
+
status = be_tx_queues_create(adapter);
if (status != 0)
goto if_destroy;
@@ -1782,6 +1892,9 @@ rx_qs_destroy:
tx_qs_destroy:
be_tx_queues_destroy(adapter);
if_destroy:
+ for (vf = 0; vf < num_vfs; vf++)
+ if (adapter->vf_if_handle[vf])
+ be_cmd_if_destroy(adapter, adapter->vf_if_handle[vf]);
be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
return status;
@@ -2061,6 +2174,7 @@ static struct net_device_ops be_netdev_ops = {
.ndo_vlan_rx_register = be_vlan_register,
.ndo_vlan_rx_add_vid = be_vlan_add_vid,
.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
+ .ndo_set_vf_mac = be_set_vf_mac
};
static void be_netdev_init(struct net_device *netdev)
@@ -2102,37 +2216,48 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
iounmap(adapter->csr);
if (adapter->db)
iounmap(adapter->db);
- if (adapter->pcicfg)
+ if (adapter->pcicfg && be_physfn(adapter))
iounmap(adapter->pcicfg);
}
static int be_map_pci_bars(struct be_adapter *adapter)
{
u8 __iomem *addr;
- int pcicfg_reg;
+ int pcicfg_reg, db_reg;
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
- pci_resource_len(adapter->pdev, 2));
- if (addr == NULL)
- return -ENOMEM;
- adapter->csr = addr;
-
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
- 128 * 1024);
- if (addr == NULL)
- goto pci_map_err;
- adapter->db = addr;
+ if (be_physfn(adapter)) {
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
+ pci_resource_len(adapter->pdev, 2));
+ if (addr == NULL)
+ return -ENOMEM;
+ adapter->csr = addr;
+ }
- if (adapter->generation == BE_GEN2)
+ if (adapter->generation == BE_GEN2) {
pcicfg_reg = 1;
- else
+ db_reg = 4;
+ } else {
pcicfg_reg = 0;
-
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
- pci_resource_len(adapter->pdev, pcicfg_reg));
+ if (be_physfn(adapter))
+ db_reg = 4;
+ else
+ db_reg = 0;
+ }
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
+ pci_resource_len(adapter->pdev, db_reg));
if (addr == NULL)
goto pci_map_err;
- adapter->pcicfg = addr;
+ adapter->db = addr;
+
+ if (be_physfn(adapter)) {
+ addr = ioremap_nocache(
+ pci_resource_start(adapter->pdev, pcicfg_reg),
+ pci_resource_len(adapter->pdev, pcicfg_reg));
+ if (addr == NULL)
+ goto pci_map_err;
+ adapter->pcicfg = addr;
+ } else
+ adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
return 0;
pci_map_err:
@@ -2246,6 +2371,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
be_ctrl_cleanup(adapter);
+ be_sriov_disable(adapter);
+
be_msix_disable(adapter);
pci_set_drvdata(pdev, NULL);
@@ -2270,16 +2397,20 @@ static int be_get_config(struct be_adapter *adapter)
return status;
memset(mac, 0, ETH_ALEN);
- status = be_cmd_mac_addr_query(adapter, mac,
+
+ if (be_physfn(adapter)) {
+ status = be_cmd_mac_addr_query(adapter, mac,
MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
- if (status)
- return status;
- if (!is_valid_ether_addr(mac))
- return -EADDRNOTAVAIL;
+ if (status)
+ return status;
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ if (!is_valid_ether_addr(mac))
+ return -EADDRNOTAVAIL;
+
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ }
if (adapter->cap & 0x400)
adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
@@ -2296,6 +2427,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
struct be_adapter *adapter;
struct net_device *netdev;
+
status = pci_enable_device(pdev);
if (status)
goto do_none;
@@ -2344,24 +2476,28 @@ static int __devinit be_probe(struct pci_dev *pdev,
}
}
+ be_sriov_enable(adapter);
+
status = be_ctrl_init(adapter);
if (status)
goto free_netdev;
/* sync up with fw's ready state */
- status = be_cmd_POST(adapter);
- if (status)
- goto ctrl_clean;
+ if (be_physfn(adapter)) {
+ status = be_cmd_POST(adapter);
+ if (status)
+ goto ctrl_clean;
+
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ goto ctrl_clean;
+ }
/* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter);
if (status)
goto ctrl_clean;
- status = be_cmd_reset_function(adapter);
- if (status)
- goto ctrl_clean;
-
status = be_stats_init(adapter);
if (status)
goto ctrl_clean;
@@ -2391,6 +2527,7 @@ ctrl_clean:
be_ctrl_cleanup(adapter);
free_netdev:
be_msix_disable(adapter);
+ be_sriov_disable(adapter);
free_netdev(adapter->netdev);
pci_set_drvdata(pdev, NULL);
rel_reg:
@@ -2474,8 +2611,6 @@ static void be_shutdown(struct pci_dev *pdev)
be_setup_wol(adapter, true);
pci_disable_device(pdev);
-
- return;
}
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
@@ -2557,7 +2692,6 @@ static void be_eeh_resume(struct pci_dev *pdev)
return;
err:
dev_err(&adapter->pdev->dev, "EEH resume failed\n");
- return;
}
static struct pci_error_handlers be_eeh_handlers = {
@@ -2587,6 +2721,13 @@ static int __init be_init_module(void)
rx_frag_size = 2048;
}
+ if (num_vfs > 32) {
+ printk(KERN_WARNING DRV_NAME
+ " : Module param num_vfs must not be greater than 32."
+ "Using 32\n");
+ num_vfs = 32;
+ }
+
return pci_register_driver(&be_driver);
}
module_init(be_init_module);
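The num_vfs clamp above is the usual pattern of sanity-checking a module parameter once, at module init, before the driver is registered. A condensed, hypothetical example of that pattern follows; my_driver, my_init_module and MY_MAX_VFS are placeholders, not names from this patch.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

#define MY_MAX_VFS 32

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize (0-32)");

static int __init my_init_module(void)
{
	/* clamp an out-of-range value instead of refusing to load */
	if (num_vfs > MY_MAX_VFS) {
		printk(KERN_WARNING "my_driver: num_vfs must not be greater"
		       " than %d. Using %d\n", MY_MAX_VFS, MY_MAX_VFS);
		num_vfs = MY_MAX_VFS;
	}

	/* a real driver would call pci_register_driver() here */
	return 0;
}
module_init(my_init_module);

static void __exit my_exit_module(void)
{
}
module_exit(my_exit_module);

MODULE_LICENSE("GPL");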
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 587f93c..39a54ba 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -33,6 +33,7 @@
#include <asm/dma.h>
#include <linux/dma-mapping.h>
+#include <asm/div64.h>
#include <asm/dpmc.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>
@@ -80,9 +81,6 @@ static u16 pin_req[] = P_RMII0;
static u16 pin_req[] = P_MII0;
#endif
-static void bfin_mac_disable(void);
-static void bfin_mac_enable(void);
-
static void desc_list_free(void)
{
struct net_dma_desc_rx *r;
@@ -202,6 +200,11 @@ static int desc_list_init(void)
goto init_error;
}
skb_reserve(new_skb, NET_IP_ALIGN);
+ /* Invalidate the data cache over the skb->data range when it is a
+ * write-back cache. This prevents overwriting the new data from DMA.
+ */
+ blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
+ (unsigned long)new_skb->end);
r->skb = new_skb;
/*
@@ -254,7 +257,7 @@ init_error:
* MII operations
*/
/* Wait until the previous MDC/MDIO transaction has completed */
-static void bfin_mdio_poll(void)
+static int bfin_mdio_poll(void)
{
int timeout_cnt = MAX_TIMEOUT_CNT;
@@ -264,22 +267,30 @@ static void bfin_mdio_poll(void)
if (timeout_cnt-- < 0) {
printk(KERN_ERR DRV_NAME
": wait MDC/MDIO transaction to complete timeout\n");
- break;
+ return -ETIMEDOUT;
}
}
+
+ return 0;
}
/* Read an off-chip register in a PHY through the MDC/MDIO port */
static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
- bfin_mdio_poll();
+ int ret;
+
+ ret = bfin_mdio_poll();
+ if (ret)
+ return ret;
/* read mode */
bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
SET_REGAD((u16) regnum) |
STABUSY);
- bfin_mdio_poll();
+ ret = bfin_mdio_poll();
+ if (ret)
+ return ret;
return (int) bfin_read_EMAC_STADAT();
}
@@ -288,7 +299,11 @@ static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
u16 value)
{
- bfin_mdio_poll();
+ int ret;
+
+ ret = bfin_mdio_poll();
+ if (ret)
+ return ret;
bfin_write_EMAC_STADAT((u32) value);
@@ -298,9 +313,7 @@ static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
STAOP |
STABUSY);
- bfin_mdio_poll();
-
- return 0;
+ return bfin_mdio_poll();
}
static int bfin_mdiobus_reset(struct mii_bus *bus)
@@ -458,6 +471,14 @@ static int mii_probe(struct net_device *dev)
* Ethtool support
*/
+/*
+ * interrupt routine for magic packet wakeup
+ */
+static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
static int
bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
@@ -492,11 +513,57 @@ static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
strcpy(info->bus_info, dev_name(&dev->dev));
}
+static void bfin_mac_ethtool_getwol(struct net_device *dev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+
+ wolinfo->supported = WAKE_MAGIC;
+ wolinfo->wolopts = lp->wol;
+}
+
+static int bfin_mac_ethtool_setwol(struct net_device *dev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+ int rc;
+
+ if (wolinfo->wolopts & (WAKE_MAGICSECURE |
+ WAKE_UCAST |
+ WAKE_MCAST |
+ WAKE_BCAST |
+ WAKE_ARP))
+ return -EOPNOTSUPP;
+
+ lp->wol = wolinfo->wolopts;
+
+ if (lp->wol && !lp->irq_wake_requested) {
+ /* register wake irq handler */
+ rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
+ IRQF_DISABLED, "EMAC_WAKE", dev);
+ if (rc)
+ return rc;
+ lp->irq_wake_requested = true;
+ }
+
+ if (!lp->wol && lp->irq_wake_requested) {
+ free_irq(IRQ_MAC_WAKEDET, dev);
+ lp->irq_wake_requested = false;
+ }
+
+ /* Make sure the PHY driver doesn't suspend */
+ device_init_wakeup(&dev->dev, lp->wol);
+
+ return 0;
+}
+
static const struct ethtool_ops bfin_mac_ethtool_ops = {
.get_settings = bfin_mac_ethtool_getsettings,
.set_settings = bfin_mac_ethtool_setsettings,
.get_link = ethtool_op_get_link,
.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
+ .get_wol = bfin_mac_ethtool_getwol,
+ .set_wol = bfin_mac_ethtool_setwol,
};
/**************************************************************************/
@@ -509,10 +576,11 @@ void setup_system_regs(struct net_device *dev)
* Configure checksum support and rcve frame word alignment
*/
sysctl = bfin_read_EMAC_SYSCTL();
+ sysctl |= RXDWA;
#if defined(BFIN_MAC_CSUM_OFFLOAD)
- sysctl |= RXDWA | RXCKS;
+ sysctl |= RXCKS;
#else
- sysctl |= RXDWA;
+ sysctl &= ~RXCKS;
#endif
bfin_write_EMAC_SYSCTL(sysctl);
@@ -551,6 +619,309 @@ static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
return 0;
}
+#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
+#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
+
+static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config config;
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ u16 ptpctl;
+ u32 ptpfv1, ptpfv2, ptpfv3, ptpfoff;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+ __func__, config.flags, config.tx_type, config.rx_filter);
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ if ((config.tx_type != HWTSTAMP_TX_OFF) &&
+ (config.tx_type != HWTSTAMP_TX_ON))
+ return -ERANGE;
+
+ ptpctl = bfin_read_EMAC_PTP_CTL();
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ /*
+ * Don't allow any timestamping
+ */
+ ptpfv3 = 0xFFFFFFFF;
+ bfin_write_EMAC_PTP_FV3(ptpfv3);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ /*
+ * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL
+ * to enable all the field matches.
+ */
+ ptpctl &= ~0x1F00;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+ /*
+ * Keep the default values of the EMAC_PTP_FOFF register.
+ */
+ ptpfoff = 0x4A24170C;
+ bfin_write_EMAC_PTP_FOFF(ptpfoff);
+ /*
+ * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
+ * registers.
+ */
+ ptpfv1 = 0x11040800;
+ bfin_write_EMAC_PTP_FV1(ptpfv1);
+ ptpfv2 = 0x0140013F;
+ bfin_write_EMAC_PTP_FV2(ptpfv2);
+ /*
+ * The default value (0xFFFC) allows the timestamping of both
+ * received Sync messages and Delay_Req messages.
+ */
+ ptpfv3 = 0xFFFFFFFC;
+ bfin_write_EMAC_PTP_FV3(ptpfv3);
+
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* Clear all five comparison mask bits (bits[12:8]) in the
+ * EMAC_PTP_CTL register to enable all the field matches.
+ */
+ ptpctl &= ~0x1F00;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+ /*
+ * Keep the default values of the EMAC_PTP_FOFF register, except set
+ * the PTPCOF field to 0x2A.
+ */
+ ptpfoff = 0x2A24170C;
+ bfin_write_EMAC_PTP_FOFF(ptpfoff);
+ /*
+ * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
+ * registers.
+ */
+ ptpfv1 = 0x11040800;
+ bfin_write_EMAC_PTP_FV1(ptpfv1);
+ ptpfv2 = 0x0140013F;
+ bfin_write_EMAC_PTP_FV2(ptpfv2);
+ /*
+ * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set
+ * the value to 0xFFF0.
+ */
+ ptpfv3 = 0xFFFFFFF0;
+ bfin_write_EMAC_PTP_FV3(ptpfv3);
+
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ /*
+ * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the
+ * EFTM and PTPCM field comparison.
+ */
+ ptpctl &= ~0x1100;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+ /*
+ * Keep the default values of all the fields of the EMAC_PTP_FOFF
+ * register, except set the PTPCOF field to 0x0E.
+ */
+ ptpfoff = 0x0E24170C;
+ bfin_write_EMAC_PTP_FOFF(ptpfoff);
+ /*
+ * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which
+ * corresponds to PTP messages on the MAC layer.
+ */
+ ptpfv1 = 0x110488F7;
+ bfin_write_EMAC_PTP_FV1(ptpfv1);
+ ptpfv2 = 0x0140013F;
+ bfin_write_EMAC_PTP_FV2(ptpfv2);
+ /*
+ * To allow the timestamping of Pdelay_Req and Pdelay_Resp
+ * messages, set the value to 0xFFF0.
+ */
+ ptpfv3 = 0xFFFFFFF0;
+ bfin_write_EMAC_PTP_FV3(ptpfv3);
+
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config.tx_type == HWTSTAMP_TX_OFF &&
+ bfin_mac_hwtstamp_is_none(config.rx_filter)) {
+ ptpctl &= ~PTP_EN;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+
+ SSYNC();
+ } else {
+ ptpctl |= PTP_EN;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+
+ /*
+ * clear any existing timestamp
+ */
+ bfin_read_EMAC_PTP_RXSNAPLO();
+ bfin_read_EMAC_PTP_RXSNAPHI();
+
+ bfin_read_EMAC_PTP_TXSNAPLO();
+ bfin_read_EMAC_PTP_TXSNAPHI();
+
+ /*
+ * Set registers so that rollover occurs soon to test this.
+ */
+ bfin_write_EMAC_PTP_TIMELO(0x00000000);
+ bfin_write_EMAC_PTP_TIMEHI(0xFF800000);
+
+ SSYNC();
+
+ lp->compare.last_update = 0;
+ timecounter_init(&lp->clock,
+ &lp->cycles,
+ ktime_to_ns(ktime_get_real()));
+ timecompare_update(&lp->compare, 0);
+ }
+
+ lp->stamp_cfg = config;
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompare *cmp)
+{
+ ktime_t sys = ktime_get_real();
+
+ pr_debug("%s %s hardware:%d,%d transform system:%d,%d system:%d,%d, cmp:%lld, %lld\n",
+ __func__, s, hw->tv.sec, hw->tv.nsec, ts->tv.sec, ts->tv.nsec, sys.tv.sec,
+ sys.tv.nsec, cmp->offset, cmp->skew);
+}
+
+static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ union skb_shared_tx *shtx = skb_tx(skb);
+
+ if (shtx->hardware) {
+ int timeout_cnt = MAX_TIMEOUT_CNT;
+
+ /* When doing time stamping, keep the connection to the socket
+ * a while longer
+ */
+ shtx->in_progress = 1;
+
+ /*
+ * The timestamping is done at the EMAC module's MII/RMII interface
+ * when the module sees the Start of Frame of an event message packet. This
+ * interface is the closest possible place to the physical Ethernet transmission
+ * medium, providing the best timing accuracy.
+ */
+ while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
+ udelay(1);
+ if (timeout_cnt == 0)
+ printk(KERN_ERR DRV_NAME
+ ": fails to timestamp the TX packet\n");
+ else {
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 ns;
+ u64 regval;
+
+ regval = bfin_read_EMAC_PTP_TXSNAPLO();
+ regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ ns = timecounter_cyc2time(&lp->clock,
+ regval);
+ timecompare_update(&lp->compare, ns);
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ shhwtstamps.syststamp =
+ timecompare_transform(&lp->compare, ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+
+ bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare);
+ }
+ }
+}
+
+static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ u32 valid;
+ u64 regval, ns;
+ struct skb_shared_hwtstamps *shhwtstamps;
+
+ if (bfin_mac_hwtstamp_is_none(lp->stamp_cfg.rx_filter))
+ return;
+
+ valid = bfin_read_EMAC_PTP_ISTAT() & RXEL;
+ if (!valid)
+ return;
+
+ shhwtstamps = skb_hwtstamps(skb);
+
+ regval = bfin_read_EMAC_PTP_RXSNAPLO();
+ regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
+ ns = timecounter_cyc2time(&lp->clock, regval);
+ timecompare_update(&lp->compare, ns);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ shhwtstamps->syststamp = timecompare_transform(&lp->compare, ns);
+
+ bfin_dump_hwtamp("RX", &shhwtstamps->hwtstamp, &shhwtstamps->syststamp, &lp->compare);
+}
+
+/*
+ * bfin_read_clock - read raw cycle counter (to be used by time counter)
+ */
+static cycle_t bfin_read_clock(const struct cyclecounter *tc)
+{
+ u64 stamp;
+
+ stamp = bfin_read_EMAC_PTP_TIMELO();
+ stamp |= (u64)bfin_read_EMAC_PTP_TIMEHI() << 32ULL;
+
+ return stamp;
+}
+
+#define PTP_CLK 25000000
+
+static void bfin_mac_hwtstamp_init(struct net_device *netdev)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ u64 append;
+
+ /* Initialize hardware timer */
+ append = PTP_CLK * (1ULL << 32);
+ do_div(append, get_sclk());
+ bfin_write_EMAC_PTP_ADDEND((u32)append);
+
+ memset(&lp->cycles, 0, sizeof(lp->cycles));
+ lp->cycles.read = bfin_read_clock;
+ lp->cycles.mask = CLOCKSOURCE_MASK(64);
+ lp->cycles.mult = 1000000000 / PTP_CLK;
+ lp->cycles.shift = 0;
+
+ /* Synchronize our NIC clock against system wall clock */
+ memset(&lp->compare, 0, sizeof(lp->compare));
+ lp->compare.source = &lp->clock;
+ lp->compare.target = ktime_get_real;
+ lp->compare.num_samples = 10;
+
+ /* Initialize hwstamp config */
+ lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
+}
+
+#else
+# define bfin_mac_hwtstamp_is_none(cfg) 0
+# define bfin_mac_hwtstamp_init(dev)
+# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
+# define bfin_rx_hwtstamp(dev, skb)
+# define bfin_tx_hwtstamp(dev, skb)
+#endif
+
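bfin_mac_hwtstamp_init() programs EMAC_PTP_ADDEND with the ratio PTP_CLK/sclk as a 32-bit binary fraction, so the hardware accumulator advances the PTP counter at PTP_CLK even though it is clocked at sclk. The standalone arithmetic check below mirrors the do_div() computation; the 133 MHz sclk is only an assumed example value, the real driver reads it with get_sclk().

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define PTP_CLK 25000000ULL

int main(void)
{
	uint64_t sclk = 133000000ULL;	/* example system clock */

	/* addend = PTP_CLK / sclk in 0.32 fixed point (kernel uses do_div) */
	uint64_t append = (PTP_CLK << 32) / sclk;

	printf("EMAC_PTP_ADDEND = 0x%08" PRIx32 "\n", (uint32_t)append);

	/* sanity check: accumulating 'append' once per sclk cycle makes the
	 * 32-bit fraction overflow PTP_CLK times per second, i.e. ~25 MHz */
	printf("effective rate = %.0f Hz\n",
	       (double)append * (double)sclk / 4294967296.0);
	return 0;
}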
static void adjust_tx_list(void)
{
int timeout_cnt = MAX_TIMEOUT_CNT;
@@ -608,18 +979,32 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
{
u16 *data;
u32 data_align = (unsigned long)(skb->data) & 0x3;
+ union skb_shared_tx *shtx = skb_tx(skb);
+
current_tx_ptr->skb = skb;
if (data_align == 0x2) {
/* move skb->data to current_tx_ptr payload */
data = (u16 *)(skb->data) - 1;
- *data = (u16)(skb->len);
+ *data = (u16)(skb->len);
+ /*
+ * When transmitting an Ethernet packet, the PTP_TSYNC module requires
+ * a DMA_Length_Word field associated with the packet. The lower 12 bits
+ * of this field are the length of the packet payload in bytes and the higher
+ * 4 bits are the timestamping enable field.
+ */
+ if (shtx->hardware)
+ *data |= 0x1000;
+
current_tx_ptr->desc_a.start_addr = (u32)data;
/* this is important! */
blackfin_dcache_flush_range((u32)data,
(u32)((u8 *)data + skb->len + 4));
} else {
*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
+ /* enable timestamping for the sent packet */
+ if (shtx->hardware)
+ *((u16 *)(current_tx_ptr->packet)) |= 0x1000;
memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
skb->len);
current_tx_ptr->desc_a.start_addr =
@@ -653,20 +1038,42 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
out:
adjust_tx_list();
+
+ bfin_tx_hwtstamp(dev, skb);
+
current_tx_ptr = current_tx_ptr->next;
- dev->trans_start = jiffies;
dev->stats.tx_packets++;
dev->stats.tx_bytes += (skb->len);
return NETDEV_TX_OK;
}
+#define IP_HEADER_OFF 0
+#define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
+ RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)
+
static void bfin_mac_rx(struct net_device *dev)
{
struct sk_buff *skb, *new_skb;
unsigned short len;
+ struct bfin_mac_local *lp __maybe_unused = netdev_priv(dev);
+#if defined(BFIN_MAC_CSUM_OFFLOAD)
+ unsigned int i;
+ unsigned char fcs[ETH_FCS_LEN + 1];
+#endif
+
+ /* check if frame status word reports an error condition
+ * in which case we simply drop the packet
+ */
+ if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
+ printk(KERN_NOTICE DRV_NAME
+ ": rx: receive error - packet dropped\n");
+ dev->stats.rx_dropped++;
+ goto out;
+ }
/* allocate a new skb for next time receive */
skb = current_rx_ptr->skb;
+
new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
printk(KERN_NOTICE DRV_NAME
@@ -676,34 +1083,59 @@ static void bfin_mac_rx(struct net_device *dev)
}
/* reserve 2 bytes for RXDWA padding */
skb_reserve(new_skb, NET_IP_ALIGN);
- current_rx_ptr->skb = new_skb;
- current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
-
/* Invalidate the data cache over the skb->data range when it is a
* write-back cache. This prevents overwriting the new data from DMA.
*/
blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
(unsigned long)new_skb->end);
+ current_rx_ptr->skb = new_skb;
+ current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
+
len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
+ /* Deduct the Ethernet FCS length from the Ethernet payload length */
+ len -= ETH_FCS_LEN;
skb_put(skb, len);
- blackfin_dcache_invalidate_range((unsigned long)skb->head,
- (unsigned long)skb->tail);
skb->protocol = eth_type_trans(skb, dev);
+
+ bfin_rx_hwtstamp(dev, skb);
+
#if defined(BFIN_MAC_CSUM_OFFLOAD)
- skb->csum = current_rx_ptr->status.ip_payload_csum;
- skb->ip_summed = CHECKSUM_COMPLETE;
+ /* Checksum offloading only works for IPv4 packets with the standard IP header
+ * length of 20 bytes, because the Blackfin MAC checksum calculation is
+ * based on that assumption. We must NOT use the calculated checksum if the
+ * IP version or header length breaks that assumption.
+ */
+ if (skb->data[IP_HEADER_OFF] == 0x45) {
+ skb->csum = current_rx_ptr->status.ip_payload_csum;
+ /*
+ * Deduct the Ethernet FCS from the hardware-generated IP payload checksum.
+ * The IP checksum uses 16-bit one's complement arithmetic, so subtracting
+ * a value is equivalent to adding its bitwise inversion.
+ * If the IP payload length is odd, the inverted FCS must also start at an
+ * odd offset, with the first byte left as zero.
+ */
+ if (skb->len % 2) {
+ fcs[0] = 0;
+ for (i = 0; i < ETH_FCS_LEN; i++)
+ fcs[i + 1] = ~skb->data[skb->len + i];
+ skb->csum = csum_partial(fcs, ETH_FCS_LEN + 1, skb->csum);
+ } else {
+ for (i = 0; i < ETH_FCS_LEN; i++)
+ fcs[i] = ~skb->data[skb->len + i];
+ skb->csum = csum_partial(fcs, ETH_FCS_LEN, skb->csum);
+ }
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
#endif
netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
+out:
current_rx_ptr->status.status_word = 0x00000000;
current_rx_ptr = current_rx_ptr->next;
-
-out:
- return;
}
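The CHECKSUM_COMPLETE fix-up above relies on a property of the Internet checksum: it is a 16-bit one's complement sum, so adding the bitwise inverse of some bytes cancels their earlier contribution, which is how the trailing FCS is removed from the hardware-computed payload checksum. The small userspace demonstration below verifies that identity with made-up data; csum16() is a local helper, not the kernel's csum_partial().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* minimal 16-bit one's complement sum over a byte buffer */
static uint16_t csum16(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* "payload" followed by a 4-byte FCS, as the MAC would checksum it */
	uint8_t data[10] = { 0x45, 0x00, 0x12, 0x34, 0xab, 0xcd,
			     0xde, 0xad, 0xbe, 0xef };
	uint8_t folded[14];
	size_t payload = 6, fcs = 4, i;

	/* checksum over payload+FCS, then fold in the inverted FCS bytes */
	memcpy(folded, data, sizeof(data));
	for (i = 0; i < fcs; i++)
		folded[sizeof(data) + i] = (uint8_t)~data[payload + i];

	/* both lines print the same value: the FCS has been cancelled out */
	printf("payload-only csum: 0x%04x\n", csum16(data, payload));
	printf("folded-out csum  : 0x%04x\n", csum16(folded, sizeof(data) + fcs));
	return 0;
}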
/* interrupt routine to handle rx and error signal */
@@ -755,8 +1187,9 @@ static void bfin_mac_disable(void)
/*
* Enable Interrupts, Receive, and Transmit
*/
-static void bfin_mac_enable(void)
+static int bfin_mac_enable(void)
{
+ int ret;
u32 opmode;
pr_debug("%s: %s\n", DRV_NAME, __func__);
@@ -766,7 +1199,9 @@ static void bfin_mac_enable(void)
bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);
/* Wait MII done */
- bfin_mdio_poll();
+ ret = bfin_mdio_poll();
+ if (ret)
+ return ret;
/* We enable only RX here */
/* ASTP : Enable Automatic Pad Stripping
@@ -790,6 +1225,8 @@ static void bfin_mac_enable(void)
#endif
/* Turn on the EMAC rx */
bfin_write_EMAC_OPMODE(opmode);
+
+ return 0;
}
/* Our watchdog timed out. Called by the networking layer */
@@ -805,21 +1242,21 @@ static void bfin_mac_timeout(struct net_device *dev)
bfin_mac_enable();
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
static void bfin_mac_multicast_hash(struct net_device *dev)
{
u32 emac_hashhi, emac_hashlo;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
emac_hashhi = emac_hashlo = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* skip non-multicast addresses */
if (!(*addrs & 1))
@@ -836,8 +1273,6 @@ static void bfin_mac_multicast_hash(struct net_device *dev)
bfin_write_EMAC_HASHHI(emac_hashhi);
bfin_write_EMAC_HASHLO(emac_hashlo);
-
- return;
}
/*
@@ -853,7 +1288,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
sysctl = bfin_read_EMAC_OPMODE();
- sysctl |= RAF;
+ sysctl |= PR;
bfin_write_EMAC_OPMODE(sysctl);
} else if (dev->flags & IFF_ALLMULTI) {
/* accept all multicast */
@@ -874,6 +1309,16 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
}
}
+static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
/*
* this puts the device in an inactive state
*/
@@ -894,7 +1339,7 @@ static void bfin_mac_shutdown(struct net_device *dev)
static int bfin_mac_open(struct net_device *dev)
{
struct bfin_mac_local *lp = netdev_priv(dev);
- int retval;
+ int ret;
pr_debug("%s: %s\n", dev->name, __func__);
/*
@@ -908,18 +1353,21 @@ static int bfin_mac_open(struct net_device *dev)
}
/* initial rx and tx list */
- retval = desc_list_init();
-
- if (retval)
- return retval;
+ ret = desc_list_init();
+ if (ret)
+ return ret;
phy_start(lp->phydev);
phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
setup_system_regs(dev);
setup_mac_addr(dev->dev_addr);
+
bfin_mac_disable();
- bfin_mac_enable();
+ ret = bfin_mac_enable();
+ if (ret)
+ return ret;
pr_debug("hardware init finished\n");
+
netif_start_queue(dev);
netif_carrier_on(dev);
@@ -958,6 +1406,7 @@ static const struct net_device_ops bfin_mac_netdev_ops = {
.ndo_set_mac_address = bfin_mac_set_mac_address,
.ndo_tx_timeout = bfin_mac_timeout,
.ndo_set_multicast_list = bfin_mac_set_multicast_list,
+ .ndo_do_ioctl = bfin_mac_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1017,6 +1466,11 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
}
pd = pdev->dev.platform_data;
lp->mii_bus = platform_get_drvdata(pd);
+ if (!lp->mii_bus) {
+ dev_err(&pdev->dev, "Cannot get mii_bus!\n");
+ rc = -ENODEV;
+ goto out_err_mii_bus_probe;
+ }
lp->mii_bus->priv = ndev;
rc = mii_probe(ndev);
@@ -1049,6 +1503,8 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
goto out_err_reg_ndev;
}
+ bfin_mac_hwtstamp_init(ndev);
+
/* now, print out the card info, in a short format.. */
dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
@@ -1060,6 +1516,7 @@ out_err_request_irq:
out_err_mii_probe:
mdiobus_unregister(lp->mii_bus);
mdiobus_free(lp->mii_bus);
+out_err_mii_bus_probe:
peripheral_free_list(pin_req);
out_err_probe_mac:
platform_set_drvdata(pdev, NULL);
@@ -1092,9 +1549,16 @@ static int __devexit bfin_mac_remove(struct platform_device *pdev)
static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
+ struct bfin_mac_local *lp = netdev_priv(net_dev);
- if (netif_running(net_dev))
- bfin_mac_close(net_dev);
+ if (lp->wol) {
+ bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE) | RE);
+ bfin_write_EMAC_WKUP_CTL(MPKE);
+ enable_irq_wake(IRQ_MAC_WAKEDET);
+ } else {
+ if (netif_running(net_dev))
+ bfin_mac_close(net_dev);
+ }
return 0;
}
@@ -1102,9 +1566,16 @@ static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
static int bfin_mac_resume(struct platform_device *pdev)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
+ struct bfin_mac_local *lp = netdev_priv(net_dev);
- if (netif_running(net_dev))
- bfin_mac_open(net_dev);
+ if (lp->wol) {
+ bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
+ bfin_write_EMAC_WKUP_CTL(0);
+ disable_irq_wake(IRQ_MAC_WAKEDET);
+ } else {
+ if (netif_running(net_dev))
+ bfin_mac_open(net_dev);
+ }
return 0;
}
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index 052b5dc..1ae7b82 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -7,6 +7,12 @@
*
* Licensed under the GPL-2 or later.
*/
+#ifndef _BFIN_MAC_H_
+#define _BFIN_MAC_H_
+
+#include <linux/net_tstamp.h>
+#include <linux/clocksource.h>
+#include <linux/timecompare.h>
#define BFIN_MAC_CSUM_OFFLOAD
@@ -60,6 +66,9 @@ struct bfin_mac_local {
unsigned char Mac[6]; /* MAC address of the board */
spinlock_t lock;
+ int wol; /* Wake On Lan */
+ int irq_wake_requested;
+
/* MII and PHY stuffs */
int old_link; /* used by bf537_adjust_link */
int old_speed;
@@ -67,6 +76,15 @@ struct bfin_mac_local {
struct phy_device *phydev;
struct mii_bus *mii_bus;
+
+#if defined(CONFIG_BFIN_MAC_USE_HWSTAMP)
+ struct cyclecounter cycles;
+ struct timecounter clock;
+ struct timecompare compare;
+ struct hwtstamp_config stamp_cfg;
+#endif
};
extern void bfin_get_ether_addr(char *addr);
+
+#endif
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 598b007..39250b2 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -167,7 +167,6 @@ static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
- return;
}
static inline unsigned long
@@ -382,8 +381,6 @@ bmac_init_registers(struct net_device *dev)
bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);
bmwrite(dev, INTDISABLE, EnableNormal);
-
- return;
}
#if 0
@@ -972,7 +969,7 @@ bmac_remove_multi(struct net_device *dev,
*/
static void bmac_set_multicast(struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct bmac_data *bp = netdev_priv(dev);
int num_addrs = netdev_mc_count(dev);
unsigned short rx_cfg;
@@ -1001,8 +998,8 @@ static void bmac_set_multicast(struct net_device *dev)
rx_cfg = bmac_rx_on(dev, 0, 0);
XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
} else {
- netdev_for_each_mc_addr(dmi, dev)
- bmac_addhash(bp, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ bmac_addhash(bp, ha->addr);
bmac_update_hash_table_mask(dev, bp);
rx_cfg = bmac_rx_on(dev, 1, 0);
XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
@@ -1016,7 +1013,7 @@ static void bmac_set_multicast(struct net_device *dev)
static void bmac_set_multicast(struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
int i;
unsigned short rx_cfg;
@@ -1040,8 +1037,8 @@ static void bmac_set_multicast(struct net_device *dev)
for(i = 0; i < 4; i++) hash_table[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if(!(*addrs & 1))
continue;
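Several of the hunks above and below (bfin_mac, bmac, bnx2) are the same mechanical conversion: the old struct dev_mc_list/dmi_addr walker is replaced by netdev_for_each_mc_addr() over struct netdev_hw_addr. A hedged sketch of that conversion for a generic set_multicast handler follows; my_hash_add() is a placeholder for the driver-specific hash update, the iteration macro and is_multicast_ether_addr() are the real kernel helpers.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void my_hash_add(const u8 *addr)
{
	/* driver-specific: fold 'addr' into the hardware multicast hash */
}

static void my_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	/* was: struct dev_mc_list *dmi; ... addrs = dmi->dmi_addr; */
	netdev_for_each_mc_addr(ha, dev) {
		/* skip anything that is not a multicast address */
		if (!is_multicast_ether_addr(ha->addr))
			continue;
		my_hash_add(ha->addr);
	}
}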
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index ac90a38..188e356 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -58,11 +58,11 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.0.9"
-#define DRV_MODULE_RELDATE "April 27, 2010"
+#define DRV_MODULE_VERSION "2.0.15"
+#define DRV_MODULE_RELDATE "May 4, 2010"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
-#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
+#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j15.fw"
#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
@@ -656,19 +656,11 @@ bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
if (stop_cnic)
bnx2_cnic_stop(bp);
if (netif_running(bp->dev)) {
- int i;
-
bnx2_napi_disable(bp);
netif_tx_disable(bp->dev);
- /* prevent tx timeout */
- for (i = 0; i < bp->dev->num_tx_queues; i++) {
- struct netdev_queue *txq;
-
- txq = netdev_get_tx_queue(bp->dev, i);
- txq->trans_start = jiffies;
- }
}
bnx2_disable_int_sync(bp);
+ netif_carrier_off(bp->dev); /* prevent tx timeout */
}
static void
@@ -677,6 +669,10 @@ bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
if (atomic_dec_and_test(&bp->intr_sem)) {
if (netif_running(bp->dev)) {
netif_tx_wake_all_queues(bp->dev);
+ spin_lock_bh(&bp->phy_lock);
+ if (bp->link_up)
+ netif_carrier_on(bp->dev);
+ spin_unlock_bh(&bp->phy_lock);
bnx2_napi_enable(bp);
bnx2_enable_int(bp);
if (start_cnic)
@@ -2672,7 +2668,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
}
rx_pg->page = page;
- pci_unmap_addr_set(rx_pg, mapping, mapping);
+ dma_unmap_addr_set(rx_pg, mapping, mapping);
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
return 0;
@@ -2687,7 +2683,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
if (!page)
return;
- pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
+ pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
PCI_DMA_FROMDEVICE);
__free_page(page);
@@ -2719,7 +2715,8 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
}
rx_buf->skb = skb;
- pci_unmap_addr_set(rx_buf, mapping, mapping);
+ rx_buf->desc = (struct l2_fhdr *) skb->data;
+ dma_unmap_addr_set(rx_buf, mapping, mapping);
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -2818,7 +2815,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
}
}
- pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+ pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
tx_buf->skb = NULL;
@@ -2828,7 +2825,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
sw_cons = NEXT_TX_BD(sw_cons);
pci_unmap_page(bp->pdev,
- pci_unmap_addr(
+ dma_unmap_addr(
&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
mapping),
skb_shinfo(skb)->frags[i].size,
@@ -2910,8 +2907,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
if (prod != cons) {
prod_rx_pg->page = cons_rx_pg->page;
cons_rx_pg->page = NULL;
- pci_unmap_addr_set(prod_rx_pg, mapping,
- pci_unmap_addr(cons_rx_pg, mapping));
+ dma_unmap_addr_set(prod_rx_pg, mapping,
+ dma_unmap_addr(cons_rx_pg, mapping));
prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
@@ -2935,18 +2932,19 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
prod_rx_buf = &rxr->rx_buf_ring[prod];
pci_dma_sync_single_for_device(bp->pdev,
- pci_unmap_addr(cons_rx_buf, mapping),
+ dma_unmap_addr(cons_rx_buf, mapping),
BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
rxr->rx_prod_bseq += bp->rx_buf_use_size;
prod_rx_buf->skb = skb;
+ prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
if (cons == prod)
return;
- pci_unmap_addr_set(prod_rx_buf, mapping,
- pci_unmap_addr(cons_rx_buf, mapping));
+ dma_unmap_addr_set(prod_rx_buf, mapping,
+ dma_unmap_addr(cons_rx_buf, mapping));
cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
@@ -3019,7 +3017,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
/* Don't unmap yet. If we're unable to allocate a new
* page, we need to recycle the page and the DMA addr.
*/
- mapping_old = pci_unmap_addr(rx_pg, mapping);
+ mapping_old = dma_unmap_addr(rx_pg, mapping);
if (i == pages - 1)
frag_len -= 4;
@@ -3074,6 +3072,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
struct l2_fhdr *rx_hdr;
int rx_pkt = 0, pg_ring_used = 0;
+ struct pci_dev *pdev = bp->pdev;
hw_cons = bnx2_get_hw_rx_cons(bnapi);
sw_cons = rxr->rx_cons;
@@ -3086,7 +3085,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
while (sw_cons != hw_cons) {
unsigned int len, hdr_len;
u32 status;
- struct sw_bd *rx_buf;
+ struct sw_bd *rx_buf, *next_rx_buf;
struct sk_buff *skb;
dma_addr_t dma_addr;
u16 vtag = 0;
@@ -3097,16 +3096,23 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
skb = rx_buf->skb;
+ prefetchw(skb);
+ if (!get_dma_ops(&pdev->dev)->sync_single_for_cpu) {
+ next_rx_buf =
+ &rxr->rx_buf_ring[
+ RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+ prefetch(next_rx_buf->desc);
+ }
rx_buf->skb = NULL;
- dma_addr = pci_unmap_addr(rx_buf, mapping);
+ dma_addr = dma_unmap_addr(rx_buf, mapping);
pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
PCI_DMA_FROMDEVICE);
- rx_hdr = (struct l2_fhdr *) skb->data;
+ rx_hdr = rx_buf->desc;
len = rx_hdr->l2_fhdr_pkt_len;
status = rx_hdr->l2_fhdr_status;
@@ -3207,10 +3213,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
#ifdef BCM_VLAN
if (hw_vlan)
- vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
+ vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
else
#endif
- netif_receive_skb(skb);
+ napi_gro_receive(&bnapi->napi, skb);
rx_pkt++;
@@ -3548,7 +3554,6 @@ bnx2_set_rx_mode(struct net_device *dev)
}
else {
/* Accept one or more multicast(s). */
- struct dev_mc_list *mclist;
u32 mc_filter[NUM_MC_HASH_REGISTERS];
u32 regidx;
u32 bit;
@@ -3556,8 +3561,8 @@ bnx2_set_rx_mode(struct net_device *dev)
memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
- netdev_for_each_mc_addr(mclist, dev) {
- crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
bit = crc & 0xff;
regidx = (bit & 0xe0) >> 5;
bit &= 0x1f;
@@ -5318,7 +5323,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
}
pci_unmap_single(bp->pdev,
- pci_unmap_addr(tx_buf, mapping),
+ dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
@@ -5329,7 +5334,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
for (k = 0; k < last; k++, j++) {
tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
pci_unmap_page(bp->pdev,
- pci_unmap_addr(tx_buf, mapping),
+ dma_unmap_addr(tx_buf, mapping),
skb_shinfo(skb)->frags[k].size,
PCI_DMA_TODEVICE);
}
@@ -5359,7 +5364,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
continue;
pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
+ dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
@@ -5765,11 +5770,11 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
rx_buf = &rxr->rx_buf_ring[rx_start_idx];
rx_skb = rx_buf->skb;
- rx_hdr = (struct l2_fhdr *) rx_skb->data;
+ rx_hdr = rx_buf->desc;
skb_reserve(rx_skb, BNX2_RX_OFFSET);
pci_dma_sync_single_for_cpu(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
+ dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_size, PCI_DMA_FROMDEVICE);
if (rx_hdr->l2_fhdr_status &
@@ -6292,14 +6297,23 @@ static void
bnx2_dump_state(struct bnx2 *bp)
{
struct net_device *dev = bp->dev;
+ u32 mcp_p0, mcp_p1;
netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
- netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
+ netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
REG_RD(bp, BNX2_EMAC_TX_STATUS),
+ REG_RD(bp, BNX2_EMAC_RX_STATUS));
+ netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ mcp_p0 = BNX2_MCP_STATE_P0;
+ mcp_p1 = BNX2_MCP_STATE_P1;
+ } else {
+ mcp_p0 = BNX2_MCP_STATE_P0_5708;
+ mcp_p1 = BNX2_MCP_STATE_P1_5708;
+ }
netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
- bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
- bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
+ bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
if (bp->flags & BNX2_FLAG_USING_MSIX)
@@ -6429,7 +6443,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_buf = &txr->tx_buf_ring[ring_prod];
tx_buf->skb = skb;
- pci_unmap_addr_set(tx_buf, mapping, mapping);
+ dma_unmap_addr_set(tx_buf, mapping, mapping);
txbd = &txr->tx_desc_ring[ring_prod];
@@ -6454,7 +6468,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(bp->pdev, mapping))
goto dma_error;
- pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
+ dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
mapping);
txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
@@ -6491,7 +6505,7 @@ dma_error:
ring_prod = TX_RING_IDX(prod);
tx_buf = &txr->tx_buf_ring[ring_prod];
tx_buf->skb = NULL;
- pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+ pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
/* unmap remaining mapped pages */
@@ -6499,7 +6513,7 @@ dma_error:
prod = NEXT_TX_BD(prod);
ring_prod = TX_RING_IDX(prod);
tx_buf = &txr->tx_buf_ring[ring_prod];
- pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+ pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
}
@@ -8297,7 +8311,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(dev->dev_addr, bp->mac_addr, 6);
memcpy(dev->perm_addr, bp->mac_addr, 6);
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
if (CHIP_NUM(bp) == CHIP_NUM_5709) {
dev->features |= NETIF_F_IPV6_CSUM;
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index cd4b0e4..ddaa3fc 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6347,6 +6347,8 @@ struct l2_fhdr {
#define BNX2_MCP_SCRATCH 0x00160000
#define BNX2_MCP_STATE_P1 0x0016f9c8
#define BNX2_MCP_STATE_P0 0x0016fdc8
+#define BNX2_MCP_STATE_P1_5708 0x001699c8
+#define BNX2_MCP_STATE_P0_5708 0x00169dc8
#define BNX2_SHM_HDR_SIGNATURE BNX2_MCP_SCRATCH
#define BNX2_SHM_HDR_SIGNATURE_SIG_MASK 0xffff0000
@@ -6551,17 +6553,18 @@ struct l2_fhdr {
struct sw_bd {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ struct l2_fhdr *desc;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
struct sw_pg {
struct page *page;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
struct sw_tx_bd {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
unsigned short is_gso;
unsigned short nr_frags;
};
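The sw_bd/sw_pg/sw_tx_bd changes here, together with the pci_unmap_addr -> dma_unmap_addr edits in bnx2.c above, switch to the generic DMA unmap-state helpers, which compile away on architectures that never need the bus address kept around. A minimal sketch of how these macros are typically used around a pci_map_single()/pci_unmap_single() pair is below; struct my_tx_buf and the helper functions are illustrative, not from the patch.

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct my_tx_buf {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* empty unless the arch needs it */
};

static int my_map_tx(struct pci_dev *pdev, struct my_tx_buf *buf,
		     struct sk_buff *skb)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, skb->data, skb_headlen(skb),
				 PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, mapping))
		return -EIO;

	buf->skb = skb;
	/* stash the bus address only where the arch actually needs it */
	dma_unmap_addr_set(buf, mapping, mapping);
	return 0;
}

static void my_unmap_tx(struct pci_dev *pdev, struct my_tx_buf *buf)
{
	pci_unmap_single(pdev, dma_unmap_addr(buf, mapping),
			 skb_headlen(buf->skb), PCI_DMA_TODEVICE);
	buf->skb = NULL;
}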
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 3c48a7a..8bd2368 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -24,16 +24,25 @@
#define BCM_VLAN 1
#endif
+#define BNX2X_MULTI_QUEUE
+
+#define BNX2X_NEW_NAPI
+
+
+
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
-#define BNX2X_MULTI_QUEUE
-
-#define BNX2X_NEW_NAPI
-
+#ifdef BCM_CNIC
+#define BNX2X_MIN_MSIX_VEC_CNT 3
+#define BNX2X_MSIX_VEC_FP_START 2
+#else
+#define BNX2X_MIN_MSIX_VEC_CNT 2
+#define BNX2X_MSIX_VEC_FP_START 1
+#endif
#include <linux/mdio.h>
#include "bnx2x_reg.h"
@@ -83,7 +92,12 @@ do { \
__func__, __LINE__, \
bp->dev ? (bp->dev->name) : "?", \
##__args); \
-} while (0)
+ } while (0)
+
+#define BNX2X_ERROR(__fmt, __args...) do { \
+ pr_err("[%s:%d]" __fmt, __func__, __LINE__, ##__args); \
+ } while (0)
+
/* before we have a dev->name use dev_info() */
#define BNX2X_DEV_INFO(__fmt, __args...) \
@@ -155,15 +169,21 @@ do { \
#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
+#define MF_CFG_RD(bp, field) SHMEM_RD(bp, mf_cfg.field)
+#define MF_CFG_WR(bp, field, val) SHMEM_WR(bp, mf_cfg.field, val)
+
#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
+#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
+
/* fast path */
struct sw_rx_bd {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
struct sw_tx_bd {
@@ -176,7 +196,7 @@ struct sw_tx_bd {
struct sw_rx_page {
struct page *page;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
union db_prod {
@@ -261,7 +281,7 @@ struct bnx2x_eth_q_stats {
u32 hw_csum_err;
};
-#define BNX2X_NUM_Q_STATS 11
+#define BNX2X_NUM_Q_STATS 13
#define Q_STATS_OFFSET32(stat_name) \
(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
@@ -767,7 +787,7 @@ struct bnx2x_eth_stats {
u32 nig_timer_max;
};
-#define BNX2X_NUM_STATS 41
+#define BNX2X_NUM_STATS 43
#define STATS_OFFSET32(stat_name) \
(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
@@ -818,6 +838,12 @@ struct attn_route {
u32 sig[4];
};
+typedef enum {
+ BNX2X_RECOVERY_DONE,
+ BNX2X_RECOVERY_INIT,
+ BNX2X_RECOVERY_WAIT,
+} bnx2x_recovery_state_t;
+
struct bnx2x {
/* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure
@@ -835,6 +861,9 @@ struct bnx2x {
struct pci_dev *pdev;
atomic_t intr_sem;
+
+ bnx2x_recovery_state_t recovery_state;
+ int is_leader;
#ifdef BCM_CNIC
struct msix_entry msix_table[MAX_CONTEXT+2];
#else
@@ -842,7 +871,6 @@ struct bnx2x {
#endif
#define INT_MODE_INTx 1
#define INT_MODE_MSI 2
-#define INT_MODE_MSIX 3
int tx_ring_size;
@@ -924,8 +952,7 @@ struct bnx2x {
int mrrs;
struct delayed_work sp_task;
- struct work_struct reset_task;
-
+ struct delayed_work reset_task;
struct timer_list timer;
int current_interval;
@@ -961,6 +988,8 @@ struct bnx2x {
u16 rx_quick_cons_trip;
u16 rx_ticks_int;
u16 rx_ticks;
+/* Maximal coalescing timeout in us */
+#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
u32 lin_cnt;
@@ -1075,6 +1104,7 @@ struct bnx2x {
#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data)
#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data)
+ char fw_ver[32];
const struct firmware *firmware;
};
@@ -1125,6 +1155,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define LOAD_DIAG 2
#define UNLOAD_NORMAL 0
#define UNLOAD_CLOSE 1
+#define UNLOAD_RECOVERY 2
/* DMAE command defines */
@@ -1152,7 +1183,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
#define DMAE_LEN32_RD_MAX 0x80
-#define DMAE_LEN32_WR_MAX 0x400
+#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
#define DMAE_COMP_VAL 0xe0d0d0ae
@@ -1294,8 +1325,12 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
+#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
-#define MULTI_FLAGS(bp) \
+#define RSS_FLAGS(bp) \
(TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
@@ -1333,6 +1368,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
#endif
+#define BNX2X_VPD_LEN 128
+#define VENDOR_ID_LEN 4
+
/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index 32e79c3..ff70be8 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -1594,7 +1594,7 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
pause_result |= (lp_pause &
MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
- DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
+ DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
pause_result);
bnx2x_pause_resolve(vars, pause_result);
if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE &&
@@ -1616,7 +1616,7 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
bnx2x_pause_resolve(vars, pause_result);
- DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x \n",
+ DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
pause_result);
}
}
@@ -1974,7 +1974,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
}
}
- DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x \n",
+ DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n",
gp_status, vars->phy_link_up, vars->line_speed);
DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x"
" autoneg 0x%x\n",
@@ -3852,7 +3852,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
SPEED_AUTO_NEG) &&
((params->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
- DP(NETIF_MSG_LINK, "Setting 1G clause37 \n");
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
bnx2x_cl45_write(bp, params->port, ext_phy_type,
ext_phy_addr, MDIO_AN_DEVAD,
MDIO_AN_REG_ADV, 0x20);
@@ -4234,14 +4234,14 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_10G_CTRL2, &tmp1);
- DP(NETIF_MSG_LINK, "1.7 = 0x%x \n", tmp1);
+ DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
} else if ((params->req_line_speed ==
SPEED_AUTO_NEG) &&
((params->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
- DP(NETIF_MSG_LINK, "Setting 1G clause37 \n");
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
bnx2x_cl45_write(bp, params->port, ext_phy_type,
ext_phy_addr, MDIO_AN_DEVAD,
MDIO_PMA_REG_8727_MISC_CTRL, 0);
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 6c042a7..57ff5b3 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -57,8 +57,8 @@
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
-#define DRV_MODULE_VERSION "1.52.1-7"
-#define DRV_MODULE_RELDATE "2010/02/28"
+#define DRV_MODULE_VERSION "1.52.53-1"
+#define DRV_MODULE_RELDATE "2010/18/04"
#define BNX2X_BC_VER 0x040200
#include <linux/firmware.h>
@@ -102,7 +102,8 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
static int int_mode;
module_param(int_mode, int, 0);
-MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
+MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
+ "(1 INT#x; 2 MSI)");
static int dropless_fc;
module_param(dropless_fc, int, 0);
@@ -352,13 +353,14 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
u32 addr, u32 len)
{
+ int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
int offset = 0;
- while (len > DMAE_LEN32_WR_MAX) {
+ while (len > dmae_wr_max) {
bnx2x_write_dmae(bp, phys_addr + offset,
- addr + offset, DMAE_LEN32_WR_MAX);
- offset += DMAE_LEN32_WR_MAX * 4;
- len -= DMAE_LEN32_WR_MAX;
+ addr + offset, dmae_wr_max);
+ offset += dmae_wr_max * 4;
+ len -= dmae_wr_max;
}
bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
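The rewritten loop above replaces the compile-time DMAE_LEN32_WR_MAX limit with a per-chip value and walks the transfer in maximal chunks; note that len counts 32-bit words while offset advances in bytes, hence the multiply by 4. A small userspace model of the same arithmetic, with made-up sizes, purely to illustrate the bookkeeping:

#include <stdio.h>

/* Split a transfer of `len` 32-bit words into pieces of at most
 * `max_words`, advancing the byte offset by 4 per word written. */
static void write_in_chunks(unsigned int len, unsigned int max_words)
{
        unsigned int offset = 0;

        while (len > max_words) {
                printf("write %u words at byte offset %u\n", max_words, offset);
                offset += max_words * 4;
                len -= max_words;
        }
        printf("write %u words at byte offset %u\n", len, offset);
}

int main(void)
{
        write_in_chunks(500, 64);       /* 64-word limit chosen arbitrarily */
        return 0;
}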
@@ -508,26 +510,31 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
static void bnx2x_fw_dump(struct bnx2x *bp)
{
+ u32 addr;
u32 mark, offset;
__be32 data[9];
int word;
- mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
- mark = ((mark + 0x3) & ~0x3);
+ if (BP_NOMCP(bp)) {
+ BNX2X_ERR("NO MCP - can not dump\n");
+ return;
+ }
+
+ addr = bp->common.shmem_base - 0x0800 + 4;
+ mark = REG_RD(bp, addr);
+ mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
pr_err("begin fw dump (mark 0x%x)\n", mark);
pr_err("");
- for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
+ for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
for (word = 0; word < 8; word++)
- data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
- offset + 4*word));
+ data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
pr_cont("%s", (char *)data);
}
- for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
+ for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
for (word = 0; word < 8; word++)
- data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
- offset + 4*word));
+ data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
pr_cont("%s", (char *)data);
}
@@ -546,9 +553,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
/* Indices */
/* Common */
- BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
- " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
- " spq_prod_idx(%u)\n",
+ BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
+ " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
+ " spq_prod_idx(0x%x)\n",
bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
@@ -556,14 +563,14 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
- " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
- " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
+ BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
+ " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
+ " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
i, fp->rx_bd_prod, fp->rx_bd_cons,
le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
- BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
- " fp_u_idx(%x) *sb_u_idx(%x)\n",
+ BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
+ " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
fp->rx_sge_prod, fp->last_max_sge,
le16_to_cpu(fp->fp_u_idx),
fp->status_blk->u_status_block.status_block_index);
@@ -573,12 +580,13 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
- " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
+ BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
+ " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
+ " *tx_cons_sb(0x%x)\n",
i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
- BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
- " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
+ BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
+ " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
fp->status_blk->c_status_block.status_block_index,
fp->tx_db.data.prod);
}
@@ -764,6 +772,40 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
* General service functions
*/
+/* Return true if succeeded to acquire the lock */
+static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+
+ DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ DP(NETIF_MSG_HW,
+ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return false;
+ }
+
+ if (func <= 5)
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ else
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+
+ /* Try to acquire the lock */
+ REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit)
+ return true;
+
+ DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
+ return false;
+}
+
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
u8 storm, u16 index, u8 op, u8 update)
{
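The new bnx2x_trylock_hw_lock() in the hunk above relies on the controller's set-and-read-back semantics: writing the resource bit to the control register's +4 offset requests the lock, and only a read-back of the register shows whether this function actually got it (the hardware ignores the write while another function owns the bit). A stripped-down model of that try-lock flow, using hypothetical reg_write_set()/reg_read() stand-ins for REG_WR/REG_RD:

#include <stdbool.h>
#include <stdio.h>

static unsigned int lock_reg;   /* stands in for the HW lock register */

/* In real hardware the set is ignored while another function holds
 * the bit; this toy register always latches, so the model only shows
 * the request-then-verify ordering. */
static void reg_write_set(unsigned int bit) { lock_reg |= bit; }
static unsigned int reg_read(void)          { return lock_reg; }

static bool trylock(unsigned int resource)
{
        unsigned int bit = 1u << resource;

        reg_write_set(bit);                     /* models REG_WR(ctrl + 4, bit) */
        return (reg_read() & bit) != 0;         /* models REG_RD(ctrl) */
}

int main(void)
{
        printf("resource 3: %s\n", trylock(3) ? "acquired" : "busy");
        return 0;
}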
@@ -842,7 +884,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* unmap first bd */
DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
- pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
+ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
@@ -872,8 +914,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
- pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
- BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
+ dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
if (--nbd)
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
}
@@ -1023,7 +1065,8 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
default:
BNX2X_ERR("unexpected MC reply (%d) "
- "fp->state is %x\n", command, fp->state);
+ "fp[%d] state is %x\n",
+ command, fp->index, fp->state);
break;
}
mb(); /* force bnx2x_wait_ramrod() to see the change */
@@ -1086,7 +1129,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
if (!page)
return;
- pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
+ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
__free_pages(page, PAGES_PER_SGE_SHIFT);
@@ -1115,15 +1158,15 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
if (unlikely(page == NULL))
return -ENOMEM;
- mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_page(&bp->pdev->dev, page, 0,
+ SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
__free_pages(page, PAGES_PER_SGE_SHIFT);
return -ENOMEM;
}
sw_buf->page = page;
- pci_unmap_addr_set(sw_buf, mapping, mapping);
+ dma_unmap_addr_set(sw_buf, mapping, mapping);
sge->addr_hi = cpu_to_le32(U64_HI(mapping));
sge->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1143,15 +1186,15 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
if (unlikely(skb == NULL))
return -ENOMEM;
- mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+ DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
dev_kfree_skb(skb);
return -ENOMEM;
}
rx_buf->skb = skb;
- pci_unmap_addr_set(rx_buf, mapping, mapping);
+ dma_unmap_addr_set(rx_buf, mapping, mapping);
rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1173,13 +1216,13 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
- pci_dma_sync_single_for_device(bp->pdev,
- pci_unmap_addr(cons_rx_buf, mapping),
- RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&bp->pdev->dev,
+ dma_unmap_addr(cons_rx_buf, mapping),
+ RX_COPY_THRESH, DMA_FROM_DEVICE);
prod_rx_buf->skb = cons_rx_buf->skb;
- pci_unmap_addr_set(prod_rx_buf, mapping,
- pci_unmap_addr(cons_rx_buf, mapping));
+ dma_unmap_addr_set(prod_rx_buf, mapping,
+ dma_unmap_addr(cons_rx_buf, mapping));
*prod_bd = *cons_bd;
}
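This hunk and the surrounding ones migrate the driver from the legacy pci_* DMA wrappers to the generic DMA API: pci_map_single(bp->pdev, ...) becomes dma_map_single(&bp->pdev->dev, ...), PCI_DMA_FROMDEVICE becomes DMA_FROM_DEVICE, and the pci_unmap_addr* bookkeeping becomes dma_unmap_addr*. A hedged sketch of the new-style map/unmap pairing for a receive buffer; example_map_rx()/example_unmap_rx() and the rx_buf_size parameter are illustrative, not the driver's functions:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map an skb's data for device-to-memory DMA and record the handle. */
static int example_map_rx(struct pci_dev *pdev, struct sk_buff *skb,
                          size_t rx_buf_size, dma_addr_t *mapping)
{
        *mapping = dma_map_single(&pdev->dev, skb->data, rx_buf_size,
                                  DMA_FROM_DEVICE);
        if (dma_mapping_error(&pdev->dev, *mapping))
                return -ENOMEM;
        return 0;
}

/* Counterpart: release the mapping before handing the skb to the stack. */
static void example_unmap_rx(struct pci_dev *pdev, dma_addr_t mapping,
                             size_t rx_buf_size)
{
        dma_unmap_single(&pdev->dev, mapping, rx_buf_size, DMA_FROM_DEVICE);
}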
@@ -1283,9 +1326,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
/* move empty skb from pool to prod and map it */
prod_rx_buf->skb = fp->tpa_pool[queue].skb;
- mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
- pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
+ mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
+ bp->rx_buf_size, DMA_FROM_DEVICE);
+ dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
/* move partial skb from cons to pool (don't unmap yet) */
fp->tpa_pool[queue] = *cons_rx_buf;
@@ -1302,7 +1345,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
#ifdef BNX2X_STOP_ON_ERROR
fp->tpa_queue_used |= (1 << queue);
-#ifdef __powerpc64__
+#ifdef _ASM_GENERIC_INT_L64_H
DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
@@ -1331,8 +1374,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
max(frag_size, (u32)len_on_bd));
#ifdef BNX2X_STOP_ON_ERROR
- if (pages >
- min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
+ if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
pages, cqe_idx);
BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
@@ -1361,8 +1403,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
/* Unmap the page as we r going to pass it to the stack */
- pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
- SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&bp->pdev->dev,
+ dma_unmap_addr(&old_rx_pg, mapping),
+ SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
/* Add one frag and update the appropriate fields in the skb */
skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -1389,8 +1432,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
- pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_size, DMA_FROM_DEVICE);
if (likely(new_skb)) {
/* fix ip xsum and give it to the stack */
@@ -1441,12 +1484,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
#ifdef BCM_VLAN
if ((bp->vlgrp != NULL) && is_vlan_cqe &&
(!is_not_hwaccel_vlan_cqe))
- vlan_hwaccel_receive_skb(skb, bp->vlgrp,
- le16_to_cpu(cqe->fast_path_cqe.
- vlan_tag));
+ vlan_gro_receive(&fp->napi, bp->vlgrp,
+ le16_to_cpu(cqe->fast_path_cqe.
+ vlan_tag), skb);
else
#endif
- netif_receive_skb(skb);
+ napi_gro_receive(&fp->napi, skb);
} else {
DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
" - dropping packet!\n");
@@ -1539,7 +1582,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
struct sw_rx_bd *rx_buf = NULL;
struct sk_buff *skb;
union eth_rx_cqe *cqe;
- u8 cqe_fp_flags;
+ u8 cqe_fp_flags, cqe_fp_status_flags;
u16 len, pad;
comp_ring_cons = RCQ_BD(sw_comp_cons);
@@ -1555,6 +1598,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
cqe = &fp->rx_comp_ring[comp_ring_cons];
cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+ cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;
DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
" queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
@@ -1573,7 +1617,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
rx_buf = &fp->rx_buf_ring[bd_cons];
skb = rx_buf->skb;
prefetch(skb);
- prefetch((u8 *)skb + 256);
len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
pad = cqe->fast_path_cqe.placement_offset;
@@ -1620,11 +1663,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
}
}
- pci_dma_sync_single_for_device(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
- pad + RX_COPY_THRESH,
- PCI_DMA_FROMDEVICE);
- prefetch(skb);
+ dma_sync_single_for_device(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ pad + RX_COPY_THRESH,
+ DMA_FROM_DEVICE);
prefetch(((char *)(skb)) + 128);
/* is this an error packet? */
@@ -1665,10 +1707,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
} else
if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
- pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_reserve(skb, pad);
skb_put(skb, len);
@@ -1684,6 +1726,12 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
+ if ((bp->dev->features & NETIF_F_RXHASH) &&
+ (cqe_fp_status_flags &
+ ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+ skb->rxhash = le32_to_cpu(
+ cqe->fast_path_cqe.rss_hash_result);
+
skb->ip_summed = CHECKSUM_NONE;
if (bp->rx_csum) {
if (likely(BNX2X_RX_CSUM_OK(cqe)))
@@ -1699,11 +1747,11 @@ reuse_rx:
if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
PARSING_FLAGS_VLAN))
- vlan_hwaccel_receive_skb(skb, bp->vlgrp,
- le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
+ vlan_gro_receive(&fp->napi, bp->vlgrp,
+ le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
else
#endif
- netif_receive_skb(skb);
+ napi_gro_receive(&fp->napi, skb);
next_rx:
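Besides the DMA API switch, the receive completion path above now feeds packets to GRO: netif_receive_skb() becomes napi_gro_receive() and the VLAN-accelerated call becomes vlan_gro_receive(), both taking the queue's napi context so frames can be aggregated before reaching the stack. A minimal sketch of the plain (non-VLAN) delivery step; example_rx_deliver() and its hw_csum_ok flag are illustrative only:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>

/* Hand a completed RX skb to GRO; before this patch the driver
 * called netif_receive_skb() at this point instead. */
static void example_rx_deliver(struct napi_struct *napi, struct sk_buff *skb,
                               bool hw_csum_ok)
{
        skb->ip_summed = hw_csum_ok ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
        napi_gro_receive(napi, skb);
}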
@@ -1831,8 +1879,8 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
return IRQ_HANDLED;
}
- if (status)
- DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
+ if (unlikely(status))
+ DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
status);
return IRQ_HANDLED;
@@ -1900,6 +1948,8 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
int func = BP_FUNC(bp);
u32 hw_lock_control_reg;
+ DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
+
/* Validating that the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
DP(NETIF_MSG_HW,
@@ -2254,11 +2304,14 @@ static void bnx2x__link_reset(struct bnx2x *bp)
static u8 bnx2x_link_test(struct bnx2x *bp)
{
- u8 rc;
+ u8 rc = 0;
- bnx2x_acquire_phy_lock(bp);
- rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
- bnx2x_release_phy_lock(bp);
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not test link\n");
return rc;
}
@@ -2387,10 +2440,10 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
than zero */
m_fair_vn.vn_credit_delta =
- max((u32)(vn_min_rate * (T_FAIR_COEF /
- (8 * bp->vn_weight_sum))),
- (u32)(bp->cmng.fair_vars.fair_threshold * 2));
- DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
+ max_t(u32, (vn_min_rate * (T_FAIR_COEF /
+ (8 * bp->vn_weight_sum))),
+ (bp->cmng.fair_vars.fair_threshold * 2));
+ DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
m_fair_vn.vn_credit_delta);
}
@@ -2410,6 +2463,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
+ u32 prev_link_status = bp->link_vars.link_status;
/* Make sure that we are synced with the current statistics */
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -2442,8 +2496,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
}
- /* indicate link status */
- bnx2x_link_report(bp);
+ /* indicate link status only if link status actually changed */
+ if (prev_link_status != bp->link_vars.link_status)
+ bnx2x_link_report(bp);
if (IS_E1HMF(bp)) {
int port = BP_PORT(bp);
@@ -2560,7 +2615,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
return rc;
}
-static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
static void bnx2x_set_rx_mode(struct net_device *dev);
@@ -2696,12 +2750,6 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
{
struct eth_spe *spe;
- DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
- "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
- (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
- (void *)bp->spq_prod_bd - (void *)bp->spq), command,
- HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
-
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
return -EIO;
@@ -2720,8 +2768,8 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
/* CID needs port number to be encoded int it */
spe->hdr.conn_and_cmd_data =
- cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
- HW_CID(bp, cid)));
+ cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
+ HW_CID(bp, cid));
spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
if (common)
spe->hdr.type |=
@@ -2732,6 +2780,13 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
bp->spq_left--;
+ DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
+ "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
+ bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
+ (u32)(U64_LO(bp->spq_mapping) +
+ (void *)bp->spq_prod_bd - (void *)bp->spq), command,
+ HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
+
bnx2x_sp_prod_update(bp);
spin_unlock_bh(&bp->spq_lock);
return 0;
@@ -2740,12 +2795,11 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
- u32 i, j, val;
+ u32 j, val;
int rc = 0;
might_sleep();
- i = 100;
- for (j = 0; j < i*10; j++) {
+ for (j = 0; j < 1000; j++) {
val = (1UL << 31);
REG_WR(bp, GRCBASE_MCP + 0x9c, val);
val = REG_RD(bp, GRCBASE_MCP + 0x9c);
@@ -2765,9 +2819,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp)
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
- u32 val = 0;
-
- REG_WR(bp, GRCBASE_MCP + 0x9c, val);
+ REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
@@ -2823,7 +2875,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
aeu_mask, asserted);
- aeu_mask &= ~(asserted & 0xff);
+ aeu_mask &= ~(asserted & 0x3ff);
DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
REG_WR(bp, aeu_addr, aeu_mask);
@@ -2910,8 +2962,9 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
bp->link_params.ext_phy_config);
/* log the failure */
- netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
- "Please contact Dell Support for assistance.\n");
+ netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
+ " the driver to shutdown the card to prevent permanent"
+ " damage. Please contact OEM Support for assistance\n");
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3104,10 +3157,311 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
}
}
-static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
+
+
+#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
+#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
+#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
+#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
+#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
+#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_done(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ val &= ~(1 << RESET_DONE_FLAG_SHIFT);
+ REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ val |= (1 << RESET_DONE_FLAG_SHIFT);
+ REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+ return (val & RESET_DONE_FLAG_MASK) ? false : true;
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
+{
+ u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+
+ DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+ val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
+ REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+{
+ u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+
+ DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+ val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
+ REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+ barrier();
+ mmiowb();
+
+ return val1;
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
+{
+ return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
+}
+
+static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
+}
+
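The helpers above keep two pieces of chip-global state in one MISC scratch register: bits 0-15 hold a load counter (how many functions currently have the driver loaded) and bit 16 is the reset-in-progress flag, so RESET_DONE_FLAG_MASK is simply everything above the counter. A standalone model of that packing, reusing the same masks, to make the inc/dec arithmetic explicit:

#include <stdio.h>
#include <stdint.h>

#define LOAD_COUNTER_BITS       16
#define LOAD_COUNTER_MASK       (((uint32_t)1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)

static uint32_t gen_reg;        /* stands in for BNX2X_MISC_GEN_REG */

static void inc_load_cnt(void)
{
        uint32_t cnt = ((gen_reg & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;

        gen_reg = (gen_reg & RESET_DONE_FLAG_MASK) | cnt;
}

static uint32_t dec_load_cnt(void)
{
        uint32_t cnt = ((gen_reg & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;

        gen_reg = (gen_reg & RESET_DONE_FLAG_MASK) | cnt;
        return cnt;
}

int main(void)
{
        gen_reg = (uint32_t)1 << LOAD_COUNTER_BITS;     /* flag set, count 0 */
        inc_load_cnt();
        inc_load_cnt();
        printf("after two loads: 0x%08x\n", (unsigned)gen_reg); /* flag kept, count 2 */
        printf("functions left after one unload: %u\n", (unsigned)dec_load_cnt());
        return 0;
}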
+static inline void _print_next_block(int idx, const char *blk)
+{
+ if (idx)
+ pr_cont(", ");
+ pr_cont("%s", blk);
+}
+
+static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+ _print_next_block(par_num++, "BRB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+ _print_next_block(par_num++, "PARSER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+ _print_next_block(par_num++, "TSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+ _print_next_block(par_num++, "SEARCHER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+ _print_next_block(par_num++, "TSEMI");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+ _print_next_block(par_num++, "PBCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
+ _print_next_block(par_num++, "QM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
+ _print_next_block(par_num++, "XSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
+ _print_next_block(par_num++, "XSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
+ _print_next_block(par_num++, "DOORBELLQ");
+ break;
+ case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
+ _print_next_block(par_num++, "VAUX PCI CORE");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
+ _print_next_block(par_num++, "DEBUG");
+ break;
+ case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
+ _print_next_block(par_num++, "USDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
+ _print_next_block(par_num++, "USEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
+ _print_next_block(par_num++, "UPB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
+ _print_next_block(par_num++, "CSDM");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+ _print_next_block(par_num++, "CSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+ _print_next_block(par_num++, "PXP");
+ break;
+ case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+ _print_next_block(par_num++,
+ "PXPPCICLOCKCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+ _print_next_block(par_num++, "CFC");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+ _print_next_block(par_num++, "CDU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+ _print_next_block(par_num++, "IGU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+ _print_next_block(par_num++, "MISC");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
+ _print_next_block(par_num++, "MCP ROM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
+ _print_next_block(par_num++, "MCP UMP RX");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
+ _print_next_block(par_num++, "MCP UMP TX");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+ _print_next_block(par_num++, "MCP SCPAD");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
+ u32 sig2, u32 sig3)
+{
+ if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
+ (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
+ int par_num = 0;
+ DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
+ "[0]:0x%08x [1]:0x%08x "
+ "[2]:0x%08x [3]:0x%08x\n",
+ sig0 & HW_PRTY_ASSERT_SET_0,
+ sig1 & HW_PRTY_ASSERT_SET_1,
+ sig2 & HW_PRTY_ASSERT_SET_2,
+ sig3 & HW_PRTY_ASSERT_SET_3);
+ printk(KERN_ERR"%s: Parity errors detected in blocks: ",
+ bp->dev->name);
+ par_num = bnx2x_print_blocks_with_parity0(
+ sig0 & HW_PRTY_ASSERT_SET_0, par_num);
+ par_num = bnx2x_print_blocks_with_parity1(
+ sig1 & HW_PRTY_ASSERT_SET_1, par_num);
+ par_num = bnx2x_print_blocks_with_parity2(
+ sig2 & HW_PRTY_ASSERT_SET_2, par_num);
+ par_num = bnx2x_print_blocks_with_parity3(
+ sig3 & HW_PRTY_ASSERT_SET_3, par_num);
+ printk("\n");
+ return true;
+ } else
+ return false;
+}
+
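bnx2x_parity_attn() above threads a running count (par_num) through the four per-register printers so that _print_next_block() can decide whether to prefix a comma, yielding one comma-separated list of block names no matter how many status registers contributed. A tiny standalone model of that threading; unlike the driver, which switches on specific AEU bit values, the model just indexes a name table:

#include <stdio.h>

/* Print a separator before every block except the first one seen. */
static void print_next_block(int idx, const char *blk)
{
        if (idx)
                printf(", ");
        printf("%s", blk);
}

static int print_set(unsigned int sig, const char *const names[], int par_num)
{
        int i;

        for (i = 0; sig; i++) {
                unsigned int bit = 1u << i;

                if (sig & bit) {
                        print_next_block(par_num++, names[i]);
                        sig &= ~bit;
                }
        }
        return par_num;
}

int main(void)
{
        const char *const set0[] = { "BRB", "PARSER", "TSDM" };
        const char *const set1[] = { "QM", "XSDM" };
        int par_num = 0;

        printf("Parity errors detected in blocks: ");
        par_num = print_set(0x5, set0, par_num);        /* BRB and TSDM */
        par_num = print_set(0x2, set1, par_num);        /* XSDM */
        printf("\n");
        return 0;
}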
+static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
{
struct attn_route attn;
- struct attn_route group_mask;
+ int port = BP_PORT(bp);
+
+ attn.sig[0] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
+ port*4);
+ attn.sig[1] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
+ port*4);
+ attn.sig[2] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
+ port*4);
+ attn.sig[3] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
+ port*4);
+
+ return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
+ attn.sig[3]);
+}
+
+static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+{
+ struct attn_route attn, *group_mask;
int port = BP_PORT(bp);
int index;
u32 reg_addr;
@@ -3118,6 +3472,19 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
try to handle this event */
bnx2x_acquire_alr(bp);
+ if (bnx2x_chk_parity_attn(bp)) {
+ bp->recovery_state = BNX2X_RECOVERY_INIT;
+ bnx2x_set_reset_in_progress(bp);
+ schedule_delayed_work(&bp->reset_task, 0);
+ /* Disable HW interrupts */
+ bnx2x_int_disable(bp);
+ bnx2x_release_alr(bp);
+ /* In case of parity errors don't handle attentions so that
+ * other function would "see" parity errors.
+ */
+ return;
+ }
+
attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
@@ -3127,28 +3494,20 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
if (deasserted & (1 << index)) {
- group_mask = bp->attn_group[index];
+ group_mask = &bp->attn_group[index];
DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
- index, group_mask.sig[0], group_mask.sig[1],
- group_mask.sig[2], group_mask.sig[3]);
+ index, group_mask->sig[0], group_mask->sig[1],
+ group_mask->sig[2], group_mask->sig[3]);
bnx2x_attn_int_deasserted3(bp,
- attn.sig[3] & group_mask.sig[3]);
+ attn.sig[3] & group_mask->sig[3]);
bnx2x_attn_int_deasserted1(bp,
- attn.sig[1] & group_mask.sig[1]);
+ attn.sig[1] & group_mask->sig[1]);
bnx2x_attn_int_deasserted2(bp,
- attn.sig[2] & group_mask.sig[2]);
+ attn.sig[2] & group_mask->sig[2]);
bnx2x_attn_int_deasserted0(bp,
- attn.sig[0] & group_mask.sig[0]);
-
- if ((attn.sig[0] & group_mask.sig[0] &
- HW_PRTY_ASSERT_SET_0) ||
- (attn.sig[1] & group_mask.sig[1] &
- HW_PRTY_ASSERT_SET_1) ||
- (attn.sig[2] & group_mask.sig[2] &
- HW_PRTY_ASSERT_SET_2))
- BNX2X_ERR("FATAL HW block parity attention\n");
+ attn.sig[0] & group_mask->sig[0]);
}
}
@@ -3172,7 +3531,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
aeu_mask, deasserted);
- aeu_mask |= (deasserted & 0xff);
+ aeu_mask |= (deasserted & 0x3ff);
DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
REG_WR(bp, reg_addr, aeu_mask);
@@ -3216,7 +3575,6 @@ static void bnx2x_sp_task(struct work_struct *work)
struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
u16 status;
-
/* Return here if interrupt is disabled */
if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
@@ -3227,11 +3585,23 @@ static void bnx2x_sp_task(struct work_struct *work)
/* if (status == 0) */
/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
- DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
+ DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
/* HW attentions */
- if (status & 0x1)
+ if (status & 0x1) {
bnx2x_attn_int(bp);
+ status &= ~0x1;
+ }
+
+ /* CStorm events: STAT_QUERY */
+ if (status & 0x2) {
+ DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
+ status &= ~0x2;
+ }
+
+ if (unlikely(status))
+ DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+ status);
bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
IGU_INT_NOP, 1);
@@ -3243,7 +3613,6 @@ static void bnx2x_sp_task(struct work_struct *work)
IGU_INT_NOP, 1);
bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
IGU_INT_ENABLE, 1);
-
}
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -3947,7 +4316,6 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
u32 lo;
u32 hi;
} diff;
- u32 nig_timer_max;
if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
bnx2x_bmac_stats_update(bp);
@@ -3978,10 +4346,14 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
pstats->host_port_stats_start = ++pstats->host_port_stats_end;
- nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
- if (nig_timer_max != estats->nig_timer_max) {
- estats->nig_timer_max = nig_timer_max;
- BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
+ if (!BP_NOMCP(bp)) {
+ u32 nig_timer_max =
+ SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
+ if (nig_timer_max != estats->nig_timer_max) {
+ estats->nig_timer_max = nig_timer_max;
+ BNX2X_ERR("NIG timer max (%u)\n",
+ estats->nig_timer_max);
+ }
}
return 0;
@@ -4025,21 +4397,21 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bp->stats_counter) {
DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
- " xstorm counter (%d) != stats_counter (%d)\n",
+ " xstorm counter (0x%x) != stats_counter (0x%x)\n",
i, xclient->stats_counter, bp->stats_counter);
return -1;
}
if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bp->stats_counter) {
DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
- " tstorm counter (%d) != stats_counter (%d)\n",
+ " tstorm counter (0x%x) != stats_counter (0x%x)\n",
i, tclient->stats_counter, bp->stats_counter);
return -2;
}
if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
bp->stats_counter) {
DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
- " ustorm counter (%d) != stats_counter (%d)\n",
+ " ustorm counter (0x%x) != stats_counter (0x%x)\n",
i, uclient->stats_counter, bp->stats_counter);
return -4;
}
@@ -4059,6 +4431,21 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
qstats->total_bytes_received_lo,
le32_to_cpu(tclient->rcv_unicast_bytes.lo));
+ SUB_64(qstats->total_bytes_received_hi,
+ le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
+ qstats->total_bytes_received_lo,
+ le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
+
+ SUB_64(qstats->total_bytes_received_hi,
+ le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
+ qstats->total_bytes_received_lo,
+ le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
+
+ SUB_64(qstats->total_bytes_received_hi,
+ le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
+ qstats->total_bytes_received_lo,
+ le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
+
qstats->valid_bytes_received_hi =
qstats->total_bytes_received_hi;
qstats->valid_bytes_received_lo =
@@ -4307,47 +4694,43 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bnx2x_drv_stats_update(bp);
if (netif_msg_timer(bp)) {
- struct bnx2x_fastpath *fp0_rx = bp->fp;
- struct bnx2x_fastpath *fp0_tx = bp->fp;
- struct tstorm_per_client_stats *old_tclient =
- &bp->fp->old_tclient;
- struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
struct bnx2x_eth_stats *estats = &bp->eth_stats;
- struct net_device_stats *nstats = &bp->dev->stats;
int i;
- netdev_printk(KERN_DEBUG, bp->dev, "\n");
- printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
- " tx pkt (%lx)\n",
- bnx2x_tx_avail(fp0_tx),
- le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
- printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
- " rx pkt (%lx)\n",
- (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
- fp0_rx->rx_comp_cons),
- le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
- printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
- "brb truncate %u\n",
- (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
- qstats->driver_xoff,
+ printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
+ bp->dev->name,
estats->brb_drop_lo, estats->brb_truncate_lo);
- printk(KERN_DEBUG "tstats: checksum_discard %u "
- "packets_too_big_discard %lu no_buff_discard %lu "
- "mac_discard %u mac_filter_discard %u "
- "xxovrflow_discard %u brb_truncate_discard %u "
- "ttl0_discard %u\n",
- le32_to_cpu(old_tclient->checksum_discard),
- bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
- bnx2x_hilo(&qstats->no_buff_discard_hi),
- estats->mac_discard, estats->mac_filter_discard,
- estats->xxoverflow_discard, estats->brb_truncate_discard,
- le32_to_cpu(old_tclient->ttl0_discard));
for_each_queue(bp, i) {
- printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
- bnx2x_fp(bp, i, tx_pkt),
- bnx2x_fp(bp, i, rx_pkt),
- bnx2x_fp(bp, i, rx_calls));
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+
+ printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
+ " rx pkt(%lu) rx calls(%lu %lu)\n",
+ fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
+ fp->rx_comp_cons),
+ le16_to_cpu(*fp->rx_cons_sb),
+ bnx2x_hilo(&qstats->
+ total_unicast_packets_received_hi),
+ fp->rx_calls, fp->rx_pkt);
+ }
+
+ for_each_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct netdev_queue *txq =
+ netdev_get_tx_queue(bp->dev, i);
+
+ printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
+ " tx pkt(%lu) tx calls (%lu)"
+ " %s (Xoff events %u)\n",
+ fp->name, bnx2x_tx_avail(fp),
+ le16_to_cpu(*fp->tx_cons_sb),
+ bnx2x_hilo(&qstats->
+ total_unicast_packets_transmitted_hi),
+ fp->tx_pkt,
+ (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
+ qstats->driver_xoff);
}
}
@@ -4468,6 +4851,9 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
enum bnx2x_stats_state state = bp->stats_state;
+ if (unlikely(bp->panic))
+ return;
+
bnx2x_stats_stm[state][event].action(bp);
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
@@ -4940,9 +5326,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
}
if (fp->tpa_state[i] == BNX2X_TPA_START)
- pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_size, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
rx_buf->skb = NULL;
@@ -4978,7 +5364,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
fp->disable_tpa = 1;
break;
}
- pci_unmap_addr_set((struct sw_rx_bd *)
+ dma_unmap_addr_set((struct sw_rx_bd *)
&bp->fp->tpa_pool[i],
mapping, 0);
fp->tpa_state[i] = BNX2X_TPA_STOP;
@@ -5072,8 +5458,8 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
fp->rx_bd_prod = ring_prod;
/* must not have more available CQEs than BDs */
- fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
- cqe_ring_prod);
+ fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+ cqe_ring_prod);
fp->rx_pkt = fp->rx_calls = 0;
/* Warning!
@@ -5179,8 +5565,8 @@ static void bnx2x_init_context(struct bnx2x *bp)
context->ustorm_st_context.common.flags |=
USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
context->ustorm_st_context.common.sge_buff_size =
- (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
- (u32)0xffff);
+ (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
+ 0xffff);
context->ustorm_st_context.common.sge_page_base_hi =
U64_HI(fp->rx_sge_mapping);
context->ustorm_st_context.common.sge_page_base_lo =
@@ -5369,10 +5755,10 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
u32 offset;
u16 max_agg_size;
- if (is_multi(bp)) {
- tstorm_config.config_flags = MULTI_FLAGS(bp);
+ tstorm_config.config_flags = RSS_FLAGS(bp);
+
+ if (is_multi(bp))
tstorm_config.rss_result_mask = MULTI_MASK;
- }
/* Enable TPA if needed */
if (bp->flags & TPA_ENABLE_FLAG)
@@ -5477,10 +5863,8 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
}
/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
- max_agg_size =
- min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
- SGE_PAGE_SIZE * PAGES_PER_SGE),
- (u32)0xffff);
+ max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
+ SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -5566,7 +5950,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
}
- /* Store it to internal memory */
+ /* Store cmng structures to internal memory */
if (bp->port.pmf)
for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
REG_WR(bp, BAR_XSTRORM_INTMEM +
@@ -5658,8 +6042,8 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
- bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
- &bp->gunzip_mapping);
+ bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+ &bp->gunzip_mapping, GFP_KERNEL);
if (bp->gunzip_buf == NULL)
goto gunzip_nomem1;
@@ -5679,12 +6063,13 @@ gunzip_nomem3:
bp->strm = NULL;
gunzip_nomem2:
- pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
- bp->gunzip_mapping);
+ dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+ bp->gunzip_mapping);
bp->gunzip_buf = NULL;
gunzip_nomem1:
- netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
+ netdev_err(bp->dev, "Cannot allocate firmware buffer for"
+ " un-compression\n");
return -ENOMEM;
}
@@ -5696,8 +6081,8 @@ static void bnx2x_gunzip_end(struct bnx2x *bp)
bp->strm = NULL;
if (bp->gunzip_buf) {
- pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
- bp->gunzip_mapping);
+ dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+ bp->gunzip_mapping);
bp->gunzip_buf = NULL;
}
}
@@ -5735,8 +6120,9 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
if (bp->gunzip_outlen & 0x3)
- netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
- bp->gunzip_outlen);
+ netdev_err(bp->dev, "Firmware decompression error:"
+ " gunzip_outlen (%d) not aligned\n",
+ bp->gunzip_outlen);
bp->gunzip_outlen >>= 2;
zlib_inflateEnd(bp->strm);
@@ -5962,6 +6348,50 @@ static void enable_blocks_attention(struct bnx2x *bp)
REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
}
+static const struct {
+ u32 addr;
+ u32 mask;
+} bnx2x_parity_mask[] = {
+ {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
+ {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
+ {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
+ {HC_REG_HC_PRTY_MASK, 0xffffffff},
+ {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
+ {QM_REG_QM_PRTY_MASK, 0x0},
+ {DORQ_REG_DORQ_PRTY_MASK, 0x0},
+ {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
+ {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
+ {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
+ {CDU_REG_CDU_PRTY_MASK, 0x0},
+ {CFC_REG_CFC_PRTY_MASK, 0x0},
+ {DBG_REG_DBG_PRTY_MASK, 0x0},
+ {DMAE_REG_DMAE_PRTY_MASK, 0x0},
+ {BRB1_REG_BRB1_PRTY_MASK, 0x0},
+ {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
+ {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
+ {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
+ {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
+ {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
+ {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
+ {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
+ {USEM_REG_USEM_PRTY_MASK_0, 0x0},
+ {USEM_REG_USEM_PRTY_MASK_1, 0x0},
+ {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
+ {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
+ {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
+ {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
+};
+
+static void enable_blocks_parity(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
+ REG_WR(bp, bnx2x_parity_mask[i].addr,
+ bnx2x_parity_mask[i].mask);
+}
+
static void bnx2x_reset_common(struct bnx2x *bp)
{
@@ -5992,10 +6422,14 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
+ int is_required;
u32 val;
- u8 port;
- u8 is_required = 0;
+ int port;
+
+ if (BP_NOMCP(bp))
+ return;
+ is_required = 0;
val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
SHARED_HW_CFG_FAN_FAILURE_MASK;
@@ -6034,7 +6468,7 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
/* set to active low mode */
val = REG_RD(bp, MISC_REG_SPIO_INT);
val |= ((1 << MISC_REGISTERS_SPIO_5) <<
- MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
+ MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
REG_WR(bp, MISC_REG_SPIO_INT, val);
/* enable interrupt to signal the IGU */
@@ -6200,10 +6634,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
REG_WR(bp, SRC_REG_SOFT_RST, 1);
- for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
- REG_WR(bp, i, 0xc0cac01a);
- /* TODO: replace with something meaningful */
- }
+ for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
+ REG_WR(bp, i, random32());
bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
@@ -6221,7 +6653,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
if (sizeof(union cdu_context) != 1024)
/* we currently assume that a context is 1024 bytes */
- pr_alert("please adjust the size of cdu_context(%ld)\n",
+ dev_alert(&bp->pdev->dev, "please adjust the size "
+ "of cdu_context(%ld)\n",
(long)sizeof(union cdu_context));
bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
@@ -6305,6 +6738,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
enable_blocks_attention(bp);
+ if (CHIP_PARITY_SUPPORTED(bp))
+ enable_blocks_parity(bp);
if (!BP_NOMCP(bp)) {
bnx2x_acquire_phy_lock(bp);
@@ -6323,7 +6758,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
u32 low, high;
u32 val;
- DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
+ DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
@@ -6342,6 +6777,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif
+
bnx2x_init_block(bp, DQ_BLOCK, init_stage);
bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
@@ -6534,7 +6970,7 @@ static int bnx2x_init_func(struct bnx2x *bp)
u32 addr, val;
int i;
- DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
+ DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
/* set MSI reconfigure capability */
addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
@@ -6692,7 +7128,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
#define BNX2X_PCI_FREE(x, y, size) \
do { \
if (x) { \
- pci_free_consistent(bp->pdev, size, x, y); \
+ dma_free_coherent(&bp->pdev->dev, size, x, y); \
x = NULL; \
y = 0; \
} \
@@ -6773,7 +7209,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
#define BNX2X_PCI_ALLOC(x, y, size) \
do { \
- x = pci_alloc_consistent(bp->pdev, size, y); \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
if (x == NULL) \
goto alloc_mem_err; \
memset(x, 0, size); \
@@ -6906,9 +7342,9 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
if (skb == NULL)
continue;
- pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_size, DMA_FROM_DEVICE);
rx_buf->skb = NULL;
dev_kfree_skb(skb);
@@ -6987,7 +7423,31 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
BNX2X_NUM_QUEUES(bp) + offset);
- if (rc) {
+
+ /*
+ * reconfigure number of tx/rx queues according to available
+ * MSI-X vectors
+ */
+ if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
+ /* vectors available for FP */
+ int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
+
+ DP(NETIF_MSG_IFUP,
+ "Trying to use less MSI-X vectors: %d\n", rc);
+
+ rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
+
+ if (rc) {
+ DP(NETIF_MSG_IFUP,
+ "MSI-X is not attainable rc %d\n", rc);
+ return rc;
+ }
+
+ bp->num_queues = min(bp->num_queues, fp_vec);
+
+ DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
+ bp->num_queues);
+ } else if (rc) {
DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
return rc;
}
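With the pci_enable_msix() semantics this hunk depends on, a positive return value means the full request could not be granted but reports how many vectors are available, so the driver retries with that count and shrinks num_queues to fit. A hedged sketch of the same negotiate-down pattern; example_enable_msix() and EXAMPLE_MIN_VEC (one slowpath plus at least one fastpath vector) are illustrative, not the driver's code:

#include <linux/pci.h>

#define EXAMPLE_MIN_VEC 2       /* slowpath + at least one fastpath queue */

/* Returns the number of fastpath vectors obtained, or a non-zero
 * pci_enable_msix() result when MSI-X cannot be used at all. */
static int example_enable_msix(struct pci_dev *pdev,
                               struct msix_entry *table, int nvec)
{
        int rc = pci_enable_msix(pdev, table, nvec);

        if (rc >= EXAMPLE_MIN_VEC) {
                /* Fewer vectors than requested: retry with what exists. */
                nvec = rc;
                rc = pci_enable_msix(pdev, table, nvec);
        }
        if (rc)
                return rc;
        return nvec - 1;        /* vectors left for fastpath queues */
}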
@@ -7028,10 +7488,11 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
}
i = BNX2X_NUM_QUEUES(bp);
- netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
- bp->msix_table[0].vector,
- 0, bp->msix_table[offset].vector,
- i - 1, bp->msix_table[offset + i - 1].vector);
+ netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
+ " ... fp[%d] %d\n",
+ bp->msix_table[0].vector,
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
return 0;
}
@@ -7409,8 +7870,6 @@ static int bnx2x_set_num_queues(struct bnx2x *bp)
bp->num_queues = 1;
DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
break;
-
- case INT_MODE_MSIX:
default:
/* Set number of queues according to bp->multi_mode value */
bnx2x_set_num_queues_msix(bp);
@@ -7656,6 +8115,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
+ bnx2x_inc_load_cnt(bp);
return 0;
@@ -7843,33 +8303,12 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
}
}
-/* must be called with rtnl_lock */
-static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
int port = BP_PORT(bp);
u32 reset_code = 0;
int i, cnt, rc;
-#ifdef BCM_CNIC
- bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
-#endif
- bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
-
- /* Set "drop all" */
- bp->rx_mode = BNX2X_RX_MODE_NONE;
- bnx2x_set_storm_rx_mode(bp);
-
- /* Disable HW interrupts, NAPI and Tx */
- bnx2x_netif_stop(bp, 1);
-
- del_timer_sync(&bp->timer);
- SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
- (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
- /* Release IRQs */
- bnx2x_free_irq(bp, false);
-
/* Wait until tx fastpath tasks complete */
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -8010,6 +8449,70 @@ unload_error:
if (!BP_NOMCP(bp))
bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+}
+
+static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
+{
+ u32 val;
+
+ DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
+
+ if (CHIP_IS_E1(bp)) {
+ int port = BP_PORT(bp);
+ u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+ val = REG_RD(bp, addr);
+ val &= ~(0x300);
+ REG_WR(bp, addr, val);
+ } else if (CHIP_IS_E1H(bp)) {
+ val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
+ val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
+ MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
+ }
+}
+
+/* must be called with rtnl_lock */
+static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+{
+ int i;
+
+ if (bp->state == BNX2X_STATE_CLOSED) {
+ /* Interface has been removed - nothing to recover */
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+ bp->is_leader = 0;
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
+ smp_wmb();
+
+ return -EINVAL;
+ }
+
+#ifdef BCM_CNIC
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+
+ /* Set "drop all" */
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+ bnx2x_set_storm_rx_mode(bp);
+
+ /* Disable HW interrupts, NAPI and Tx */
+ bnx2x_netif_stop(bp, 1);
+ netif_carrier_off(bp->dev);
+
+ del_timer_sync(&bp->timer);
+ SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
+ (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp, false);
+
+ /* Cleanup the chip if needed */
+ if (unload_mode != UNLOAD_RECOVERY)
+ bnx2x_chip_cleanup(bp, unload_mode);
+
bp->port.pmf = 0;
/* Free SKBs, SGEs, TPA pool and driver internals */
@@ -8022,19 +8525,448 @@ unload_error:
bp->state = BNX2X_STATE_CLOSED;
- netif_carrier_off(bp->dev);
+ /* The last driver must disable a "close the gate" if there is no
+ * parity attention or "process kill" pending.
+ */
+ if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
+ bnx2x_reset_is_done(bp))
+ bnx2x_disable_close_the_gate(bp);
+
+ /* Reset MCP mail box sequence if there is on going recovery */
+ if (unload_mode == UNLOAD_RECOVERY)
+ bp->fw_seq = 0;
+
+ return 0;
+}
+
+/* Close gates #2, #3 and #4: */
+static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
+{
+ u32 val, addr;
+
+ /* Gates #2 and #4a are closed/opened for "not E1" only */
+ if (!CHIP_IS_E1(bp)) {
+ /* #4 */
+ val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
+ REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
+ close ? (val | 0x1) : (val & (~(u32)1)));
+ /* #2 */
+ val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
+ REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
+ close ? (val | 0x1) : (val & (~(u32)1)));
+ }
+
+ /* #3 */
+ addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ val = REG_RD(bp, addr);
+ REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
+
+ DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
+ close ? "closing" : "opening");
+ mmiowb();
+}
+
+#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
+
+static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ /* Do some magic... */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ *magic_val = val & SHARED_MF_CLP_MAGIC;
+ MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
+}
+
+/* Restore the value of the `magic' bit.
+ *
+ * @param pdev Device handle.
+ * @param magic_val Old value of the `magic' bit.
+ */
+static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
+{
+ /* Restore the `magic' bit value... */
+ /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
+ SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
+ (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ MF_CFG_WR(bp, shared_mf_config.clp_mb,
+ (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
+}
+
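The two helpers above bracket the MCP reset with a save/force/restore of a single "magic" bit in the shared multi-function CLP mailbox: prep records the bit's current value and forces it on, done writes the recorded value back without touching the other bits. A standalone model of that read-modify-write sequence; clp_prep()/clp_done() and the 0x1234 seed value are illustrative:

#include <stdio.h>
#include <stdint.h>

#define CLP_MAGIC 0x80000000u

static uint32_t clp_mb = 0x1234u;       /* stands in for shared_mf_config.clp_mb */

static void clp_prep(uint32_t *saved)
{
        *saved = clp_mb & CLP_MAGIC;    /* remember the old bit ... */
        clp_mb |= CLP_MAGIC;            /* ... and force it on */
}

static void clp_done(uint32_t saved)
{
        clp_mb = (clp_mb & ~CLP_MAGIC) | saved; /* put the old bit back */
}

int main(void)
{
        uint32_t saved;

        clp_prep(&saved);
        printf("during reset:  0x%08x\n", (unsigned)clp_mb);
        clp_done(saved);
        printf("after restore: 0x%08x\n", (unsigned)clp_mb);
        return 0;
}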
+/* Prepares for MCP reset: takes care of CLP configurations.
+ *
+ * @param bp
+ * @param magic_val Old value of 'magic' bit.
+ */
+static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ u32 shmem;
+ u32 validity_offset;
+
+ DP(NETIF_MSG_HW, "Starting\n");
+
+ /* Set `magic' bit in order to save MF config */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_clp_reset_prep(bp, magic_val);
+
+ /* Get shmem offset */
+ shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ validity_offset = offsetof(struct shmem_region, validity_map[0]);
+
+ /* Clear validity map flags */
+ if (shmem > 0)
+ REG_WR(bp, shmem + validity_offset, 0);
+}
+
+#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
+#define MCP_ONE_TIMEOUT 100 /* 100 ms */
+
+/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
+ * depending on the HW type.
+ *
+ * @param bp
+ */
+static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
+{
+ /* special handling for emulation and FPGA,
+ wait 10 times longer */
+ if (CHIP_REV_IS_SLOW(bp))
+ msleep(MCP_ONE_TIMEOUT*10);
+ else
+ msleep(MCP_ONE_TIMEOUT);
+}
+
+static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+{
+ u32 shmem, cnt, validity_offset, val;
+ int rc = 0;
+
+ msleep(100);
+
+ /* Get shmem offset */
+ shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ if (shmem == 0) {
+ BNX2X_ERR("Shmem 0 return failure\n");
+ rc = -ENOTTY;
+ goto exit_lbl;
+ }
+
+ validity_offset = offsetof(struct shmem_region, validity_map[0]);
+
+ /* Wait for MCP to come up */
+ for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
+ /* TBD: its best to check validity map of last port.
+ * currently checks on port 0.
+ */
+ val = REG_RD(bp, shmem + validity_offset);
+ DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
+ shmem + validity_offset, val);
+
+ /* check that shared memory is valid. */
+ if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+ == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+ break;
+
+ bnx2x_mcp_wait_one(bp);
+ }
+
+ DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
+
+ /* Check that shared memory is valid. This indicates that MCP is up. */
+ if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
+ (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
+ BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
+ rc = -ENOTTY;
+ goto exit_lbl;
+ }
+
+exit_lbl:
+ /* Restore the `magic' bit value */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_clp_reset_done(bp, magic_val);
+
+ return rc;
+}
+
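bnx2x_reset_mcp_comp() above polls the shmem validity word until both the DEV_INFO and MB flags come back, sleeping MCP_ONE_TIMEOUT between reads and giving up after MCP_TIMEOUT worth of attempts. A minimal userspace model of that bounded polling loop; firmware_ready() and its fourth-poll success are stand-ins for the shmem validity read:

#include <stdio.h>

#define ONE_TIMEOUT_MS   100
#define TOTAL_TIMEOUT_MS 5000

static int polls;

/* Pretend the firmware becomes ready on the fourth poll. */
static int firmware_ready(void)
{
        return ++polls >= 4;
}

static int wait_for_firmware(void)
{
        unsigned int cnt;

        for (cnt = 0; cnt < TOTAL_TIMEOUT_MS / ONE_TIMEOUT_MS; cnt++) {
                if (firmware_ready())
                        return 0;
                /* the driver sleeps ONE_TIMEOUT_MS here (10x on emulation) */
        }
        return -1;      /* the patch returns -ENOTTY on timeout */
}

int main(void)
{
        printf("wait_for_firmware() = %d after %d polls\n",
               wait_for_firmware(), polls);
        return 0;
}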
+static void bnx2x_pxp_prep(struct bnx2x *bp)
+{
+ if (!CHIP_IS_E1(bp)) {
+ REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
+ REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
+ REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
+ mmiowb();
+ }
+}
+
+/*
+ * Reset the whole chip except for:
+ * - PCIE core
+ * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
+ * one reset bit)
+ * - IGU
+ * - MISC (including AEU)
+ * - GRC
+ * - RBCN, RBCP
+ */
+static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
+{
+ u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
+
+ not_reset_mask1 =
+ MISC_REGISTERS_RESET_REG_1_RST_HC |
+ MISC_REGISTERS_RESET_REG_1_RST_PXPV |
+ MISC_REGISTERS_RESET_REG_1_RST_PXP;
+
+ not_reset_mask2 =
+ MISC_REGISTERS_RESET_REG_2_RST_MDIO |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_RBCN |
+ MISC_REGISTERS_RESET_REG_2_RST_GRC |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
+
+ reset_mask1 = 0xffffffff;
+
+ if (CHIP_IS_E1(bp))
+ reset_mask2 = 0xffff;
+ else
+ reset_mask2 = 0x1ffff;
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ reset_mask1 & (~not_reset_mask1));
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ reset_mask2 & (~not_reset_mask2));
+
+ barrier();
+ mmiowb();
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
+ mmiowb();
+}
+
+static int bnx2x_process_kill(struct bnx2x *bp)
+{
+ int cnt = 1000;
+ u32 val = 0;
+ u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
+
+
+ /* Empty the Tetris buffer, wait for 1s */
+ do {
+ sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
+ blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
+ port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
+ port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
+ pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
+ if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
+ ((port_is_idle_0 & 0x1) == 0x1) &&
+ ((port_is_idle_1 & 0x1) == 0x1) &&
+ (pgl_exp_rom2 == 0xffffffff))
+ break;
+ msleep(1);
+ } while (cnt-- > 0);
+
+ if (cnt <= 0) {
+ DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
+ " are still"
+ " outstanding read requests after 1s!\n");
+ DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
+ " port_is_idle_0=0x%08x,"
+ " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
+ sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
+ pgl_exp_rom2);
+ return -EAGAIN;
+ }
+
+ barrier();
+
+ /* Close gates #2, #3 and #4 */
+ bnx2x_set_234_gates(bp, true);
+
+ /* TBD: Indicate that "process kill" is in progress to MCP */
+
+ /* Clear "unprepared" bit */
+ REG_WR(bp, MISC_REG_UNPREPARED, 0);
+ barrier();
+
+ /* Make sure all is written to the chip before the reset */
+ mmiowb();
+
+ /* Wait for 1ms to empty GLUE and PCI-E core queues,
+ * PSWHST, GRC and PSWRD Tetris buffer.
+ */
+ msleep(1);
+
+ /* Prepare to chip reset: */
+ /* MCP */
+ bnx2x_reset_mcp_prep(bp, &val);
+
+ /* PXP */
+ bnx2x_pxp_prep(bp);
+ barrier();
+
+ /* reset the chip */
+ bnx2x_process_kill_chip_reset(bp);
+ barrier();
+
+ /* Recover after reset: */
+ /* MCP */
+ if (bnx2x_reset_mcp_comp(bp, val))
+ return -EAGAIN;
+
+ /* PXP */
+ bnx2x_pxp_prep(bp);
+
+ /* Open the gates #2, #3 and #4 */
+ bnx2x_set_234_gates(bp, false);
+
+ /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
+ * reset state, re-enable attentions. */
return 0;
}
+static int bnx2x_leader_reset(struct bnx2x *bp)
+{
+ int rc = 0;
+ /* Try to recover after the failure */
+ if (bnx2x_process_kill(bp)) {
+ printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
+ bp->dev->name);
+ rc = -EAGAIN;
+ goto exit_leader_reset;
+ }
+
+ /* Clear "reset is in progress" bit and update the driver state */
+ bnx2x_set_reset_done(bp);
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+
+exit_leader_reset:
+ bp->is_leader = 0;
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
+ smp_wmb();
+ return rc;
+}
+
+static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
+
+/* Assumption: runs under rtnl lock. This together with the fact
+ * that it's called only from bnx2x_reset_task() ensure that it
+ * will never be called when netif_running(bp->dev) is false.
+ */
+static void bnx2x_parity_recover(struct bnx2x *bp)
+{
+ DP(NETIF_MSG_HW, "Handling parity\n");
+ while (1) {
+ switch (bp->recovery_state) {
+ case BNX2X_RECOVERY_INIT:
+ DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
+ /* Try to get a LEADER_LOCK HW lock */
+ if (bnx2x_trylock_hw_lock(bp,
+ HW_LOCK_RESOURCE_RESERVED_08))
+ bp->is_leader = 1;
+
+ /* Stop the driver */
+ /* If interface has been removed - break */
+ if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
+ return;
+
+ bp->recovery_state = BNX2X_RECOVERY_WAIT;
+ /* Ensure "is_leader" and "recovery_state"
+ * update values are seen on other CPUs
+ */
+ smp_wmb();
+ break;
+
+ case BNX2X_RECOVERY_WAIT:
+ DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
+ if (bp->is_leader) {
+ u32 load_counter = bnx2x_get_load_cnt(bp);
+ if (load_counter) {
+ /* Wait until all other functions go
+ * down.
+ */
+ schedule_delayed_work(&bp->reset_task,
+ HZ/10);
+ return;
+ } else {
+ /* If all other functions have gone down -
+ * try to bring the chip back to
+ * normal. In any case it's an exit
+ * point for a leader.
+ */
+ if (bnx2x_leader_reset(bp) ||
+ bnx2x_nic_load(bp, LOAD_NORMAL)) {
+ printk(KERN_ERR"%s: Recovery "
+ "has failed. Power cycle is "
+ "needed.\n", bp->dev->name);
+ /* Disconnect this device */
+ netif_device_detach(bp->dev);
+ /* Block ifup for all function
+ * of this ASIC until
+ * "process kill" or power
+ * cycle.
+ */
+ bnx2x_set_reset_in_progress(bp);
+ /* Shut down the power */
+ bnx2x_set_power_state(bp,
+ PCI_D3hot);
+ return;
+ }
+
+ return;
+ }
+ } else { /* non-leader */
+ if (!bnx2x_reset_is_done(bp)) {
+ /* Try to get a LEADER_LOCK HW lock, since
+ * the former leader may have been unloaded
+ * by the user or may have released
+ * leadership for another reason.
+ */
+ if (bnx2x_trylock_hw_lock(bp,
+ HW_LOCK_RESOURCE_RESERVED_08)) {
+ /* I'm a leader now! Restart a
+ * switch case.
+ */
+ bp->is_leader = 1;
+ break;
+ }
+
+ schedule_delayed_work(&bp->reset_task,
+ HZ/10);
+ return;
+
+ } else { /* A leader has completed
+ * the "process kill". It's an exit
+ * point for a non-leader.
+ */
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+ bp->recovery_state =
+ BNX2X_RECOVERY_DONE;
+ smp_wmb();
+ return;
+ }
+ }
+ default:
+ return;
+ }
+ }
+}
+
+/* bnx2x_nic_unload() flushes the bnx2x_wq, so the reset task is
+ * scheduled on a generic workqueue in order to prevent a deadlock.
+ */
static void bnx2x_reset_task(struct work_struct *work)
{
- struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
+ struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
#ifdef BNX2X_STOP_ON_ERROR
BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
" so reset not done to allow debug dump,\n"
- " you will need to reboot when done\n");
+ KERN_ERR " you will need to reboot when done\n");
return;
#endif
@@ -8043,8 +8975,12 @@ static void bnx2x_reset_task(struct work_struct *work)
if (!netif_running(bp->dev))
goto reset_task_exit;
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- bnx2x_nic_load(bp, LOAD_NORMAL);
+ if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
+ bnx2x_parity_recover(bp);
+ else {
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+ }
reset_task_exit:
rtnl_unlock();
@@ -8264,7 +9200,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
- BNX2X_ERR("BAD MCP validity signature\n");
+ BNX2X_ERROR("BAD MCP validity signature\n");
bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
@@ -8288,8 +9224,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
if (val < BNX2X_BC_VER) {
/* for now only warn
* later we might need to enforce this */
- BNX2X_ERR("This driver needs bc_ver %X but found %X,"
- " please upgrade BC\n", BNX2X_BC_VER, val);
+ BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
+ "please upgrade BC\n", BNX2X_BC_VER, val);
}
bp->link_params.feature_config_flags |=
(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
@@ -8310,7 +9246,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
- pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
+ dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
+ val, val2, val3, val4);
}
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
@@ -8588,11 +9525,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_10baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8604,11 +9541,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_10baseT_Half |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8619,11 +9556,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_100baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8635,11 +9572,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_100baseT_Half |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8650,11 +9587,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_1000baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8665,11 +9602,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_2500baseX_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8682,19 +9619,19 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_10000baseT_Full |
ADVERTISED_FIBRE);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
default:
- BNX2X_ERR("NVRAM config error. "
- "BAD link speed link_config 0x%x\n",
- bp->port.link_config);
+ BNX2X_ERROR("NVRAM config error. "
+ "BAD link speed link_config 0x%x\n",
+ bp->port.link_config);
bp->link_params.req_line_speed = SPEED_AUTO_NEG;
bp->port.advertising = bp->port.supported;
break;
@@ -8823,7 +9760,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->e1hov = 0;
bp->e1hmf = 0;
- if (CHIP_IS_E1H(bp)) {
+ if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
bp->mf_config =
SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
@@ -8844,14 +9781,14 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
"(0x%04x)\n",
func, bp->e1hov, bp->e1hov);
} else {
- BNX2X_ERR("!!! No valid E1HOV for func %d,"
- " aborting\n", func);
+ BNX2X_ERROR("No valid E1HOV for func %d,"
+ " aborting\n", func);
rc = -EPERM;
}
} else {
if (BP_E1HVN(bp)) {
- BNX2X_ERR("!!! VN %d in single function mode,"
- " aborting\n", BP_E1HVN(bp));
+ BNX2X_ERROR("VN %d in single function mode,"
+ " aborting\n", BP_E1HVN(bp));
rc = -EPERM;
}
}
@@ -8887,7 +9824,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
if (BP_NOMCP(bp)) {
/* only supposed to happen on emulation/FPGA */
- BNX2X_ERR("warning random MAC workaround active\n");
+ BNX2X_ERROR("warning: random MAC workaround active\n");
random_ether_addr(bp->dev->dev_addr);
memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
@@ -8895,6 +9832,70 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
return rc;
}
+static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
+{
+ int cnt, i, block_end, rodi;
+ char vpd_data[BNX2X_VPD_LEN+1];
+ char str_id_reg[VENDOR_ID_LEN+1];
+ char str_id_cap[VENDOR_ID_LEN+1];
+ u8 len;
+
+ cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+ memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
+
+ if (cnt < BNX2X_VPD_LEN)
+ goto out_not_found;
+
+ i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+ PCI_VPD_LRDT_RO_DATA);
+ if (i < 0)
+ goto out_not_found;
+
+
+ block_end = i + PCI_VPD_LRDT_TAG_SIZE +
+ pci_vpd_lrdt_size(&vpd_data[i]);
+
+ i += PCI_VPD_LRDT_TAG_SIZE;
+
+ if (block_end > BNX2X_VPD_LEN)
+ goto out_not_found;
+
+ rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+ PCI_VPD_RO_KEYWORD_MFR_ID);
+ if (rodi < 0)
+ goto out_not_found;
+
+ len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+ if (len != VENDOR_ID_LEN)
+ goto out_not_found;
+
+ rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ /* vendor specific info */
+ snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+ snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
+ if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
+ !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
+
+ rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+ PCI_VPD_RO_KEYWORD_VENDOR0);
+ if (rodi >= 0) {
+ len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+ rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
+ memcpy(bp->fw_ver, &vpd_data[rodi], len);
+ bp->fw_ver[len] = ' ';
+ }
+ }
+ return;
+ }
+out_not_found:
+ return;
+}
+
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
@@ -8912,29 +9913,34 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
#endif
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
- INIT_WORK(&bp->reset_task, bnx2x_reset_task);
+ INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
rc = bnx2x_get_hwinfo(bp);
+ bnx2x_read_fwinfo(bp);
/* need to reset chip if undi was active */
if (!BP_NOMCP(bp))
bnx2x_undi_unload(bp);
if (CHIP_REV_IS_FPGA(bp))
- pr_err("FPGA detected\n");
+ dev_err(&bp->pdev->dev, "FPGA detected\n");
if (BP_NOMCP(bp) && (func == 0))
- pr_err("MCP disabled, must load devices in order!\n");
+ dev_err(&bp->pdev->dev, "MCP disabled, "
+ "must load devices in order!\n");
/* Set multi queue mode */
if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
- pr_err("Multi disabled since int_mode requested is not MSI-X\n");
+ dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
+ "requested is not MSI-X\n");
multi_mode = ETH_RSS_MODE_DISABLED;
}
bp->multi_mode = multi_mode;
+ bp->dev->features |= NETIF_F_GRO;
+
/* Set TPA flags */
if (disable_tpa) {
bp->flags &= ~TPA_ENABLE_FLAG;
@@ -9304,11 +10310,13 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
bnx2x_release_phy_lock(bp);
}
- snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
+ strncpy(info->fw_version, bp->fw_ver, 32);
+ snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+ "bc %d.%d.%d%s%s",
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
- ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
+ ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
strcpy(info->bus_info, pci_name(bp->pdev));
info->n_stats = BNX2X_NUM_STATS;
info->testinfo_len = BNX2X_NUM_TESTS;
@@ -9842,19 +10850,18 @@ static int bnx2x_get_coalesce(struct net_device *dev,
return 0;
}
-#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
struct bnx2x *bp = netdev_priv(dev);
- bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
- if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
- bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
+ bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
+ if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
+ bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
- bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
- if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
- bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
+ bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
+ if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
+ bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
if (netif_running(dev))
bnx2x_update_coalesce(bp);
@@ -9885,6 +10892,11 @@ static int bnx2x_set_ringparam(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
if ((ering->rx_pending > MAX_RX_AVAIL) ||
(ering->tx_pending > MAX_TX_AVAIL) ||
(ering->tx_pending <= MAX_SKB_FRAGS + 4))
@@ -9970,6 +10982,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
int changed = 0;
int rc = 0;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
/* TPA requires Rx CSUM offloading */
if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
if (!disable_tpa) {
@@ -9986,6 +11003,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
changed = 1;
}
+ if (data & ETH_FLAG_RXHASH)
+ dev->features |= NETIF_F_RXHASH;
+ else
+ dev->features &= ~NETIF_F_RXHASH;
+
if (changed && netif_running(dev)) {
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
rc = bnx2x_nic_load(bp, LOAD_NORMAL);
@@ -10006,6 +11028,11 @@ static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
bp->rx_csum = data;
/* Disable TPA, when Rx CSUM is disabled. Otherwise all
@@ -10050,9 +11077,9 @@ static int bnx2x_test_registers(struct bnx2x *bp)
u32 wr_val = 0;
int port = BP_PORT(bp);
static const struct {
- u32 offset0;
- u32 offset1;
- u32 mask;
+ u32 offset0;
+ u32 offset1;
+ u32 mask;
} reg_tbl[] = {
/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
{ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
@@ -10119,15 +11146,19 @@ static int bnx2x_test_registers(struct bnx2x *bp)
save_val = REG_RD(bp, offset);
- REG_WR(bp, offset, wr_val);
+ REG_WR(bp, offset, (wr_val & mask));
val = REG_RD(bp, offset);
/* Restore the original register's value */
REG_WR(bp, offset, save_val);
- /* verify that value is as expected value */
- if ((val & mask) != (wr_val & mask))
+ /* verify value is as expected */
+ if ((val & mask) != (wr_val & mask)) {
+ DP(NETIF_MSG_PROBE,
+ "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
+ offset, val, wr_val, mask);
goto test_reg_exit;
+ }
}
}
@@ -10267,8 +11298,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
bd_prod = TX_BD(fp_tx->tx_bd_prod);
tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
- mapping = pci_map_single(bp->pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -10344,6 +11375,9 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
int rc = 0, res;
+ if (BP_NOMCP(bp))
+ return rc;
+
if (!netif_running(bp->dev))
return BNX2X_LOOPBACK_FAILED;
@@ -10391,6 +11425,9 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
int i, rc;
u32 magic, crc;
+ if (BP_NOMCP(bp))
+ return 0;
+
rc = bnx2x_nvram_read(bp, 0, data, 4);
if (rc) {
DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
@@ -10468,6 +11505,12 @@ static void bnx2x_self_test(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
if (!netif_running(dev))
@@ -10556,7 +11599,11 @@ static const struct {
/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, "[%d]: tx_packets" }
+ 8, "[%d]: tx_ucast_packets" },
+ { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, "[%d]: tx_mcast_packets" },
+ { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, "[%d]: tx_bcast_packets" }
};
static const struct {
@@ -10618,16 +11665,20 @@ static const struct {
{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8, STATS_FLAGS_PORT, "tx_error_bytes" },
{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, STATS_FLAGS_BOTH, "tx_packets" },
+ 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
+ { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
+ { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8, STATS_FLAGS_PORT, "tx_mac_errors" },
{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8, STATS_FLAGS_PORT, "tx_carrier_errors" },
- { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
+/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8, STATS_FLAGS_PORT, "tx_single_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8, STATS_FLAGS_PORT, "tx_multi_collisions" },
-/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
+ { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8, STATS_FLAGS_PORT, "tx_deferred" },
{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8, STATS_FLAGS_PORT, "tx_excess_collisions" },
@@ -10643,11 +11694,11 @@ static const struct {
8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
- { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
+/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
-/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
+ { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
{ STATS_OFFSET32(pause_frames_sent_hi),
8, STATS_FLAGS_PORT, "tx_pause_frames" }
@@ -10664,7 +11715,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
struct bnx2x *bp = netdev_priv(dev);
int i, num_stats;
- switch(stringset) {
+ switch (stringset) {
case ETH_SS_STATS:
if (is_multi(bp)) {
num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
@@ -10893,6 +11944,14 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
break;
case PCI_D3hot:
+ /* If there are other clients above, don't
+ shut down the power */
+ if (atomic_read(&bp->pdev->enable_cnt) != 1)
+ return 0;
+ /* Don't shut down the power for emulation and FPGA */
+ if (CHIP_REV_IS_SLOW(bp))
+ return 0;
+
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= 3;
@@ -11182,6 +12241,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i;
u8 hlen = 0;
__le16 pkt_size = 0;
+ struct ethhdr *eth;
+ u8 mac_type = UNICAST_ADDRESS;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -11205,6 +12266,16 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
+ eth = (struct ethhdr *)skb->data;
+
+ /* set flag according to packet type (UNICAST_ADDRESS is default)*/
+ if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+ if (is_broadcast_ether_addr(eth->h_dest))
+ mac_type = BROADCAST_ADDRESS;
+ else
+ mac_type = MULTICAST_ADDRESS;
+ }
+
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* First, check if we need to linearize the skb (due to FW
restrictions). No need to check fragmentation if page size > 8K
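/*
 * Illustrative sketch (editor's addition, not part of this patch): the
 * mac_type selection above follows the usual Ethernet rules - a
 * destination MAC with the group bit set is multicast, the all-ones
 * address is broadcast, anything else is unicast. A minimal userspace
 * version of that check:
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

enum addr_type { UNICAST, MULTICAST, BROADCAST };

static enum addr_type classify(const uint8_t dest[6])
{
	static const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	if (!memcmp(dest, bcast, 6))
		return BROADCAST;	/* broadcast is a special multicast */
	if (dest[0] & 0x01)		/* I/G bit set => group address */
		return MULTICAST;
	return UNICAST;
}

int main(void)
{
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	const uint8_t ucast[6] = { 0x00, 0x10, 0x18, 0x00, 0x00, 0x01 };

	printf("%d %d\n", classify(mcast), classify(ucast)); /* prints "1 0" */
	return 0;
}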
@@ -11238,8 +12309,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- tx_start_bd->general_data = (UNICAST_ADDRESS <<
- ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
+ tx_start_bd->general_data = (mac_type <<
+ ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
/* header nbd */
tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
@@ -11314,8 +12385,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- mapping = pci_map_single(bp->pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11372,8 +12443,9 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (total_pkt_bd == NULL)
total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
- mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
+ mapping = dma_map_page(&bp->pdev->dev, frag->page,
+ frag->page_offset,
+ frag->size, DMA_TO_DEVICE);
tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11452,6 +12524,40 @@ static int bnx2x_open(struct net_device *dev)
bnx2x_set_power_state(bp, PCI_D0);
+ if (!bnx2x_reset_is_done(bp)) {
+ do {
+ /* Reset the MCP mailbox sequence if there is an
+ * ongoing recovery
+ */
+ bp->fw_seq = 0;
+
+ /* If it's the first function to load and "reset done"
+ * is still not cleared, a recovery may still be needed.
+ * We don't check the attention state here because it may
+ * have already been cleared by a "common" reset, but we
+ * shall proceed with "process kill" anyway.
+ */
+ if ((bnx2x_get_load_cnt(bp) == 0) &&
+ bnx2x_trylock_hw_lock(bp,
+ HW_LOCK_RESOURCE_RESERVED_08) &&
+ (!bnx2x_leader_reset(bp))) {
+ DP(NETIF_MSG_HW, "Recovered in open\n");
+ break;
+ }
+
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
+ printk(KERN_ERR"%s: Recovery flow hasn't been properly"
+ " completed yet. Try again later. If u still see this"
+ " message after a few retries then power cycle is"
+ " required.\n", bp->dev->name);
+
+ return -EAGAIN;
+ } while (0);
+ }
+
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+
return bnx2x_nic_load(bp, LOAD_OPEN);
}
@@ -11462,9 +12568,7 @@ static int bnx2x_close(struct net_device *dev)
/* Unload the driver, release IRQs */
bnx2x_nic_unload(bp, UNLOAD_CLOSE);
- if (atomic_read(&bp->pdev->enable_cnt) == 1)
- if (!CHIP_REV_IS_SLOW(bp))
- bnx2x_set_power_state(bp, PCI_D3hot);
+ bnx2x_set_power_state(bp, PCI_D3hot);
return 0;
}
@@ -11494,21 +12598,21 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
else { /* some multicasts */
if (CHIP_IS_E1(bp)) {
int i, old, offset;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct mac_configuration_cmd *config =
bnx2x_sp(bp, mcast_config);
i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
config->config_table[i].
cam_entry.msb_mac_addr =
- swab16(*(u16 *)&mclist->dmi_addr[0]);
+ swab16(*(u16 *)&ha->addr[0]);
config->config_table[i].
cam_entry.middle_mac_addr =
- swab16(*(u16 *)&mclist->dmi_addr[2]);
+ swab16(*(u16 *)&ha->addr[2]);
config->config_table[i].
cam_entry.lsb_mac_addr =
- swab16(*(u16 *)&mclist->dmi_addr[4]);
+ swab16(*(u16 *)&ha->addr[4]);
config->config_table[i].cam_entry.flags =
cpu_to_le16(port);
config->config_table[i].
@@ -11562,18 +12666,18 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
0);
} else { /* E1H */
/* Accept one or more multicasts */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u32 mc_filter[MC_HASH_SIZE];
u32 crc, bit, regidx;
int i;
memset(mc_filter, 0, 4 * MC_HASH_SIZE);
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
- mclist->dmi_addr);
+ ha->addr);
- crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
+ crc = crc32c_le(0, ha->addr, ETH_ALEN);
bit = (crc >> 24) & 0xff;
regidx = bit >> 5;
bit &= 0x1f;
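/*
 * Illustrative sketch (editor's addition, not part of this patch): the
 * E1H filter above hashes each multicast MAC with crc32c and uses the
 * top byte as an index into a 256-bit table stored as 8 x 32-bit words;
 * regidx selects the word and the low 5 bits select the bit within it.
 * The crc value below is just an example input.
 */
#include <stdio.h>
#include <stdint.h>

#define MC_HASH_SIZE 8

static void mc_filter_set(uint32_t *mc_filter, uint32_t crc)
{
	uint32_t bit = (crc >> 24) & 0xff;	/* bin index 0..255 */
	uint32_t regidx = bit >> 5;		/* which 32-bit word */

	bit &= 0x1f;				/* bit inside that word */
	mc_filter[regidx] |= 1u << bit;
}

int main(void)
{
	uint32_t mc_filter[MC_HASH_SIZE] = { 0 };
	int i;

	mc_filter_set(mc_filter, 0xdeadbeef);
	for (i = 0; i < MC_HASH_SIZE; i++)
		printf("mc_filter[%d] = 0x%08x\n", i, mc_filter[i]);
	return 0;
}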
@@ -11690,6 +12794,11 @@ static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
return -EINVAL;
@@ -11717,7 +12826,7 @@ static void bnx2x_tx_timeout(struct net_device *dev)
bnx2x_panic();
#endif
/* This allows the netif to be shutdown gracefully before resetting */
- schedule_work(&bp->reset_task);
+ schedule_delayed_work(&bp->reset_task, 0);
}
#ifdef BCM_VLAN
@@ -11789,18 +12898,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
rc = pci_enable_device(pdev);
if (rc) {
- pr_err("Cannot enable PCI device, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot enable PCI device, aborting\n");
goto err_out;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- pr_err("Cannot find PCI device base address, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- pr_err("Cannot find second PCI device base address, aborting\n");
+ dev_err(&bp->pdev->dev, "Cannot find second PCI device"
+ " base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
@@ -11808,7 +12920,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
if (atomic_read(&pdev->enable_cnt) == 1) {
rc = pci_request_regions(pdev, DRV_MODULE_NAME);
if (rc) {
- pr_err("Cannot obtain PCI resources, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot obtain PCI resources, aborting\n");
goto err_out_disable;
}
@@ -11818,28 +12931,32 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (bp->pm_cap == 0) {
- pr_err("Cannot find power management capability, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot find power management capability, aborting\n");
rc = -EIO;
goto err_out_release;
}
bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (bp->pcie_cap == 0) {
- pr_err("Cannot find PCI Express capability, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot find PCI Express capability, aborting\n");
rc = -EIO;
goto err_out_release;
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
bp->flags |= USING_DAC_FLAG;
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
- pr_err("pci_set_consistent_dma_mask failed, aborting\n");
+ if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
+ dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
+ " failed, aborting\n");
rc = -EIO;
goto err_out_release;
}
- } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
- pr_err("System does not support DMA, aborting\n");
+ } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+ dev_err(&bp->pdev->dev,
+ "System does not support DMA, aborting\n");
rc = -EIO;
goto err_out_release;
}
@@ -11852,7 +12969,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
bp->regview = pci_ioremap_bar(pdev, 0);
if (!bp->regview) {
- pr_err("Cannot map register space, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot map register space, aborting\n");
rc = -ENOMEM;
goto err_out_release;
}
@@ -11861,7 +12979,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
min_t(u64, BNX2X_DB_SIZE,
pci_resource_len(pdev, 2)));
if (!bp->doorbells) {
- pr_err("Cannot map doorbell space, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbell space, aborting\n");
rc = -ENOMEM;
goto err_out_unmap;
}
@@ -11876,6 +12995,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
+ /* Reset the load counter */
+ bnx2x_clear_load_cnt(bp);
+
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;
@@ -11963,7 +13085,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
offset = be32_to_cpu(sections[i].offset);
len = be32_to_cpu(sections[i].len);
if (offset + len > firmware->size) {
- pr_err("Section %d length is out of bounds\n", i);
+ dev_err(&bp->pdev->dev,
+ "Section %d length is out of bounds\n", i);
return -EINVAL;
}
}
@@ -11975,7 +13098,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
if (be16_to_cpu(ops_offsets[i]) > num_ops) {
- pr_err("Section offset %d is out of bounds\n", i);
+ dev_err(&bp->pdev->dev,
+ "Section offset %d is out of bounds\n", i);
return -EINVAL;
}
}
@@ -11987,7 +13111,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
(fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
(fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
(fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
- pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
+ dev_err(&bp->pdev->dev,
+ "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
fw_ver[0], fw_ver[1], fw_ver[2],
fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
BCM_5710_FW_MINOR_VERSION,
@@ -12022,8 +13147,8 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
for (i = 0, j = 0; i < n/8; i++, j += 2) {
tmp = be32_to_cpu(source[j]);
target[i].op = (tmp >> 24) & 0xff;
- target[i].offset = tmp & 0xffffff;
- target[i].raw_data = be32_to_cpu(source[j+1]);
+ target[i].offset = tmp & 0xffffff;
+ target[i].raw_data = be32_to_cpu(source[j + 1]);
}
}
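/*
 * Illustrative sketch (editor's addition, not part of this patch): the
 * loop in bnx2x_prep_ops() above consumes pairs of big-endian 32-bit
 * words from the firmware blob; the first word packs an opcode in its
 * top byte and a 24-bit offset in the rest, the second word is raw data.
 */
#include <stdio.h>
#include <stdint.h>

struct raw_op {
	uint8_t  op;
	uint32_t offset;
	uint32_t raw_data;
};

static struct raw_op unpack_op(uint32_t w0, uint32_t w1)
{
	struct raw_op r;

	r.op       = (w0 >> 24) & 0xff;
	r.offset   = w0 & 0xffffff;
	r.raw_data = w1;
	return r;
}

int main(void)
{
	/* example words, assumed already converted to host byte order */
	struct raw_op r = unpack_op(0x05123456, 0xcafef00d);

	printf("op=0x%02x offset=0x%06x data=0x%08x\n",
	       r.op, r.offset, r.raw_data);
	return 0;
}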
@@ -12057,20 +13182,24 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
if (CHIP_IS_E1(bp))
fw_file_name = FW_FILE_NAME_E1;
- else
+ else if (CHIP_IS_E1H(bp))
fw_file_name = FW_FILE_NAME_E1H;
+ else {
+ dev_err(dev, "Unsupported chip revision\n");
+ return -EINVAL;
+ }
- pr_info("Loading %s\n", fw_file_name);
+ dev_info(dev, "Loading %s\n", fw_file_name);
rc = request_firmware(&bp->firmware, fw_file_name, dev);
if (rc) {
- pr_err("Can't load firmware file %s\n", fw_file_name);
+ dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
goto request_firmware_exit;
}
rc = bnx2x_check_firmware(bp);
if (rc) {
- pr_err("Corrupt firmware file %s\n", fw_file_name);
+ dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
goto request_firmware_exit;
}
@@ -12129,7 +13258,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
if (!dev) {
- pr_err("Cannot allocate net device\n");
+ dev_err(&pdev->dev, "Cannot allocate net device\n");
return -ENOMEM;
}
@@ -12151,7 +13280,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* Set init arrays */
rc = bnx2x_init_firmware(bp, &pdev->dev);
if (rc) {
- pr_err("Error loading firmware\n");
+ dev_err(&pdev->dev, "Error loading firmware\n");
goto init_one_exit;
}
@@ -12162,11 +13291,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
}
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
- netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
- board_info[ent->driver_data].name,
- (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
- pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
- dev->base_addr, bp->pdev->irq, dev->dev_addr);
+ netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
+ " IRQ %d, ", board_info[ent->driver_data].name,
+ (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+ pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
+ dev->base_addr, bp->pdev->irq);
+ pr_cont("node addr %pM\n", dev->dev_addr);
return 0;
@@ -12194,13 +13324,16 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
struct bnx2x *bp;
if (!dev) {
- pr_err("BAD net device from bnx2x_init_one\n");
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
return;
}
bp = netdev_priv(dev);
unregister_netdev(dev);
+ /* Make sure RESET task is not scheduled before continuing */
+ cancel_delayed_work_sync(&bp->reset_task);
+
kfree(bp->init_ops_offsets);
kfree(bp->init_ops);
kfree(bp->init_data);
@@ -12227,7 +13360,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
struct bnx2x *bp;
if (!dev) {
- pr_err("BAD net device from bnx2x_init_one\n");
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
return -ENODEV;
}
bp = netdev_priv(dev);
@@ -12259,11 +13392,16 @@ static int bnx2x_resume(struct pci_dev *pdev)
int rc;
if (!dev) {
- pr_err("BAD net device from bnx2x_init_one\n");
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
return -ENODEV;
}
bp = netdev_priv(dev);
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
rtnl_lock();
pci_restore_state(pdev);
@@ -12292,6 +13430,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bp->rx_mode = BNX2X_RX_MODE_NONE;
bnx2x_netif_stop(bp, 0);
+ netif_carrier_off(bp->dev);
del_timer_sync(&bp->timer);
bp->stats_state = STATS_STATE_DISABLED;
@@ -12318,8 +13457,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bp->state = BNX2X_STATE_CLOSED;
- netif_carrier_off(bp->dev);
-
return 0;
}
@@ -12430,6 +13567,11 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2x *bp = netdev_priv(dev);
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return;
+ }
+
rtnl_lock();
bnx2x_eeh_recover(bp);
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index 944964e..a1f3bf0 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -766,6 +766,8 @@
#define MCP_REG_MCPR_NVM_SW_ARB 0x86420
#define MCP_REG_MCPR_NVM_WRITE 0x86408
#define MCP_REG_MCPR_SCRATCH 0xa0000
+#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1)
+#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0)
/* [R 32] read first 32 bit after inversion of function 0. mapped as
follows: [0] NIG attention for function0; [1] NIG attention for
function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp;
@@ -1249,6 +1251,8 @@
#define MISC_REG_E1HMF_MODE 0xa5f8
/* [RW 32] Debug only: spare RW register reset by core reset */
#define MISC_REG_GENERIC_CR_0 0xa460
+/* [RW 32] Debug only: spare RW register reset by por reset */
+#define MISC_REG_GENERIC_POR_1 0xa474
/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of
these bits is written as a '1'; the corresponding SPIO bit will turn off
it's drivers and become an input. This is the reset state of all GPIO
@@ -1438,7 +1442,7 @@
(~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
- in this register. addres 0 - timer 1; address - timer 2�address 7 -
+ in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 -
timer 8 */
#define MISC_REG_SW_TIMER_VAL 0xa5c0
/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
@@ -2407,10 +2411,16 @@
/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
this client is waiting for the arbiter. */
#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008
+/* [RW 1] When 1; doorbells are discarded and not passed to doorbell queue
+ block. Should be used to close the gates. */
+#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4
/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
should update accoring to 'hst_discard_doorbells' register when the state
machine is idle */
#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0
+/* [RW 1] When 1; new internal writes arriving to the block are discarded.
+ Should be used to close the gates. */
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8
/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
means this PSWHST is discarding inputs from this client. Each bit should
update accoring to 'hst_discard_internal_writes' register when the state
@@ -4422,11 +4432,21 @@
#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
#define MISC_REGISTERS_GPIO_SET_POS 8
#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
#define MISC_REGISTERS_RESET_REG_1_SET 0x584
#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15)
+#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11)
+#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9)
#define MISC_REGISTERS_RESET_REG_2_SET 0x594
#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8
#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1)
@@ -4454,6 +4474,7 @@
#define HW_LOCK_RESOURCE_GPIO 1
#define HW_LOCK_RESOURCE_MDIO 0
#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
+#define HW_LOCK_RESOURCE_RESERVED_08 8
#define HW_LOCK_RESOURCE_SPIO 2
#define HW_LOCK_RESOURCE_UNDI 5
#define PRS_FLAG_OVERETH_IPV4 1
@@ -4474,6 +4495,10 @@
#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (1<<5)
#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (1<<9)
#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (1<<12)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (1<<28)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (1<<29)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (1<<30)
#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (1<<15)
#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (1<<14)
#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20)
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index 6dd64cf..969ffed 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -37,7 +37,6 @@
static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
{
struct inet6_dev *idev;
- struct inet6_ifaddr *ifa;
if (!dev)
return;
@@ -47,10 +46,12 @@ static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
return;
read_lock_bh(&idev->lock);
- ifa = idev->addr_list;
- if (ifa)
+ if (!list_empty(&idev->addr_list)) {
+ struct inet6_ifaddr *ifa
+ = list_first_entry(&idev->addr_list,
+ struct inet6_ifaddr, if_list);
ipv6_addr_copy(addr, &ifa->addr);
- else
+ } else
ipv6_addr_set(addr, 0, 0, 0, 0);
read_unlock_bh(&idev->lock);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0075514..5e12462 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -59,6 +59,7 @@
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
+#include <linux/netpoll.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
@@ -430,7 +431,18 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
}
skb->priority = 1;
- dev_queue_xmit(skb);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
+ struct netpoll *np = bond->dev->npinfo->netpoll;
+ slave_dev->npinfo = bond->dev->npinfo;
+ np->real_dev = np->dev = skb->dev;
+ slave_dev->priv_flags |= IFF_IN_NETPOLL;
+ netpoll_send_skb(np, skb);
+ slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
+ np->dev = bond->dev;
+ } else
+#endif
+ dev_queue_xmit(skb);
return 0;
}
@@ -762,32 +774,6 @@ static int bond_check_dev_link(struct bonding *bond,
/*----------------------------- Multicast list ------------------------------*/
/*
- * Returns 0 if dmi1 and dmi2 are the same, non-0 otherwise
- */
-static inline int bond_is_dmi_same(const struct dev_mc_list *dmi1,
- const struct dev_mc_list *dmi2)
-{
- return memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0 &&
- dmi1->dmi_addrlen == dmi2->dmi_addrlen;
-}
-
-/*
- * returns dmi entry if found, NULL otherwise
- */
-static struct dev_mc_list *bond_mc_list_find_dmi(struct dev_mc_list *dmi,
- struct dev_mc_list *mc_list)
-{
- struct dev_mc_list *idmi;
-
- for (idmi = mc_list; idmi; idmi = idmi->next) {
- if (bond_is_dmi_same(dmi, idmi))
- return idmi;
- }
-
- return NULL;
-}
-
-/*
* Push the promiscuity flag down to appropriate slaves
*/
static int bond_set_promiscuity(struct bonding *bond, int inc)
@@ -839,18 +825,18 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
* Add a Multicast address to slaves
* according to mode
*/
-static void bond_mc_add(struct bonding *bond, void *addr, int alen)
+static void bond_mc_add(struct bonding *bond, void *addr)
{
if (USES_PRIMARY(bond->params.mode)) {
/* write lock already acquired */
if (bond->curr_active_slave)
- dev_mc_add(bond->curr_active_slave->dev, addr, alen, 0);
+ dev_mc_add(bond->curr_active_slave->dev, addr);
} else {
struct slave *slave;
int i;
bond_for_each_slave(bond, slave, i)
- dev_mc_add(slave->dev, addr, alen, 0);
+ dev_mc_add(slave->dev, addr);
}
}
@@ -858,18 +844,17 @@ static void bond_mc_add(struct bonding *bond, void *addr, int alen)
* Remove a multicast address from slave
* according to mode
*/
-static void bond_mc_delete(struct bonding *bond, void *addr, int alen)
+static void bond_mc_del(struct bonding *bond, void *addr)
{
if (USES_PRIMARY(bond->params.mode)) {
/* write lock already acquired */
if (bond->curr_active_slave)
- dev_mc_delete(bond->curr_active_slave->dev, addr,
- alen, 0);
+ dev_mc_del(bond->curr_active_slave->dev, addr);
} else {
struct slave *slave;
int i;
bond_for_each_slave(bond, slave, i) {
- dev_mc_delete(slave->dev, addr, alen, 0);
+ dev_mc_del(slave->dev, addr);
}
}
}
@@ -896,66 +881,22 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
}
/*
- * Totally destroys the mc_list in bond
- */
-static void bond_mc_list_destroy(struct bonding *bond)
-{
- struct dev_mc_list *dmi;
-
- dmi = bond->mc_list;
- while (dmi) {
- bond->mc_list = dmi->next;
- kfree(dmi);
- dmi = bond->mc_list;
- }
-
- bond->mc_list = NULL;
-}
-
-/*
- * Copy all the Multicast addresses from src to the bonding device dst
- */
-static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
- gfp_t gfp_flag)
-{
- struct dev_mc_list *dmi, *new_dmi;
-
- for (dmi = mc_list; dmi; dmi = dmi->next) {
- new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag);
-
- if (!new_dmi) {
- /* FIXME: Potential memory leak !!! */
- return -ENOMEM;
- }
-
- new_dmi->next = bond->mc_list;
- bond->mc_list = new_dmi;
- new_dmi->dmi_addrlen = dmi->dmi_addrlen;
- memcpy(new_dmi->dmi_addr, dmi->dmi_addr, dmi->dmi_addrlen);
- new_dmi->dmi_users = dmi->dmi_users;
- new_dmi->dmi_gusers = dmi->dmi_gusers;
- }
-
- return 0;
-}
-
-/*
* flush all members of flush->mc_list from device dev->mc_list
*/
static void bond_mc_list_flush(struct net_device *bond_dev,
struct net_device *slave_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
- for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next)
- dev_mc_delete(slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
+ netdev_for_each_mc_addr(ha, bond_dev)
+ dev_mc_del(slave_dev, ha->addr);
if (bond->params.mode == BOND_MODE_8023AD) {
/* del lacpdu mc addr from mc list */
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
- dev_mc_delete(slave_dev, lacpdu_multicast, ETH_ALEN, 0);
+ dev_mc_del(slave_dev, lacpdu_multicast);
}
}
@@ -969,7 +910,7 @@ static void bond_mc_list_flush(struct net_device *bond_dev,
static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
struct slave *old_active)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
if (!USES_PRIMARY(bond->params.mode))
/* nothing to do - mc list is already up-to-date on
@@ -984,9 +925,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
- for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next)
- dev_mc_delete(old_active->dev, dmi->dmi_addr,
- dmi->dmi_addrlen, 0);
+ netdev_for_each_mc_addr(ha, bond->dev)
+ dev_mc_del(old_active->dev, ha->addr);
}
if (new_active) {
@@ -997,9 +937,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
- for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next)
- dev_mc_add(new_active->dev, dmi->dmi_addr,
- dmi->dmi_addrlen, 0);
+ netdev_for_each_mc_addr(ha, bond->dev)
+ dev_mc_add(new_active->dev, ha->addr);
bond_resend_igmp_join_requests(bond);
}
}
@@ -1329,6 +1268,61 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
bond->slave_cnt--;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * You must hold read lock on bond->lock before calling this.
+ */
+static bool slaves_support_netpoll(struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
+ int i = 0;
+ bool ret = true;
+
+ bond_for_each_slave(bond, slave, i) {
+ if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
+ !slave->dev->netdev_ops->ndo_poll_controller)
+ ret = false;
+ }
+ return i != 0 && ret;
+}
+
+static void bond_poll_controller(struct net_device *bond_dev)
+{
+ struct net_device *dev = bond_dev->npinfo->netpoll->real_dev;
+ if (dev != bond_dev)
+ netpoll_poll_dev(dev);
+}
+
+static void bond_netpoll_cleanup(struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
+ const struct net_device_ops *ops;
+ int i;
+
+ read_lock(&bond->lock);
+ bond_dev->npinfo = NULL;
+ bond_for_each_slave(bond, slave, i) {
+ if (slave->dev) {
+ ops = slave->dev->netdev_ops;
+ if (ops->ndo_netpoll_cleanup)
+ ops->ndo_netpoll_cleanup(slave->dev);
+ else
+ slave->dev->npinfo = NULL;
+ }
+ }
+ read_unlock(&bond->lock);
+}
+
+#else
+
+static void bond_netpoll_cleanup(struct net_device *bond_dev)
+{
+}
+
+#endif
+
/*---------------------------------- IOCTL ----------------------------------*/
static int bond_sethwaddr(struct net_device *bond_dev,
@@ -1411,7 +1405,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
struct bonding *bond = netdev_priv(bond_dev);
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct sockaddr addr;
int link_reporting;
int old_features = bond_dev->features;
@@ -1485,14 +1479,27 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_dev->name,
bond_dev->type, slave_dev->type);
- netdev_bonding_change(bond_dev, NETDEV_BONDING_OLDTYPE);
+ res = netdev_bonding_change(bond_dev,
+ NETDEV_PRE_TYPE_CHANGE);
+ res = notifier_to_errno(res);
+ if (res) {
+ pr_err("%s: refused to change device type\n",
+ bond_dev->name);
+ res = -EBUSY;
+ goto err_undo_flags;
+ }
+
+ /* Flush unicast and multicast addresses */
+ dev_uc_flush(bond_dev);
+ dev_mc_flush(bond_dev);
if (slave_dev->type != ARPHRD_ETHER)
bond_setup_by_slave(bond_dev, slave_dev);
else
ether_setup(bond_dev);
- netdev_bonding_change(bond_dev, NETDEV_BONDING_NEWTYPE);
+ netdev_bonding_change(bond_dev,
+ NETDEV_POST_TYPE_CHANGE);
}
} else if (bond_dev->type != slave_dev->type) {
pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
@@ -1593,9 +1600,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
netif_addr_lock_bh(bond_dev);
/* upload master's mc_list to new slave */
- for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next)
- dev_mc_add(slave_dev, dmi->dmi_addr,
- dmi->dmi_addrlen, 0);
+ netdev_for_each_mc_addr(ha, bond_dev)
+ dev_mc_add(slave_dev, ha->addr);
netif_addr_unlock_bh(bond_dev);
}
@@ -1603,7 +1609,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* add lacpdu mc addr to mc list */
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
- dev_mc_add(slave_dev, lacpdu_multicast, ETH_ALEN, 0);
+ dev_mc_add(slave_dev, lacpdu_multicast);
}
bond_add_vlans_on_slave(bond, slave_dev);
@@ -1735,6 +1741,18 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_carrier(bond);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (slaves_support_netpoll(bond_dev)) {
+ bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+ if (bond_dev->npinfo)
+ slave_dev->npinfo = bond_dev->npinfo;
+ } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
+ bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ pr_info("New slave device %s does not support netpoll\n",
+ slave_dev->name);
+ pr_info("Disabling netpoll support for %s\n", bond_dev->name);
+ }
+#endif
read_unlock(&bond->lock);
res = bond_create_slave_symlinks(bond_dev, slave_dev);
@@ -1801,6 +1819,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
return -EINVAL;
}
+ netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE);
write_lock_bh(&bond->lock);
slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -1929,6 +1948,17 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
netdev_set_master(slave_dev, NULL);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ read_lock_bh(&bond->lock);
+ if (slaves_support_netpoll(bond_dev))
+ bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+ read_unlock_bh(&bond->lock);
+ if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
+ slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
+ else
+ slave_dev->npinfo = NULL;
+#endif
+
/* close slave before restoring its mac address */
dev_close(slave_dev);
@@ -3905,10 +3935,24 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
return res;
}
+static bool bond_addr_in_mc_list(unsigned char *addr,
+ struct netdev_hw_addr_list *list,
+ int addrlen)
+{
+ struct netdev_hw_addr *ha;
+
+ netdev_hw_addr_list_for_each(ha, list)
+ if (!memcmp(ha->addr, addr, addrlen))
+ return true;
+
+ return false;
+}
+
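/*
 * Illustrative sketch (editor's addition, not part of this patch): the
 * two loops in bond_set_multicast_list() below compute a set difference
 * between the bond device's current multicast list and the saved copy -
 * new addresses are pushed to the slaves, vanished ones are removed.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

static int in_list(const uint8_t (*list)[ETH_ALEN], int n, const uint8_t *a)
{
	int i;

	for (i = 0; i < n; i++)
		if (!memcmp(list[i], a, ETH_ALEN))
			return 1;
	return 0;
}

int main(void)
{
	uint8_t saved[][ETH_ALEN] = { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } };
	uint8_t cur[][ETH_ALEN]   = { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 } };
	int i;

	for (i = 0; i < 1; i++)		/* addresses to add to the slaves */
		if (!in_list(saved, 1, cur[i]))
			printf("add %02x:...:%02x\n", cur[i][0], cur[i][5]);

	for (i = 0; i < 1; i++)		/* addresses to delete from the slaves */
		if (!in_list(cur, 1, saved[i]))
			printf("del %02x:...:%02x\n", saved[i][0], saved[i][5]);
	return 0;
}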
static void bond_set_multicast_list(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
+ bool found;
/*
* Do promisc before checking multicast_mode
@@ -3943,20 +3987,25 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
bond->flags = bond_dev->flags;
/* looking for addresses to add to slaves' mc list */
- for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
- if (!bond_mc_list_find_dmi(dmi, bond->mc_list))
- bond_mc_add(bond, dmi->dmi_addr, dmi->dmi_addrlen);
+ netdev_for_each_mc_addr(ha, bond_dev) {
+ found = bond_addr_in_mc_list(ha->addr, &bond->mc_list,
+ bond_dev->addr_len);
+ if (!found)
+ bond_mc_add(bond, ha->addr);
}
/* looking for addresses to delete from slaves' list */
- for (dmi = bond->mc_list; dmi; dmi = dmi->next) {
- if (!bond_mc_list_find_dmi(dmi, bond_dev->mc_list))
- bond_mc_delete(bond, dmi->dmi_addr, dmi->dmi_addrlen);
+ netdev_hw_addr_list_for_each(ha, &bond->mc_list) {
+ found = bond_addr_in_mc_list(ha->addr, &bond_dev->mc,
+ bond_dev->addr_len);
+ if (!found)
+ bond_mc_del(bond, ha->addr);
}
/* save master's multicast list */
- bond_mc_list_destroy(bond);
- bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC);
+ __hw_addr_flush(&bond->mc_list);
+ __hw_addr_add_multiple(&bond->mc_list, &bond_dev->mc,
+ bond_dev->addr_len, NETDEV_HW_ADDR_T_MULTICAST);
read_unlock(&bond->lock);
}
@@ -4448,6 +4497,10 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_vlan_rx_register = bond_vlan_rx_register,
.ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_netpoll_cleanup = bond_netpoll_cleanup,
+ .ndo_poll_controller = bond_poll_controller,
+#endif
};
static void bond_destructor(struct net_device *bond_dev)
@@ -4541,6 +4594,8 @@ static void bond_uninit(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ bond_netpoll_cleanup(bond_dev);
+
/* Release the bonded slaves */
bond_release_all(bond_dev);
@@ -4550,9 +4605,7 @@ static void bond_uninit(struct net_device *bond_dev)
bond_remove_proc_entry(bond);
- netif_addr_lock_bh(bond_dev);
- bond_mc_list_destroy(bond);
- netif_addr_unlock_bh(bond_dev);
+ __hw_addr_flush(&bond->mc_list);
}
/*------------------------- Module initialization ---------------------------*/
@@ -4683,13 +4736,13 @@ static int bond_check_params(struct bond_params *params)
}
if (num_grat_arp < 0 || num_grat_arp > 255) {
- pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1 \n",
+ pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1\n",
num_grat_arp);
num_grat_arp = 1;
}
if (num_unsol_na < 0 || num_unsol_na > 255) {
- pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1 \n",
+ pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
num_unsol_na);
num_unsol_na = 1;
}
@@ -4924,6 +4977,8 @@ static int bond_init(struct net_device *bond_dev)
list_add_tail(&bond->bond_list, &bn->dev_list);
bond_prepare_sysfs_group(bond);
+
+ __hw_addr_init(&bond->mc_list);
return 0;
}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 257a7a4..2aa3367 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -202,7 +202,7 @@ struct bonding {
char proc_file_name[IFNAMSIZ];
#endif /* CONFIG_PROC_FS */
struct list_head bond_list;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr_list mc_list;
int (*xmit_hash_policy)(struct sk_buff *, int);
__be32 master_ip;
u16 flags;
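For context only: the dev_mc_list to netdev_hw_addr conversion done to bonding here (and to several drivers further down in this series) always has the same shape. A minimal sketch of the new-style iteration, using a hypothetical driver hook that is not part of this patch:

#include <linux/netdevice.h>

/* Hypothetical rx-mode handler (illustration, not from this patch):
 * walks the multicast list through the netdev_hw_addr API that
 * replaces struct dev_mc_list in this series.
 */
static void example_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev) {
		/* ha->addr replaces dmi->dmi_addr; the address length
		 * now comes from dev->addr_len instead of dmi_addrlen.
		 */
		pr_debug("%s: mc addr %pM\n", dev->name, ha->addr);
	}
}

bond_addr_in_mc_list() above is the same walk with an explicit memcmp() lookup.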
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
new file mode 100644
index 0000000..0b28e01
--- /dev/null
+++ b/drivers/net/caif/Kconfig
@@ -0,0 +1,17 @@
+#
+# CAIF physical drivers
+#
+
+if CAIF
+
+comment "CAIF transport drivers"
+
+config CAIF_TTY
+ tristate "CAIF TTY transport driver"
+ default n
+ ---help---
+ The CAIF TTY transport driver is a Line Discipline (ldisc)
+ identified as N_CAIF. When this ldisc is opened from user space
+ it will redirect the TTY's traffic into the CAIF stack.
+
+endif # CAIF
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
new file mode 100644
index 0000000..52b6d1f
--- /dev/null
+++ b/drivers/net/caif/Makefile
@@ -0,0 +1,12 @@
+ifeq ($(CONFIG_CAIF_DEBUG),y)
+CAIF_DBG_FLAGS := -DDEBUG
+endif
+
+KBUILD_EXTRA_SYMBOLS=net/caif/Module.symvers
+
+ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
+clean-dirs:= .tmp_versions
+clean-files:= Module.symvers modules.order *.cmd *~
+
+# Serial interface
+obj-$(CONFIG_CAIF_TTY) += caif_serial.o
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
new file mode 100644
index 0000000..09257ca
--- /dev/null
+++ b/drivers/net/caif/caif_serial.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/init.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/tty.h>
+#include <linux/file.h>
+#include <linux/if_arp.h>
+#include <net/caif/caif_device.h>
+#include <net/caif/cfcnfg.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sjur Brendeland<sjur.brandeland@stericsson.com>");
+MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_CAIF);
+
+#define SEND_QUEUE_LOW 10
+#define SEND_QUEUE_HIGH 100
+#define CAIF_SENDING 1 /* Bit 1 = 0x02 */
+#define CAIF_FLOW_OFF_SENT 4 /* Bit 4 = 0x10 */
+#define MAX_WRITE_CHUNK 4096
+#define ON 1
+#define OFF 0
+#define CAIF_MAX_MTU 4096
+
+/* This list is protected by the rtnl lock. */
+static LIST_HEAD(ser_list);
+
+static int ser_loop;
+module_param(ser_loop, bool, S_IRUGO);
+MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
+
+static int ser_use_stx = 1;
+module_param(ser_use_stx, bool, S_IRUGO);
+MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
+
+static int ser_use_fcs = 1;
+module_param(ser_use_fcs, bool, S_IRUGO);
+MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
+
+static int ser_write_chunk = MAX_WRITE_CHUNK;
+module_param(ser_write_chunk, int, S_IRUGO);
+MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");
+
+static struct dentry *debugfsdir;
+
+static int caif_net_open(struct net_device *dev);
+static int caif_net_close(struct net_device *dev);
+
+struct ser_device {
+ struct caif_dev_common common;
+ struct list_head node;
+ struct net_device *dev;
+ struct sk_buff_head head;
+ struct tty_struct *tty;
+ bool tx_started;
+ unsigned long state;
+ char *tty_name;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_tty_dir;
+ struct debugfs_blob_wrapper tx_blob;
+ struct debugfs_blob_wrapper rx_blob;
+ u8 rx_data[128];
+ u8 tx_data[128];
+ u8 tty_status;
+
+#endif
+};
+
+static void caifdev_setup(struct net_device *dev);
+static void ldisc_tx_wakeup(struct tty_struct *tty);
+#ifdef CONFIG_DEBUG_FS
+static inline void update_tty_status(struct ser_device *ser)
+{
+ ser->tty_status =
+ ser->tty->stopped << 5 |
+ ser->tty->hw_stopped << 4 |
+ ser->tty->flow_stopped << 3 |
+ ser->tty->packet << 2 |
+ ser->tty->low_latency << 1 |
+ ser->tty->warned;
+}
+static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
+{
+ ser->debugfs_tty_dir =
+ debugfs_create_dir(tty->name, debugfsdir);
+ if (!IS_ERR(ser->debugfs_tty_dir)) {
+ debugfs_create_blob("last_tx_msg", S_IRUSR,
+ ser->debugfs_tty_dir,
+ &ser->tx_blob);
+
+ debugfs_create_blob("last_rx_msg", S_IRUSR,
+ ser->debugfs_tty_dir,
+ &ser->rx_blob);
+
+ debugfs_create_x32("ser_state", S_IRUSR,
+ ser->debugfs_tty_dir,
+ (u32 *)&ser->state);
+
+ debugfs_create_x8("tty_status", S_IRUSR,
+ ser->debugfs_tty_dir,
+ &ser->tty_status);
+
+ }
+ ser->tx_blob.data = ser->tx_data;
+ ser->tx_blob.size = 0;
+ ser->rx_blob.data = ser->rx_data;
+ ser->rx_blob.size = 0;
+}
+
+static inline void debugfs_deinit(struct ser_device *ser)
+{
+ debugfs_remove_recursive(ser->debugfs_tty_dir);
+}
+
+static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
+{
+ if (size > sizeof(ser->rx_data))
+ size = sizeof(ser->rx_data);
+ memcpy(ser->rx_data, data, size);
+ ser->rx_blob.data = ser->rx_data;
+ ser->rx_blob.size = size;
+}
+
+static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
+{
+ if (size > sizeof(ser->tx_data))
+ size = sizeof(ser->tx_data);
+ memcpy(ser->tx_data, data, size);
+ ser->tx_blob.data = ser->tx_data;
+ ser->tx_blob.size = size;
+}
+#else
+static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
+{
+}
+
+static inline void debugfs_deinit(struct ser_device *ser)
+{
+}
+
+static inline void update_tty_status(struct ser_device *ser)
+{
+}
+
+static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
+{
+}
+
+static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
+{
+}
+
+#endif
+
+static void ldisc_receive(struct tty_struct *tty, const u8 *data,
+ char *flags, int count)
+{
+ struct sk_buff *skb = NULL;
+ struct ser_device *ser;
+ int ret;
+ u8 *p;
+ ser = tty->disc_data;
+
+ /*
+ * NOTE: flags may contain information about break or overrun.
+ * This is not yet handled.
+ */
+
+
+ /*
+ * Workaround for garbage at start of transmission,
+ * only enable if STX handling is not enabled.
+ */
+ if (!ser->common.use_stx && !ser->tx_started) {
+ dev_info(&ser->dev->dev,
+ "Bytes received before initial transmission -"
+ "bytes discarded.\n");
+ return;
+ }
+
+ BUG_ON(ser->dev == NULL);
+
+ /* Get a suitable caif packet and copy in data. */
+ skb = netdev_alloc_skb(ser->dev, count+1);
+ if (skb == NULL)
+ return;
+ p = skb_put(skb, count);
+ memcpy(p, data, count);
+
+ skb->protocol = htons(ETH_P_CAIF);
+ skb_reset_mac_header(skb);
+ skb->dev = ser->dev;
+ debugfs_rx(ser, data, count);
+ /* Push received packet up the stack. */
+ ret = netif_rx_ni(skb);
+ if (!ret) {
+ ser->dev->stats.rx_packets++;
+ ser->dev->stats.rx_bytes += count;
+ } else
+ ++ser->dev->stats.rx_dropped;
+ update_tty_status(ser);
+}
+
+static int handle_tx(struct ser_device *ser)
+{
+ struct tty_struct *tty;
+ struct sk_buff *skb;
+ int tty_wr, len, room;
+ tty = ser->tty;
+ ser->tx_started = true;
+
+ /* Enter critical section */
+ if (test_and_set_bit(CAIF_SENDING, &ser->state))
+ return 0;
+
+ /* skb_peek is safe because handle_tx is called after skb_queue_tail */
+ while ((skb = skb_peek(&ser->head)) != NULL) {
+
+ /* Make sure you don't write too much */
+ len = skb->len;
+ room = tty_write_room(tty);
+ if (!room)
+ break;
+ if (room > ser_write_chunk)
+ room = ser_write_chunk;
+ if (len > room)
+ len = room;
+
+ /* Write to tty or loopback */
+ if (!ser_loop) {
+ tty_wr = tty->ops->write(tty, skb->data, len);
+ update_tty_status(ser);
+ } else {
+ tty_wr = len;
+ ldisc_receive(tty, skb->data, NULL, len);
+ }
+ ser->dev->stats.tx_packets++;
+ ser->dev->stats.tx_bytes += tty_wr;
+
+ /* Error on TTY ?! */
+ if (tty_wr < 0)
+ goto error;
+ /* Reduce buffer written, and discard if empty */
+ skb_pull(skb, tty_wr);
+ if (skb->len == 0) {
+ struct sk_buff *tmp = skb_dequeue(&ser->head);
+ BUG_ON(tmp != skb);
+ if (in_interrupt())
+ dev_kfree_skb_irq(skb);
+ else
+ kfree_skb(skb);
+ }
+ }
+ /* Send flow off if queue is empty */
+ if (ser->head.qlen <= SEND_QUEUE_LOW &&
+ test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
+ ser->common.flowctrl != NULL)
+ ser->common.flowctrl(ser->dev, ON);
+ clear_bit(CAIF_SENDING, &ser->state);
+ return 0;
+error:
+ clear_bit(CAIF_SENDING, &ser->state);
+ return tty_wr;
+}
+
+static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ser_device *ser;
+ BUG_ON(dev == NULL);
+ ser = netdev_priv(dev);
+
+ /* Send flow off once, on high water mark */
+ if (ser->head.qlen > SEND_QUEUE_HIGH &&
+ !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
+ ser->common.flowctrl != NULL)
+ ser->common.flowctrl(ser->dev, OFF);
+
+ skb_queue_tail(&ser->head, skb);
+ return handle_tx(ser);
+}
+
+
+static void ldisc_tx_wakeup(struct tty_struct *tty)
+{
+ struct ser_device *ser;
+ ser = tty->disc_data;
+ BUG_ON(ser == NULL);
+ BUG_ON(ser->tty != tty);
+ handle_tx(ser);
+}
+
+
+static int ldisc_open(struct tty_struct *tty)
+{
+ struct ser_device *ser;
+ struct net_device *dev;
+ char name[64];
+ int result;
+
+ /* No write no play */
+ if (tty->ops->write == NULL)
+ return -EOPNOTSUPP;
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
+ return -EPERM;
+
+ sprintf(name, "cf%s", tty->name);
+ dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
+ if (!dev)
+ return -ENOMEM;
+ ser = netdev_priv(dev);
+ ser->tty = tty_kref_get(tty);
+ ser->dev = dev;
+ debugfs_init(ser, tty);
+ tty->receive_room = N_TTY_BUF_SIZE;
+ tty->disc_data = ser;
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ rtnl_lock();
+ result = register_netdevice(dev);
+ if (result) {
+ rtnl_unlock();
+ free_netdev(dev);
+ return -ENODEV;
+ }
+
+ list_add(&ser->node, &ser_list);
+ rtnl_unlock();
+ netif_stop_queue(dev);
+ update_tty_status(ser);
+ return 0;
+}
+
+static void ldisc_close(struct tty_struct *tty)
+{
+ struct ser_device *ser = tty->disc_data;
+ /* Remove may be called inside or outside of rtnl_lock */
+ int islocked = rtnl_is_locked();
+ if (!islocked)
+ rtnl_lock();
+ /* device is freed automagically by net-sysfs */
+ dev_close(ser->dev);
+ unregister_netdevice(ser->dev);
+ list_del(&ser->node);
+ debugfs_deinit(ser);
+ tty_kref_put(ser->tty);
+ if (!islocked)
+ rtnl_unlock();
+}
+
+/* The line discipline structure. */
+static struct tty_ldisc_ops caif_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "n_caif",
+ .open = ldisc_open,
+ .close = ldisc_close,
+ .receive_buf = ldisc_receive,
+ .write_wakeup = ldisc_tx_wakeup
+};
+
+static int register_ldisc(void)
+{
+ int result;
+ result = tty_register_ldisc(N_CAIF, &caif_ldisc);
+ if (result < 0) {
+ pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
+ result);
+ return result;
+ }
+ return result;
+}
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = caif_net_open,
+ .ndo_stop = caif_net_close,
+ .ndo_start_xmit = caif_xmit
+};
+
+static void caifdev_setup(struct net_device *dev)
+{
+ struct ser_device *serdev = netdev_priv(dev);
+ dev->features = 0;
+ dev->netdev_ops = &netdev_ops;
+ dev->type = ARPHRD_CAIF;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->mtu = CAIF_MAX_MTU;
+ dev->hard_header_len = CAIF_NEEDED_HEADROOM;
+ dev->tx_queue_len = 0;
+ dev->destructor = free_netdev;
+ skb_queue_head_init(&serdev->head);
+ serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
+ serdev->common.use_frag = true;
+ serdev->common.use_stx = ser_use_stx;
+ serdev->common.use_fcs = ser_use_fcs;
+ serdev->dev = dev;
+}
+
+
+static int caif_net_open(struct net_device *dev)
+{
+ netif_wake_queue(dev);
+ return 0;
+}
+
+static int caif_net_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int __init caif_ser_init(void)
+{
+ int ret;
+ ret = register_ldisc();
+ debugfsdir = debugfs_create_dir("caif_serial", NULL);
+ return ret;
+}
+
+static void __exit caif_ser_exit(void)
+{
+ struct ser_device *ser = NULL;
+ struct list_head *node;
+ struct list_head *_tmp;
+ rtnl_lock();
+ list_for_each_safe(node, _tmp, &ser_list) {
+ ser = list_entry(node, struct ser_device, node);
+ dev_close(ser->dev);
+ unregister_netdevice(ser->dev);
+ list_del(node);
+ }
+ rtnl_unlock();
+ tty_unregister_ldisc(N_CAIF);
+ debugfs_remove_recursive(debugfsdir);
+}
+
+module_init(caif_ser_init);
+module_exit(caif_ser_exit);
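A rough user-space usage sketch, not part of the patch: the line discipline is attached with TIOCSETD on an open tty. The fallback N_CAIF value below is an assumption for illustration; the real value comes from the kernel headers of the target tree.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#ifndef N_CAIF
#define N_CAIF 20		/* assumed; take the value from <linux/tty.h> */
#endif

int main(int argc, char **argv)
{
	const char *port = argc > 1 ? argv[1] : "/dev/ttyS0";
	int ldisc = N_CAIF;
	int fd = open(port, O_RDWR | O_NOCTTY);

	if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0) {
		perror("caif ldisc setup");
		return 1;
	}
	/* Keep the descriptor open; closing it detaches the ldisc and
	 * unregisters the cf<tty> network device created by ldisc_open().
	 */
	pause();
	close(fd);
	return 0;
}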
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index a2f29a3..2d8bd86 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -35,7 +35,6 @@
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
@@ -376,7 +375,6 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
at91_write(priv, AT91_MCR(mb), reg_mcr);
stats->tx_bytes += cf->can_dlc;
- dev->trans_start = jiffies;
/* _NOTE_: substract AT91_MB_TX_FIRST offset from mb! */
can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);
@@ -662,7 +660,6 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
at91_poll_err_frame(dev, cf, reg_sr);
netif_receive_skb(skb);
- dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
@@ -899,7 +896,6 @@ static void at91_irq_err(struct net_device *dev)
at91_irq_err_state(dev, cf, new_state);
netif_rx(skb);
- dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 0348986..b6e890d 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -18,7 +18,6 @@
#include <linux/skbuff.h>
#include <linux/platform_device.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
@@ -270,8 +269,6 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* fill data length code */
bfin_write16(&reg->chl[TRANSMIT_CHL].dlc, dlc);
- dev->trans_start = jiffies;
-
can_put_echo_skb(skb, dev, 0);
/* set transmit request */
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index b39b108..b11a0cb 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -58,7 +58,6 @@
*
*/
-#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/dev.h>
#include <linux/can/platform/mcp251x.h>
@@ -476,7 +475,6 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
netif_stop_queue(net);
priv->tx_skb = skb;
- net->trans_start = jiffies;
queue_work(priv->wq, &priv->tx_work);
return NETDEV_TX_OK;
@@ -923,12 +921,16 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
struct net_device *net;
struct mcp251x_priv *priv;
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+ int model = spi_get_device_id(spi)->driver_data;
int ret = -ENODEV;
if (!pdata)
/* Platform data is required for osc freq */
goto error_out;
+ if (model)
+ pdata->model = model;
+
/* Allocate can/net device */
net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
if (!net) {
@@ -1118,6 +1120,15 @@ static int mcp251x_can_resume(struct spi_device *spi)
#define mcp251x_can_resume NULL
#endif
+static struct spi_device_id mcp251x_id_table[] = {
+ { "mcp251x", 0 /* Use pdata.model */ },
+ { "mcp2510", CAN_MCP251X_MCP2510 },
+ { "mcp2515", CAN_MCP251X_MCP2515 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
+
static struct spi_driver mcp251x_can_driver = {
.driver = {
.name = DEVICE_NAME,
@@ -1125,6 +1136,7 @@ static struct spi_driver mcp251x_can_driver = {
.owner = THIS_MODULE,
},
+ .id_table = mcp251x_id_table,
.probe = mcp251x_can_probe,
.remove = __devexit_p(mcp251x_can_remove),
.suspend = mcp251x_can_suspend,
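With the id table in place, board code can name the chip through the SPI modalias so that mcp251x_can_probe() resolves the model via spi_get_device_id(). A hedged board-file sketch; bus number, chip select, SPI speed and the oscillator value are made up:

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/can/platform/mcp251x.h>

/* Illustrative only: platform data still carries the oscillator
 * frequency, while the "mcp2515" modalias now selects the model.
 */
static struct mcp251x_platform_data example_mcp251x_pdata = {
	.oscillator_frequency	= 16000000,
};

static struct spi_board_info example_spi_board_info[] __initdata = {
	{
		.modalias	= "mcp2515",
		.platform_data	= &example_mcp251x_pdata,
		.max_speed_hz	= 10 * 1000 * 1000,
		.bus_num	= 1,
		.chip_select	= 0,
	},
};

/* Registered from the board's init code, e.g.:
 *	spi_register_board_info(example_spi_board_info,
 *				ARRAY_SIZE(example_spi_board_info));
 */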
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 03e7c48..225fd14 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -25,7 +25,6 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 6b7dd57..64c378c 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -28,7 +28,6 @@
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/io.h>
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 9e277d6..ae3505a 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -53,7 +53,9 @@ config CAN_PLX_PCI
Driver supports now:
- Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/)
- Adlink PCI-7841/cPCI-7841 SE card
+ - esd CAN-PCI/CPCI/PCI104/200 (http://www.esd.eu/)
+ - esd CAN-PCI/PMC/266
+ - esd CAN-PCIe/2000
- Marathon CAN-bus-PCI card (http://www.marathon.ru/)
- TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
-
endif
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 5f53da0..36f4f97 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -24,7 +24,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/io.h>
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 441e776..ed004ce 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -36,7 +36,6 @@
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/pci.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/io.h>
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 4aff407..437b5c7 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -27,7 +27,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/io.h>
@@ -41,7 +40,10 @@ MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
"Adlink PCI-7841/cPCI-7841 SE, "
"Marathon CAN-bus-PCI, "
- "TEWS TECHNOLOGIES TPMC810");
+ "TEWS TECHNOLOGIES TPMC810, "
+ "esd CAN-PCI/CPCI/PCI104/200, "
+ "esd CAN-PCI/PMC/266, "
+ "esd CAN-PCIe/2000")
MODULE_LICENSE("GPL v2");
#define PLX_PCI_MAX_CHAN 2
@@ -50,11 +52,14 @@ struct plx_pci_card {
int channels; /* detected channels count */
struct net_device *net_dev[PLX_PCI_MAX_CHAN];
void __iomem *conf_addr;
+
+ /* Pointer to device-dependent reset function */
+ void (*reset_func)(struct pci_dev *pdev);
};
#define PLX_PCI_CAN_CLOCK (16000000 / 2)
-/* PLX90xx registers */
+/* PLX9030/9050/9052 registers */
#define PLX_INTCSR 0x4c /* Interrupt Control/Status */
#define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response,
* Serial EEPROM, and Initialization
@@ -66,6 +71,14 @@ struct plx_pci_card {
#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */
#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */
+/* PLX9056 registers */
+#define PLX9056_INTCSR 0x68 /* Interrupt Control/Status */
+#define PLX9056_CNTRL 0x6c /* Control / Software Reset */
+
+#define PLX9056_LINTI (1 << 11)
+#define PLX9056_PCI_INT_EN (1 << 8)
+#define PLX9056_PCI_RCR (1 << 29) /* Read Configuration Registers */
+
/*
* The board configuration is probably following:
* RX1 is connected to ground.
@@ -101,6 +114,13 @@ struct plx_pci_card {
#define ADLINK_PCI_VENDOR_ID 0x144A
#define ADLINK_PCI_DEVICE_ID 0x7841
+#define ESD_PCI_SUB_SYS_ID_PCI200 0x0004
+#define ESD_PCI_SUB_SYS_ID_PCI266 0x0009
+#define ESD_PCI_SUB_SYS_ID_PMC266 0x000e
+#define ESD_PCI_SUB_SYS_ID_CPCI200 0x010b
+#define ESD_PCI_SUB_SYS_ID_PCIE2000 0x0200
+#define ESD_PCI_SUB_SYS_ID_PCI104200 0x0501
+
#define MARATHON_PCI_DEVICE_ID 0x2715
#define TEWS_PCI_VENDOR_ID 0x1498
@@ -108,6 +128,7 @@ struct plx_pci_card {
static void plx_pci_reset_common(struct pci_dev *pdev);
static void plx_pci_reset_marathon(struct pci_dev *pdev);
+static void plx9056_pci_reset_common(struct pci_dev *pdev);
struct plx_pci_channel_map {
u32 bar;
@@ -148,6 +169,30 @@ static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = {
/* based on PLX9052 */
};
+static struct plx_pci_card_info plx_pci_card_info_esd200 __devinitdata = {
+ "esd CAN-PCI/CPCI/PCI104/200", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9030/9050 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_esd266 __devinitdata = {
+ "esd CAN-PCI/PMC/266", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+ &plx9056_pci_reset_common
+ /* based on PLX9056 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_esd2000 __devinitdata = {
+ "esd CAN-PCIe/2000", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+ &plx9056_pci_reset_common
+ /* based on PEX8311 */
+};
+
static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
"Marathon CAN-bus-PCI", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
@@ -180,6 +225,48 @@ static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
(kernel_ulong_t)&plx_pci_card_info_adlink_se
},
{
+ /* esd CAN-PCI/200 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI200,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd200
+ },
+ {
+ /* esd CAN-CPCI/200 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_CPCI200,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd200
+ },
+ {
+ /* esd CAN-PCI104/200 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI104200,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd200
+ },
+ {
+ /* esd CAN-PCI/266 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI266,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd266
+ },
+ {
+ /* esd CAN-PMC/266 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PMC266,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd266
+ },
+ {
+ /* esd CAN-PCIE/2000 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCIE2000,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd2000
+ },
+ {
/* Marathon CAN-bus-PCI card */
PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
PCI_ANY_ID, PCI_ANY_ID,
@@ -242,7 +329,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
}
/*
- * PLX90xx software reset
+ * PLX9030/50/52 software reset
* Also LRESET# asserts and brings to reset device on the Local Bus (if wired).
* For most cards it's enough for reset the SJA1000 chips.
*/
@@ -259,6 +346,38 @@ static void plx_pci_reset_common(struct pci_dev *pdev)
iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
};
+/*
+ * PLX9056 software reset
+ * Assert LRESET# and reset device(s) on the Local Bus (if wired).
+ */
+static void plx9056_pci_reset_common(struct pci_dev *pdev)
+{
+ struct plx_pci_card *card = pci_get_drvdata(pdev);
+ u32 cntrl;
+
+ /* issue a local bus reset */
+ cntrl = ioread32(card->conf_addr + PLX9056_CNTRL);
+ cntrl |= PLX_PCI_RESET;
+ iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+ udelay(100);
+ cntrl ^= PLX_PCI_RESET;
+ iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+
+ /* reload local configuration from EEPROM */
+ cntrl |= PLX9056_PCI_RCR;
+ iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+
+ /*
+ * There is no safe way to poll for the end
+ * of reconfiguration process. Waiting for 10ms
+ * is safe.
+ */
+ mdelay(10);
+
+ cntrl ^= PLX9056_PCI_RCR;
+ iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+};
+
/* Special reset function for Marathon card */
static void plx_pci_reset_marathon(struct pci_dev *pdev)
{
@@ -302,13 +421,16 @@ static void plx_pci_del_card(struct pci_dev *pdev)
free_sja1000dev(dev);
}
- plx_pci_reset_common(pdev);
+ card->reset_func(pdev);
/*
- * Disable interrupts from PCI-card (PLX90xx) and disable Local_1,
- * Local_2 interrupts
+ * Disable interrupts from PCI-card and disable local
+ * interrupts
*/
- iowrite32(0x0, card->conf_addr + PLX_INTCSR);
+ if (pdev->device != PCI_DEVICE_ID_PLX_9056)
+ iowrite32(0x0, card->conf_addr + PLX_INTCSR);
+ else
+ iowrite32(0x0, card->conf_addr + PLX9056_INTCSR);
if (card->conf_addr)
pci_iounmap(pdev, card->conf_addr);
@@ -367,6 +489,7 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
card->conf_addr = addr + ci->conf_map.offset;
ci->reset_func(pdev);
+ card->reset_func = ci->reset_func;
/* Detect available channels */
for (i = 0; i < ci->channel_count; i++) {
@@ -438,10 +561,17 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
* Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
* Local_2 interrupts from the SJA1000 chips
*/
- val = ioread32(card->conf_addr + PLX_INTCSR);
- val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
- iowrite32(val, card->conf_addr + PLX_INTCSR);
-
+ if (pdev->device != PCI_DEVICE_ID_PLX_9056) {
+ val = ioread32(card->conf_addr + PLX_INTCSR);
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH)
+ val |= PLX_LINT1_EN | PLX_PCI_INT_EN;
+ else
+ val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
+ iowrite32(val, card->conf_addr + PLX_INTCSR);
+ } else {
+ iowrite32(PLX9056_LINTI | PLX9056_PCI_INT_EN,
+ card->conf_addr + PLX9056_INTCSR);
+ }
return 0;
failure_cleanup:
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 145b1a7..85f7cbf 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -60,7 +60,6 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
@@ -84,6 +83,20 @@ static struct can_bittiming_const sja1000_bittiming_const = {
.brp_inc = 1,
};
+static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
+{
+ unsigned long flags;
+
+ /*
+ * The command register needs some locking and time to settle
+ * the write_reg() operation - especially on SMP systems.
+ */
+ spin_lock_irqsave(&priv->cmdreg_lock, flags);
+ priv->write_reg(priv, REG_CMR, val);
+ priv->read_reg(priv, REG_SR);
+ spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
+}
+
static int sja1000_probe_chip(struct net_device *dev)
{
struct sja1000_priv *priv = netdev_priv(dev);
@@ -293,11 +306,9 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
for (i = 0; i < dlc; i++)
priv->write_reg(priv, dreg++, cf->data[i]);
- dev->trans_start = jiffies;
-
can_put_echo_skb(skb, dev, 0);
- priv->write_reg(priv, REG_CMR, CMD_TR);
+ sja1000_write_cmdreg(priv, CMD_TR);
return NETDEV_TX_OK;
}
@@ -346,7 +357,7 @@ static void sja1000_rx(struct net_device *dev)
cf->can_id = id;
/* release receive buffer */
- priv->write_reg(priv, REG_CMR, CMD_RRB);
+ sja1000_write_cmdreg(priv, CMD_RRB);
netif_rx(skb);
@@ -374,7 +385,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
stats->rx_over_errors++;
stats->rx_errors++;
- priv->write_reg(priv, REG_CMR, CMD_CDO); /* clear bit */
+ sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */
}
if (isrc & IRQ_EI) {
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 97a622b..de8e778 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -167,6 +167,7 @@ struct sja1000_priv {
void __iomem *reg_base; /* ioremap'ed address to registers */
unsigned long irq_flags; /* for request_irq() */
+ spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */
u16 flags; /* custom mode flags */
u8 ocr; /* output control register */
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index a6a51f1..496223e 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/io.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 9dd076a..34e79ef 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -38,7 +38,6 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/of_platform.h>
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 628374c..d9fadc4 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -24,7 +24,6 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
#include <linux/io.h>
@@ -37,16 +36,36 @@ MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
MODULE_LICENSE("GPL v2");
-static u8 sp_read_reg(const struct sja1000_priv *priv, int reg)
+static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg)
{
return ioread8(priv->reg_base + reg);
}
-static void sp_write_reg(const struct sja1000_priv *priv, int reg, u8 val)
+static void sp_write_reg8(const struct sja1000_priv *priv, int reg, u8 val)
{
iowrite8(val, priv->reg_base + reg);
}
+static u8 sp_read_reg16(const struct sja1000_priv *priv, int reg)
+{
+ return ioread8(priv->reg_base + reg * 2);
+}
+
+static void sp_write_reg16(const struct sja1000_priv *priv, int reg, u8 val)
+{
+ iowrite8(val, priv->reg_base + reg * 2);
+}
+
+static u8 sp_read_reg32(const struct sja1000_priv *priv, int reg)
+{
+ return ioread8(priv->reg_base + reg * 4);
+}
+
+static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val)
+{
+ iowrite8(val, priv->reg_base + reg * 4);
+}
+
static int sp_probe(struct platform_device *pdev)
{
int err;
@@ -90,14 +109,29 @@ static int sp_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
dev->irq = res_irq->start;
- priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+ priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED);
priv->reg_base = addr;
- priv->read_reg = sp_read_reg;
- priv->write_reg = sp_write_reg;
- priv->can.clock.freq = pdata->clock;
+ /* The CAN clock frequency is half the oscillator clock frequency */
+ priv->can.clock.freq = pdata->osc_freq / 2;
priv->ocr = pdata->ocr;
priv->cdr = pdata->cdr;
+ switch (res_mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+ case IORESOURCE_MEM_32BIT:
+ priv->read_reg = sp_read_reg32;
+ priv->write_reg = sp_write_reg32;
+ break;
+ case IORESOURCE_MEM_16BIT:
+ priv->read_reg = sp_read_reg16;
+ priv->write_reg = sp_write_reg16;
+ break;
+ case IORESOURCE_MEM_8BIT:
+ default:
+ priv->read_reg = sp_read_reg8;
+ priv->write_reg = sp_write_reg8;
+ break;
+ }
+
dev_set_drvdata(&pdev->dev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
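The register-width selection above keys off the IORESOURCE_MEM_TYPE_MASK bits of the memory resource. A hypothetical board description for an SJA1000 wired to a 16-bit bus (base address, size, IRQ and clock are placeholders, and the ocr/cdr fields a real board would set are omitted):

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/can/platform/sja1000.h>

static struct resource example_sja1000_resources[] = {
	{
		/* 16-bit flag makes sp_probe() pick sp_read_reg16/sp_write_reg16 */
		.start	= 0x10000000,
		.end	= 0x100000ff,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
	}, {
		.start	= 42,
		.end	= 42,
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
	},
};

static struct sja1000_platform_data example_sja1000_pdata = {
	.osc_freq	= 16000000,	/* halved by the driver for the CAN clock */
};

static struct platform_device example_sja1000_device = {
	.name		= "sja1000_platform",	/* assumed platform driver name */
	.id		= 0,
	.dev		= {
		.platform_data	= &example_sja1000_pdata,
	},
	.num_resources	= ARRAY_SIZE(example_sja1000_resources),
	.resource	= example_sja1000_resources,
};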
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 0c3d2ba..4d07f1e 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -47,7 +47,6 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/platform/ti_hecc.h>
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index d800b59..1fc0871 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -300,8 +300,6 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
else if (err)
dev_err(netdev->dev.parent,
"failed resubmitting intr urb: %d\n", err);
-
- return;
}
static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
@@ -497,8 +495,6 @@ resubmit_urb:
else if (retval)
dev_err(netdev->dev.parent,
"failed resubmitting read bulk urb: %d\n", retval);
-
- return;
}
/*
@@ -516,8 +512,8 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
netdev = dev->netdev;
/* free up our allocated buffer */
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
atomic_dec(&dev->active_tx_urbs);
@@ -614,8 +610,8 @@ static int ems_usb_start(struct ems_usb *dev)
return -ENOMEM;
}
- buf = usb_buffer_alloc(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
- &urb->transfer_dma);
+ buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
+ &urb->transfer_dma);
if (!buf) {
dev_err(netdev->dev.parent,
"No memory left for USB buffer\n");
@@ -635,8 +631,8 @@ static int ems_usb_start(struct ems_usb *dev)
netif_device_detach(dev->netdev);
usb_unanchor_urb(urb);
- usb_buffer_free(dev->udev, RX_BUFFER_SIZE, buf,
- urb->transfer_dma);
+ usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
+ urb->transfer_dma);
break;
}
@@ -777,7 +773,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
goto nomem;
}
- buf = usb_buffer_alloc(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma);
+ buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma);
if (!buf) {
dev_err(netdev->dev.parent, "No memory left for USB buffer\n");
usb_free_urb(urb);
@@ -820,7 +816,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
*/
if (!context) {
usb_unanchor_urb(urb);
- usb_buffer_free(dev->udev, size, buf, urb->transfer_dma);
+ usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
dev_warn(netdev->dev.parent, "couldn't find free context\n");
@@ -845,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
can_free_echo_skb(netdev, context->echo_index);
usb_unanchor_urb(urb);
- usb_buffer_free(dev->udev, size, buf, urb->transfer_dma);
+ usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
dev_kfree_skb(skb);
atomic_dec(&dev->active_tx_urbs);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 9bd155e..04a03f7 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2889,7 +2889,6 @@ static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
return NETDEV_TX_BUSY;
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -2957,20 +2956,20 @@ static void cas_process_mc_list(struct cas *cp)
{
u16 hash_table[16];
u32 crc;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i = 1;
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, cp->dev) {
+ netdev_for_each_mc_addr(ha, cp->dev) {
if (i <= CAS_MC_EXACT_MATCH_SIZE) {
/* use the alternate mac address registers for the
* first 15 multicast addresses
*/
- writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
+ writel((ha->addr[4] << 8) | ha->addr[5],
cp->regs + REG_MAC_ADDRN(i*3 + 0));
- writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
+ writel((ha->addr[2] << 8) | ha->addr[3],
cp->regs + REG_MAC_ADDRN(i*3 + 1));
- writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
+ writel((ha->addr[0] << 8) | ha->addr[1],
cp->regs + REG_MAC_ADDRN(i*3 + 2));
i++;
}
@@ -2978,7 +2977,7 @@ static void cas_process_mc_list(struct cas *cp)
/* use hw hash table for the next series of
* multicast addresses
*/
- crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
crc >>= 24;
hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
}
@@ -4825,7 +4824,7 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
default:
break;
- };
+ }
mutex_unlock(&cp->pm_mutex);
return rc;
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 9e631b9..7dbb16d 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -377,12 +377,13 @@ static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
} else if (t1_rx_mode_mc_cnt(rm)) {
/* Accept one or more multicast(s). */
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int bit;
u16 mc_filter[4] = { 0, };
- netdev_for_each_mc_addr(dmi, t1_get_netdev(rm)) {
- bit = (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 23) & 0x3f; /* bit[23:28] */
+ netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) {
+ /* bit[23:28] */
+ bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f;
mc_filter[bit >> 4] |= 1 << (bit & 0xf);
}
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index df3a141..f01cfdb 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -162,14 +162,14 @@ struct respQ_e {
*/
struct cmdQ_ce {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(dma_addr);
- DECLARE_PCI_UNMAP_LEN(dma_len);
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
};
struct freelQ_ce {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(dma_addr);
- DECLARE_PCI_UNMAP_LEN(dma_len);
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
};
/*
@@ -460,7 +460,7 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
again:
for (i = 0; i < MAX_NPORTS; i++) {
- s->port = ++s->port & (MAX_NPORTS - 1);
+ s->port = (s->port + 1) & (MAX_NPORTS - 1);
skbq = &s->p[s->port].skbq;
skb = skb_peek(skbq);
@@ -518,8 +518,8 @@ static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
while (q->credits--) {
struct freelQ_ce *ce = &q->centries[cidx];
- pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
PCI_DMA_FROMDEVICE);
dev_kfree_skb(ce->skb);
ce->skb = NULL;
@@ -633,9 +633,9 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
q->in_use -= n;
ce = &q->centries[cidx];
while (n--) {
- if (likely(pci_unmap_len(ce, dma_len))) {
- pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
+ if (likely(dma_unmap_len(ce, dma_len))) {
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
PCI_DMA_TODEVICE);
if (q->sop)
q->sop = 0;
@@ -851,8 +851,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
skb_reserve(skb, sge->rx_pkt_pad);
ce->skb = skb;
- pci_unmap_addr_set(ce, dma_addr, mapping);
- pci_unmap_len_set(ce, dma_len, dma_len);
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, dma_len);
e->addr_lo = (u32)mapping;
e->addr_hi = (u64)mapping >> 32;
e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
@@ -1059,13 +1059,13 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
skb_reserve(skb, 2); /* align IP header */
skb_put(skb, len);
pci_dma_sync_single_for_cpu(pdev,
- pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(ce->skb, skb->data, len);
pci_dma_sync_single_for_device(pdev,
- pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
PCI_DMA_FROMDEVICE);
recycle_fl_buf(fl, fl->cidx);
return skb;
@@ -1077,8 +1077,8 @@ use_orig_buf:
return NULL;
}
- pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
skb = ce->skb;
prefetch(skb->data);
@@ -1100,8 +1100,8 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
struct freelQ_ce *ce = &fl->centries[fl->cidx];
struct sk_buff *skb = ce->skb;
- pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
pr_err("%s: unexpected offload packet, cmd %u\n",
adapter->name, *skb->data);
recycle_fl_buf(fl, fl->cidx);
@@ -1123,7 +1123,7 @@ static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
unsigned int nfrags = skb_shinfo(skb)->nr_frags;
- unsigned int i, len = skb->len - skb->data_len;
+ unsigned int i, len = skb_headlen(skb);
while (len > SGE_TX_DESC_MAX_PLEN) {
count++;
len -= SGE_TX_DESC_MAX_PLEN;
@@ -1182,7 +1182,7 @@ static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
*gen, nfrags == 0 && *desc_len == 0);
ce1->skb = NULL;
- pci_unmap_len_set(ce1, dma_len, 0);
+ dma_unmap_len_set(ce1, dma_len, 0);
*desc_mapping += SGE_TX_DESC_MAX_PLEN;
if (*desc_len) {
ce1++;
@@ -1219,10 +1219,10 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
ce = &q->centries[pidx];
mapping = pci_map_single(adapter->pdev, skb->data,
- skb->len - skb->data_len, PCI_DMA_TODEVICE);
+ skb_headlen(skb), PCI_DMA_TODEVICE);
desc_mapping = mapping;
- desc_len = skb->len - skb->data_len;
+ desc_len = skb_headlen(skb);
flags = F_CMD_DATAVALID | F_CMD_SOP |
V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
@@ -1233,7 +1233,7 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
e->addr_hi = (u64)desc_mapping >> 32;
e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
ce->skb = NULL;
- pci_unmap_len_set(ce, dma_len, 0);
+ dma_unmap_len_set(ce, dma_len, 0);
if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
desc_len > SGE_TX_DESC_MAX_PLEN) {
@@ -1257,8 +1257,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
}
ce->skb = NULL;
- pci_unmap_addr_set(ce, dma_addr, mapping);
- pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
for (i = 0; nfrags--; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1284,8 +1284,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
write_tx_desc(e1, desc_mapping, desc_len, gen,
nfrags == 0);
ce->skb = NULL;
- pci_unmap_addr_set(ce, dma_addr, mapping);
- pci_unmap_len_set(ce, dma_len, frag->size);
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, frag->size);
}
ce->skb = skb;
wmb();
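The DECLARE_PCI_UNMAP_* to DEFINE_DMA_UNMAP_* change is a macro rename, but the accessor pattern matters because the fields only exist on architectures that need unmap state. A sketch on a made-up descriptor:

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Hypothetical SW descriptor: DEFINE_DMA_UNMAP_ADDR/LEN expand to real
 * members only when CONFIG_NEED_DMA_MAP_STATE is set, so the values
 * must always go through the dma_unmap_*() accessor macros.
 */
struct example_sw_desc {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

static void example_save_mapping(struct example_sw_desc *d,
				 dma_addr_t mapping, unsigned int len)
{
	dma_unmap_addr_set(d, dma_addr, mapping);
	dma_unmap_len_set(d, dma_len, len);
}

static void example_unmap(struct pci_dev *pdev, struct example_sw_desc *d)
{
	pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
			 dma_unmap_len(d, dma_len), PCI_DMA_FROMDEVICE);
}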
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 4b451a7..be90d35 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1143,12 +1143,12 @@ static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
spin_lock_bh(&cp->cnic_ulp_lock);
if (num_wqes > cnic_kwq_avail(cp) &&
- !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
+ !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
spin_unlock_bh(&cp->cnic_ulp_lock);
return -EAGAIN;
}
- cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;
+ clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
prod = cp->kwq_prod_idx;
sw_prod = prod & MAX_KWQ_IDX;
@@ -2092,7 +2092,6 @@ end:
i += j;
j = 1;
}
- return;
}
static u16 cnic_bnx2_next_idx(u16 idx)
@@ -2146,17 +2145,56 @@ static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
return last_cnt;
}
+static int cnic_l2_completion(struct cnic_local *cp)
+{
+ u16 hw_cons, sw_cons;
+ union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
+ (cp->l2_ring + (2 * BCM_PAGE_SIZE));
+ u32 cmd;
+ int comp = 0;
+
+ if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
+ return 0;
+
+ hw_cons = *cp->rx_cons_ptr;
+ if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
+ hw_cons++;
+
+ sw_cons = cp->rx_cons;
+ while (sw_cons != hw_cons) {
+ u8 cqe_fp_flags;
+
+ cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
+ cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+ if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
+ cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
+ cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
+ if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
+ cmd == RAMROD_CMD_ID_ETH_HALT)
+ comp++;
+ }
+ sw_cons = BNX2X_NEXT_RCQE(sw_cons);
+ }
+ return comp;
+}
+
static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
u16 rx_cons = *cp->rx_cons_ptr;
u16 tx_cons = *cp->tx_cons_ptr;
+ int comp = 0;
if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
+ if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+ comp = cnic_l2_completion(cp);
+
cp->tx_cons = tx_cons;
cp->rx_cons = rx_cons;
uio_event_notify(cp->cnic_uinfo);
}
+ if (comp)
+ clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}
static int cnic_service_bnx2(void *data, void *status_blk)
@@ -2325,7 +2363,6 @@ done:
status_idx, IGU_INT_ENABLE, 1);
cp->kcq_prod_idx = sw_prod;
- return;
}
static int cnic_service_bnx2x(void *data, void *status_blk)
@@ -3692,7 +3729,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
cp->max_kwq_idx = MAX_KWQ_IDX;
cp->kwq_prod_idx = 0;
cp->kwq_con_idx = 0;
- cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;
+ set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
@@ -4170,6 +4207,8 @@ static void cnic_init_rings(struct cnic_dev *dev)
for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
+ set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
+
cnic_init_bnx2x_tx_ring(dev);
cnic_init_bnx2x_rx_ring(dev);
@@ -4177,6 +4216,15 @@ static void cnic_init_rings(struct cnic_dev *dev)
l5_data.phy_address.hi = 0;
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
+ i = 0;
+ while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
+ ++i < 10)
+ msleep(1);
+
+ if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+ netdev_err(dev->netdev,
+ "iSCSI CLIENT_SETUP did not complete\n");
+ cnic_kwq_completion(dev, 1);
cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
}
}
@@ -4189,14 +4237,25 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
union l5cm_specific_data l5_data;
+ int i;
cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);
+ set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
+
l5_data.phy_address.lo = cli;
l5_data.phy_address.hi = 0;
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
- msleep(10);
+ i = 0;
+ while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
+ ++i < 10)
+ msleep(1);
+
+ if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+ netdev_err(dev->netdev,
+ "iSCSI CLIENT_HALT did not complete\n");
+ cnic_kwq_completion(dev, 1);
memset(&l5_data, 0, sizeof(l5_data));
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
@@ -4317,7 +4376,15 @@ static void cnic_stop_hw(struct cnic_dev *dev)
{
if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
struct cnic_local *cp = dev->cnic_priv;
+ int i = 0;
+ /* Need to wait for the ring shutdown event to complete
+ * before clearing the CNIC_UP flag.
+ */
+ while (cp->uio_dev != -1 && i < 15) {
+ msleep(100);
+ i++;
+ }
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
@@ -4628,7 +4695,6 @@ static void __exit cnic_exit(void)
{
unregister_netdevice_notifier(&cnic_netdev_notifier);
cnic_release();
- return;
}
module_init(cnic_init);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index a0d853d..08b1235 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -179,9 +179,9 @@ struct cnic_local {
#define ULP_F_CALL_PENDING 2
struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
- /* protected by ulp_lock */
- u32 cnic_local_flags;
-#define CNIC_LCL_FL_KWQ_INIT 0x00000001
+ unsigned long cnic_local_flags;
+#define CNIC_LCL_FL_KWQ_INIT 0x0
+#define CNIC_LCL_FL_L2_WAIT 0x1
struct cnic_dev *dev;
@@ -349,6 +349,10 @@ struct bnx2x_bd_chain_next {
#define BNX2X_RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1)
+#define BNX2X_NEXT_RCQE(x) (((x) & BNX2X_MAX_RCQ_DESC_CNT) == \
+ (BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \
+ ((x) + 2) : ((x) + 1)
+
#define BNX2X_DEF_SB_ID 16
#define BNX2X_ISCSI_RX_SB_INDEX_NUM \
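The cnic_local_flags rework stores bit numbers instead of masks because set_bit()/test_bit() and friends address a bit by index within an unsigned long. A minimal illustration with made-up names:

#include <linux/bitops.h>
#include <linux/types.h>

/* Bit *numbers*, not masks: this is why CNIC_LCL_FL_KWQ_INIT moves
 * from 0x00000001 to 0x0 once the flags word is used with atomic bitops.
 */
#define EXAMPLE_FL_KWQ_INIT	0
#define EXAMPLE_FL_L2_WAIT	1

static unsigned long example_flags;

static void example_mark_kwq_init(void)
{
	set_bit(EXAMPLE_FL_KWQ_INIT, &example_flags);
}

static bool example_consume_l2_wait(void)
{
	/* true if a waiter was pending and has now been cleared */
	return test_and_clear_bit(EXAMPLE_FL_L2_WAIT, &example_flags);
}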
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 60777fd..3c58db5 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -328,7 +328,7 @@ static int cpmac_config(struct net_device *dev, struct ifmap *map)
static void cpmac_set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *iter;
+ struct netdev_hw_addr *ha;
u8 tmp;
u32 mbp, bit, hash[2] = { 0, };
struct cpmac_priv *priv = netdev_priv(dev);
@@ -348,19 +348,19 @@ static void cpmac_set_multicast_list(struct net_device *dev)
* cpmac uses some strange mac address hashing
* (not crc32)
*/
- netdev_for_each_mc_addr(iter, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
bit = 0;
- tmp = iter->dmi_addr[0];
+ tmp = ha->addr[0];
bit ^= (tmp >> 2) ^ (tmp << 4);
- tmp = iter->dmi_addr[1];
+ tmp = ha->addr[1];
bit ^= (tmp >> 4) ^ (tmp << 2);
- tmp = iter->dmi_addr[2];
+ tmp = ha->addr[2];
bit ^= (tmp >> 6) ^ tmp;
- tmp = iter->dmi_addr[3];
+ tmp = ha->addr[3];
bit ^= (tmp >> 2) ^ (tmp << 4);
- tmp = iter->dmi_addr[4];
+ tmp = ha->addr[4];
bit ^= (tmp >> 4) ^ (tmp << 2);
- tmp = iter->dmi_addr[5];
+ tmp = ha->addr[5];
bit ^= (tmp >> 6) ^ tmp;
bit &= 0x3f;
hash[bit / 32] |= 1 << (bit % 32);
@@ -579,7 +579,6 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
spin_lock(&priv->lock);
- dev->trans_start = jiffies;
spin_unlock(&priv->lock);
desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
desc->skb = skb;
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 61a3391..7e00027 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1108,7 +1108,7 @@ e100_send_packet(struct sk_buff *skb, struct net_device *dev)
myNextTxDesc->skb = skb;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
e100_hardware_send_packet(np, buf, skb->len);
@@ -1595,16 +1595,16 @@ set_multicast_list(struct net_device *dev)
} else {
/* MC mode, receive normal and MC packets */
char hash_ix;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *baddr;
lo_bits = 0x00000000ul;
hi_bits = 0x00000000ul;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* Calculate the hash index for the GA registers */
hash_ix = 0;
- baddr = dmi->dmi_addr;
+ baddr = ha->addr;
hash_ix ^= (*baddr) & 0x3f;
hash_ix ^= ((*baddr) >> 6) & 0x03;
++baddr;
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 4c38491..2ccb9f1 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -902,7 +902,6 @@ get_dma_channel(struct net_device *dev)
return;
}
}
- return;
}
static void
@@ -1554,7 +1553,6 @@ static netdev_tx_t net_send_packet(struct sk_buff *skb,struct net_device *dev)
writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
spin_unlock_irqrestore(&lp->lock, flags);
lp->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
/*
@@ -1673,7 +1671,6 @@ count_rx_errors(int status, struct net_local *lp)
/* per str 172 */
lp->stats.rx_crc_errors++;
if (status & RX_DRIBBLE) lp->stats.rx_frame_errors++;
- return;
}
/* We have a good packet(s), get it/them out of the buffers. */
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 2f3ee72..f452c40 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -207,7 +207,6 @@ again:
*/
neigh_event_send(e->neigh, NULL);
}
- return;
}
EXPORT_SYMBOL(t3_l2t_send_event);
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 07d7e7f..5962b91 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -118,7 +118,7 @@ struct rx_sw_desc { /* SW state per Rx descriptor */
struct sk_buff *skb;
struct fl_pg_chunk pg_chunk;
};
- DECLARE_PCI_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
};
struct rsp_desc { /* response queue descriptor */
@@ -208,7 +208,7 @@ static inline int need_skb_unmap(void)
* unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
*/
struct dummy {
- DECLARE_PCI_UNMAP_ADDR(addr);
+ DEFINE_DMA_UNMAP_ADDR(addr);
};
return sizeof(struct dummy) != 0;
@@ -363,7 +363,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
put_page(d->pg_chunk.page);
d->pg_chunk.page = NULL;
} else {
- pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
+ pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
q->buf_size, PCI_DMA_FROMDEVICE);
kfree_skb(d->skb);
d->skb = NULL;
@@ -419,7 +419,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
if (unlikely(pci_dma_mapping_error(pdev, mapping)))
return -ENOMEM;
- pci_unmap_addr_set(sd, dma_addr, mapping);
+ dma_unmap_addr_set(sd, dma_addr, mapping);
d->addr_lo = cpu_to_be32(mapping);
d->addr_hi = cpu_to_be32((u64) mapping >> 32);
@@ -515,7 +515,7 @@ nomem: q->alloc_failed++;
break;
}
mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
- pci_unmap_addr_set(sd, dma_addr, mapping);
+ dma_unmap_addr_set(sd, dma_addr, mapping);
add_one_rx_chunk(mapping, d, q->gen);
pci_dma_sync_single_for_device(adap->pdev, mapping,
@@ -791,11 +791,11 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
if (likely(skb != NULL)) {
__skb_put(skb, len);
pci_dma_sync_single_for_cpu(adap->pdev,
- pci_unmap_addr(sd, dma_addr), len,
+ dma_unmap_addr(sd, dma_addr), len,
PCI_DMA_FROMDEVICE);
memcpy(skb->data, sd->skb->data, len);
pci_dma_sync_single_for_device(adap->pdev,
- pci_unmap_addr(sd, dma_addr), len,
+ dma_unmap_addr(sd, dma_addr), len,
PCI_DMA_FROMDEVICE);
} else if (!drop_thres)
goto use_orig_buf;
@@ -810,7 +810,7 @@ recycle:
goto recycle;
use_orig_buf:
- pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+ pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
fl->buf_size, PCI_DMA_FROMDEVICE);
skb = sd->skb;
skb_put(skb, len);
@@ -843,7 +843,7 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
struct sk_buff *newskb, *skb;
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
- dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
+ dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
newskb = skb = q->pg_skb;
if (!skb && (len <= SGE_RX_COPY_THRES)) {
@@ -2097,7 +2097,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
fl->credits--;
pci_dma_sync_single_for_cpu(adap->pdev,
- pci_unmap_addr(sd, dma_addr),
+ dma_unmap_addr(sd, dma_addr),
fl->buf_size - SGE_PG_RSVD,
PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index c142a21..3af19a5 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -311,16 +311,16 @@ int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
if (dev->flags & IFF_ALLMULTI)
hash_lo = hash_hi = 0xffffffff;
else {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int exact_addr_idx = mac->nucast;
hash_lo = hash_hi = 0;
- netdev_for_each_mc_addr(dmi, dev)
+ netdev_for_each_mc_addr(ha, dev)
if (exact_addr_idx < EXACT_ADDR_FILTERS)
set_addr_filter(mac, exact_addr_idx++,
- dmi->dmi_addr);
+ ha->addr);
else {
- int hash = hash_hw_addr(dmi->dmi_addr);
+ int hash = hash_hw_addr(ha->addr);
if (hash < 32)
hash_lo |= (1 << hash);
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 3d8ff48..dd1770e 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -53,7 +53,7 @@
enum {
MAX_NPORTS = 4, /* max # of ports */
- SERNUM_LEN = 16, /* Serial # length */
+ SERNUM_LEN = 24, /* Serial # length */
EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
};
@@ -477,7 +477,6 @@ struct adapter {
struct pci_dev *pdev;
struct device *pdev_dev;
unsigned long registered_device_map;
- unsigned long open_device_map;
unsigned long flags;
const char *name;
@@ -651,14 +650,11 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
-int t4_read_flash(struct adapter *adapter, unsigned int addr,
- unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
int t4_check_fw_version(struct adapter *adapter);
int t4_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
-void t4_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
int filter_index, int enable);
void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
@@ -709,7 +705,8 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int viid);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
- int mtu, int promisc, int all_multi, int bcast, bool sleep_ok);
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index a7e30a2..58045b0 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -240,9 +240,9 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
u16 filt_idx[7];
const u8 *addr[7];
int ret, naddr = 0;
- const struct dev_addr_list *d;
const struct netdev_hw_addr *ha;
int uc_cnt = netdev_uc_count(dev);
+ int mc_cnt = netdev_mc_count(dev);
const struct port_info *pi = netdev_priv(dev);
/* first do the secondary unicast addresses */
@@ -260,9 +260,9 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
}
/* next set up the multicast addresses */
- netdev_for_each_mc_addr(d, dev) {
- addr[naddr++] = d->dmi_addr;
- if (naddr >= ARRAY_SIZE(addr) || d->next == NULL) {
+ netdev_for_each_mc_addr(ha, dev) {
+ addr[naddr++] = ha->addr;
+ if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
naddr, addr, filt_idx, &mhash, sleep);
if (ret < 0)
@@ -290,7 +290,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
if (ret == 0)
ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
(dev->flags & IFF_PROMISC) ? 1 : 0,
- (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
+ (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
sleep_ok);
return ret;
}
@@ -311,11 +311,11 @@ static int link_start(struct net_device *dev)
* that step explicitly.
*/
ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
- true);
+ pi->vlan_grp != NULL, true);
if (ret == 0) {
ret = t4_change_mac(pi->adapter, 0, pi->viid,
pi->xact_addr_filt, dev->dev_addr, true,
- false);
+ true);
if (ret >= 0) {
pi->xact_addr_filt = ret;
ret = 0;
@@ -859,6 +859,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
"RxCsumGood ",
"VLANextractions ",
"VLANinsertions ",
+ "GROpackets ",
+ "GROmerged ",
};
static int get_sset_count(struct net_device *dev, int sset)
@@ -922,6 +924,8 @@ struct queue_port_stats {
u64 rx_csum;
u64 vlan_ex;
u64 vlan_ins;
+ u64 gro_pkts;
+ u64 gro_merged;
};
static void collect_sge_port_stats(const struct adapter *adap,
@@ -938,6 +942,8 @@ static void collect_sge_port_stats(const struct adapter *adap,
s->rx_csum += rx->stats.rx_cso;
s->vlan_ex += rx->stats.vlan_ex;
s->vlan_ins += tx->vlan_ins;
+ s->gro_pkts += rx->stats.lro_pkts;
+ s->gro_merged += rx->stats.lro_merged;
}
}
@@ -1711,6 +1717,18 @@ static int set_tso(struct net_device *dev, u32 value)
return 0;
}
+static int set_flags(struct net_device *dev, u32 flags)
+{
+ if (flags & ~ETH_FLAG_RXHASH)
+ return -EOPNOTSUPP;
+
+ if (flags & ETH_FLAG_RXHASH)
+ dev->features |= NETIF_F_RXHASH;
+ else
+ dev->features &= ~NETIF_F_RXHASH;
+ return 0;
+}
+
static struct ethtool_ops cxgb_ethtool_ops = {
.get_settings = get_settings,
.set_settings = set_settings,
@@ -1741,6 +1759,7 @@ static struct ethtool_ops cxgb_ethtool_ops = {
.get_wol = get_wol,
.set_wol = set_wol,
.set_tso = set_tso,
+ .set_flags = set_flags,
.flash_device = set_flash,
};
@@ -2308,6 +2327,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
register_netevent_notifier(&cxgb4_netevent_nb);
netevent_registered = true;
}
+
+ if (adap->flags & FULL_INIT_DONE)
+ ulds[uld].state_change(handle, CXGB4_STATE_UP);
}
static void attach_ulds(struct adapter *adap)
@@ -2414,23 +2436,17 @@ EXPORT_SYMBOL(cxgb4_unregister_uld);
*/
static int cxgb_up(struct adapter *adap)
{
- int err = 0;
+ int err;
- if (!(adap->flags & FULL_INIT_DONE)) {
- err = setup_sge_queues(adap);
- if (err)
- goto out;
- err = setup_rss(adap);
- if (err) {
- t4_free_sge_resources(adap);
- goto out;
- }
- if (adap->flags & USING_MSIX)
- name_msix_vecs(adap);
- adap->flags |= FULL_INIT_DONE;
- }
+ err = setup_sge_queues(adap);
+ if (err)
+ goto out;
+ err = setup_rss(adap);
+ if (err)
+ goto freeq;
if (adap->flags & USING_MSIX) {
+ name_msix_vecs(adap);
err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
adap->msix_info[0].desc, adap);
if (err)
@@ -2451,11 +2467,14 @@ static int cxgb_up(struct adapter *adap)
enable_rx(adap);
t4_sge_start(adap);
t4_intr_enable(adap);
+ adap->flags |= FULL_INIT_DONE;
notify_ulds(adap, CXGB4_STATE_UP);
out:
return err;
irq_err:
dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
+ freeq:
+ t4_free_sge_resources(adap);
goto out;
}
@@ -2471,6 +2490,9 @@ static void cxgb_down(struct adapter *adapter)
} else
free_irq(adapter->pdev->irq, adapter);
quiesce_rx(adapter);
+ t4_sge_stop(adapter);
+ t4_free_sge_resources(adapter);
+ adapter->flags &= ~FULL_INIT_DONE;
}
/*
@@ -2482,11 +2504,13 @@ static int cxgb_open(struct net_device *dev)
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
- return err;
+ if (!(adapter->flags & FULL_INIT_DONE)) {
+ err = cxgb_up(adapter);
+ if (err < 0)
+ return err;
+ }
dev->real_num_tx_queues = pi->nqsets;
- set_bit(pi->tx_chan, &adapter->open_device_map);
link_start(dev);
netif_tx_start_all_queues(dev);
return 0;
@@ -2494,19 +2518,12 @@ static int cxgb_open(struct net_device *dev)
static int cxgb_close(struct net_device *dev)
{
- int ret;
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
- ret = t4_enable_vi(adapter, 0, pi->viid, false, false);
-
- clear_bit(pi->tx_chan, &adapter->open_device_map);
-
- if (!adapter->open_device_map)
- cxgb_down(adapter);
- return 0;
+ return t4_enable_vi(adapter, 0, pi->viid, false, false);
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
@@ -2601,7 +2618,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
return -EINVAL;
- ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1,
+ ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, -1,
true);
if (!ret)
dev->mtu = new_mtu;
@@ -2632,7 +2649,8 @@ static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
struct port_info *pi = netdev_priv(dev);
pi->vlan_grp = grp;
- t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL);
+ t4_set_rxmode(pi->adapter, 0, pi->viid, -1, -1, -1, -1, grp != NULL,
+ true);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3066,6 +3084,12 @@ static void __devinit print_port_info(struct adapter *adap)
int i;
char buf[80];
+ const char *spd = "";
+
+ if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
+ spd = " 2.5 GT/s";
+ else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
+ spd = " 5 GT/s";
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
@@ -3085,10 +3109,10 @@ static void __devinit print_port_info(struct adapter *adap)
--bufp;
sprintf(bufp, "BASE-%s", base[pi->port_type]);
- netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n",
+ netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
adap->params.vpd.id, adap->params.rev,
buf, is_offload(adap) ? "R" : "",
- adap->params.pci.width,
+ adap->params.pci.width, spd,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
if (adap->name == dev->name)
@@ -3203,7 +3227,7 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- netdev->features |= NETIF_F_GRO | highdma;
+ netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
netdev->vlan_features = netdev->features & VLAN_FEAT;
@@ -3334,8 +3358,8 @@ static void __devexit remove_one(struct pci_dev *pdev)
if (adapter->debugfs_root)
debugfs_remove_recursive(adapter->debugfs_root);
- t4_sge_stop(adapter);
- t4_free_sge_resources(adapter);
+ if (adapter->flags & FULL_INIT_DONE)
+ cxgb_down(adapter);
t4_free_mem(adapter->l2t);
t4_free_mem(adapter->tids.tid_tab);
disable_msi(adapter);
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 14adc58..d1f8f22 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -1471,7 +1471,7 @@ EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
* Releases the pages of a packet gather list. We do not own the last
* page on the list and do not free it.
*/
-void t4_pktgl_free(const struct pkt_gl *gl)
+static void t4_pktgl_free(const struct pkt_gl *gl)
{
int n;
const skb_frag_t *p;
@@ -1524,6 +1524,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->truesize += skb->data_len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
+ if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
+ skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
if (unlikely(pkt->vlan_ex)) {
struct port_info *pi = netdev_priv(rxq->rspq.netdev);
@@ -1565,7 +1567,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
return handle_trace_pkt(q->adap, si);
- pkt = (void *)&rsp[1];
+ pkt = (const struct cpl_rx_pkt *)rsp;
csum_ok = pkt->csum_calc && !pkt->err_vec;
if ((pkt->l2info & htonl(RXF_TCP)) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
@@ -1583,6 +1585,9 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
__skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */
skb->protocol = eth_type_trans(skb, q->netdev);
skb_record_rx_queue(skb, q->idx);
+ if (skb->dev->features & NETIF_F_RXHASH)
+ skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+
pi = netdev_priv(skb->dev);
rxq->stats.pkts++;
@@ -2047,7 +2052,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
adap->sge.ingr_map[iq->cntxt_id] = iq;
if (fl) {
- fl->cntxt_id = htons(c.fl0id);
+ fl->cntxt_id = ntohs(c.fl0id);
fl->avail = fl->pend_cred = 0;
fl->pidx = fl->cidx = 0;
fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
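With the rss_header now exposed at the start of struct cpl_rx_pkt, both the GRO and the regular receive paths above can hand the hardware RSS hash to the stack. Only skb->rxhash and NETIF_F_RXHASH are kernel interfaces in this sketch; the helper name and the way the hash value reaches it are illustrative:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_set_rxhash(struct sk_buff *skb,
			       const struct net_device *netdev, u32 hw_hash)
{
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = hw_hash;	/* consumed by RPS flow steering */
}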
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index a814a3a..da272a9 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -53,8 +53,8 @@
* at the time it indicated completion is stored there. Returns 0 if the
* operation completes and -EAGAIN otherwise.
*/
-int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
- int polarity, int attempts, int delay, u32 *valp)
+static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay, u32 *valp)
{
while (1) {
u32 val = t4_read_reg(adapter, reg);
@@ -109,9 +109,9 @@ void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
* Reads registers that are accessed indirectly through an address/data
* register pair.
*/
-void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
- unsigned int data_reg, u32 *vals, unsigned int nregs,
- unsigned int start_idx)
+static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
{
while (nregs--) {
t4_write_reg(adap, addr_reg, start_idx);
@@ -120,6 +120,7 @@ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
}
}
+#if 0
/**
* t4_write_indirect - write indirectly addressed registers
* @adap: the adapter
@@ -132,15 +133,16 @@ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
* Writes a sequential block of registers that are accessed indirectly
* through an address/data register pair.
*/
-void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
- unsigned int data_reg, const u32 *vals,
- unsigned int nregs, unsigned int start_idx)
+static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, const u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
{
while (nregs--) {
t4_write_reg(adap, addr_reg, start_idx++);
t4_write_reg(adap, data_reg, *vals++);
}
}
+#endif
/*
* Get the reply to a mailbox command and store it in @rpl in big-endian order.
@@ -345,33 +347,21 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
return 0;
}
-#define VPD_ENTRY(name, len) \
- u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
-
/*
* Partial EEPROM Vital Product Data structure. Includes only the ID and
- * VPD-R sections.
+ * VPD-R header.
*/
-struct t4_vpd {
+struct t4_vpd_hdr {
u8 id_tag;
u8 id_len[2];
u8 id_data[ID_LEN];
u8 vpdr_tag;
u8 vpdr_len[2];
- VPD_ENTRY(pn, 16); /* part number */
- VPD_ENTRY(ec, EC_LEN); /* EC level */
- VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
- VPD_ENTRY(na, 12); /* MAC address base */
- VPD_ENTRY(port_type, 8); /* port types */
- VPD_ENTRY(gpio, 14); /* GPIO usage */
- VPD_ENTRY(cclk, 6); /* core clock */
- VPD_ENTRY(port_addr, 8); /* port MDIO addresses */
- VPD_ENTRY(rv, 1); /* csum */
- u32 pad; /* for multiple-of-4 sizing and alignment */
};
#define EEPROM_STAT_ADDR 0x7bfc
#define VPD_BASE 0
+#define VPD_LEN 512
/**
* t4_seeprom_wp - enable/disable EEPROM write protection
@@ -396,16 +386,36 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
*/
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
- int ret;
- struct t4_vpd vpd;
- u8 *q = (u8 *)&vpd, csum;
+ int i, ret;
+ int ec, sn, v2;
+ u8 vpd[VPD_LEN], csum;
+ unsigned int vpdr_len;
+ const struct t4_vpd_hdr *v;
- ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd);
+ ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
if (ret < 0)
return ret;
- for (csum = 0; q <= vpd.rv_data; q++)
- csum += *q;
+ v = (const struct t4_vpd_hdr *)vpd;
+ vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
+ if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
+ dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
+ return -EINVAL;
+ }
+
+#define FIND_VPD_KW(var, name) do { \
+ var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
+ vpdr_len, name); \
+ if (var < 0) { \
+ dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
+ return -EINVAL; \
+ } \
+ var += PCI_VPD_INFO_FLD_HDR_SIZE; \
+} while (0)
+
+ FIND_VPD_KW(i, "RV");
+ for (csum = 0; i >= 0; i--)
+ csum += vpd[i];
if (csum) {
dev_err(adapter->pdev_dev,
@@ -413,12 +423,18 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
return -EINVAL;
}
- p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
- memcpy(p->id, vpd.id_data, sizeof(vpd.id_data));
+ FIND_VPD_KW(ec, "EC");
+ FIND_VPD_KW(sn, "SN");
+ FIND_VPD_KW(v2, "V2");
+#undef FIND_VPD_KW
+
+ p->cclk = simple_strtoul(vpd + v2, NULL, 10);
+ memcpy(p->id, v->id_data, ID_LEN);
strim(p->id);
- memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data));
+ memcpy(p->ec, vpd + ec, EC_LEN);
strim(p->ec);
- memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data));
+ i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
+ memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
strim(p->sn);
return 0;
}
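get_vpd_params() now walks the VPD image with the generic PCI VPD helpers instead of a fixed-layout struct. A stand-alone sketch of that lookup flow (the buffer size, search window and "SN" keyword are illustrative; the driver itself bounds the search by the VPD-R length from pci_vpd_lrdt_size()):

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>

static int example_read_serial(struct pci_dev *pdev, char *sn, int sn_len)
{
	u8 vpd[512];
	int ret, kw, len;

	ret = pci_read_vpd(pdev, 0, sizeof(vpd), vpd);
	if (ret < 0)
		return ret;

	/* offset of the "SN" info-field header inside the buffer */
	kw = pci_vpd_find_info_keyword(vpd, 0, sizeof(vpd), "SN");
	if (kw < 0)
		return kw;

	len = pci_vpd_info_field_size(&vpd[kw]);
	memcpy(sn, &vpd[kw + PCI_VPD_INFO_FLD_HDR_SIZE], min(len, sn_len));
	return 0;
}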
@@ -537,8 +553,8 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
* (i.e., big-endian), otherwise as 32-bit words in the platform's
* natural endianess.
*/
-int t4_read_flash(struct adapter *adapter, unsigned int addr,
- unsigned int nwords, u32 *data, int byte_oriented)
+static int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented)
{
int ret;
@@ -870,22 +886,6 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
-/**
- * t4_set_vlan_accel - configure HW VLAN extraction
- * @adap: the adapter
- * @ports: bitmap of adapter ports to operate on
- * @on: enable (1) or disable (0) HW VLAN extraction
- *
- * Enables or disables HW extraction of VLAN tags for the ports specified
- * by @ports. @ports is a bitmap with the ith bit designating the port
- * associated with the ith adapter channel.
- */
-void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on)
-{
- ports <<= VLANEXTENABLE_SHIFT;
- t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0);
-}
-
struct intr_info {
unsigned int mask; /* bits to check in interrupt status */
const char *msg; /* message to print or NULL */
@@ -2608,12 +2608,14 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
* @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
* @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
* @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Sets Rx properties of a virtual interface.
*/
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
- int mtu, int promisc, int all_multi, int bcast, bool sleep_ok)
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok)
{
struct fw_vi_rxmode_cmd c;
@@ -2626,15 +2628,18 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
if (bcast < 0)
bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
+ if (vlanex < 0)
+ vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
memset(&c, 0, sizeof(c));
c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
c.retval_len16 = htonl(FW_LEN16(c));
- c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
- FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
- FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
- FW_VI_RXMODE_CMD_BROADCASTEN(bcast));
+ c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
+ FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
+ FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
+ FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
+ FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
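As the updated kernel-doc notes, every knob of t4_set_rxmode() treats -1 as "no change"; internally that becomes the field's all-ones mask so the firmware leaves the setting alone. A caller-side sketch (the helper name is illustrative; the call itself matches what the reworked vlan_rx_register() issues):

static int example_set_vlan_extraction(struct port_info *pi, bool on)
{
	/* -1 for mtu/promisc/allmulti/bcast: touch only HW VLAN extraction */
	return t4_set_rxmode(pi->adapter, 0, pi->viid, -1, -1, -1, -1,
			     on ? 1 : 0, true);
}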
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
index fdb1174..7a981b8 100644
--- a/drivers/net/cxgb4/t4_msg.h
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -503,6 +503,7 @@ struct cpl_rx_data_ack {
};
struct cpl_rx_pkt {
+ struct rss_header rsshdr;
u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 iff:4;
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 3393d05..63991d6 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -876,7 +876,7 @@ struct fw_vi_mac_cmd {
struct fw_vi_rxmode_cmd {
__be32 op_to_viid;
__be32 retval_len16;
- __be32 mtu_to_broadcasten;
+ __be32 mtu_to_vlanexen;
__be32 r4_lo;
};
@@ -888,6 +888,8 @@ struct fw_vi_rxmode_cmd {
#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12)
#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3
#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10)
+#define FW_VI_RXMODE_CMD_VLANEXEN_MASK 0x3
+#define FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << 8)
struct fw_vi_enable_cmd {
__be32 op_to_viid;
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2b8edd2..08e82b1 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -952,13 +952,14 @@ static void emac_dev_mcast_set(struct net_device *ndev)
emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
}
if (!netdev_mc_empty(ndev)) {
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
+
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
/* program multicast address list into EMAC hardware */
- netdev_for_each_mc_addr(mc_ptr, ndev) {
+ netdev_for_each_mc_addr(ha, ndev) {
emac_add_mcast(priv, EMAC_MULTICAST_ADD,
- (u8 *) mc_ptr->dmi_addr);
+ (u8 *) ha->addr);
}
} else {
mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST);
@@ -1467,7 +1468,6 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
tx_buf.length = skb->len;
tx_buf.buf_token = (void *)skb;
tx_buf.data_ptr = skb->data;
- ndev->trans_start = jiffies;
ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
if (unlikely(ret_code != 0)) {
if (ret_code == EMAC_ERR_TX_OUT_OF_BD) {
diff --git a/drivers/net/de600.c b/drivers/net/de600.c
index 6b13f4f..23a6539 100644
--- a/drivers/net/de600.c
+++ b/drivers/net/de600.c
@@ -166,8 +166,8 @@ static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i;
if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */
- tickssofar = jiffies - dev->trans_start;
- if (tickssofar < 5)
+ tickssofar = jiffies - dev_trans_start(dev);
+ if (tickssofar < HZ/20)
return NETDEV_TX_BUSY;
/* else */
printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem");
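Switching the stall check from a literal 5 jiffies to HZ/20 keeps it at roughly 50 ms for any CONFIG_HZ (the old constant was 50 ms only with HZ=100). A small sketch of the same test written with the jiffies helpers (function name illustrative):

#include <linux/jiffies.h>
#include <linux/netdevice.h>

static bool example_tx_stalled(struct net_device *dev)
{
	/* dev_trans_start() is the queue-aware replacement for dev->trans_start */
	return time_after(jiffies, dev_trans_start(dev) + HZ / 20);
}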
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index a0a6830..f3650fd 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -535,7 +535,6 @@ static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
de620_write_block(dev, buffer, skb->len, len-skb->len);
- dev->trans_start = jiffies;
if(!(using_txbuf == (TXBF0 | TXBF1)))
netif_wake_queue(dev);
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 8cf3cc6..1d973db 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -874,7 +874,7 @@ static inline int lance_reset(struct net_device *dev)
lance_init_ring(dev);
load_csrs(lp);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
status = init_restart_lance(lp);
return status;
}
@@ -930,7 +930,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags);
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -940,7 +939,7 @@ static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
@@ -959,8 +958,8 @@ static void lance_load_multicast(struct net_device *dev)
*lib_ptr(ib, filter[3], lp->type) = 0;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
@@ -970,7 +969,6 @@ static void lance_load_multicast(struct net_device *dev)
crc = crc >> 26;
*lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
}
- return;
}
static void lance_set_multicast(struct net_device *dev)
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index ed53a8d..e5667c5 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -2195,7 +2195,7 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
int i; /* used as index in for loop */
- struct dev_mc_list *dmi; /* ptr to multicast addr entry */
+ struct netdev_hw_addr *ha;
/* Enable LLC frame promiscuous mode, if necessary */
@@ -2241,9 +2241,9 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
/* Copy addresses to multicast address table, then update adapter CAM */
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
+ netdev_for_each_mc_addr(ha, dev)
memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
- dmi->dmi_addr, FDDI_K_ALEN);
+ ha->addr, FDDI_K_ALEN);
if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
{
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 744c192..bf66e9b 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -921,7 +921,7 @@ static void depca_tx_timeout(struct net_device *dev)
STOP_DEPCA;
depca_init_ring(dev);
LoadCSRs(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
InitRestartDepca(dev);
}
@@ -954,7 +954,6 @@ static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
outw(CSR0, DEPCA_ADDR);
outw(INEA | TDMD, DEPCA_DATA);
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
}
if (TX_BUFFS_AVAIL)
@@ -1204,8 +1203,6 @@ static void LoadCSRs(struct net_device *dev)
outw(ACON, DEPCA_DATA);
outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */
-
- return;
}
static int InitRestartDepca(struct net_device *dev)
@@ -1272,7 +1269,7 @@ static void set_multicast_list(struct net_device *dev)
static void SetMulticastFilter(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
int i, j, bit, byte;
u16 hashcode;
@@ -1287,8 +1284,8 @@ static void SetMulticastFilter(struct net_device *dev)
lp->init_block.mcast_table[i] = 0;
}
/* Add multicast addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc(ETH_ALEN, addrs);
hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
@@ -1303,8 +1300,6 @@ static void SetMulticastFilter(struct net_device *dev)
}
}
}
-
- return;
}
static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
@@ -1909,8 +1904,6 @@ static void depca_dbg_open(struct net_device *dev)
outw(CSR3, DEPCA_ADDR);
printk("CSR3: 0x%4.4x\n", inw(DEPCA_DATA));
}
-
- return;
}
/*
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index b05bad8..a2f238d 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -596,8 +596,6 @@ alloc_list (struct net_device *dev)
/* Set RFDListPtr */
writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
writel (0, dev->base_addr + RFDListPtr1);
-
- return;
}
static netdev_tx_t
@@ -1132,14 +1130,14 @@ set_multicast (struct net_device *dev)
/* Receive broadcast and multicast frames */
rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
/* Receive broadcast frames and multicast frames filtering
by Hashtable */
rx_mode =
ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
int bit, index = 0;
- int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
+ int crc = ether_crc_le(ETH_ALEN, ha->addr);
/* The inverted high significant 6 bits of CRC are
used as an index to hashtable */
for (bit = 0; bit < 6; bit++)
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 7f9960f..abcc838 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -476,17 +476,13 @@ static uint32_t dm9000_get_rx_csum(struct net_device *dev)
return dm->rx_csum;
}
-static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
+static int dm9000_set_rx_csum_unlocked(struct net_device *dev, uint32_t data)
{
board_info_t *dm = to_dm9000_board(dev);
- unsigned long flags;
if (dm->can_csum) {
dm->rx_csum = data;
-
- spin_lock_irqsave(&dm->lock, flags);
iow(dm, DM9000_RCSR, dm->rx_csum ? RCSR_CSUM : 0);
- spin_unlock_irqrestore(&dm->lock, flags);
return 0;
}
@@ -494,6 +490,19 @@ static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
return -EOPNOTSUPP;
}
+static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dm->lock, flags);
+ ret = dm9000_set_rx_csum_unlocked(dev, data);
+ spin_unlock_irqrestore(&dm->lock, flags);
+
+ return ret;
+}
+
static int dm9000_set_tx_csum(struct net_device *dev, uint32_t data)
{
board_info_t *dm = to_dm9000_board(dev);
@@ -722,20 +731,17 @@ static unsigned char dm9000_type_to_char(enum dm9000_type type)
* Set DM9000 multicast address
*/
static void
-dm9000_hash_table(struct net_device *dev)
+dm9000_hash_table_unlocked(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
int i, oft;
u32 hash_val;
u16 hash_table[4];
u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
- unsigned long flags;
dm9000_dbg(db, 1, "entering %s\n", __func__);
- spin_lock_irqsave(&db->lock, flags);
-
for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
iow(db, oft, dev->dev_addr[i]);
@@ -753,8 +759,8 @@ dm9000_hash_table(struct net_device *dev)
rcr |= RCR_ALL;
/* the multicast address in Hash Table : 64 bits */
- netdev_for_each_mc_addr(mcptr, dev) {
- hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ hash_val = ether_crc_le(6, ha->addr) & 0x3f;
hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
}
@@ -765,11 +771,21 @@ dm9000_hash_table(struct net_device *dev)
}
iow(db, DM9000_RCR, rcr);
+}
+
+static void
+dm9000_hash_table(struct net_device *dev)
+{
+ board_info_t *db = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&db->lock, flags);
+ dm9000_hash_table_unlocked(dev);
spin_unlock_irqrestore(&db->lock, flags);
}
/*
- * Initilize dm9000 board
+ * Initialize dm9000 board
*/
static void
dm9000_init_dm9000(struct net_device *dev)
@@ -784,7 +800,7 @@ dm9000_init_dm9000(struct net_device *dev)
db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
/* Checksum mode */
- dm9000_set_rx_csum(dev, db->rx_csum);
+ dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
/* GPIO0 on pre-activate PHY */
iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
@@ -811,7 +827,7 @@ dm9000_init_dm9000(struct net_device *dev)
iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
/* Set address filter table */
- dm9000_hash_table(dev);
+ dm9000_hash_table_unlocked(dev);
imr = IMR_PAR | IMR_PTM | IMR_PRM;
if (db->type != TYPE_DM9000E)
@@ -825,7 +841,7 @@ dm9000_init_dm9000(struct net_device *dev)
/* Init Driver variable */
db->tx_pkt_cnt = 0;
db->queue_pkt_len = 0;
- dev->trans_start = 0;
+ dev->trans_start = jiffies;
}
/* Our watchdog timed out. Called by the networking layer */
@@ -843,7 +859,7 @@ static void dm9000_timeout(struct net_device *dev)
dm9000_reset(db);
dm9000_init_dm9000(dev);
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
/* Restore previous register address */
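The dm9000 hunks split the register-touching helpers into *_unlocked bodies plus locking wrappers, which lets dm9000_init_dm9000() program the checksum and multicast registers without re-acquiring the non-recursive db->lock its callers already hold. The general shape, with illustrative names:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

static void example_hash_table_unlocked(struct net_device *dev)
{
	/* caller holds the board lock; program the multicast hash registers here */
}

static void example_hash_table(struct net_device *dev)
{
	board_info_t *db = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&db->lock, flags);
	example_hash_table_unlocked(dev);
	spin_unlock_irqrestore(&db->lock, flags);
}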
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 2346852..8b0f50b 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -594,8 +594,6 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -918,7 +916,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
bp->regs, mem_base, dev->irq, dev->dev_addr);
- dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma \n",
+ dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
(bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
(bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
(bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 7910803..b194bad 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -147,6 +147,8 @@
* - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
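pr_fmt() is expanded inside every pr_*() call, so defining it to KBUILD_MODNAME ahead of the includes provides the prefix the old PFX macro used to add by hand. A stand-alone sketch (the message is illustrative; built into e100.c the prefix is "e100: "):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
	pr_info("loaded\n");	/* printed as "<modname>: loaded" */
	return 0;
}
module_init(example_init);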
@@ -175,7 +177,6 @@
#define DRV_VERSION "3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
-#define PFX DRV_NAME ": "
#define E100_WATCHDOG_PERIOD (2 * HZ)
#define E100_NAPI_WEIGHT 16
@@ -201,10 +202,6 @@ module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
-#define DPRINTK(nlevel, klevel, fmt, args...) \
- (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
- printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
- __func__ , ## args))
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
@@ -690,12 +687,13 @@ static int e100_self_test(struct nic *nic)
/* Check results of self-test */
if (nic->mem->selftest.result != 0) {
- DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
- nic->mem->selftest.result);
+ netif_err(nic, hw, nic->netdev,
+ "Self-test failed: result=0x%08X\n",
+ nic->mem->selftest.result);
return -ETIMEDOUT;
}
if (nic->mem->selftest.signature == 0) {
- DPRINTK(HW, ERR, "Self-test failed: timed out\n");
+ netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
return -ETIMEDOUT;
}
@@ -798,7 +796,7 @@ static int e100_eeprom_load(struct nic *nic)
/* The checksum, stored in the last word, is calculated such that
* the sum of words should be 0xBABA */
if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
- DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
+ netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
if (!eeprom_bad_csum_allow)
return -EAGAIN;
}
@@ -954,8 +952,7 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
udelay(20);
}
if (unlikely(!i)) {
- printk("e100.mdio_ctrl(%s) won't go Ready\n",
- nic->netdev->name );
+ netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
spin_unlock_irqrestore(&nic->mdio_lock, flags);
return 0; /* No way to indicate timeout error */
}
@@ -967,9 +964,10 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
break;
}
spin_unlock_irqrestore(&nic->mdio_lock, flags);
- DPRINTK(HW, DEBUG,
- "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
- dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
+ dir == mdi_read ? "READ" : "WRITE",
+ addr, reg, data, data_out);
return (u16)data_out;
}
@@ -1029,17 +1027,19 @@ static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
return ADVERTISE_10HALF |
ADVERTISE_10FULL;
default:
- DPRINTK(HW, DEBUG,
- "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
- dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
+ dir == mdi_read ? "READ" : "WRITE",
+ addr, reg, data);
return 0xFFFF;
}
} else {
switch (reg) {
default:
- DPRINTK(HW, DEBUG,
- "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
- dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
+ dir == mdi_read ? "READ" : "WRITE",
+ addr, reg, data);
return 0xFFFF;
}
}
@@ -1156,12 +1156,15 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
}
}
- DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
- DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
- DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
/*************************************************************************
@@ -1254,16 +1257,18 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
err = request_firmware(&fw, fw_name, &nic->pdev->dev);
if (err) {
- DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
- fw_name, err);
+ netif_err(nic, probe, nic->netdev,
+ "Failed to load firmware \"%s\": %d\n",
+ fw_name, err);
return ERR_PTR(err);
}
/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
if (fw->size != UCODE_SIZE * 4 + 3) {
- DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n",
- fw_name, fw->size);
+ netif_err(nic, probe, nic->netdev,
+ "Firmware \"%s\" has wrong size %zu\n",
+ fw_name, fw->size);
release_firmware(fw);
return ERR_PTR(-EINVAL);
}
@@ -1275,9 +1280,9 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
min_size >= UCODE_SIZE) {
- DPRINTK(PROBE, ERR,
- "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
- fw_name, timer, bundle, min_size);
+ netif_err(nic, probe, nic->netdev,
+ "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
+ fw_name, timer, bundle, min_size);
release_firmware(fw);
return ERR_PTR(-EINVAL);
}
@@ -1329,7 +1334,8 @@ static inline int e100_load_ucode_wait(struct nic *nic)
return PTR_ERR(fw);
if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
- DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
+ netif_err(nic, probe, nic->netdev,
+ "ucode cmd failed with error %d\n", err);
/* must restart cuc */
nic->cuc_cmd = cuc_start;
@@ -1349,7 +1355,7 @@ static inline int e100_load_ucode_wait(struct nic *nic)
/* if the command failed, or is not OK, notify and return */
if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
- DPRINTK(PROBE,ERR, "ucode load failed\n");
+ netif_err(nic, probe, nic->netdev, "ucode load failed\n");
err = -EPERM;
}
@@ -1387,8 +1393,8 @@ static int e100_phy_check_without_mii(struct nic *nic)
* media is sensed automatically based on how the link partner
* is configured. This is, in essence, manual configuration.
*/
- DPRINTK(PROBE, INFO,
- "found MII-less i82503 or 80c24 or other PHY\n");
+ netif_info(nic, probe, nic->netdev,
+ "found MII-less i82503 or 80c24 or other PHY\n");
nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
@@ -1435,18 +1441,20 @@ static int e100_phy_init(struct nic *nic)
return 0; /* simply return and hope for the best */
else {
/* for unknown cases log a fatal error */
- DPRINTK(HW, ERR,
- "Failed to locate any known PHY, aborting.\n");
+ netif_err(nic, hw, nic->netdev,
+ "Failed to locate any known PHY, aborting\n");
return -EAGAIN;
}
} else
- DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "phy_addr = %d\n", nic->mii.phy_id);
/* Get phy ID */
id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
nic->phy = (u32)id_hi << 16 | (u32)id_lo;
- DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "phy ID = 0x%08X\n", nic->phy);
/* Select the phy and isolate the rest */
for (addr = 0; addr < 32; addr++) {
@@ -1508,7 +1516,7 @@ static int e100_hw_init(struct nic *nic)
e100_hw_reset(nic);
- DPRINTK(HW, ERR, "e100_hw_init\n");
+ netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
if (!in_interrupt() && (err = e100_self_test(nic)))
return err;
@@ -1538,16 +1546,16 @@ static int e100_hw_init(struct nic *nic)
static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct net_device *netdev = nic->netdev;
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
cb->command = cpu_to_le16(cb_multi);
cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
i = 0;
- netdev_for_each_mc_addr(list, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
if (i == count)
break;
- memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &list->dmi_addr,
+ memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
ETH_ALEN);
}
}
@@ -1556,8 +1564,9 @@ static void e100_set_multicast_list(struct net_device *netdev)
{
struct nic *nic = netdev_priv(netdev);
- DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
- netdev_mc_count(netdev), netdev->flags);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "mc_count=%d, flags=0x%04X\n",
+ netdev_mc_count(netdev), netdev->flags);
if (netdev->flags & IFF_PROMISC)
nic->flags |= promiscuous;
@@ -1630,7 +1639,8 @@ static void e100_update_stats(struct nic *nic)
if (e100_exec_cmd(nic, cuc_dump_reset, 0))
- DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "exec cuc_dump_reset failed\n");
}
static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
@@ -1660,20 +1670,19 @@ static void e100_watchdog(unsigned long data)
struct nic *nic = (struct nic *)data;
struct ethtool_cmd cmd;
- DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
+ netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
+ "right now = %ld\n", jiffies);
/* mii library handles link maintenance tasks */
mii_ethtool_gset(&nic->mii, &cmd);
if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
- printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n",
- nic->netdev->name,
- cmd.speed == SPEED_100 ? "100" : "10",
- cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
+ netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
+ cmd.speed == SPEED_100 ? 100 : 10,
+ cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
- printk(KERN_INFO "e100: %s NIC Link is Down\n",
- nic->netdev->name);
+ netdev_info(nic->netdev, "NIC Link is Down\n");
}
mii_check_link(&nic->mii);
@@ -1733,7 +1742,8 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
Issue a NOP command followed by a 1us delay before
issuing the Tx command. */
if (e100_exec_cmd(nic, cuc_nop, 0))
- DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "exec cuc_nop failed\n");
udelay(1);
}
@@ -1742,17 +1752,18 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
switch (err) {
case -ENOSPC:
/* We queued the skb, but now we're out of space. */
- DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "No space for CB\n");
netif_stop_queue(netdev);
break;
case -ENOMEM:
/* This is a hard error - log it. */
- DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "Out of Tx resources, returning skb\n");
netif_stop_queue(netdev);
return NETDEV_TX_BUSY;
}
- netdev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -1768,9 +1779,10 @@ static int e100_tx_clean(struct nic *nic)
for (cb = nic->cb_to_clean;
cb->status & cpu_to_le16(cb_complete);
cb = nic->cb_to_clean = cb->next) {
- DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
- (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
- cb->status);
+ netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
+ "cb[%d]->status = 0x%04X\n",
+ (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
+ cb->status);
if (likely(cb->skb != NULL)) {
dev->stats.tx_packets++;
@@ -1913,7 +1925,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
rfd_status = le16_to_cpu(rfd->status);
- DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
+ netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
+ "status=0x%04X\n", rfd_status);
/* If data isn't ready, nothing to indicate */
if (unlikely(!(rfd_status & cb_complete))) {
@@ -2124,7 +2137,8 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
struct nic *nic = netdev_priv(netdev);
u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
- DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
+ netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
+ "stat_ack = 0x%02X\n", stat_ack);
if (stat_ack == stat_ack_not_ours || /* Not our interrupt */
stat_ack == stat_ack_not_present) /* Hardware is ejected */
@@ -2264,8 +2278,8 @@ static void e100_tx_timeout_task(struct work_struct *work)
struct nic *nic = container_of(work, struct nic, tx_timeout_task);
struct net_device *netdev = nic->netdev;
- DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
- ioread8(&nic->csr->scb.status));
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
rtnl_lock();
if (netif_running(netdev)) {
@@ -2532,8 +2546,8 @@ static int e100_set_ringparam(struct net_device *netdev,
rfds->count = min(rfds->count, rfds->max);
cbs->count = max(ring->tx_pending, cbs->min);
cbs->count = min(cbs->count, cbs->max);
- DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
- rfds->count, cbs->count);
+ netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
+ rfds->count, cbs->count);
if (netif_running(netdev))
e100_up(nic);
@@ -2710,7 +2724,7 @@ static int e100_open(struct net_device *netdev)
netif_carrier_off(netdev);
if ((err = e100_up(nic)))
- DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
+ netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
return err;
}
@@ -2744,7 +2758,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
if (((1 << debug) - 1) & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
+ pr_err("Etherdev alloc failed, aborting\n");
return -ENOMEM;
}
@@ -2762,35 +2776,34 @@ static int __devinit e100_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, netdev);
if ((err = pci_enable_device(pdev))) {
- DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
goto err_out_free_dev;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
- "base address, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
err = -ENODEV;
goto err_out_disable_pdev;
}
if ((err = pci_request_regions(pdev, DRV_NAME))) {
- DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
goto err_out_disable_pdev;
}
if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
goto err_out_free_res;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
if (use_io)
- DPRINTK(PROBE, INFO, "using i/o access mode\n");
+ netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
if (!nic->csr) {
- DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_free_res;
}
@@ -2824,7 +2837,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
if ((err = e100_alloc(nic))) {
- DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
goto err_out_iounmap;
}
@@ -2837,13 +2850,11 @@ static int __devinit e100_probe(struct pci_dev *pdev,
memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
if (!is_valid_ether_addr(netdev->perm_addr)) {
if (!eeprom_bad_csum_allow) {
- DPRINTK(PROBE, ERR, "Invalid MAC address from "
- "EEPROM, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
err = -EAGAIN;
goto err_out_free;
} else {
- DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
- "you MUST configure one.\n");
+ netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
}
}
@@ -2859,7 +2870,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
strcpy(netdev->name, "eth%d");
if ((err = register_netdev(netdev))) {
- DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
goto err_out_free;
}
nic->cbs_pool = pci_pool_create(netdev->name,
@@ -2867,9 +2878,10 @@ static int __devinit e100_probe(struct pci_dev *pdev,
nic->params.cbs.max * sizeof(struct cb),
sizeof(u32),
0);
- DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
- (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
- pdev->irq, netdev->dev_addr);
+ netif_info(nic, probe, nic->netdev,
+ "addr 0x%llx, irq %d, MAC addr %pM\n",
+ (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
+ pdev->irq, netdev->dev_addr);
return 0;
@@ -3027,7 +3039,7 @@ static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
struct nic *nic = netdev_priv(netdev);
if (pci_enable_device(pdev)) {
- printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
+ pr_err("Cannot re-enable PCI device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
@@ -3086,8 +3098,8 @@ static struct pci_driver e100_driver = {
static int __init e100_init_module(void)
{
if (((1 << debug) - 1) & NETIF_MSG_DRV) {
- printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
- printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
+ pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ pr_info("%s\n", DRV_COPYRIGHT);
}
return pci_register_driver(&e100_driver);
}
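The DPRINTK() removals above all map onto the generic netif_* logging helpers, which test the matching NETIF_MSG_* bit in the private msg_enable word and prefix output with the driver and device names. A sketch of the pattern (the priv layout and the messages are illustrative):

#include <linux/netdevice.h>

struct example_priv {
	struct net_device *netdev;
	u32 msg_enable;		/* usually set up with netif_msg_init(debug, default_bits) */
};

static void example_log(struct example_priv *nic)
{
	netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "phy_addr = %d\n", 1);
}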
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 2f29c21..40b62b4 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -81,23 +81,6 @@ struct e1000_adapter;
#include "e1000_hw.h"
-#ifdef DBG
-#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
-#else
-#define E1000_DBG(args...)
-#endif
-
-#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args)
-
-#define PFX "e1000: "
-
-#define DPRINTK(nlevel, klevel, fmt, args...) \
-do { \
- if (NETIF_MSG_##nlevel & adapter->msg_enable) \
- printk(KERN_##klevel PFX "%s: %s: " fmt, \
- adapter->netdev->name, __func__, ##args); \
-} while (0)
-
#define E1000_MAX_INTR 10
/* TX/RX descriptor defines */
@@ -335,6 +318,25 @@ enum e1000_state_t {
__E1000_DOWN
};
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
+#define e_dbg(format, arg...) \
+ netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
+#define e_err(format, arg...) \
+ netdev_err(adapter->netdev, format, ## arg)
+#define e_info(format, arg...) \
+ netdev_info(adapter->netdev, format, ## arg)
+#define e_warn(format, arg...) \
+ netdev_warn(adapter->netdev, format, ## arg)
+#define e_notice(format, arg...) \
+ netdev_notice(adapter->netdev, format, ## arg)
+#define e_dev_info(format, arg...) \
+ dev_info(&adapter->pdev->dev, format, ## arg)
+#define e_dev_warn(format, arg...) \
+ dev_warn(&adapter->pdev->dev, format, ## arg)
+
extern char e1000_driver_name[];
extern const char e1000_driver_version[];
@@ -352,5 +354,6 @@ extern bool e1000_has_link(struct e1000_adapter *adapter);
extern void e1000_power_up_phy(struct e1000_adapter *);
extern void e1000_set_ethtool_ops(struct net_device *netdev);
extern void e1000_check_options(struct e1000_adapter *adapter);
+extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
#endif /* _E1000_H_ */
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c67e931..d5ff029 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -346,7 +346,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
netdev->features &= ~NETIF_F_TSO6;
- DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
+ e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
adapter->tso_force = true;
return 0;
}
@@ -714,9 +714,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
writel(write & test[i], address);
read = readl(address);
if (read != (write & test[i] & mask)) {
- DPRINTK(DRV, ERR, "pattern test reg %04X failed: "
- "got 0x%08X expected 0x%08X\n",
- reg, read, (write & test[i] & mask));
+ e_info("pattern test reg %04X failed: "
+ "got 0x%08X expected 0x%08X\n",
+ reg, read, (write & test[i] & mask));
*data = reg;
return true;
}
@@ -734,9 +734,9 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
writel(write & mask, address);
read = readl(address);
if ((read & mask) != (write & mask)) {
- DPRINTK(DRV, ERR, "set/check reg %04X test failed: "
- "got 0x%08X expected 0x%08X\n",
- reg, (read & mask), (write & mask));
+ e_err("set/check reg %04X test failed: "
+ "got 0x%08X expected 0x%08X\n",
+ reg, (read & mask), (write & mask));
*data = reg;
return true;
}
@@ -779,8 +779,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
ew32(STATUS, toggle);
after = er32(STATUS) & toggle;
if (value != after) {
- DPRINTK(DRV, ERR, "failed STATUS register test got: "
- "0x%08X expected: 0x%08X\n", after, value);
+ e_err("failed STATUS register test got: "
+ "0x%08X expected: 0x%08X\n", after, value);
*data = 1;
return 1;
}
@@ -894,8 +894,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
*data = 1;
return -1;
}
- DPRINTK(HW, INFO, "testing %s interrupt\n",
- (shared_int ? "shared" : "unshared"));
+ e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
@@ -980,9 +979,10 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (txdr->desc && txdr->buffer_info) {
for (i = 0; i < txdr->count; i++) {
if (txdr->buffer_info[i].dma)
- pci_unmap_single(pdev, txdr->buffer_info[i].dma,
+ dma_unmap_single(&pdev->dev,
+ txdr->buffer_info[i].dma,
txdr->buffer_info[i].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (txdr->buffer_info[i].skb)
dev_kfree_skb(txdr->buffer_info[i].skb);
}
@@ -991,20 +991,23 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (rxdr->desc && rxdr->buffer_info) {
for (i = 0; i < rxdr->count; i++) {
if (rxdr->buffer_info[i].dma)
- pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
+ dma_unmap_single(&pdev->dev,
+ rxdr->buffer_info[i].dma,
rxdr->buffer_info[i].length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (rxdr->buffer_info[i].skb)
dev_kfree_skb(rxdr->buffer_info[i].skb);
}
}
if (txdr->desc) {
- pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
+ dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
+ txdr->dma);
txdr->desc = NULL;
}
if (rxdr->desc) {
- pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
+ dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
+ rxdr->dma);
rxdr->desc = NULL;
}
@@ -1012,8 +1015,6 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
txdr->buffer_info = NULL;
kfree(rxdr->buffer_info);
rxdr->buffer_info = NULL;
-
- return;
}
static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
@@ -1039,7 +1040,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
ret_val = 2;
goto err_nomem;
@@ -1070,8 +1072,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->buffer_info[i].skb = skb;
txdr->buffer_info[i].length = skb->len;
txdr->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
tx_desc->lower.data = cpu_to_le32(skb->len);
tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
@@ -1093,7 +1095,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
}
rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
ret_val = 5;
goto err_nomem;
@@ -1126,8 +1129,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->buffer_info[i].skb = skb;
rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
rxdr->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ dma_map_single(&pdev->dev, skb->data,
+ E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
memset(skb->data, 0x00, skb->len);
}
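
The descriptor-ring allocations in the hunks above move from pci_alloc_consistent()/pci_free_consistent() to dma_alloc_coherent()/dma_free_coherent(), which take the struct device and an explicit gfp_t. A minimal sketch of that pairing, outside this driver (function names are illustrative only):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Illustrative only: allocate and free a coherent descriptor ring the way
 * the converted code above does, passing &pdev->dev and GFP_KERNEL
 * explicitly instead of relying on the pci_alloc_consistent() wrapper.
 */
static void *example_alloc_ring(struct pci_dev *pdev, size_t size,
                                dma_addr_t *dma)
{
        return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

static void example_free_ring(struct pci_dev *pdev, size_t size, void *desc,
                              dma_addr_t dma)
{
        dma_free_coherent(&pdev->dev, size, desc, dma);
}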
@@ -1444,10 +1447,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
for (i = 0; i < 64; i++) { /* send the packets */
e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
- txdr->buffer_info[k].dma,
- txdr->buffer_info[k].length,
- PCI_DMA_TODEVICE);
+ dma_sync_single_for_device(&pdev->dev,
+ txdr->buffer_info[k].dma,
+ txdr->buffer_info[k].length,
+ DMA_TO_DEVICE);
if (unlikely(++k == txdr->count)) k = 0;
}
ew32(TDT, k);
@@ -1455,10 +1458,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
do { /* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
- rxdr->buffer_info[l].dma,
- rxdr->buffer_info[l].length,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pdev->dev,
+ rxdr->buffer_info[l].dma,
+ rxdr->buffer_info[l].length,
+ DMA_FROM_DEVICE);
ret_val = e1000_check_lbtest_frame(
rxdr->buffer_info[l].skb,
@@ -1558,7 +1561,7 @@ static void e1000_diag_test(struct net_device *netdev,
u8 forced_speed_duplex = hw->forced_speed_duplex;
u8 autoneg = hw->autoneg;
- DPRINTK(HW, INFO, "offline testing starting\n");
+ e_info("offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */
@@ -1598,7 +1601,7 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
dev_open(netdev);
} else {
- DPRINTK(HW, INFO, "online testing starting\n");
+ e_info("online testing starting\n");
/* Online tests */
if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1691,7 +1694,7 @@ static void e1000_get_wol(struct net_device *netdev,
wol->supported &= ~WAKE_UCAST;
if (adapter->wol & E1000_WUFC_EX)
- DPRINTK(DRV, ERR, "Interface does not support "
+ e_err("Interface does not support "
"directed (unicast) frame wake-up packets\n");
break;
default:
@@ -1706,8 +1709,6 @@ static void e1000_get_wol(struct net_device *netdev,
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & E1000_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
-
- return;
}
static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -1725,8 +1726,8 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
switch (hw->device_id) {
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
if (wol->wolopts & WAKE_UCAST) {
- DPRINTK(DRV, ERR, "Interface does not support "
- "directed (unicast) frame wake-up packets\n");
+ e_err("Interface does not support "
+ "directed (unicast) frame wake-up packets\n");
return -EOPNOTSUPP;
}
break;
@@ -1803,7 +1804,7 @@ static int e1000_get_coalesce(struct net_device *netdev,
if (adapter->hw.mac_type < e1000_82545)
return -EOPNOTSUPP;
- if (adapter->itr_setting <= 3)
+ if (adapter->itr_setting <= 4)
ec->rx_coalesce_usecs = adapter->itr_setting;
else
ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
@@ -1821,12 +1822,14 @@ static int e1000_set_coalesce(struct net_device *netdev,
return -EOPNOTSUPP;
if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
- ((ec->rx_coalesce_usecs > 3) &&
+ ((ec->rx_coalesce_usecs > 4) &&
(ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
(ec->rx_coalesce_usecs == 2))
return -EINVAL;
- if (ec->rx_coalesce_usecs <= 3) {
+ if (ec->rx_coalesce_usecs == 4) {
+ adapter->itr = adapter->itr_setting = 4;
+ } else if (ec->rx_coalesce_usecs <= 3) {
adapter->itr = 20000;
adapter->itr_setting = ec->rx_coalesce_usecs;
} else {
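
The e1000_ethtool.c hunks above all follow one mechanical pattern: the legacy pci_* DMA wrappers become the generic dma_* API, the struct device (&pdev->dev) is passed explicitly, and the PCI_DMA_* direction flags become DMA_*. A minimal sketch of the same pattern on a single streaming buffer (helper name and error handling are illustrative, not part of the patch):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Illustrative helper only: map one buffer for device reads, hand it to the
 * device, and unmap it, mirroring the pci_map_single() -> dma_map_single()
 * and PCI_DMA_TODEVICE -> DMA_TO_DEVICE conversion performed above.
 */
static int example_dma_roundtrip(struct pci_dev *pdev, void *buf, size_t len)
{
        dma_addr_t dma;

        dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, dma))
                return -ENOMEM;

        /* CPU fills buf, then ownership passes to the device */
        dma_sync_single_for_device(&pdev->dev, dma, len, DMA_TO_DEVICE);

        /* ... device consumes the buffer ... */

        dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
        return 0;
}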
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 8d7d87f..c7e242b6 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -30,7 +30,7 @@
* Shared functions for accessing and configuring the MAC
*/
-#include "e1000_hw.h"
+#include "e1000.h"
static s32 e1000_check_downshift(struct e1000_hw *hw);
static s32 e1000_check_polarity(struct e1000_hw *hw,
@@ -114,7 +114,7 @@ static DEFINE_SPINLOCK(e1000_eeprom_lock);
*/
static s32 e1000_set_phy_type(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_set_phy_type");
+ e_dbg("e1000_set_phy_type");
if (hw->mac_type == e1000_undefined)
return -E1000_ERR_PHY_TYPE;
@@ -152,7 +152,7 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
u32 ret_val;
u16 phy_saved_data;
- DEBUGFUNC("e1000_phy_init_script");
+ e_dbg("e1000_phy_init_script");
if (hw->phy_init_script) {
msleep(20);
@@ -245,7 +245,7 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
*/
s32 e1000_set_mac_type(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_set_mac_type");
+ e_dbg("e1000_set_mac_type");
switch (hw->device_id) {
case E1000_DEV_ID_82542:
@@ -354,7 +354,7 @@ void e1000_set_media_type(struct e1000_hw *hw)
{
u32 status;
- DEBUGFUNC("e1000_set_media_type");
+ e_dbg("e1000_set_media_type");
if (hw->mac_type != e1000_82543) {
/* tbi_compatibility is only valid on 82543 */
@@ -401,16 +401,16 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
u32 led_ctrl;
s32 ret_val;
- DEBUGFUNC("e1000_reset_hw");
+ e_dbg("e1000_reset_hw");
/* For 82542 (rev 2.0), disable MWI before issuing a device reset */
if (hw->mac_type == e1000_82542_rev2_0) {
- DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e_dbg("Disabling MWI on 82542 rev 2.0\n");
e1000_pci_clear_mwi(hw);
}
/* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
+ e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
/* Disable the Transmit and Receive units. Then delay to allow
@@ -442,7 +442,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
* the current PCI configuration. The global reset bit is self-
* clearing, and should clear within a microsecond.
*/
- DEBUGOUT("Issuing a global reset to MAC\n");
+ e_dbg("Issuing a global reset to MAC\n");
switch (hw->mac_type) {
case e1000_82544:
@@ -516,7 +516,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
}
/* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
+ e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
/* Clear any pending interrupt events. */
@@ -549,12 +549,12 @@ s32 e1000_init_hw(struct e1000_hw *hw)
u32 mta_size;
u32 ctrl_ext;
- DEBUGFUNC("e1000_init_hw");
+ e_dbg("e1000_init_hw");
/* Initialize Identification LED */
ret_val = e1000_id_led_init(hw);
if (ret_val) {
- DEBUGOUT("Error Initializing Identification LED\n");
+ e_dbg("Error Initializing Identification LED\n");
return ret_val;
}
@@ -562,14 +562,14 @@ s32 e1000_init_hw(struct e1000_hw *hw)
e1000_set_media_type(hw);
/* Disabling VLAN filtering. */
- DEBUGOUT("Initializing the IEEE VLAN\n");
+ e_dbg("Initializing the IEEE VLAN\n");
if (hw->mac_type < e1000_82545_rev_3)
ew32(VET, 0);
e1000_clear_vfta(hw);
/* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
if (hw->mac_type == e1000_82542_rev2_0) {
- DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e_dbg("Disabling MWI on 82542 rev 2.0\n");
e1000_pci_clear_mwi(hw);
ew32(RCTL, E1000_RCTL_RST);
E1000_WRITE_FLUSH();
@@ -591,7 +591,7 @@ s32 e1000_init_hw(struct e1000_hw *hw)
}
/* Zero out the Multicast HASH table */
- DEBUGOUT("Zeroing the MTA\n");
+ e_dbg("Zeroing the MTA\n");
mta_size = E1000_MC_TBL_SIZE;
for (i = 0; i < mta_size; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
@@ -662,7 +662,7 @@ static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
u16 eeprom_data;
s32 ret_val;
- DEBUGFUNC("e1000_adjust_serdes_amplitude");
+ e_dbg("e1000_adjust_serdes_amplitude");
if (hw->media_type != e1000_media_type_internal_serdes)
return E1000_SUCCESS;
@@ -709,7 +709,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
s32 ret_val;
u16 eeprom_data;
- DEBUGFUNC("e1000_setup_link");
+ e_dbg("e1000_setup_link");
/* Read and store word 0x0F of the EEPROM. This word contains bits
* that determine the hardware's default PAUSE (flow control) mode,
@@ -723,7 +723,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
1, &eeprom_data);
if (ret_val) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
@@ -747,7 +747,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
hw->original_fc = hw->fc;
- DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
+ e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc);
/* Take the 4 bits from EEPROM word 0x0F that determine the initial
* polarity value for the SW controlled pins, and setup the
@@ -760,7 +760,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
1, &eeprom_data);
if (ret_val) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
@@ -777,8 +777,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
* control is disabled, because it does not hurt anything to
* initialize these registers.
*/
- DEBUGOUT
- ("Initializing the Flow Control address, type and timer regs\n");
+ e_dbg("Initializing the Flow Control address, type and timer regs\n");
ew32(FCT, FLOW_CONTROL_TYPE);
ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
@@ -827,7 +826,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
u32 signal = 0;
s32 ret_val;
- DEBUGFUNC("e1000_setup_fiber_serdes_link");
+ e_dbg("e1000_setup_fiber_serdes_link");
/* On adapters with a MAC newer than 82544, SWDP 1 will be
* set when the optics detect a signal. On older adapters, it will be
@@ -893,7 +892,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
default:
- DEBUGOUT("Flow control param set incorrectly\n");
+ e_dbg("Flow control param set incorrectly\n");
return -E1000_ERR_CONFIG;
break;
}
@@ -904,7 +903,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
* link-up status bit will be set and the flow control enable bits (RFCE
* and TFCE) will be set according to their negotiated value.
*/
- DEBUGOUT("Auto-negotiation enabled\n");
+ e_dbg("Auto-negotiation enabled\n");
ew32(TXCW, txcw);
ew32(CTRL, ctrl);
@@ -921,7 +920,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
*/
if (hw->media_type == e1000_media_type_internal_serdes ||
(er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
- DEBUGOUT("Looking for Link\n");
+ e_dbg("Looking for Link\n");
for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
msleep(10);
status = er32(STATUS);
@@ -929,7 +928,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
break;
}
if (i == (LINK_UP_TIMEOUT / 10)) {
- DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+ e_dbg("Never got a valid link from auto-neg!!!\n");
hw->autoneg_failed = 1;
/* AutoNeg failed to achieve a link, so we'll call
* e1000_check_for_link. This routine will force the link up if
@@ -938,16 +937,16 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
*/
ret_val = e1000_check_for_link(hw);
if (ret_val) {
- DEBUGOUT("Error while checking for link\n");
+ e_dbg("Error while checking for link\n");
return ret_val;
}
hw->autoneg_failed = 0;
} else {
hw->autoneg_failed = 0;
- DEBUGOUT("Valid Link Found\n");
+ e_dbg("Valid Link Found\n");
}
} else {
- DEBUGOUT("No Signal Detected\n");
+ e_dbg("No Signal Detected\n");
}
return E1000_SUCCESS;
}
@@ -964,7 +963,7 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_copper_link_preconfig");
+ e_dbg("e1000_copper_link_preconfig");
ctrl = er32(CTRL);
/* With 82543, we need to force speed and duplex on the MAC equal to what
@@ -987,10 +986,10 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
/* Make sure we have a valid PHY */
ret_val = e1000_detect_gig_phy(hw);
if (ret_val) {
- DEBUGOUT("Error, did not detect valid phy.\n");
+ e_dbg("Error, did not detect valid phy.\n");
return ret_val;
}
- DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
+ e_dbg("Phy ID = %x\n", hw->phy_id);
/* Set PHY to class A mode (if necessary) */
ret_val = e1000_set_phy_mode(hw);
@@ -1025,14 +1024,14 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_copper_link_igp_setup");
+ e_dbg("e1000_copper_link_igp_setup");
if (hw->phy_reset_disable)
return E1000_SUCCESS;
ret_val = e1000_phy_reset(hw);
if (ret_val) {
- DEBUGOUT("Error Resetting the PHY\n");
+ e_dbg("Error Resetting the PHY\n");
return ret_val;
}
@@ -1049,7 +1048,7 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
/* disable lplu d3 during driver init */
ret_val = e1000_set_d3_lplu_state(hw, false);
if (ret_val) {
- DEBUGOUT("Error Disabling LPLU D3\n");
+ e_dbg("Error Disabling LPLU D3\n");
return ret_val;
}
}
@@ -1166,7 +1165,7 @@ static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_copper_link_mgp_setup");
+ e_dbg("e1000_copper_link_mgp_setup");
if (hw->phy_reset_disable)
return E1000_SUCCESS;
@@ -1255,7 +1254,7 @@ static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
/* SW Reset the PHY so all changes take effect */
ret_val = e1000_phy_reset(hw);
if (ret_val) {
- DEBUGOUT("Error Resetting the PHY\n");
+ e_dbg("Error Resetting the PHY\n");
return ret_val;
}
@@ -1274,7 +1273,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_copper_link_autoneg");
+ e_dbg("e1000_copper_link_autoneg");
/* Perform some bounds checking on the hw->autoneg_advertised
* parameter. If this variable is zero, then set it to the default.
@@ -1287,13 +1286,13 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
if (hw->autoneg_advertised == 0)
hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
- DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+ e_dbg("Reconfiguring auto-neg advertisement params\n");
ret_val = e1000_phy_setup_autoneg(hw);
if (ret_val) {
- DEBUGOUT("Error Setting up Auto-Negotiation\n");
+ e_dbg("Error Setting up Auto-Negotiation\n");
return ret_val;
}
- DEBUGOUT("Restarting Auto-Neg\n");
+ e_dbg("Restarting Auto-Neg\n");
/* Restart auto-negotiation by setting the Auto Neg Enable bit and
* the Auto Neg Restart bit in the PHY control register.
@@ -1313,7 +1312,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
if (hw->wait_autoneg_complete) {
ret_val = e1000_wait_autoneg(hw);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error while waiting for autoneg to complete\n");
return ret_val;
}
@@ -1340,20 +1339,20 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
{
s32 ret_val;
- DEBUGFUNC("e1000_copper_link_postconfig");
+ e_dbg("e1000_copper_link_postconfig");
if (hw->mac_type >= e1000_82544) {
e1000_config_collision_dist(hw);
} else {
ret_val = e1000_config_mac_to_phy(hw);
if (ret_val) {
- DEBUGOUT("Error configuring MAC to PHY settings\n");
+ e_dbg("Error configuring MAC to PHY settings\n");
return ret_val;
}
}
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
- DEBUGOUT("Error Configuring Flow Control\n");
+ e_dbg("Error Configuring Flow Control\n");
return ret_val;
}
@@ -1361,7 +1360,7 @@ static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
if (hw->phy_type == e1000_phy_igp) {
ret_val = e1000_config_dsp_after_link_change(hw, true);
if (ret_val) {
- DEBUGOUT("Error Configuring DSP after link up\n");
+ e_dbg("Error Configuring DSP after link up\n");
return ret_val;
}
}
@@ -1381,7 +1380,7 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
u16 i;
u16 phy_data;
- DEBUGFUNC("e1000_setup_copper_link");
+ e_dbg("e1000_setup_copper_link");
/* Check if it is a valid PHY and set PHY mode if necessary. */
ret_val = e1000_copper_link_preconfig(hw);
@@ -1407,10 +1406,10 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
} else {
/* PHY will be set to 10H, 10F, 100H,or 100F
* depending on value from forced_speed_duplex. */
- DEBUGOUT("Forcing speed and duplex\n");
+ e_dbg("Forcing speed and duplex\n");
ret_val = e1000_phy_force_speed_duplex(hw);
if (ret_val) {
- DEBUGOUT("Error Forcing Speed and Duplex\n");
+ e_dbg("Error Forcing Speed and Duplex\n");
return ret_val;
}
}
@@ -1432,13 +1431,13 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- DEBUGOUT("Valid link established!!!\n");
+ e_dbg("Valid link established!!!\n");
return E1000_SUCCESS;
}
udelay(10);
}
- DEBUGOUT("Unable to establish link!!!\n");
+ e_dbg("Unable to establish link!!!\n");
return E1000_SUCCESS;
}
@@ -1454,7 +1453,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
u16 mii_autoneg_adv_reg;
u16 mii_1000t_ctrl_reg;
- DEBUGFUNC("e1000_phy_setup_autoneg");
+ e_dbg("e1000_phy_setup_autoneg");
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
@@ -1481,41 +1480,41 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
- DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
+ e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised);
/* Do we want to advertise 10 Mb Half Duplex? */
if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
- DEBUGOUT("Advertise 10mb Half duplex\n");
+ e_dbg("Advertise 10mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
}
/* Do we want to advertise 10 Mb Full Duplex? */
if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
- DEBUGOUT("Advertise 10mb Full duplex\n");
+ e_dbg("Advertise 10mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
}
/* Do we want to advertise 100 Mb Half Duplex? */
if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
- DEBUGOUT("Advertise 100mb Half duplex\n");
+ e_dbg("Advertise 100mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
}
/* Do we want to advertise 100 Mb Full Duplex? */
if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
- DEBUGOUT("Advertise 100mb Full duplex\n");
+ e_dbg("Advertise 100mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
}
/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
- DEBUGOUT
+ e_dbg
("Advertise 1000mb Half duplex requested, request denied!\n");
}
/* Do we want to advertise 1000 Mb Full Duplex? */
if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
- DEBUGOUT("Advertise 1000mb Full duplex\n");
+ e_dbg("Advertise 1000mb Full duplex\n");
mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
}
@@ -1568,7 +1567,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
default:
- DEBUGOUT("Flow control param set incorrectly\n");
+ e_dbg("Flow control param set incorrectly\n");
return -E1000_ERR_CONFIG;
}
@@ -1576,7 +1575,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+ e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
if (ret_val)
@@ -1600,12 +1599,12 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
u16 phy_data;
u16 i;
- DEBUGFUNC("e1000_phy_force_speed_duplex");
+ e_dbg("e1000_phy_force_speed_duplex");
/* Turn off Flow control if we are forcing speed and duplex. */
hw->fc = E1000_FC_NONE;
- DEBUGOUT1("hw->fc = %d\n", hw->fc);
+ e_dbg("hw->fc = %d\n", hw->fc);
/* Read the Device Control Register. */
ctrl = er32(CTRL);
@@ -1634,14 +1633,14 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
*/
ctrl |= E1000_CTRL_FD;
mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
- DEBUGOUT("Full Duplex\n");
+ e_dbg("Full Duplex\n");
} else {
/* We want to force half duplex so we CLEAR the full duplex bits in
* the Device and MII Control Registers.
*/
ctrl &= ~E1000_CTRL_FD;
mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
- DEBUGOUT("Half Duplex\n");
+ e_dbg("Half Duplex\n");
}
/* Are we forcing 100Mbps??? */
@@ -1651,13 +1650,13 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
ctrl |= E1000_CTRL_SPD_100;
mii_ctrl_reg |= MII_CR_SPEED_100;
mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
- DEBUGOUT("Forcing 100mb ");
+ e_dbg("Forcing 100mb ");
} else {
/* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
mii_ctrl_reg |= MII_CR_SPEED_10;
mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
- DEBUGOUT("Forcing 10mb ");
+ e_dbg("Forcing 10mb ");
}
e1000_config_collision_dist(hw);
@@ -1680,7 +1679,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
+ e_dbg("M88E1000 PSCR: %x\n", phy_data);
/* Need to reset the PHY or these changes will be ignored */
mii_ctrl_reg |= MII_CR_RESET;
@@ -1720,7 +1719,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
*/
if (hw->wait_autoneg_complete) {
/* We will wait for autoneg to complete. */
- DEBUGOUT("Waiting for forced speed/duplex link.\n");
+ e_dbg("Waiting for forced speed/duplex link.\n");
mii_status_reg = 0;
/* We will wait for autoneg to complete or 4.5 seconds to expire. */
@@ -1746,7 +1745,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
/* We didn't get link. Reset the DSP and wait again for link. */
ret_val = e1000_phy_reset_dsp(hw);
if (ret_val) {
- DEBUGOUT("Error Resetting PHY DSP\n");
+ e_dbg("Error Resetting PHY DSP\n");
return ret_val;
}
}
@@ -1826,7 +1825,7 @@ void e1000_config_collision_dist(struct e1000_hw *hw)
{
u32 tctl, coll_dist;
- DEBUGFUNC("e1000_config_collision_dist");
+ e_dbg("e1000_config_collision_dist");
if (hw->mac_type < e1000_82543)
coll_dist = E1000_COLLISION_DISTANCE_82542;
@@ -1857,7 +1856,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_config_mac_to_phy");
+ e_dbg("e1000_config_mac_to_phy");
/* 82544 or newer MAC, Auto Speed Detection takes care of
* MAC speed/duplex configuration.*/
@@ -1913,7 +1912,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
{
u32 ctrl;
- DEBUGFUNC("e1000_force_mac_fc");
+ e_dbg("e1000_force_mac_fc");
/* Get the current configuration of the Device Control Register */
ctrl = er32(CTRL);
@@ -1952,7 +1951,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
break;
default:
- DEBUGOUT("Flow control param set incorrectly\n");
+ e_dbg("Flow control param set incorrectly\n");
return -E1000_ERR_CONFIG;
}
@@ -1984,7 +1983,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
u16 speed;
u16 duplex;
- DEBUGFUNC("e1000_config_fc_after_link_up");
+ e_dbg("e1000_config_fc_after_link_up");
/* Check for the case where we have fiber media and auto-neg failed
* so we had to force link. In this case, we need to force the
@@ -1997,7 +1996,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
&& (!hw->autoneg))) {
ret_val = e1000_force_mac_fc(hw);
if (ret_val) {
- DEBUGOUT("Error forcing flow control settings\n");
+ e_dbg("Error forcing flow control settings\n");
return ret_val;
}
}
@@ -2079,10 +2078,10 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*/
if (hw->original_fc == E1000_FC_FULL) {
hw->fc = E1000_FC_FULL;
- DEBUGOUT("Flow Control = FULL.\n");
+ e_dbg("Flow Control = FULL.\n");
} else {
hw->fc = E1000_FC_RX_PAUSE;
- DEBUGOUT
+ e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
}
@@ -2100,7 +2099,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
{
hw->fc = E1000_FC_TX_PAUSE;
- DEBUGOUT
+ e_dbg
("Flow Control = TX PAUSE frames only.\n");
}
/* For transmitting PAUSE frames ONLY.
@@ -2117,7 +2116,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
{
hw->fc = E1000_FC_RX_PAUSE;
- DEBUGOUT
+ e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
/* Per the IEEE spec, at this point flow control should be
@@ -2144,10 +2143,10 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
hw->original_fc == E1000_FC_TX_PAUSE) ||
hw->fc_strict_ieee) {
hw->fc = E1000_FC_NONE;
- DEBUGOUT("Flow Control = NONE.\n");
+ e_dbg("Flow Control = NONE.\n");
} else {
hw->fc = E1000_FC_RX_PAUSE;
- DEBUGOUT
+ e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
@@ -2158,7 +2157,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
ret_val =
e1000_get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error getting link speed and duplex\n");
return ret_val;
}
@@ -2171,12 +2170,12 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*/
ret_val = e1000_force_mac_fc(hw);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error forcing flow control settings\n");
return ret_val;
}
} else {
- DEBUGOUT
+ e_dbg
("Copper PHY and Auto Neg has not completed.\n");
}
}
@@ -2197,7 +2196,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
u32 status;
s32 ret_val = E1000_SUCCESS;
- DEBUGFUNC("e1000_check_for_serdes_link_generic");
+ e_dbg("e1000_check_for_serdes_link_generic");
ctrl = er32(CTRL);
status = er32(STATUS);
@@ -2216,7 +2215,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
hw->autoneg_failed = 1;
goto out;
}
- DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+ e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
/* Disable auto-negotiation in the TXCW register */
ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
@@ -2229,7 +2228,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
/* Configure Flow Control after forcing link up. */
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
- DEBUGOUT("Error configuring flow control\n");
+ e_dbg("Error configuring flow control\n");
goto out;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
@@ -2239,7 +2238,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
*/
- DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
ew32(TXCW, hw->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -2256,11 +2255,11 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
if (rxcw & E1000_RXCW_SYNCH) {
if (!(rxcw & E1000_RXCW_IV)) {
hw->serdes_has_link = true;
- DEBUGOUT("SERDES: Link up - forced.\n");
+ e_dbg("SERDES: Link up - forced.\n");
}
} else {
hw->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - force failed.\n");
+ e_dbg("SERDES: Link down - force failed.\n");
}
}
@@ -2273,20 +2272,20 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
if (rxcw & E1000_RXCW_SYNCH) {
if (!(rxcw & E1000_RXCW_IV)) {
hw->serdes_has_link = true;
- DEBUGOUT("SERDES: Link up - autoneg "
+ e_dbg("SERDES: Link up - autoneg "
"completed successfully.\n");
} else {
hw->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - invalid"
+ e_dbg("SERDES: Link down - invalid"
"codewords detected in autoneg.\n");
}
} else {
hw->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - no sync.\n");
+ e_dbg("SERDES: Link down - no sync.\n");
}
} else {
hw->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - autoneg failed\n");
+ e_dbg("SERDES: Link down - autoneg failed\n");
}
}
@@ -2312,7 +2311,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_check_for_link");
+ e_dbg("e1000_check_for_link");
ctrl = er32(CTRL);
status = er32(STATUS);
@@ -2407,7 +2406,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
else {
ret_val = e1000_config_mac_to_phy(hw);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error configuring MAC to PHY settings\n");
return ret_val;
}
@@ -2419,7 +2418,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
*/
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
- DEBUGOUT("Error configuring flow control\n");
+ e_dbg("Error configuring flow control\n");
return ret_val;
}
@@ -2435,7 +2434,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
ret_val =
e1000_get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error getting link speed and duplex\n");
return ret_val;
}
@@ -2487,30 +2486,30 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_get_speed_and_duplex");
+ e_dbg("e1000_get_speed_and_duplex");
if (hw->mac_type >= e1000_82543) {
status = er32(STATUS);
if (status & E1000_STATUS_SPEED_1000) {
*speed = SPEED_1000;
- DEBUGOUT("1000 Mbs, ");
+ e_dbg("1000 Mbs, ");
} else if (status & E1000_STATUS_SPEED_100) {
*speed = SPEED_100;
- DEBUGOUT("100 Mbs, ");
+ e_dbg("100 Mbs, ");
} else {
*speed = SPEED_10;
- DEBUGOUT("10 Mbs, ");
+ e_dbg("10 Mbs, ");
}
if (status & E1000_STATUS_FD) {
*duplex = FULL_DUPLEX;
- DEBUGOUT("Full Duplex\n");
+ e_dbg("Full Duplex\n");
} else {
*duplex = HALF_DUPLEX;
- DEBUGOUT(" Half Duplex\n");
+ e_dbg(" Half Duplex\n");
}
} else {
- DEBUGOUT("1000 Mbs, Full Duplex\n");
+ e_dbg("1000 Mbs, Full Duplex\n");
*speed = SPEED_1000;
*duplex = FULL_DUPLEX;
}
@@ -2554,8 +2553,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
u16 i;
u16 phy_data;
- DEBUGFUNC("e1000_wait_autoneg");
- DEBUGOUT("Waiting for Auto-Neg to complete.\n");
+ e_dbg("e1000_wait_autoneg");
+ e_dbg("Waiting for Auto-Neg to complete.\n");
/* We will wait for autoneg to complete or 4.5 seconds to expire. */
for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
@@ -2718,7 +2717,7 @@ s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
{
u32 ret_val;
- DEBUGFUNC("e1000_read_phy_reg");
+ e_dbg("e1000_read_phy_reg");
if ((hw->phy_type == e1000_phy_igp) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
@@ -2741,10 +2740,10 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
u32 mdic = 0;
const u32 phy_addr = 1;
- DEBUGFUNC("e1000_read_phy_reg_ex");
+ e_dbg("e1000_read_phy_reg_ex");
if (reg_addr > MAX_PHY_REG_ADDRESS) {
- DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+ e_dbg("PHY Address %d is out of range\n", reg_addr);
return -E1000_ERR_PARAM;
}
@@ -2767,11 +2766,11 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- DEBUGOUT("MDI Read did not complete\n");
+ e_dbg("MDI Read did not complete\n");
return -E1000_ERR_PHY;
}
if (mdic & E1000_MDIC_ERROR) {
- DEBUGOUT("MDI Error\n");
+ e_dbg("MDI Error\n");
return -E1000_ERR_PHY;
}
*phy_data = (u16) mdic;
@@ -2820,7 +2819,7 @@ s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
{
u32 ret_val;
- DEBUGFUNC("e1000_write_phy_reg");
+ e_dbg("e1000_write_phy_reg");
if ((hw->phy_type == e1000_phy_igp) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
@@ -2843,10 +2842,10 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
u32 mdic = 0;
const u32 phy_addr = 1;
- DEBUGFUNC("e1000_write_phy_reg_ex");
+ e_dbg("e1000_write_phy_reg_ex");
if (reg_addr > MAX_PHY_REG_ADDRESS) {
- DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+ e_dbg("PHY Address %d is out of range\n", reg_addr);
return -E1000_ERR_PARAM;
}
@@ -2870,7 +2869,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- DEBUGOUT("MDI Write did not complete\n");
+ e_dbg("MDI Write did not complete\n");
return -E1000_ERR_PHY;
}
} else {
@@ -2910,9 +2909,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
u32 led_ctrl;
s32 ret_val;
- DEBUGFUNC("e1000_phy_hw_reset");
+ e_dbg("e1000_phy_hw_reset");
- DEBUGOUT("Resetting Phy...\n");
+ e_dbg("Resetting Phy...\n");
if (hw->mac_type > e1000_82543) {
/* Read the device control register and assert the E1000_CTRL_PHY_RST
@@ -2973,7 +2972,7 @@ s32 e1000_phy_reset(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_phy_reset");
+ e_dbg("e1000_phy_reset");
switch (hw->phy_type) {
case e1000_phy_igp:
@@ -3013,7 +3012,7 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
u16 phy_id_high, phy_id_low;
bool match = false;
- DEBUGFUNC("e1000_detect_gig_phy");
+ e_dbg("e1000_detect_gig_phy");
if (hw->phy_id != 0)
return E1000_SUCCESS;
@@ -3057,16 +3056,16 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
match = true;
break;
default:
- DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
+ e_dbg("Invalid MAC type %d\n", hw->mac_type);
return -E1000_ERR_CONFIG;
}
phy_init_status = e1000_set_phy_type(hw);
if ((match) && (phy_init_status == E1000_SUCCESS)) {
- DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
+ e_dbg("PHY ID 0x%X detected\n", hw->phy_id);
return E1000_SUCCESS;
}
- DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
+ e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id);
return -E1000_ERR_PHY;
}
@@ -3079,7 +3078,7 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
{
s32 ret_val;
- DEBUGFUNC("e1000_phy_reset_dsp");
+ e_dbg("e1000_phy_reset_dsp");
do {
ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
@@ -3111,7 +3110,7 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
u16 phy_data, min_length, max_length, average;
e1000_rev_polarity polarity;
- DEBUGFUNC("e1000_phy_igp_get_info");
+ e_dbg("e1000_phy_igp_get_info");
/* The downshift status is checked only once, after link is established,
* and it stored in the hw->speed_downgraded parameter. */
@@ -3189,7 +3188,7 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
u16 phy_data;
e1000_rev_polarity polarity;
- DEBUGFUNC("e1000_phy_m88_get_info");
+ e_dbg("e1000_phy_m88_get_info");
/* The downshift status is checked only once, after link is established,
* and it stored in the hw->speed_downgraded parameter. */
@@ -3261,7 +3260,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_phy_get_info");
+ e_dbg("e1000_phy_get_info");
phy_info->cable_length = e1000_cable_length_undefined;
phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
@@ -3273,7 +3272,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
phy_info->remote_rx = e1000_1000t_rx_status_undefined;
if (hw->media_type != e1000_media_type_copper) {
- DEBUGOUT("PHY info is only valid for copper media\n");
+ e_dbg("PHY info is only valid for copper media\n");
return -E1000_ERR_CONFIG;
}
@@ -3286,7 +3285,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
return ret_val;
if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
- DEBUGOUT("PHY info is only valid if link is up\n");
+ e_dbg("PHY info is only valid if link is up\n");
return -E1000_ERR_CONFIG;
}
@@ -3298,10 +3297,10 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_validate_mdi_settings");
+ e_dbg("e1000_validate_mdi_settings");
if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
- DEBUGOUT("Invalid MDI setting detected\n");
+ e_dbg("Invalid MDI setting detected\n");
hw->mdix = 1;
return -E1000_ERR_CONFIG;
}
@@ -3322,7 +3321,7 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
s32 ret_val = E1000_SUCCESS;
u16 eeprom_size;
- DEBUGFUNC("e1000_init_eeprom_params");
+ e_dbg("e1000_init_eeprom_params");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -3539,7 +3538,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 eecd, i = 0;
- DEBUGFUNC("e1000_acquire_eeprom");
+ e_dbg("e1000_acquire_eeprom");
eecd = er32(EECD);
@@ -3557,7 +3556,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
if (!(eecd & E1000_EECD_GNT)) {
eecd &= ~E1000_EECD_REQ;
ew32(EECD, eecd);
- DEBUGOUT("Could not acquire EEPROM grant\n");
+ e_dbg("Could not acquire EEPROM grant\n");
return -E1000_ERR_EEPROM;
}
}
@@ -3639,7 +3638,7 @@ static void e1000_release_eeprom(struct e1000_hw *hw)
{
u32 eecd;
- DEBUGFUNC("e1000_release_eeprom");
+ e_dbg("e1000_release_eeprom");
eecd = er32(EECD);
@@ -3687,7 +3686,7 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
u16 retry_count = 0;
u8 spi_stat_reg;
- DEBUGFUNC("e1000_spi_eeprom_ready");
+ e_dbg("e1000_spi_eeprom_ready");
/* Read "Status Register" repeatedly until the LSB is cleared. The
* EEPROM will signal that the command has been completed by clearing
@@ -3712,7 +3711,7 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
* only 0-5mSec on 5V devices)
*/
if (retry_count >= EEPROM_MAX_RETRY_SPI) {
- DEBUGOUT("SPI EEPROM Status error\n");
+ e_dbg("SPI EEPROM Status error\n");
return -E1000_ERR_EEPROM;
}
@@ -3741,7 +3740,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 i = 0;
- DEBUGFUNC("e1000_read_eeprom");
+ e_dbg("e1000_read_eeprom");
/* If eeprom is not yet detected, do so now */
if (eeprom->word_size == 0)
@@ -3752,9 +3751,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
- DEBUGOUT2
- ("\"words\" parameter out of bounds. Words = %d, size = %d\n",
- offset, eeprom->word_size);
+ e_dbg("\"words\" parameter out of bounds. Words = %d,"
+ "size = %d\n", offset, eeprom->word_size);
return -E1000_ERR_EEPROM;
}
@@ -3832,11 +3830,11 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
u16 checksum = 0;
u16 i, eeprom_data;
- DEBUGFUNC("e1000_validate_eeprom_checksum");
+ e_dbg("e1000_validate_eeprom_checksum");
for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
checksum += eeprom_data;
@@ -3845,7 +3843,7 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
if (checksum == (u16) EEPROM_SUM)
return E1000_SUCCESS;
else {
- DEBUGOUT("EEPROM Checksum Invalid\n");
+ e_dbg("EEPROM Checksum Invalid\n");
return -E1000_ERR_EEPROM;
}
}
@@ -3862,18 +3860,18 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
u16 checksum = 0;
u16 i, eeprom_data;
- DEBUGFUNC("e1000_update_eeprom_checksum");
+ e_dbg("e1000_update_eeprom_checksum");
for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
checksum += eeprom_data;
}
checksum = (u16) EEPROM_SUM - checksum;
if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
- DEBUGOUT("EEPROM Write Error\n");
+ e_dbg("EEPROM Write Error\n");
return -E1000_ERR_EEPROM;
}
return E1000_SUCCESS;
@@ -3904,7 +3902,7 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
s32 status = 0;
- DEBUGFUNC("e1000_write_eeprom");
+ e_dbg("e1000_write_eeprom");
/* If eeprom is not yet detected, do so now */
if (eeprom->word_size == 0)
@@ -3915,7 +3913,7 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
- DEBUGOUT("\"words\" parameter out of bounds\n");
+ e_dbg("\"words\" parameter out of bounds\n");
return -E1000_ERR_EEPROM;
}
@@ -3949,7 +3947,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u16 widx = 0;
- DEBUGFUNC("e1000_write_eeprom_spi");
+ e_dbg("e1000_write_eeprom_spi");
while (widx < words) {
u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
@@ -4013,7 +4011,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
u16 words_written = 0;
u16 i = 0;
- DEBUGFUNC("e1000_write_eeprom_microwire");
+ e_dbg("e1000_write_eeprom_microwire");
/* Send the write enable command to the EEPROM (3-bit opcode plus
* 6/8-bit dummy address beginning with 11). It's less work to include
@@ -4056,7 +4054,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
udelay(50);
}
if (i == 200) {
- DEBUGOUT("EEPROM Write did not complete\n");
+ e_dbg("EEPROM Write did not complete\n");
return -E1000_ERR_EEPROM;
}
@@ -4092,12 +4090,12 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw)
u16 offset;
u16 eeprom_data, i;
- DEBUGFUNC("e1000_read_mac_addr");
+ e_dbg("e1000_read_mac_addr");
for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
offset = i >> 1;
if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
@@ -4132,17 +4130,17 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw)
u32 i;
u32 rar_num;
- DEBUGFUNC("e1000_init_rx_addrs");
+ e_dbg("e1000_init_rx_addrs");
/* Setup the receive address. */
- DEBUGOUT("Programming MAC Address into RAR[0]\n");
+ e_dbg("Programming MAC Address into RAR[0]\n");
e1000_rar_set(hw, hw->mac_addr, 0);
rar_num = E1000_RAR_ENTRIES;
/* Zero out the other 15 receive addresses. */
- DEBUGOUT("Clearing RAR[1-15]\n");
+ e_dbg("Clearing RAR[1-15]\n");
for (i = 1; i < rar_num; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
E1000_WRITE_FLUSH();
@@ -4290,7 +4288,7 @@ static s32 e1000_id_led_init(struct e1000_hw *hw)
u16 eeprom_data, i, temp;
const u16 led_mask = 0x0F;
- DEBUGFUNC("e1000_id_led_init");
+ e_dbg("e1000_id_led_init");
if (hw->mac_type < e1000_82540) {
/* Nothing to do */
@@ -4303,7 +4301,7 @@ static s32 e1000_id_led_init(struct e1000_hw *hw)
hw->ledctl_mode2 = hw->ledctl_default;
if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
@@ -4363,7 +4361,7 @@ s32 e1000_setup_led(struct e1000_hw *hw)
u32 ledctl;
s32 ret_val = E1000_SUCCESS;
- DEBUGFUNC("e1000_setup_led");
+ e_dbg("e1000_setup_led");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -4415,7 +4413,7 @@ s32 e1000_cleanup_led(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
- DEBUGFUNC("e1000_cleanup_led");
+ e_dbg("e1000_cleanup_led");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -4451,7 +4449,7 @@ s32 e1000_led_on(struct e1000_hw *hw)
{
u32 ctrl = er32(CTRL);
- DEBUGFUNC("e1000_led_on");
+ e_dbg("e1000_led_on");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -4497,7 +4495,7 @@ s32 e1000_led_off(struct e1000_hw *hw)
{
u32 ctrl = er32(CTRL);
- DEBUGFUNC("e1000_led_off");
+ e_dbg("e1000_led_off");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -4626,7 +4624,7 @@ static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
*/
void e1000_reset_adaptive(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_reset_adaptive");
+ e_dbg("e1000_reset_adaptive");
if (hw->adaptive_ifs) {
if (!hw->ifs_params_forced) {
@@ -4639,7 +4637,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
hw->in_ifs_mode = false;
ew32(AIT, 0);
} else {
- DEBUGOUT("Not in Adaptive IFS mode!\n");
+ e_dbg("Not in Adaptive IFS mode!\n");
}
}
@@ -4654,7 +4652,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
*/
void e1000_update_adaptive(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_update_adaptive");
+ e_dbg("e1000_update_adaptive");
if (hw->adaptive_ifs) {
if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) {
@@ -4679,7 +4677,7 @@ void e1000_update_adaptive(struct e1000_hw *hw)
}
}
} else {
- DEBUGOUT("Not in Adaptive IFS mode!\n");
+ e_dbg("Not in Adaptive IFS mode!\n");
}
}
@@ -4851,7 +4849,7 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
u16 i, phy_data;
u16 cable_length;
- DEBUGFUNC("e1000_get_cable_length");
+ e_dbg("e1000_get_cable_length");
*min_length = *max_length = 0;
@@ -4968,7 +4966,7 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_check_polarity");
+ e_dbg("e1000_check_polarity");
if (hw->phy_type == e1000_phy_m88) {
/* return the Polarity bit in the Status register. */
@@ -5034,7 +5032,7 @@ static s32 e1000_check_downshift(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_check_downshift");
+ e_dbg("e1000_check_downshift");
if (hw->phy_type == e1000_phy_igp) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
@@ -5081,7 +5079,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
};
u16 min_length, max_length;
- DEBUGFUNC("e1000_config_dsp_after_link_change");
+ e_dbg("e1000_config_dsp_after_link_change");
if (hw->phy_type != e1000_phy_igp)
return E1000_SUCCESS;
@@ -5089,7 +5087,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
if (link_up) {
ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val) {
- DEBUGOUT("Error getting link speed and duplex\n");
+ e_dbg("Error getting link speed and duplex\n");
return ret_val;
}
@@ -5289,7 +5287,7 @@ static s32 e1000_set_phy_mode(struct e1000_hw *hw)
s32 ret_val;
u16 eeprom_data;
- DEBUGFUNC("e1000_set_phy_mode");
+ e_dbg("e1000_set_phy_mode");
if ((hw->mac_type == e1000_82545_rev_3) &&
(hw->media_type == e1000_media_type_copper)) {
@@ -5337,7 +5335,7 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
{
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_set_d3_lplu_state");
+ e_dbg("e1000_set_d3_lplu_state");
if (hw->phy_type != e1000_phy_igp)
return E1000_SUCCESS;
@@ -5440,7 +5438,7 @@ static s32 e1000_set_vco_speed(struct e1000_hw *hw)
u16 default_page = 0;
u16 phy_data;
- DEBUGFUNC("e1000_set_vco_speed");
+ e_dbg("e1000_set_vco_speed");
switch (hw->mac_type) {
case e1000_82545_rev_3:
@@ -5613,7 +5611,7 @@ static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
*/
static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_get_auto_rd_done");
+ e_dbg("e1000_get_auto_rd_done");
msleep(5);
return E1000_SUCCESS;
}
@@ -5628,7 +5626,7 @@ static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
*/
static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_get_phy_cfg_done");
+ e_dbg("e1000_get_phy_cfg_done");
mdelay(10);
return E1000_SUCCESS;
}
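
Every DEBUGFUNC()/DEBUGOUT()/DEBUGOUT1()/DEBUGOUT2() removal above funnels into a single e_dbg() helper. Its definition lives in e1000.h and is not part of this diff; a plausible sketch, assuming a local struct e1000_hw *hw in scope (as in every converted caller) and the e1000_get_hw_dev() accessor added to e1000_main.c further down, might look like:

/* Hypothetical sketch -- not a quote of e1000.h. It only illustrates the
 * shape of the wrapper: route hardware-layer debug text through the
 * net_device so messages carry the interface name. Assumes a local
 * 'struct e1000_hw *hw' is in scope at every call site.
 */
#define e_dbg(format, arg...) \
        netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)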
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 9acfddb..ecd9f6c 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -35,6 +35,7 @@
#include "e1000_osdep.h"
+
/* Forward declarations of structures used by the shared code */
struct e1000_hw;
struct e1000_hw_stats;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index b15ece2..ebdea08 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k5-NAPI"
+#define DRV_VERSION "7.3.21-k6-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -214,6 +214,17 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/**
+ * e1000_get_hw_dev - return device
+ * used by hardware layer to print debugging information
+ *
+ **/
+struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->back;
+ return adapter->netdev;
+}
+
+/**
* e1000_init_module - Driver Registration Routine
*
* e1000_init_module is the first routine called when the driver is
@@ -223,18 +234,17 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static int __init e1000_init_module(void)
{
int ret;
- printk(KERN_INFO "%s - version %s\n",
- e1000_driver_string, e1000_driver_version);
+ pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
- printk(KERN_INFO "%s\n", e1000_copyright);
+ pr_info("%s\n", e1000_copyright);
ret = pci_register_driver(&e1000_driver);
if (copybreak != COPYBREAK_DEFAULT) {
if (copybreak == 0)
- printk(KERN_INFO "e1000: copybreak disabled\n");
+ pr_info("copybreak disabled\n");
else
- printk(KERN_INFO "e1000: copybreak enabled for "
- "packets <= %u bytes\n", copybreak);
+ pr_info("copybreak enabled for "
+ "packets <= %u bytes\n", copybreak);
}
return ret;
}
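
The pr_info()/pr_err() calls introduced here can drop the explicit "e1000: " prefix because the pr_* family honours a pr_fmt() definition placed before the includes. A standalone sketch of that idiom (not a quote of this driver's header):

/* Sketch of the pr_fmt() idiom the pr_* conversions above rely on: defining
 * it before any include makes every pr_info()/pr_err() in the file prepend
 * the module name automatically.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
        pr_info("copybreak enabled for packets <= %u bytes\n", 256);
        return 0;
}
module_init(example_init);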
@@ -265,8 +275,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
netdev);
if (err) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate interrupt Error: %d\n", err);
+ e_err("Unable to allocate interrupt Error: %d\n", err);
}
return err;
@@ -648,7 +657,7 @@ void e1000_reset(struct e1000_adapter *adapter)
ew32(WUC, 0);
if (e1000_init_hw(hw))
- DPRINTK(PROBE, ERR, "Hardware Error\n");
+ e_err("Hardware Error\n");
e1000_update_mng_vlan(adapter);
/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
@@ -689,8 +698,7 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
data = kmalloc(eeprom.len, GFP_KERNEL);
if (!data) {
- printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
- " data\n");
+ pr_err("Unable to allocate memory to dump EEPROM data\n");
return;
}
@@ -702,30 +710,25 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
csum_new += data[i] + (data[i + 1] << 8);
csum_new = EEPROM_SUM - csum_new;
- printk(KERN_ERR "/*********************/\n");
- printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
- printk(KERN_ERR "Calculated : 0x%04x\n", csum_new);
+ pr_err("/*********************/\n");
+ pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
+ pr_err("Calculated : 0x%04x\n", csum_new);
- printk(KERN_ERR "Offset Values\n");
- printk(KERN_ERR "======== ======\n");
+ pr_err("Offset Values\n");
+ pr_err("======== ======\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
- printk(KERN_ERR "Include this output when contacting your support "
- "provider.\n");
- printk(KERN_ERR "This is not a software error! Something bad "
- "happened to your hardware or\n");
- printk(KERN_ERR "EEPROM image. Ignoring this "
- "problem could result in further problems,\n");
- printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
- printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
- "which is invalid\n");
- printk(KERN_ERR "and requires you to set the proper MAC "
- "address manually before continuing\n");
- printk(KERN_ERR "to enable this network device.\n");
- printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
- "to your hardware vendor\n");
- printk(KERN_ERR "or Intel Customer Support.\n");
- printk(KERN_ERR "/*********************/\n");
+ pr_err("Include this output when contacting your support provider.\n");
+ pr_err("This is not a software error! Something bad happened to\n");
+ pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
+ pr_err("result in further problems, possibly loss of data,\n");
+ pr_err("corruption or system hangs!\n");
+ pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
+ pr_err("which is invalid and requires you to set the proper MAC\n");
+ pr_err("address manually before continuing to enable this network\n");
+ pr_err("device. Please inspect the EEPROM dump and report the\n");
+ pr_err("issue to your hardware vendor or Intel Customer Support.\n");
+ pr_err("/*********************/\n");
kfree(data);
}
@@ -823,16 +826,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
- E1000_ERR("No usable DMA configuration, "
- "aborting\n");
+ pr_err("No usable DMA config, aborting\n");
goto err_dma;
}
}
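
The probe() change above swaps pci_set_dma_mask()/pci_set_consistent_dma_mask() for dma_set_mask()/dma_set_coherent_mask() while keeping the 64-bit-first, 32-bit-fallback policy. Roughly, in isolation (helper name is illustrative):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Illustrative helper: try a 64-bit DMA mask first and fall back to 32-bit,
 * the same policy the probe path above implements inline.
 */
static int example_set_dma_masks(struct pci_dev *pdev, bool *using_dac)
{
        if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
            !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                *using_dac = true;
                return 0;
        }

        *using_dac = false;
        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
                return -EIO;

        return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}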
@@ -922,7 +925,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* initialize eeprom parameters */
if (e1000_init_eeprom_params(hw)) {
- E1000_ERR("EEPROM initialization failed\n");
+ e_err("EEPROM initialization failed\n");
goto err_eeprom;
}
@@ -933,7 +936,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* make sure the EEPROM is good */
if (e1000_validate_eeprom_checksum(hw) < 0) {
- DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
+ e_err("The EEPROM Checksum Is Not Valid\n");
e1000_dump_eeprom(adapter);
/*
* set MAC address to all zeroes to invalidate and temporary
@@ -947,14 +950,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
} else {
/* copy the MAC address out of the EEPROM */
if (e1000_read_mac_addr(hw))
- DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
+ e_err("EEPROM Read Error\n");
}
/* don't block initialization here due to bad MAC address */
memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr))
- DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+ e_err("Invalid MAC Address\n");
e1000_get_bus_info(hw);
@@ -1035,8 +1038,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->wol = adapter->eeprom_wol;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+ /* reset the hardware with the new settings */
+ e1000_reset(adapter);
+
+ strcpy(netdev->name, "eth%d");
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
/* print bus type/speed/width info */
- DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+ e_info("(PCI%s:%s:%s) ",
((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
(hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
@@ -1044,20 +1055,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
(hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit"));
- printk("%pM\n", netdev->dev_addr);
-
- /* reset the hardware with the new settings */
- e1000_reset(adapter);
-
- strcpy(netdev->name, "eth%d");
- err = register_netdev(netdev);
- if (err)
- goto err_register;
+ e_info("%pM\n", netdev->dev_addr);
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+ e_info("Intel(R) PRO/1000 Network Connection\n");
cards_found++;
return 0;
@@ -1157,7 +1160,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
/* identify the MAC */
if (e1000_set_mac_type(hw)) {
- DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
+ e_err("Unknown MAC Type\n");
return -EIO;
}
@@ -1190,7 +1193,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
adapter->num_rx_queues = 1;
if (e1000_alloc_queues(adapter)) {
- DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+ e_err("Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -1384,8 +1387,7 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * txdr->count;
txdr->buffer_info = vmalloc(size);
if (!txdr->buffer_info) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the transmit descriptor ring\n");
+ e_err("Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}
memset(txdr->buffer_info, 0, size);
@@ -1395,12 +1397,12 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
setup_tx_desc_die:
vfree(txdr->buffer_info);
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the transmit descriptor ring\n");
+ e_err("Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}
@@ -1408,29 +1410,32 @@ setup_tx_desc_die:
if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
void *olddesc = txdr->desc;
dma_addr_t olddma = txdr->dma;
- DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
- "at %p\n", txdr->size, txdr->desc);
+ e_err("txdr align check failed: %u bytes at %p\n",
+ txdr->size, txdr->desc);
/* Try again, without freeing the previous */
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
+ &txdr->dma, GFP_KERNEL);
/* Failed allocation, critical failure */
if (!txdr->desc) {
- pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+ olddma);
goto setup_tx_desc_die;
}
if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
/* give up */
- pci_free_consistent(pdev, txdr->size, txdr->desc,
- txdr->dma);
- pci_free_consistent(pdev, txdr->size, olddesc, olddma);
- DPRINTK(PROBE, ERR,
- "Unable to allocate aligned memory "
- "for the transmit descriptor ring\n");
+ dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
+ txdr->dma);
+ dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+ olddma);
+ e_err("Unable to allocate aligned memory "
+ "for the transmit descriptor ring\n");
vfree(txdr->buffer_info);
return -ENOMEM;
} else {
/* Free old allocation, new allocation was successful */
- pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+ olddma);
}
}
memset(txdr->desc, 0, txdr->size);
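
[Editor's note] The recurring change in this and the following hunks is the move from the legacy pci_* DMA wrappers to the generic DMA API, which takes a struct device * plus an explicit GFP flag or DMA direction. A minimal sketch of the coherent-allocation half of the pattern, assuming the driver's usual struct pci_dev *pdev (helper names are illustrative, not driver functions):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Allocate/free a descriptor ring the new way; old calls kept as comments. */
static void *ring_alloc_example(struct pci_dev *pdev, size_t size,
				dma_addr_t *dma)
{
	/* was: pci_alloc_consistent(pdev, size, dma) */
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

static void ring_free_example(struct pci_dev *pdev, size_t size, void *desc,
			      dma_addr_t dma)
{
	/* was: pci_free_consistent(pdev, size, desc, dma) */
	dma_free_coherent(&pdev->dev, size, desc, dma);
}
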
@@ -1456,8 +1461,7 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
if (err) {
- DPRINTK(PROBE, ERR,
- "Allocation for Tx Queue %u failed\n", i);
+ e_err("Allocation for Tx Queue %u failed\n", i);
for (i-- ; i >= 0; i--)
e1000_free_tx_resources(adapter,
&adapter->tx_ring[i]);
@@ -1577,8 +1581,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * rxdr->count;
rxdr->buffer_info = vmalloc(size);
if (!rxdr->buffer_info) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the receive descriptor ring\n");
+ e_err("Unable to allocate memory for the Rx descriptor ring\n");
return -ENOMEM;
}
memset(rxdr->buffer_info, 0, size);
@@ -1590,11 +1593,11 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
rxdr->size = rxdr->count * desc_len;
rxdr->size = ALIGN(rxdr->size, 4096);
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the receive descriptor ring\n");
+ e_err("Unable to allocate memory for the Rx descriptor ring\n");
setup_rx_desc_die:
vfree(rxdr->buffer_info);
return -ENOMEM;
@@ -1604,31 +1607,33 @@ setup_rx_desc_die:
if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
void *olddesc = rxdr->desc;
dma_addr_t olddma = rxdr->dma;
- DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
- "at %p\n", rxdr->size, rxdr->desc);
+ e_err("rxdr align check failed: %u bytes at %p\n",
+ rxdr->size, rxdr->desc);
/* Try again, without freeing the previous */
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
+ &rxdr->dma, GFP_KERNEL);
/* Failed allocation, critical failure */
if (!rxdr->desc) {
- pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory "
- "for the receive descriptor ring\n");
+ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+ olddma);
+ e_err("Unable to allocate memory for the Rx descriptor "
+ "ring\n");
goto setup_rx_desc_die;
}
if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
/* give up */
- pci_free_consistent(pdev, rxdr->size, rxdr->desc,
- rxdr->dma);
- pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
- DPRINTK(PROBE, ERR,
- "Unable to allocate aligned memory "
- "for the receive descriptor ring\n");
+ dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
+ rxdr->dma);
+ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+ olddma);
+ e_err("Unable to allocate aligned memory for the Rx "
+ "descriptor ring\n");
goto setup_rx_desc_die;
} else {
/* Free old allocation, new allocation was successful */
- pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+ olddma);
}
}
memset(rxdr->desc, 0, rxdr->size);
@@ -1655,8 +1660,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
if (err) {
- DPRINTK(PROBE, ERR,
- "Allocation for Rx Queue %u failed\n", i);
+ e_err("Allocation for Rx Queue %u failed\n", i);
for (i-- ; i >= 0; i--)
e1000_free_rx_resources(adapter,
&adapter->rx_ring[i]);
@@ -1804,7 +1808,8 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
vfree(tx_ring->buffer_info);
tx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -1829,12 +1834,12 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev, buffer_info->dma,
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -1912,7 +1917,8 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -1952,14 +1958,14 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma &&
adapter->clean_rx == e1000_clean_rx_irq) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
} else if (buffer_info->dma &&
adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
- pci_unmap_page(pdev, buffer_info->dma,
- buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length,
+ DMA_FROM_DEVICE);
}
buffer_info->dma = 0;
@@ -2098,7 +2104,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
struct e1000_hw *hw = &adapter->hw;
struct netdev_hw_addr *ha;
bool use_uc = false;
- struct dev_addr_list *mc_ptr;
u32 rctl;
u32 hash_value;
int i, rar_entries = E1000_RAR_ENTRIES;
@@ -2106,7 +2111,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
if (!mcarray) {
- DPRINTK(PROBE, ERR, "memory allocation failed\n");
+ e_err("memory allocation failed\n");
return;
}
@@ -2156,19 +2161,17 @@ static void e1000_set_rx_mode(struct net_device *netdev)
e1000_rar_set(hw, ha->addr, i++);
}
- WARN_ON(i == rar_entries);
-
- netdev_for_each_mc_addr(mc_ptr, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
if (i == rar_entries) {
/* load any remaining addresses into the hash table */
u32 hash_reg, hash_bit, mta;
- hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
+ hash_value = e1000_hash_mc_addr(hw, ha->addr);
hash_reg = (hash_value >> 5) & 0x7F;
hash_bit = hash_value & 0x1F;
mta = (1 << hash_bit);
mcarray[hash_reg] |= mta;
} else {
- e1000_rar_set(hw, mc_ptr->da_addr, i++);
+ e1000_rar_set(hw, ha->addr, i++);
}
}
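
[Editor's note] Once the receive-address registers run out, the remaining multicast addresses fall back to the hash table, as in the hunk above: the upper bits of the hash select one of the 128 32-bit MTA words and the low five bits select the bit within that word. A worked example with a made-up hash value:

/* e.g. hash_value = 0x0563: word (0x0563 >> 5) & 0x7F = 43, bit 0x0563 & 0x1F = 3 */
static void mta_set_example(u32 *mcarray, u32 hash_value)
{
	u32 hash_reg = (hash_value >> 5) & 0x7F;	/* which MTA word */
	u32 hash_bit = hash_value & 0x1F;		/* which bit in it */

	mcarray[hash_reg] |= 1 << hash_bit;
}
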
@@ -2302,16 +2305,16 @@ static void e1000_watchdog(unsigned long data)
&adapter->link_duplex);
ctrl = er32(CTRL);
- printk(KERN_INFO "e1000: %s NIC Link is Up %d Mbps %s, "
- "Flow Control: %s\n",
- netdev->name,
- adapter->link_speed,
- adapter->link_duplex == FULL_DUPLEX ?
- "Full Duplex" : "Half Duplex",
- ((ctrl & E1000_CTRL_TFCE) && (ctrl &
- E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
- E1000_CTRL_RFCE) ? "RX" : ((ctrl &
- E1000_CTRL_TFCE) ? "TX" : "None" )));
+ pr_info("%s NIC Link is Up %d Mbps %s, "
+ "Flow Control: %s\n",
+ netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full Duplex" : "Half Duplex",
+ ((ctrl & E1000_CTRL_TFCE) && (ctrl &
+ E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
+ E1000_CTRL_RFCE) ? "RX" : ((ctrl &
+ E1000_CTRL_TFCE) ? "TX" : "None")));
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
@@ -2341,8 +2344,8 @@ static void e1000_watchdog(unsigned long data)
if (netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
- printk(KERN_INFO "e1000: %s NIC Link is Down\n",
- netdev->name);
+ pr_info("%s NIC Link is Down\n",
+ netdev->name);
netif_carrier_off(netdev);
if (!test_bit(__E1000_DOWN, &adapter->flags))
@@ -2381,6 +2384,22 @@ link_up:
}
}
+ /* Simple mode for Interrupt Throttle Rate (ITR) */
+ if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
+ /*
+ * Symmetric Tx/Rx gets a reduced ITR=2000;
+ * Total asymmetrical Tx or Rx gets ITR=8000;
+ * everyone else is between 2000-8000.
+ */
+ u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
+ u32 dif = (adapter->gotcl > adapter->gorcl ?
+ adapter->gotcl - adapter->gorcl :
+ adapter->gorcl - adapter->gotcl) / 10000;
+ u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+ ew32(ITR, 1000000000 / (itr * 256));
+ }
+
/* Cause software interrupt to ensure rx ring is cleaned */
ew32(ICS, E1000_ICS_RXDMT0);
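
[Editor's note] A worked example of the new simplified ITR mode with made-up counters (60,000 good octets transmitted and 20,000 received since the last watchdog run); ew32() assumes hw in scope, as it is in e1000_watchdog():

	u32 gotcl = 60000, gorcl = 20000;	/* illustrative values only */
	u32 goc = (gotcl + gorcl) / 10000;	/* 8 */
	u32 dif = (gotcl - gorcl) / 10000;	/* 4 */
	u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;	/* 5000 ints/s */

	/* ITR counts 256 ns units, hence the 10^9 / (itr * 256) write;
	 * fully symmetric traffic gives 2000, fully one-sided traffic 8000 */
	ew32(ITR, 1000000000 / (itr * 256));
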
@@ -2525,8 +2544,6 @@ set_itr_now:
adapter->itr = new_itr;
ew32(ITR, 1000000000 / (new_itr * 256));
}
-
- return;
}
#define E1000_TX_FLAGS_CSUM 0x00000001
@@ -2632,8 +2649,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
break;
default:
if (unlikely(net_ratelimit()))
- DPRINTK(DRV, WARNING,
- "checksum_partial proto=%x!\n", skb->protocol);
+ e_warn("checksum_partial proto=%x!\n", skb->protocol);
break;
}
@@ -2715,9 +2731,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = false;
- buffer_info->dma = pci_map_single(pdev, skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = i;
@@ -2761,10 +2778,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = true;
- buffer_info->dma = pci_map_page(pdev, frag->page,
+ buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
offset, size,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = i;
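
[Editor's note] The streaming-mapping half of the same DMA API conversion follows the pattern below; the helper names are illustrative, not driver functions:

#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static int tx_map_example(struct pci_dev *pdev, struct sk_buff *skb,
			  unsigned int len, dma_addr_t *dma)
{
	/* was: pci_map_single(pdev, skb->data, len, PCI_DMA_TODEVICE) */
	*dma = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
	/* was: pci_dma_mapping_error(pdev, *dma) */
	if (dma_mapping_error(&pdev->dev, *dma))
		return -ENOMEM;
	return 0;
}

static void tx_unmap_example(struct pci_dev *pdev, dma_addr_t dma,
			     unsigned int len)
{
	/* was: pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE) */
	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
}
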
@@ -2930,7 +2947,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
- unsigned int len = skb->len - skb->data_len;
+ unsigned int len = skb_headlen(skb);
unsigned int nr_frags;
unsigned int mss;
int count = 0;
@@ -2976,12 +2993,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/* fall through */
pull_size = min((unsigned int)4, skb->data_len);
if (!__pskb_pull_tail(skb, pull_size)) {
- DPRINTK(DRV, ERR,
- "__pskb_pull_tail failed.\n");
+ e_err("__pskb_pull_tail failed.\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
break;
default:
/* do nothing */
@@ -3125,7 +3141,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
- DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
+ e_err("Invalid MTU setting\n");
return -EINVAL;
}
@@ -3133,7 +3149,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
switch (hw->mac_type) {
case e1000_undefined ... e1000_82542_rev2_1:
if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
- DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+ e_err("Jumbo Frames not supported.\n");
return -EINVAL;
}
break;
@@ -3171,8 +3187,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
(max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- printk(KERN_INFO "e1000: %s changing MTU from %d to %d\n",
- netdev->name, netdev->mtu, new_mtu);
+ pr_info("%s changing MTU from %d to %d\n",
+ netdev->name, netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
@@ -3485,17 +3501,17 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
!(er32(STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
- DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
- " Tx Queue <%lu>\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " next_to_watch <%x>\n"
- " jiffies <%lx>\n"
- " next_to_watch.status <%x>\n",
+ e_err("Detected Tx Unit Hang\n"
+ " Tx Queue <%lu>\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n",
(unsigned long)((tx_ring - adapter->tx_ring) /
sizeof(struct e1000_tx_ring)),
readl(hw->hw_addr + tx_ring->tdh),
@@ -3635,8 +3651,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
- pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -3734,7 +3750,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* eth type trans needs skb->data to point to something */
if (!pskb_may_pull(skb, ETH_HLEN)) {
- DPRINTK(DRV, ERR, "pskb_may_pull failed.\n");
+ e_err("pskb_may_pull failed.\n");
dev_kfree_skb(skb);
goto next_desc;
}
@@ -3769,6 +3785,31 @@ next_desc:
return cleaned;
}
+/*
+ * this should improve performance for small packets with large amounts
+ * of reassembly being done in the stack
+ */
+static void e1000_check_copybreak(struct net_device *netdev,
+ struct e1000_buffer *buffer_info,
+ u32 length, struct sk_buff **skb)
+{
+ struct sk_buff *new_skb;
+
+ if (length > copybreak)
+ return;
+
+ new_skb = netdev_alloc_skb_ip_align(netdev, length);
+ if (!new_skb)
+ return;
+
+ skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
+ (*skb)->data - NET_IP_ALIGN,
+ length + NET_IP_ALIGN);
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = *skb;
+ *skb = new_skb;
+}
+
/**
* e1000_clean_rx_irq - Send received data up the network stack; legacy
* @adapter: board private structure
@@ -3818,8 +3859,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
- pci_unmap_single(pdev, buffer_info->dma, buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -3834,8 +3875,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
if (adapter->discarding) {
/* All receives must fit into a single buffer */
- E1000_DBG("%s: Receive packet consumed multiple"
- " buffers\n", netdev->name);
+ e_info("Receive packet consumed multiple buffers\n");
/* recycle */
buffer_info->skb = skb;
if (status & E1000_RXD_STAT_EOP)
@@ -3868,26 +3908,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
total_rx_bytes += length;
total_rx_packets++;
- /* code added for copybreak, this should improve
- * performance for small packets with large amounts
- * of reassembly being done in the stack */
- if (length < copybreak) {
- struct sk_buff *new_skb =
- netdev_alloc_skb_ip_align(netdev, length);
- if (new_skb) {
- skb_copy_to_linear_data_offset(new_skb,
- -NET_IP_ALIGN,
- (skb->data -
- NET_IP_ALIGN),
- (length +
- NET_IP_ALIGN));
- /* save the skb in buffer_info as good */
- buffer_info->skb = skb;
- skb = new_skb;
- }
- /* else just continue with the old one */
- }
- /* end copybreak code */
+ e1000_check_copybreak(netdev, buffer_info, length, &skb);
+
skb_put(skb, length);
/* Receive Checksum Offload */
@@ -3965,8 +3987,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
/* Fix for errata 23, can't cross 64kB boundary */
if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
struct sk_buff *oldskb = skb;
- DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
- "at %p\n", bufsz, skb->data);
+ e_err("skb align check failed: %u bytes at %p\n",
+ bufsz, skb->data);
/* Try again, without freeing the previous */
skb = netdev_alloc_skb_ip_align(netdev, bufsz);
/* Failed allocation, critical failure */
@@ -3999,11 +4021,11 @@ check_page:
}
if (!buffer_info->dma) {
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
- buffer_info->length,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ buffer_info->length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
put_page(buffer_info->page);
dev_kfree_skb(skb);
buffer_info->page = NULL;
@@ -4074,8 +4096,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
/* Fix for errata 23, can't cross 64kB boundary */
if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
struct sk_buff *oldskb = skb;
- DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
- "at %p\n", bufsz, skb->data);
+ e_err("skb align check failed: %u bytes at %p\n",
+ bufsz, skb->data);
/* Try again, without freeing the previous */
skb = netdev_alloc_skb_ip_align(netdev, bufsz);
/* Failed allocation, critical failure */
@@ -4099,11 +4121,11 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
map_skb:
- buffer_info->dma = pci_map_single(pdev,
+ buffer_info->dma = dma_map_single(&pdev->dev,
skb->data,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_kfree_skb(skb);
buffer_info->skb = NULL;
buffer_info->dma = 0;
@@ -4120,16 +4142,15 @@ map_skb:
if (!e1000_check_64k_bound(adapter,
(void *)(unsigned long)buffer_info->dma,
adapter->rx_buffer_len)) {
- DPRINTK(RX_ERR, ERR,
- "dma align check failed: %u bytes at %p\n",
- adapter->rx_buffer_len,
- (void *)(unsigned long)buffer_info->dma);
+ e_err("dma align check failed: %u bytes at %p\n",
+ adapter->rx_buffer_len,
+ (void *)(unsigned long)buffer_info->dma);
dev_kfree_skb(skb);
buffer_info->skb = NULL;
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
adapter->alloc_rx_buff_failed++;
@@ -4335,7 +4356,7 @@ void e1000_pci_set_mwi(struct e1000_hw *hw)
int ret_val = pci_set_mwi(adapter->pdev);
if (ret_val)
- DPRINTK(PROBE, ERR, "Error in setting MWI\n");
+ e_err("Error in setting MWI\n");
}
void e1000_pci_clear_mwi(struct e1000_hw *hw)
@@ -4466,7 +4487,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
	/* Fiber NICs only allow 1000 Mbps Full duplex */
if ((hw->media_type == e1000_media_type_fiber) &&
spddplx != (SPEED_1000 + DUPLEX_FULL)) {
- DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+ e_err("Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
@@ -4489,7 +4510,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
- DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+ e_err("Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
return 0;
@@ -4612,7 +4633,7 @@ static int e1000_resume(struct pci_dev *pdev)
else
err = pci_enable_device_mem(pdev);
if (err) {
- printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+ pr_err("Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
@@ -4715,7 +4736,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
else
err = pci_enable_device_mem(pdev);
if (err) {
- printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
+ pr_err("Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
@@ -4746,7 +4767,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
if (netif_running(netdev)) {
if (e1000_up(adapter)) {
- printk("e1000: can't bring device back up after reset\n");
+ pr_info("can't bring device back up after reset\n");
return;
}
}
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index d929852..edd1c75 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -41,20 +41,6 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
-#ifdef DBG
-#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
-#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
-#else
-#define DEBUGOUT(S)
-#define DEBUGOUT1(S, A...)
-#endif
-
-#define DEBUGFUNC(F) DEBUGOUT(F "\n")
-#define DEBUGOUT2 DEBUGOUT1
-#define DEBUGOUT3 DEBUGOUT2
-#define DEBUGOUT7 DEBUGOUT3
-
-
#define er32(reg) \
(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
? E1000_##reg : E1000_82542_##reg)))
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 38d2741..10d8d98 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -188,14 +188,6 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
*/
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
-/* Enable Kumeran Lock Loss workaround
- *
- * Valid Range: 0, 1
- *
- * Default Value: 1 (enabled)
- */
-E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
-
struct e1000_option {
enum { enable_option, range_option, list_option } type;
const char *name;
@@ -226,17 +218,16 @@ static int __devinit e1000_validate_option(unsigned int *value,
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+ e_dev_info("%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
- DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+ e_dev_info("%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- DPRINTK(PROBE, INFO,
- "%s set to %i\n", opt->name, *value);
+ e_dev_info("%s set to %i\n", opt->name, *value);
return 0;
}
break;
@@ -248,7 +239,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- DPRINTK(PROBE, INFO, "%s\n", ent->str);
+ e_dev_info("%s\n", ent->str);
return 0;
}
}
@@ -258,7 +249,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
BUG();
}
- DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n",
+ e_dev_info("Invalid %s value specified (%i) %s\n",
opt->name, *value, opt->err);
*value = opt->def;
return -1;
@@ -283,9 +274,8 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
- DPRINTK(PROBE, NOTICE,
- "Warning: no configuration for board #%i\n", bd);
- DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+		e_dev_warn("Warning: no configuration for board #%i, "
+ "using defaults for all values\n", bd);
}
{ /* Transmit Descriptor Count */
@@ -472,27 +462,31 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
adapter->itr = InterruptThrottleRate[bd];
switch (adapter->itr) {
case 0:
- DPRINTK(PROBE, INFO, "%s turned off\n",
- opt.name);
+ e_dev_info("%s turned off\n", opt.name);
break;
case 1:
- DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
- opt.name);
+ e_dev_info("%s set to dynamic mode\n",
+ opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
case 3:
- DPRINTK(PROBE, INFO,
- "%s set to dynamic conservative mode\n",
- opt.name);
+ e_dev_info("%s set to dynamic conservative "
+ "mode\n", opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
+ case 4:
+ e_dev_info("%s set to simplified "
+ "(2000-8000) ints mode\n", opt.name);
+ adapter->itr_setting = adapter->itr;
+ break;
default:
e1000_validate_option(&adapter->itr, &opt,
adapter);
- /* save the setting, because the dynamic bits change itr */
- /* clear the lower two bits because they are
+ /* save the setting, because the dynamic bits
+ * change itr.
+ * clear the lower two bits because they are
* used as control */
adapter->itr_setting = adapter->itr & ~3;
break;
@@ -543,19 +537,18 @@ static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
if (num_Speed > bd) {
- DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
- "parameter ignored\n");
+ e_dev_info("Speed not valid for fiber adapters, parameter "
+ "ignored\n");
}
if (num_Duplex > bd) {
- DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
- "parameter ignored\n");
+ e_dev_info("Duplex not valid for fiber adapters, parameter "
+ "ignored\n");
}
if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
- DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
- "not valid for fiber adapters, "
- "parameter ignored\n");
+		e_dev_info("AutoNeg other than 1000/Full is not valid for fiber "
+			   "adapters, parameter ignored\n");
}
}
@@ -619,9 +612,8 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
}
if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
- DPRINTK(PROBE, INFO,
- "AutoNeg specified along with Speed or Duplex, "
- "parameter ignored\n");
+ e_dev_info("AutoNeg specified along with Speed or Duplex, "
+ "parameter ignored\n");
adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
} else { /* Autoneg */
static const struct e1000_opt_list an_list[] =
@@ -680,79 +672,72 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
case 0:
adapter->hw.autoneg = adapter->fc_autoneg = 1;
if ((num_Speed > bd) && (speed != 0 || dplx != 0))
- DPRINTK(PROBE, INFO,
- "Speed and duplex autonegotiation enabled\n");
+ e_dev_info("Speed and duplex autonegotiation "
+ "enabled\n");
break;
case HALF_DUPLEX:
- DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
- DPRINTK(PROBE, INFO, "Using Autonegotiation at "
- "Half Duplex only\n");
+ e_dev_info("Half Duplex specified without Speed\n");
+ e_dev_info("Using Autonegotiation at Half Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_100_HALF;
break;
case FULL_DUPLEX:
- DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n");
- DPRINTK(PROBE, INFO, "Using Autonegotiation at "
- "Full Duplex only\n");
+ e_dev_info("Full Duplex specified without Speed\n");
+ e_dev_info("Using Autonegotiation at Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
ADVERTISE_100_FULL |
ADVERTISE_1000_FULL;
break;
case SPEED_10:
- DPRINTK(PROBE, INFO, "10 Mbps Speed specified "
- "without Duplex\n");
- DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
+ e_dev_info("10 Mbps Speed specified without Duplex\n");
+ e_dev_info("Using Autonegotiation at 10 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_10_FULL;
break;
case SPEED_10 + HALF_DUPLEX:
- DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n");
+ e_dev_info("Forcing to 10 Mbps Half Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_10_half;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_10 + FULL_DUPLEX:
- DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n");
+ e_dev_info("Forcing to 10 Mbps Full Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_10_full;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100:
- DPRINTK(PROBE, INFO, "100 Mbps Speed specified "
- "without Duplex\n");
- DPRINTK(PROBE, INFO, "Using Autonegotiation at "
- "100 Mbps only\n");
+ e_dev_info("100 Mbps Speed specified without Duplex\n");
+ e_dev_info("Using Autonegotiation at 100 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
ADVERTISE_100_FULL;
break;
case SPEED_100 + HALF_DUPLEX:
- DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n");
+ e_dev_info("Forcing to 100 Mbps Half Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_100_half;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100 + FULL_DUPLEX:
- DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n");
+ e_dev_info("Forcing to 100 Mbps Full Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_100_full;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_1000:
- DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
- "Duplex\n");
+ e_dev_info("1000 Mbps Speed specified without Duplex\n");
goto full_duplex_only;
case SPEED_1000 + HALF_DUPLEX:
- DPRINTK(PROBE, INFO,
- "Half Duplex is not supported at 1000 Mbps\n");
+ e_dev_info("Half Duplex is not supported at 1000 Mbps\n");
/* fall through */
case SPEED_1000 + FULL_DUPLEX:
full_duplex_only:
- DPRINTK(PROBE, INFO,
- "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+ e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex "
+ "only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
break;
@@ -762,9 +747,8 @@ full_duplex_only:
/* Speed, AutoNeg and MDI/MDI-X must all play nice */
if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
- DPRINTK(PROBE, INFO,
- "Speed, AutoNeg and MDI-X specifications are "
- "incompatible. Setting MDI-X to a compatible value.\n");
+ e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. "
+ "Setting MDI-X to a compatible value.\n");
}
}
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 9015555..f654db9 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -234,9 +234,6 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
mac->mta_reg_count = 128;
/* Set rar entry count */
mac->rar_entry_count = E1000_RAR_ENTRIES;
- /* Set if manageability features are enabled. */
- mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
- ? true : false;
/* Adaptive IFS supported */
mac->adaptive_ifs = true;
@@ -271,6 +268,16 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
func->set_lan_id = e1000_set_lan_id_single_port;
func->check_mng_mode = e1000e_check_mng_mode_generic;
func->led_on = e1000e_led_on_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /*
+ * ARC supported; valid only if manageability features are
+ * enabled.
+ */
+ mac->arc_subsystem_valid =
+ (er32(FWSM) & E1000_FWSM_MODE_MASK)
+ ? true : false;
break;
case e1000_82574:
case e1000_82583:
@@ -281,6 +288,9 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
default:
func->check_mng_mode = e1000e_check_mng_mode_generic;
func->led_on = e1000e_led_on_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
break;
}
@@ -323,7 +333,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
}
/*
- * Initialze device specific counter of SMBI acquisition
+ * Initialize device specific counter of SMBI acquisition
* timeouts.
*/
hw->dev_spec.e82571.smb_counter = 0;
@@ -993,9 +1003,10 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
/* ...for both queues. */
switch (mac->type) {
case e1000_82573:
+ e1000e_enable_tx_pkt_filtering(hw);
+ /* fall through */
case e1000_82574:
case e1000_82583:
- e1000e_enable_tx_pkt_filtering(hw);
reg_data = er32(GCR);
reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
ew32(GCR, reg_data);
@@ -1137,8 +1148,6 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
default:
break;
}
-
- return;
}
/**
@@ -1642,8 +1651,6 @@ static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
/* If the management interface is not enabled, then power down */
if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
-
- return;
}
/**
@@ -1845,7 +1852,7 @@ struct e1000_info e1000_82574_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
- .pba = 20,
+ .pba = 36,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
@@ -1862,7 +1869,7 @@ struct e1000_info e1000_82583_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
- .pba = 20,
+ .pba = 36,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index e301e26..4dc02c7 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -138,6 +138,11 @@
/* Enable MNG packets to host memory */
#define E1000_MANC_EN_MNG2HOST 0x00200000
+#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
+#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
+#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
+#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
+
/* Receive Control */
#define E1000_RCTL_EN 0x00000002 /* enable */
#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
@@ -214,6 +219,8 @@
#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
+#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
@@ -622,6 +629,8 @@
#define NVM_ALT_MAC_ADDR_PTR 0x0037
#define NVM_CHECKSUM_REG 0x003F
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
+
#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index ee32b9b..c0b3db4 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -43,25 +43,16 @@
struct e1000_info;
-#define e_printk(level, adapter, format, arg...) \
- printk(level "%s: %s: " format, pci_name(adapter->pdev), \
- adapter->netdev->name, ## arg)
-
-#ifdef DEBUG
#define e_dbg(format, arg...) \
- e_printk(KERN_DEBUG , hw->adapter, format, ## arg)
-#else
-#define e_dbg(format, arg...) do { (void)(hw); } while (0)
-#endif
-
+ netdev_dbg(hw->adapter->netdev, format, ## arg)
#define e_err(format, arg...) \
- e_printk(KERN_ERR, adapter, format, ## arg)
+ netdev_err(adapter->netdev, format, ## arg)
#define e_info(format, arg...) \
- e_printk(KERN_INFO, adapter, format, ## arg)
+ netdev_info(adapter->netdev, format, ## arg)
#define e_warn(format, arg...) \
- e_printk(KERN_WARNING, adapter, format, ## arg)
+ netdev_warn(adapter->netdev, format, ## arg)
#define e_notice(format, arg...) \
- e_printk(KERN_NOTICE, adapter, format, ## arg)
+ netdev_notice(adapter->netdev, format, ## arg)
/* Interrupt modes, as used by the IntMode parameter */
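
[Editor's note] The rewritten wrappers forward to the generic netdev_* printers, which prefix the message with the device and interface name themselves, so the driver no longer builds its own pci_name()/netdev->name header. They still capture a local variable by name (adapter, or hw for e_dbg); a minimal usage sketch with a made-up message:

static void report_example(struct e1000_adapter *adapter)
{
	/* expands to netdev_warn(adapter->netdev, "...") */
	e_warn("example warning message\n");
}
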
@@ -159,6 +150,9 @@ struct e1000_info;
#define HV_M_STATUS_SPEED_1000 0x0200
#define HV_M_STATUS_LINK_UP 0x0040
+/* Time to wait before putting the device into D3 if there's no link (in ms). */
+#define LINK_TIMEOUT 100
+
enum e1000_boards {
board_82571,
board_82572,
@@ -195,6 +189,8 @@ struct e1000_buffer {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
+ unsigned int segs;
+ unsigned int bytecount;
u16 mapped_as_page;
};
/* Rx */
@@ -370,6 +366,8 @@ struct e1000_adapter {
struct work_struct update_phy_task;
struct work_struct led_blink_task;
struct work_struct print_hang_task;
+
+ bool idle_check;
};
struct e1000_info {
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 27d2158..38d79a6 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -221,9 +221,12 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
mac->mta_reg_count = 128;
/* Set rar entry count */
mac->rar_entry_count = E1000_RAR_ENTRIES;
- /* Set if manageability features are enabled. */
- mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
- ? true : false;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC supported; valid only if manageability features are enabled. */
+ mac->arc_subsystem_valid =
+ (er32(FWSM) & E1000_FWSM_MODE_MASK)
+ ? true : false;
/* Adaptive IFS not supported */
mac->adaptive_ifs = false;
@@ -1380,8 +1383,6 @@ static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
if (!(hw->mac.ops.check_mng_mode(hw) ||
hw->phy.ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
-
- return;
}
/**
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 983493f..2c52121 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -412,7 +412,6 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
netdev->features &= ~NETIF_F_TSO6;
}
- e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
adapter->flags |= FLAG_TSO_FORCE;
return 0;
}
@@ -1069,10 +1068,10 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (tx_ring->desc && tx_ring->buffer_info) {
for (i = 0; i < tx_ring->count; i++) {
if (tx_ring->buffer_info[i].dma)
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
tx_ring->buffer_info[i].dma,
tx_ring->buffer_info[i].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (tx_ring->buffer_info[i].skb)
dev_kfree_skb(tx_ring->buffer_info[i].skb);
}
@@ -1081,9 +1080,9 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (rx_ring->desc && rx_ring->buffer_info) {
for (i = 0; i < rx_ring->count; i++) {
if (rx_ring->buffer_info[i].dma)
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
rx_ring->buffer_info[i].dma,
- 2048, PCI_DMA_FROMDEVICE);
+ 2048, DMA_FROM_DEVICE);
if (rx_ring->buffer_info[i].skb)
dev_kfree_skb(rx_ring->buffer_info[i].skb);
}
@@ -1163,9 +1162,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[i].length = skb->len;
tx_ring->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ tx_ring->buffer_info[i].dma)) {
ret_val = 4;
goto err_nomem;
}
@@ -1226,9 +1226,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->buffer_info[i].skb = skb;
rx_ring->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, 2048,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
+ dma_map_single(&pdev->dev, skb->data, 2048,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ rx_ring->buffer_info[i].dma)) {
ret_val = 8;
goto err_nomem;
}
@@ -1556,10 +1557,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
for (i = 0; i < 64; i++) { /* send the packets */
e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
+ dma_sync_single_for_device(&pdev->dev,
tx_ring->buffer_info[k].dma,
tx_ring->buffer_info[k].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
k++;
if (k == tx_ring->count)
k = 0;
@@ -1569,9 +1570,9 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
do { /* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
+ dma_sync_single_for_cpu(&pdev->dev,
rx_ring->buffer_info[l].dma, 2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
ret_val = e1000_check_lbtest_frame(
rx_ring->buffer_info[l].skb, 1024);
@@ -1736,6 +1737,12 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
dev_open(netdev);
} else {
+ if (!if_running && (adapter->flags & FLAG_HAS_AMT)) {
+ clear_bit(__E1000_TESTING, &adapter->state);
+ dev_open(netdev);
+ set_bit(__E1000_TESTING, &adapter->state);
+ }
+
e_info("online testing starting\n");
/* Online tests */
if (e1000_link_test(adapter, &data[4]))
@@ -1747,6 +1754,9 @@ static void e1000_diag_test(struct net_device *netdev,
data[2] = 0;
data[3] = 0;
+ if (!if_running && (adapter->flags & FLAG_HAS_AMT))
+ dev_close(netdev);
+
clear_bit(__E1000_TESTING, &adapter->state);
}
msleep_interruptible(4 * 1000);
@@ -1889,7 +1899,7 @@ static int e1000_get_coalesce(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (adapter->itr_setting <= 3)
+ if (adapter->itr_setting <= 4)
ec->rx_coalesce_usecs = adapter->itr_setting;
else
ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
@@ -1904,12 +1914,14 @@ static int e1000_set_coalesce(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
- ((ec->rx_coalesce_usecs > 3) &&
+ ((ec->rx_coalesce_usecs > 4) &&
(ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
(ec->rx_coalesce_usecs == 2))
return -EINVAL;
- if (ec->rx_coalesce_usecs <= 3) {
+ if (ec->rx_coalesce_usecs == 4) {
+ adapter->itr = adapter->itr_setting = 4;
+ } else if (ec->rx_coalesce_usecs <= 3) {
adapter->itr = 20000;
adapter->itr_setting = ec->rx_coalesce_usecs;
} else {
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 8bdcd5f..5d1220d 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -208,6 +208,8 @@ enum e1e_registers {
E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */
E1000_MANC2H = 0x05860, /* Management Control To Host - RW */
+ E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */
+#define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4))
E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
E1000_GCR = 0x05B00, /* PCI-Ex Control */
E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */
@@ -380,6 +382,7 @@ enum e1e_registers {
#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
+#define E1000_DEV_ID_ICH10_D_BM_V 0x1525
#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
@@ -828,6 +831,7 @@ struct e1000_mac_info {
u8 forced_speed_duplex;
bool adaptive_ifs;
+ bool has_fwsm;
bool arc_subsystem_valid;
bool autoneg;
bool autoneg_failed;
@@ -898,6 +902,7 @@ struct e1000_fc_info {
u32 high_water; /* Flow control high-water mark */
u32 low_water; /* Flow control low-water mark */
u16 pause_time; /* Flow control pause timer */
+ u16 refresh_time; /* Flow control refresh timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
enum e1000_fc_mode current_mode; /* FC mode in effect */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 8b5e157..b2507d9 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -83,6 +83,8 @@
#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID 0x00008000
#define E1000_ICH_MNG_IAMT_MODE 0x2
@@ -259,6 +261,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
+ u32 ctrl;
s32 ret_val = 0;
phy->addr = 1;
@@ -274,6 +277,33 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ /*
+ * The MAC-PHY interconnect may still be in SMBus mode
+ * after Sx->S0. Toggle the LANPHYPC Value bit to force
+ * the interconnect to PCIe mode, but only if there is no
+		 * firmware present; otherwise firmware will have done it.
+ */
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
+ ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
+ ew32(CTRL, ctrl);
+ udelay(10);
+ ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+ ew32(CTRL, ctrl);
+ msleep(50);
+ }
+
+ /*
+	 * Reset the PHY before any access to it. Doing so ensures that
+ * the PHY is in a known good state before we read/write PHY registers.
+ * The generic reset is sufficient here, because we haven't determined
+ * the PHY type yet.
+ */
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
phy->id = e1000_phy_unknown;
ret_val = e1000e_get_phy_id(hw);
if (ret_val)
@@ -300,6 +330,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->ops.get_cable_length = e1000_get_cable_length_82577;
phy->ops.get_info = e1000_get_phy_info_82577;
phy->ops.commit = e1000e_phy_sw_reset;
+ break;
case e1000_phy_82578:
phy->ops.check_polarity = e1000_check_polarity_m88;
phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
@@ -472,8 +503,10 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
if (mac->type == e1000_ich8lan)
mac->rar_entry_count--;
- /* Set if manageability features are enabled. */
- mac->arc_subsystem_valid = true;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC subsystem not supported */
+ mac->arc_subsystem_valid = false;
/* Adaptive IFS supported */
mac->adaptive_ifs = true;
@@ -657,8 +690,6 @@ static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
{
mutex_unlock(&nvm_mutex);
-
- return;
}
static DEFINE_MUTEX(swflag_mutex);
@@ -737,8 +768,6 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
ew32(EXTCNF_CTRL, extcnf_ctrl);
mutex_unlock(&swflag_mutex);
-
- return;
}
/**
@@ -785,11 +814,16 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
+ struct e1000_adapter *adapter = hw->adapter;
struct e1000_phy_info *phy = &hw->phy;
u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
- s32 ret_val;
+ s32 ret_val = 0;
u16 word_addr, reg_data, reg_addr, phy_page = 0;
+ if (!(hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) &&
+ !(hw->mac.type == e1000_pchlan))
+ return ret_val;
+
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
@@ -801,97 +835,87 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
* Therefore, after each PHY reset, we will load the
* configuration data out of the NVM manually.
*/
- if ((hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) ||
- (hw->mac.type == e1000_pchlan)) {
- struct e1000_adapter *adapter = hw->adapter;
-
- /* Check if SW needs to configure the PHY */
- if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
- (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
- (hw->mac.type == e1000_pchlan))
- sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
- else
- sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+ if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
+ (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
+ (hw->mac.type == e1000_pchlan))
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+ else
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
- data = er32(FEXTNVM);
- if (!(data & sw_cfg_mask))
- goto out;
+ data = er32(FEXTNVM);
+ if (!(data & sw_cfg_mask))
+ goto out;
- /* Wait for basic configuration completes before proceeding */
- e1000_lan_init_done_ich8lan(hw);
+ /*
+ * Make sure HW does not configure LCD from PHY
+ * extended configuration before SW configuration
+ */
+ data = er32(EXTCNF_CTRL);
+ if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+ goto out;
+
+ cnf_size = er32(EXTCNF_SIZE);
+ cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+ cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+ if (!cnf_size)
+ goto out;
+
+ cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+ cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+ if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
+ (hw->mac.type == e1000_pchlan)) {
/*
- * Make sure HW does not configure LCD from PHY
- * extended configuration before SW configuration
+ * HW configures the SMBus address and LEDs when the
+ * OEM and LCD Write Enable bits are set in the NVM.
+ * When both NVM bits are cleared, SW will configure
+ * them instead.
*/
- data = er32(EXTCNF_CTRL);
- if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+ data = er32(STRAP);
+ data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+ reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
+ reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
+ reg_data);
+ if (ret_val)
goto out;
- cnf_size = er32(EXTCNF_SIZE);
- cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
- cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
- if (!cnf_size)
+ data = er32(LEDCTL);
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
+ (u16)data);
+ if (ret_val)
goto out;
+ }
- cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
- cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
-
- if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
- (hw->mac.type == e1000_pchlan)) {
- /*
- * HW configures the SMBus address and LEDs when the
- * OEM and LCD Write Enable bits are set in the NVM.
- * When both NVM bits are cleared, SW will configure
- * them instead.
- */
- data = er32(STRAP);
- data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
- reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
- reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
- ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
- reg_data);
- if (ret_val)
- goto out;
-
- data = er32(LEDCTL);
- ret_val = e1000_write_phy_reg_hv_locked(hw,
- HV_LED_CONFIG,
- (u16)data);
- if (ret_val)
- goto out;
- }
- /* Configure LCD from extended configuration region. */
+ /* Configure LCD from extended configuration region. */
- /* cnf_base_addr is in DWORD */
- word_addr = (u16)(cnf_base_addr << 1);
+ /* cnf_base_addr is in DWORD */
+ word_addr = (u16)(cnf_base_addr << 1);
- for (i = 0; i < cnf_size; i++) {
- ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
- &reg_data);
- if (ret_val)
- goto out;
+ for (i = 0; i < cnf_size; i++) {
+ ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
+ &reg_data);
+ if (ret_val)
+ goto out;
- ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
- 1, &reg_addr);
- if (ret_val)
- goto out;
+ ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
+ 1, &reg_addr);
+ if (ret_val)
+ goto out;
- /* Save off the PHY page for future writes. */
- if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
- phy_page = reg_data;
- continue;
- }
+ /* Save off the PHY page for future writes. */
+ if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+ phy_page = reg_data;
+ continue;
+ }
- reg_addr &= PHY_REG_MASK;
- reg_addr |= phy_page;
+ reg_addr &= PHY_REG_MASK;
+ reg_addr |= phy_page;
- ret_val = phy->ops.write_reg_locked(hw,
- (u32)reg_addr,
- reg_data);
- if (ret_val)
- goto out;
- }
+ ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
+ reg_data);
+ if (ret_val)
+ goto out;
}
out:
@@ -1229,30 +1253,26 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
}
/**
- * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
* @hw: pointer to the HW structure
- *
- * Resets the PHY
- * This is a function pointer entry point called by drivers
- * or other shared routines.
**/
-static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = 0;
u16 reg;
- ret_val = e1000e_phy_hw_reset_generic(hw);
- if (ret_val)
- return ret_val;
-
- /* Allow time for h/w to get to a quiescent state after reset */
- mdelay(10);
+ if (e1000_check_reset_block(hw))
+ goto out;
/* Perform any necessary post-reset workarounds */
- if (hw->mac.type == e1000_pchlan) {
+ switch (hw->mac.type) {
+ case e1000_pchlan:
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
if (ret_val)
- return ret_val;
+ goto out;
+ break;
+ default:
+ break;
}
/* Dummy read to clear the phy wakeup bit after lcd reset */
@@ -1265,11 +1285,32 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
goto out;
/* Configure the LCD with the OEM bits in NVM */
- if (hw->mac.type == e1000_pchlan)
- ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+ ret_val = e1000_oem_bits_config_ich8lan(hw, true);
out:
- return 0;
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Resets the PHY
+ * This is a function pointer entry point called by drivers
+ * or other shared routines.
+ **/
+static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_post_phy_reset_ich8lan(hw);
+
+out:
+ return ret_val;
}
/**
@@ -1622,7 +1663,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
/* Check if the flash descriptor is valid */
if (hsfsts.hsf_status.fldesvalid == 0) {
e_dbg("Flash descriptor invalid. "
- "SW Sequencing must be used.");
+ "SW Sequencing must be used.\n");
return -E1000_ERR_NVM;
}
@@ -1671,7 +1712,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
hsfsts.hsf_status.flcdone = 1;
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
} else {
- e_dbg("Flash controller busy, cannot get access");
+ e_dbg("Flash controller busy, cannot get access\n");
}
}
@@ -1822,7 +1863,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
continue;
} else if (hsfsts.hsf_status.flcdone == 0) {
e_dbg("Timeout error - flash cycle "
- "did not complete.");
+ "did not complete.\n");
break;
}
}
@@ -1908,18 +1949,14 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
new_bank_offset = nvm->flash_bank_size;
old_bank_offset = 0;
ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
} else {
old_bank_offset = nvm->flash_bank_size;
new_bank_offset = 0;
ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
}
for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
@@ -1975,8 +2012,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (ret_val) {
/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
e_dbg("Flash commit failed.\n");
- nvm->ops.release(hw);
- goto out;
+ goto release;
}
/*
@@ -1987,18 +2023,15 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
+
data &= 0xBFFF;
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
act_offset * 2 + 1,
(u8)(data >> 8));
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
/*
* And invalidate the previously valid segment by setting
@@ -2008,10 +2041,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
/* Great! Everything worked, we can now clear the cached entries. */
for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
@@ -2019,14 +2050,17 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
dev_spec->shadow_ram[i].value = 0xFFFF;
}
+release:
nvm->ops.release(hw);
/*
* Reload the EEPROM, or else modifications will not appear
* until after the next adapter reset.
*/
- e1000e_reload_nvm(hw);
- msleep(10);
+ if (!ret_val) {
+ e1000e_reload_nvm(hw);
+ msleep(10);
+ }
out:
if (ret_val)
@@ -2487,9 +2521,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
- if (ret_val) {
+ if (ret_val)
e_dbg("PCI-E Master disable polling has failed.\n");
- }
e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
@@ -2528,14 +2561,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ctrl = er32(CTRL);
if (!e1000_check_reset_block(hw)) {
- /* Clear PHY Reset Asserted bit */
- if (hw->mac.type >= e1000_pchlan) {
- u32 status = er32(STATUS);
- ew32(STATUS, status & ~E1000_STATUS_PHYRA);
- }
-
/*
- * PHY HW reset requires MAC CORE reset at the same
+ * Full-chip reset requires MAC and PHY reset at the same
* time to make sure the interface between MAC and the
* external PHY is reset.
*/
@@ -2549,39 +2576,16 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
if (!ret_val)
e1000_release_swflag_ich8lan(hw);
- /* Perform any necessary post-reset workarounds */
- if (hw->mac.type == e1000_pchlan)
- ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
-
- if (ctrl & E1000_CTRL_PHY_RST)
+ if (ctrl & E1000_CTRL_PHY_RST) {
ret_val = hw->phy.ops.get_cfg_done(hw);
+ if (ret_val)
+ goto out;
- if (hw->mac.type >= e1000_ich10lan) {
- e1000_lan_init_done_ich8lan(hw);
- } else {
- ret_val = e1000e_get_auto_rd_done(hw);
- if (ret_val) {
- /*
- * When auto config read does not complete, do not
- * return with an error. This can happen in situations
- * where there is no eeprom and prevents getting link.
- */
- e_dbg("Auto Read Done did not complete\n");
- }
- }
- /* Dummy read to clear the phy wakeup bit after lcd reset */
- if (hw->mac.type == e1000_pchlan)
- e1e_rphy(hw, BM_WUC, &reg);
-
- ret_val = e1000_sw_lcd_config_ich8lan(hw);
- if (ret_val)
- goto out;
-
- if (hw->mac.type == e1000_pchlan) {
- ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+ ret_val = e1000_post_phy_reset_ich8lan(hw);
if (ret_val)
goto out;
}
+
/*
* For PCH, this write will make sure that any noise
* will be detected as a CRC error and be dropped rather than show up
@@ -2748,8 +2752,6 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
reg = er32(RFCTL);
reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
ew32(RFCTL, reg);
-
- return;
}
/**
@@ -2799,6 +2801,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
ew32(FCTTV, hw->fc.pause_time);
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
+ ew32(FCRTV_PCH, hw->fc.refresh_time);
+
ret_val = hw->phy.ops.write_reg(hw,
PHY_REG(BM_PORT_CTRL_PAGE, 27),
hw->fc.pause_time);
@@ -3127,8 +3131,6 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
default:
break;
}
-
- return;
}
/**
@@ -3265,33 +3267,50 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
}
/**
- * e1000_get_cfg_done_ich8lan - Read config done bit
+ * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
* @hw: pointer to the HW structure
*
- * Read the management control register for the config done bit for
- * completion status. NOTE: silicon which is EEPROM-less will fail trying
- * to read the config done bit, so an error is *ONLY* logged and returns
- * 0. If we were to return with error, EEPROM-less silicon
- * would not be able to be reset or change link.
+ * Read appropriate register for the config done bit for completion status
+ * and configure the PHY through s/w for EEPROM-less parts.
+ *
+ * NOTE: some silicon which is EEPROM-less will fail trying to read the
+ * config done bit, so only an error is logged and execution continues.
+ * If we were to return with an error, EEPROM-less silicon would not be
+ * able to be reset or change link.
**/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
+ s32 ret_val = 0;
u32 bank = 0;
+ u32 status;
- if (hw->mac.type >= e1000_pchlan) {
- u32 status = er32(STATUS);
+ e1000e_get_cfg_done(hw);
- if (status & E1000_STATUS_PHYRA)
- ew32(STATUS, status & ~E1000_STATUS_PHYRA);
- else
- e_dbg("PHY Reset Asserted not set - needs delay\n");
+ /* Wait for indication from h/w that it has completed basic config */
+ if (hw->mac.type >= e1000_ich10lan) {
+ e1000_lan_init_done_ich8lan(hw);
+ } else {
+ ret_val = e1000e_get_auto_rd_done(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ e_dbg("Auto Read Done did not complete\n");
+ ret_val = 0;
+ }
}
- e1000e_get_cfg_done(hw);
+ /* Clear PHY Reset Asserted bit */
+ status = er32(STATUS);
+ if (status & E1000_STATUS_PHYRA)
+ ew32(STATUS, status & ~E1000_STATUS_PHYRA);
+ else
+ e_dbg("PHY Reset Asserted not set - needs delay\n");
/* If EEPROM is not marked present, init the IGP 3 PHY manually */
- if ((hw->mac.type != e1000_ich10lan) &&
- (hw->mac.type != e1000_pchlan)) {
+ if (hw->mac.type <= e1000_ich9lan) {
if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
(hw->phy.type == e1000_phy_igp_3)) {
e1000e_phy_init_script_igp3(hw);
@@ -3300,11 +3319,11 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
/* Maybe we should do a basic PHY config */
e_dbg("EEPROM not present\n");
- return -E1000_ERR_CONFIG;
+ ret_val = -E1000_ERR_CONFIG;
}
}
- return 0;
+ return ret_val;
}
/**
@@ -3320,8 +3339,6 @@ static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
if (!(hw->mac.ops.check_mng_mode(hw) ||
hw->phy.ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
-
- return;
}
/**
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a8b2c0d..a968e3a 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1262,24 +1262,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup
u32 status;
status = er32(STATUS);
- if (status & E1000_STATUS_SPEED_1000) {
+ if (status & E1000_STATUS_SPEED_1000)
*speed = SPEED_1000;
- e_dbg("1000 Mbs, ");
- } else if (status & E1000_STATUS_SPEED_100) {
+ else if (status & E1000_STATUS_SPEED_100)
*speed = SPEED_100;
- e_dbg("100 Mbs, ");
- } else {
+ else
*speed = SPEED_10;
- e_dbg("10 Mbs, ");
- }
- if (status & E1000_STATUS_FD) {
+ if (status & E1000_STATUS_FD)
*duplex = FULL_DUPLEX;
- e_dbg("Full Duplex\n");
- } else {
+ else
*duplex = HALF_DUPLEX;
- e_dbg("Half Duplex\n");
- }
+
+ e_dbg("%u Mbps, %s Duplex\n",
+ *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10,
+ *duplex == FULL_DUPLEX ? "Full" : "Half");
return 0;
}
@@ -2275,6 +2272,11 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
u32 hicr;
u8 i;
+ if (!(hw->mac.arc_subsystem_valid)) {
+ e_dbg("ARC subsystem not valid.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
/* Check that the host interface is enabled. */
hicr = er32(HICR);
if ((hicr & E1000_HICR_EN) == 0) {
@@ -2518,10 +2520,11 @@ s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
}
/**
- * e1000e_enable_mng_pass_thru - Enable processing of ARP's
+ * e1000e_enable_mng_pass_thru - Check if management passthrough is needed
* @hw: pointer to the HW structure
*
- * Verifies the hardware needs to allow ARPs to be processed by the host.
+ * Verifies the hardware needs to leave the interface enabled so that
+ * frames can be directed to and from the management interface.
**/
bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
{
@@ -2531,11 +2534,10 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
manc = er32(MANC);
- if (!(manc & E1000_MANC_RCV_TCO_EN) ||
- !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
- return ret_val;
+ if (!(manc & E1000_MANC_RCV_TCO_EN))
+ goto out;
- if (hw->mac.arc_subsystem_valid) {
+ if (hw->mac.has_fwsm) {
fwsm = er32(FWSM);
factps = er32(FACTPS);
@@ -2543,16 +2545,28 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
((fwsm & E1000_FWSM_MODE_MASK) ==
(e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
ret_val = true;
- return ret_val;
+ goto out;
}
- } else {
- if ((manc & E1000_MANC_SMBUS_EN) &&
- !(manc & E1000_MANC_ASF_EN)) {
+ } else if ((hw->mac.type == e1000_82574) ||
+ (hw->mac.type == e1000_82583)) {
+ u16 data;
+
+ factps = er32(FACTPS);
+ e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
+ (e1000_mng_mode_pt << 13))) {
ret_val = true;
- return ret_val;
+ goto out;
}
+ } else if ((manc & E1000_MANC_SMBUS_EN) &&
+ !(manc & E1000_MANC_ASF_EN)) {
+ ret_val = true;
+ goto out;
}
+out:
return ret_val;
}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index d5d55c6..24507f3 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -45,11 +47,12 @@
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>
+#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include "e1000.h"
-#define DRV_VERSION "1.0.2-k2"
+#define DRV_VERSION "1.0.2-k4"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -66,6 +69,361 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pchlan] = &e1000_pch_info,
};
+struct e1000_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+
+static const struct e1000_reg_info e1000_reg_info_tbl[] = {
+
+ /* General Registers */
+ {E1000_CTRL, "CTRL"},
+ {E1000_STATUS, "STATUS"},
+ {E1000_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {E1000_ICR, "ICR"},
+
+ /* RX Registers */
+ {E1000_RCTL, "RCTL"},
+ {E1000_RDLEN, "RDLEN"},
+ {E1000_RDH, "RDH"},
+ {E1000_RDT, "RDT"},
+ {E1000_RDTR, "RDTR"},
+ {E1000_RXDCTL(0), "RXDCTL"},
+ {E1000_ERT, "ERT"},
+ {E1000_RDBAL, "RDBAL"},
+ {E1000_RDBAH, "RDBAH"},
+ {E1000_RDFH, "RDFH"},
+ {E1000_RDFT, "RDFT"},
+ {E1000_RDFHS, "RDFHS"},
+ {E1000_RDFTS, "RDFTS"},
+ {E1000_RDFPC, "RDFPC"},
+
+ /* TX Registers */
+ {E1000_TCTL, "TCTL"},
+ {E1000_TDBAL, "TDBAL"},
+ {E1000_TDBAH, "TDBAH"},
+ {E1000_TDLEN, "TDLEN"},
+ {E1000_TDH, "TDH"},
+ {E1000_TDT, "TDT"},
+ {E1000_TIDV, "TIDV"},
+ {E1000_TXDCTL(0), "TXDCTL"},
+ {E1000_TADV, "TADV"},
+ {E1000_TARC(0), "TARC"},
+ {E1000_TDFH, "TDFH"},
+ {E1000_TDFT, "TDFT"},
+ {E1000_TDFHS, "TDFHS"},
+ {E1000_TDFTS, "TDFTS"},
+ {E1000_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {}
+};
+
+/*
+ * e1000_regdump - register printout routine
+ */
+static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
+
+ switch (reginfo->ofs) {
+ case E1000_RXDCTL(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_RXDCTL(n));
+ break;
+ case E1000_TXDCTL(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_TXDCTL(n));
+ break;
+ case E1000_TARC(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_TARC(n));
+ break;
+ default:
+ printk(KERN_INFO "%-15s %08x\n",
+ reginfo->name, __er32(hw, reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
+ printk(KERN_INFO "%-15s ", rname);
+ for (n = 0; n < 2; n++)
+ printk(KERN_CONT "%08x ", regs[n]);
+ printk(KERN_CONT "\n");
+}
+
+
+/*
+ * e1000e_dump - Print registers, tx-ring and rx-ring
+ */
+static void e1000e_dump(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_reg_info *reginfo;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct e1000_buffer *buffer_info;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ union e1000_rx_desc_packet_split *rx_desc_ps;
+ struct e1000_rx_desc *rx_desc;
+ struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ printk(KERN_INFO "Device Name state "
+ "trans_start last_rx\n");
+ printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ printk(KERN_INFO " Register Name Value\n");
+ for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ e1000_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
+ " leng ntw timestamp\n");
+ buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ 0, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
+ *
+ * Legacy Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] (Reserved on Write Back) |
+ * +--------------------------------------------------------------+
+ * 8 | Special | CSS | Status | CMD | CSO | Length |
+ * +--------------------------------------------------------------+
+ * 63 48 47 36 35 32 31 24 23 16 15 0
+ *
+ * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
+ * 63 48 47 40 39 32 31 16 15 8 7 0
+ * +----------------------------------------------------------------+
+ * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
+ * +----------------------------------------------------------------+
+ * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ *
+ * Extended Data Descriptor (DTYP=0x1)
+ * +----------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +----------------------------------------------------------------+
+ * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ */
+ printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Legacy format\n");
+ printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Context format\n");
+ printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Data format\n");
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
+ "%04X %3X %016llX %p",
+ (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
+ le64_to_cpu(u0->a), le64_to_cpu(u0->b),
+ (u64)buffer_info->dma, buffer_info->length,
+ buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
+ buffer_info->skb);
+ if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ buffer_info->length, true);
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ printk(KERN_INFO " %5d %5X %5X\n", 0,
+ rx_ring->next_to_use, rx_ring->next_to_clean);
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+ switch (adapter->rx_ps_pages) {
+ case 1:
+ case 2:
+ case 3:
+ /* [Extended] Packet Split Receive Descriptor Format
+ *
+ * +-----------------------------------------------------+
+ * 0 | Buffer Address 0 [63:0] |
+ * +-----------------------------------------------------+
+ * 8 | Buffer Address 1 [63:0] |
+ * +-----------------------------------------------------+
+ * 16 | Buffer Address 2 [63:0] |
+ * +-----------------------------------------------------+
+ * 24 | Buffer Address 3 [63:0] |
+ * +-----------------------------------------------------+
+ */
+ printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
+ "[buffer 1 63:0 ] "
+ "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
+ "[bi->skb] <-- Ext Pkt Split format\n");
+ /* [Extended] Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 13 12 8 7 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
+ * | Checksum | Ident | | Queue | | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
+ "[vl l0 ee es] "
+ "[ l3 l2 l1 hs] [reserved ] ---------------- "
+ "[bi->skb] <-- Ext Rx Write-Back format\n");
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
+ u1 = (struct my_u1 *)rx_desc_ps;
+ staterr =
+ le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+ if (staterr & E1000_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX %016llX %016llX "
+ "---------------- %p", i,
+ le64_to_cpu(u1->a),
+ le64_to_cpu(u1->b),
+ le64_to_cpu(u1->c),
+ le64_to_cpu(u1->d),
+ buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %016llX %016llX %p", i,
+ le64_to_cpu(u1->a),
+ le64_to_cpu(u1->b),
+ le64_to_cpu(u1->c),
+ le64_to_cpu(u1->d),
+ (u64)buffer_info->dma,
+ buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter))
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(buffer_info->dma),
+ adapter->rx_ps_bsize0, true);
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+ }
+ break;
+ default:
+ case 0:
+ /* Legacy Receive Descriptor Format
+ *
+ * +-----------------------------------------------------+
+ * | Buffer Address [63:0] |
+ * +-----------------------------------------------------+
+ * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
+ * +-----------------------------------------------------+
+ * 63 48 47 40 39 32 31 16 15 0
+ */
+ printk(KERN_INFO "Rl[desc] [address 63:0 ] "
+ "[vl er S cks ln] [bi->dma ] [bi->skb] "
+ "<-- Legacy format\n");
+ for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ buffer_info = &rx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)rx_desc;
+ printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
+ "%016llX %p",
+ i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
+ (u64)buffer_info->dma, buffer_info->skb);
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter))
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ adapter->rx_buffer_len, true);
+ }
+ }
+
+exit:
+ return;
+}
+
/**
* e1000_desc_unused - calculate if we have unused descriptors
**/
@@ -178,10 +536,10 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info->skb = skb;
map_skb:
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
break;
@@ -190,26 +548,23 @@ map_skb:
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ }
i++;
if (i == rx_ring->count)
i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
- if (i-- == 0)
- i = (rx_ring->count - 1);
-
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
- }
+ rx_ring->next_to_use = i;
}
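
The rewritten allocation loop above now bumps the tail register every E1000_RX_BUFFER_WRITE descriptors instead of once at the end, with a wmb() so the descriptor writes are visible to the hardware before the tail moves. A minimal sketch of the power-of-two batching test; E1000_RX_BUFFER_WRITE is assumed to be 16 here for illustration:

#include <stdio.h>

int main(void)
{
	const unsigned int N = 16;	/* assumed E1000_RX_BUFFER_WRITE */

	/* (i & (N - 1)) == 0 is true on every Nth descriptor index,
	 * which is when the driver would issue wmb() + tail write. */
	for (unsigned int i = 0; i < 64; i++)
		if (!(i & (N - 1)))
			printf("tail write at descriptor %u\n", i);
	return 0;
}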
/**
@@ -247,11 +602,12 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
- ps_page->dma = pci_map_page(pdev,
- ps_page->page,
- 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, ps_page->dma)) {
+ ps_page->dma = dma_map_page(&pdev->dev,
+ ps_page->page,
+ 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ ps_page->dma)) {
dev_err(&adapter->pdev->dev,
"RX DMA page map failed\n");
adapter->rx_dma_failed++;
@@ -276,10 +632,10 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
}
buffer_info->skb = skb;
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
/* cleanup skb */
@@ -290,6 +646,17 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+ }
+
i++;
if (i == rx_ring->count)
i = 0;
@@ -297,26 +664,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
}
no_buffers:
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
-
- if (!(i--))
- i = (rx_ring->count - 1);
-
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- /*
- * Hardware increments by 16 bytes, but packet split
- * descriptors are 32 bytes...so we increment tail
- * twice as much.
- */
- writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
- }
+ rx_ring->next_to_use = i;
}
/**
@@ -366,10 +714,10 @@ check_page:
}
if (!buffer_info->dma)
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -443,10 +791,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
cleaned = 1;
cleaned_count++;
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -547,12 +895,11 @@ static void e1000_put_txbuf(struct e1000_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev, buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -643,14 +990,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
cleaned = (i == eop);
if (cleaned) {
- struct sk_buff *skb = buffer_info->skb;
- unsigned int segs, bytecount;
- segs = skb_shinfo(skb)->gso_segs ?: 1;
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
- total_tx_packets += segs;
- total_tx_bytes += bytecount;
+ total_tx_packets += buffer_info->segs;
+ total_tx_bytes += buffer_info->bytecount;
}
e1000_put_txbuf(adapter, buffer_info);
@@ -753,9 +1094,9 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
cleaned = 1;
cleaned_count++;
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
/* see !EOP comment in other rx routine */
@@ -811,13 +1152,13 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
* kmap_atomic, so we can't hold the mapping
* very long
*/
- pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
memcpy(skb_tail_pointer(skb), vaddr, l1);
kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
- pci_dma_sync_single_for_device(pdev, ps_page->dma,
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&pdev->dev, ps_page->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
/* remove the CRC */
if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
@@ -834,8 +1175,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
break;
ps_page = &buffer_info->ps_pages[j];
- pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
ps_page->dma = 0;
skb_fill_page_desc(skb, j, ps_page->page, 0, length);
ps_page->page = NULL;
@@ -953,8 +1294,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
- pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -1090,17 +1431,17 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
if (adapter->clean_rx == e1000_clean_rx_irq)
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
- pci_unmap_page(pdev, buffer_info->dma,
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
@@ -1118,8 +1459,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
ps_page = &buffer_info->ps_pages[j];
if (!ps_page->page)
break;
- pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
ps_page->dma = 0;
put_page(ps_page->page);
ps_page->page = NULL;
@@ -1426,8 +1767,6 @@ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
pci_disable_msi(adapter->pdev);
adapter->flags &= ~FLAG_MSI_ENABLED;
}
-
- return;
}
/**
@@ -1479,8 +1818,6 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
/* Don't do anything; this is the system default */
break;
}
-
- return;
}
/**
@@ -2185,10 +2522,10 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
}
}
-static void e1000_init_manageability(struct e1000_adapter *adapter)
+static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 manc, manc2h;
+ u32 manc, manc2h, mdef, i, j;
if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
return;
@@ -2202,10 +2539,49 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
*/
manc |= E1000_MANC_EN_MNG2HOST;
manc2h = er32(MANC2H);
-#define E1000_MNG2HOST_PORT_623 (1 << 5)
-#define E1000_MNG2HOST_PORT_664 (1 << 6)
- manc2h |= E1000_MNG2HOST_PORT_623;
- manc2h |= E1000_MNG2HOST_PORT_664;
+
+ switch (hw->mac.type) {
+ default:
+ manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ /*
+ * Check if IPMI pass-through decision filter already exists;
+ * if so, enable it.
+ */
+ for (i = 0, j = 0; i < 8; i++) {
+ mdef = er32(MDEF(i));
+
+ /* Ignore filters with anything other than IPMI ports */
+ if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+ continue;
+
+ /* Enable this decision filter in MANC2H */
+ if (mdef)
+ manc2h |= (1 << i);
+
+ j |= mdef;
+ }
+
+ if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+ break;
+
+ /* Create new decision filter in an empty filter */
+ for (i = 0, j = 0; i < 8; i++)
+ if (er32(MDEF(i)) == 0) {
+ ew32(MDEF(i), (E1000_MDEF_PORT_623 |
+ E1000_MDEF_PORT_664));
+ manc2h |= (1 << 1);
+ j++;
+ break;
+ }
+
+ if (!j)
+ e_warn("Unable to create IPMI pass-through filter\n");
+ break;
+ }
+
ew32(MANC2H, manc2h);
ew32(MANC, manc);
}
@@ -2565,7 +2941,7 @@ static void e1000_set_multi(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 *mta_list;
u32 rctl;
int i;
@@ -2597,9 +2973,8 @@ static void e1000_set_multi(struct net_device *netdev)
/* prepare a packed array of only addresses. */
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN),
- mc_ptr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
e1000_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
@@ -2621,7 +2996,7 @@ static void e1000_configure(struct e1000_adapter *adapter)
e1000_set_multi(adapter->netdev);
e1000_restore_vlan(adapter);
- e1000_init_manageability(adapter);
+ e1000_init_manageability_pt(adapter);
e1000_configure_tx(adapter);
e1000_setup_rctl(adapter);
@@ -2755,6 +3130,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
fc->high_water = 0x5000;
fc->low_water = 0x3000;
}
+ fc->refresh_time = 0x1000;
} else {
if ((adapter->flags & FLAG_HAS_ERT) &&
(adapter->netdev->mtu > ETH_DATA_LEN))
@@ -2792,10 +3168,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
if (mac->ops.init_hw(hw))
e_err("Hardware Error\n");
- /* additional part of the flow-control workaround above */
- if (hw->mac.type == e1000_pchlan)
- ew32(FCRTV_PCH, 0x1000);
-
e1000_update_mng_vlan(adapter);
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -2841,7 +3213,11 @@ int e1000e_up(struct e1000_adapter *adapter)
netif_wake_queue(adapter->netdev);
/* fire a link change interrupt to start the watchdog */
- ew32(ICS, E1000_ICS_LSC);
+ if (adapter->msix_entries)
+ ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+ else
+ ew32(ICS, E1000_ICS_LSC);
+
return 0;
}
@@ -3085,12 +3461,15 @@ static int e1000_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
int err;
/* disallow open during test */
if (test_bit(__E1000_TESTING, &adapter->state))
return -EBUSY;
+ pm_runtime_get_sync(&pdev->dev);
+
netif_carrier_off(netdev);
/* allocate transmit descriptors */
@@ -3103,6 +3482,15 @@ static int e1000_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
+ /*
+ * If AMT is enabled, let the firmware know that the network
+ * interface is now open and reset the part to a known state.
+ */
+ if (adapter->flags & FLAG_HAS_AMT) {
+ e1000_get_hw_control(adapter);
+ e1000e_reset(adapter);
+ }
+
e1000e_power_up_phy(adapter);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -3111,13 +3499,6 @@ static int e1000_open(struct net_device *netdev)
e1000_update_mng_vlan(adapter);
/*
- * If AMT is enabled, let the firmware know that the network
- * interface is now open
- */
- if (adapter->flags & FLAG_HAS_AMT)
- e1000_get_hw_control(adapter);
-
- /*
* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
* as soon as we call pci_request_irq, so we have to setup our
@@ -3151,8 +3532,14 @@ static int e1000_open(struct net_device *netdev)
netif_start_queue(netdev);
+ adapter->idle_check = true;
+ pm_runtime_put(&pdev->dev);
+
/* fire a link status change interrupt to start the watchdog */
- ew32(ICS, E1000_ICS_LSC);
+ if (adapter->msix_entries)
+ ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+ else
+ ew32(ICS, E1000_ICS_LSC);
return 0;
@@ -3164,6 +3551,7 @@ err_setup_rx:
e1000e_free_tx_resources(adapter);
err_setup_tx:
e1000e_reset(adapter);
+ pm_runtime_put_sync(&pdev->dev);
return err;
}
@@ -3182,11 +3570,17 @@ err_setup_tx:
static int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
- e1000e_down(adapter);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ e1000e_down(adapter);
+ e1000_free_irq(adapter);
+ }
e1000_power_down_phy(adapter);
- e1000_free_irq(adapter);
e1000e_free_tx_resources(adapter);
e1000e_free_rx_resources(adapter);
@@ -3208,6 +3602,8 @@ static int e1000_close(struct net_device *netdev)
if (adapter->flags & FLAG_HAS_AMT)
e1000_release_hw_control(adapter);
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
/**
@@ -3552,6 +3948,9 @@ static void e1000_watchdog_task(struct work_struct *work)
link = e1000e_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link) {
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
e1000e_enable_receives(adapter);
goto link_up;
}
@@ -3563,6 +3962,10 @@ static void e1000_watchdog_task(struct work_struct *work)
if (link) {
if (!netif_carrier_ok(netdev)) {
bool txb2b = 1;
+
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
/* update snapshot of PHY registers on LSC */
e1000_phy_read_status(adapter);
mac->ops.get_link_up_info(&adapter->hw,
@@ -3672,6 +4075,9 @@ static void e1000_watchdog_task(struct work_struct *work)
if (adapter->flags & FLAG_RX_NEEDS_RESTART)
schedule_work(&adapter->reset_task);
+ else
+ pm_schedule_suspend(netdev->dev.parent,
+ LINK_TIMEOUT);
}
}
@@ -3707,6 +4113,22 @@ link_up:
}
}
+ /* Simple mode for Interrupt Throttle Rate (ITR) */
+ if (adapter->itr_setting == 4) {
+ /*
+ * Symmetric Tx/Rx gets a reduced ITR=2000;
+ * Total asymmetrical Tx or Rx gets ITR=8000;
+ * everyone else is between 2000-8000.
+ */
+ u32 goc = (adapter->gotc + adapter->gorc) / 10000;
+ u32 dif = (adapter->gotc > adapter->gorc ?
+ adapter->gotc - adapter->gorc :
+ adapter->gorc - adapter->gotc) / 10000;
+ u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+ ew32(ITR, 1000000000 / (itr * 256));
+ }
+
/* Cause software interrupt to ensure Rx ring is cleaned */
if (adapter->msix_entries)
ew32(ICS, adapter->rx_ring->ims_val);
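
ITR setting 4 above derives a 2000-8000 interrupts/sec target from the Tx/Rx byte-rate mix and then programs the ITR register in 256 ns units. A standalone sketch of that arithmetic; the gotc/gorc per-second counters are hypothetical values for the example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t gotc = 80000000, gorc = 20000000;	/* hypothetical rates */
	uint32_t goc = (gotc + gorc) / 10000;
	uint32_t dif = (gotc > gorc ? gotc - gorc : gorc - gotc) / 10000;

	/* symmetric traffic (dif == 0) -> 2000; fully one-sided -> 8000 */
	uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

	/* the ITR register counts 256 ns intervals between interrupts */
	printf("target %u ints/s -> ITR register value %u\n",
	       itr, 1000000000 / (itr * 256));
	return 0;
}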
@@ -3881,7 +4303,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info;
unsigned int len = skb_headlen(skb);
unsigned int offset = 0, size, count = 0, i;
- unsigned int f;
+ unsigned int f, bytecount, segs;
i = tx_ring->next_to_use;
@@ -3892,10 +4314,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_single(pdev, skb->data + offset,
- size, PCI_DMA_TODEVICE);
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
buffer_info->mapped_as_page = false;
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
len -= size;
@@ -3927,11 +4350,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_page(pdev, frag->page,
+ buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
offset, size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
len -= size;
@@ -3940,7 +4363,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
}
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
+
tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[i].segs = segs;
+ tx_ring->buffer_info[i].bytecount = bytecount;
tx_ring->buffer_info[first].next_to_watch = i;
return count;
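
The tx_map() change above precomputes the GSO segment count and wire byte count once per skb so the cleanup path can simply sum cached values. A worked example of the bytecount formula, with hypothetical TSO sizes:

#include <stdio.h>

int main(void)
{
	/* bytecount = (segs - 1) * header_len + skb_len, i.e. the header
	 * is counted once per segment. Values below are made up. */
	unsigned int segs = 4, headlen = 66;
	unsigned int skb_len = headlen + 4 * 1448;	/* header + payload */
	unsigned int bytecount = (segs - 1) * headlen + skb_len;

	printf("%u segments, %u bytes on the wire\n", segs, bytecount);
	return 0;
}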
@@ -4107,7 +4536,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
unsigned int max_per_txd = E1000_MAX_PER_TXD;
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
- unsigned int len = skb->len - skb->data_len;
+ unsigned int len = skb_headlen(skb);
unsigned int nr_frags;
unsigned int mss;
int count = 0;
@@ -4157,7 +4586,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
}
}
@@ -4243,6 +4672,8 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter;
adapter = container_of(work, struct e1000_adapter, reset_task);
+ e1000e_dump(adapter);
+ e_err("Reset adapter\n");
e1000e_reinit_locked(adapter);
}
@@ -4477,13 +4908,15 @@ out:
return retval;
}
-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ bool runtime)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status;
- u32 wufc = adapter->wol;
+ /* Runtime suspend should only enable wakeup for link changes */
+ u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
int retval = 0;
netif_device_detach(netdev);
@@ -4653,20 +5086,13 @@ void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
__e1000e_disable_aspm(pdev, state);
}
-#ifdef CONFIG_PM
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_OPS
+static bool e1000e_pm_ready(struct e1000_adapter *adapter)
{
- int retval;
- bool wake;
-
- retval = __e1000_shutdown(pdev, &wake);
- if (!retval)
- e1000_complete_shutdown(pdev, true, wake);
-
- return retval;
+ return !!adapter->tx_ring->buffer_info;
}
-static int e1000_resume(struct pci_dev *pdev)
+static int __e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -4679,18 +5105,6 @@ static int e1000_resume(struct pci_dev *pdev)
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "Cannot enable PCI device from suspend\n");
- return err;
- }
-
- pci_set_master(pdev);
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
e1000e_set_interrupt_capability(adapter);
if (netif_running(netdev)) {
err = e1000_request_irq(adapter);
@@ -4731,7 +5145,7 @@ static int e1000_resume(struct pci_dev *pdev)
e1000e_reset(adapter);
- e1000_init_manageability(adapter);
+ e1000_init_manageability_pt(adapter);
if (netif_running(netdev))
e1000e_up(adapter);
@@ -4748,13 +5162,88 @@ static int e1000_resume(struct pci_dev *pdev)
return 0;
}
-#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int e1000_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int retval;
+ bool wake;
+
+ retval = __e1000_shutdown(pdev, &wake, false);
+ if (!retval)
+ e1000_complete_shutdown(pdev, true, wake);
+
+ return retval;
+}
+
+static int e1000_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (e1000e_pm_ready(adapter))
+ adapter->idle_check = true;
+
+ return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int e1000_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (e1000e_pm_ready(adapter)) {
+ bool wake;
+
+ __e1000_shutdown(pdev, &wake, true);
+ }
+
+ return 0;
+}
+
+static int e1000_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!e1000e_pm_ready(adapter))
+ return 0;
+
+ if (adapter->idle_check) {
+ adapter->idle_check = false;
+ if (!e1000e_has_link(adapter))
+ pm_schedule_suspend(dev, MSEC_PER_SEC);
+ }
+
+ return -EBUSY;
+}
+
+static int e1000_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!e1000e_pm_ready(adapter))
+ return 0;
+
+ adapter->idle_check = !dev->power.runtime_auto;
+ return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM_OPS */
static void e1000_shutdown(struct pci_dev *pdev)
{
bool wake = false;
- __e1000_shutdown(pdev, &wake);
+ __e1000_shutdown(pdev, &wake, false);
if (system_state == SYSTEM_POWER_OFF)
e1000_complete_shutdown(pdev, false, wake);
@@ -4828,8 +5317,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_set_master(pdev);
+ pdev->state_saved = true;
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -4857,7 +5346,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- e1000_init_manageability(adapter);
+ e1000_init_manageability_pt(adapter);
if (netif_running(netdev)) {
if (e1000e_up(adapter)) {
@@ -4970,16 +5459,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -5010,6 +5499,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
+ netdev->irq = pdev->irq;
+
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
hw = &adapter->hw;
@@ -5230,6 +5721,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
e1000_print_device_info(adapter);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ }
+ pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
+
return 0;
err_register:
@@ -5272,12 +5769,16 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ bool down = test_bit(__E1000_DOWN, &adapter->state);
+
+ pm_runtime_get_sync(&pdev->dev);
/*
* flush_scheduled work may reschedule our watchdog task, so
* explicitly disable watchdog tasks from being rescheduled
*/
- set_bit(__E1000_DOWN, &adapter->state);
+ if (!down)
+ set_bit(__E1000_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
@@ -5291,8 +5792,17 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
if (!(netdev->flags & IFF_UP))
e1000_power_down_phy(adapter);
+ /* Don't lie to e1000_close() down the road. */
+ if (!down)
+ clear_bit(__E1000_DOWN, &adapter->state);
unregister_netdev(netdev);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ }
+ pm_runtime_put_noidle(&pdev->dev);
+
/*
* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
@@ -5382,6 +5892,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
@@ -5392,16 +5903,22 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+#ifdef CONFIG_PM_OPS
+static const struct dev_pm_ops e1000_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
+ SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
+ e1000_runtime_resume, e1000_idle)
+};
+#endif
+
/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
.name = e1000e_driver_name,
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = __devexit_p(e1000_remove),
-#ifdef CONFIG_PM
- /* Power Management Hooks */
- .suspend = e1000_suspend,
- .resume = e1000_resume,
+#ifdef CONFIG_PM_OPS
+ .driver.pm = &e1000_pm_ops,
#endif
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
@@ -5416,10 +5933,9 @@ static struct pci_driver e1000_driver = {
static int __init e1000_init_module(void)
{
int ret;
- printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
- e1000e_driver_name, e1000e_driver_version);
- printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n",
- e1000e_driver_name);
+ pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
+ e1000e_driver_version);
+ pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 2e39977..a150e48 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -248,7 +248,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
{ /* Transmit Interrupt Delay */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err = "using default of "
@@ -267,7 +267,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Transmit Absolute Interrupt Delay */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Absolute Interrupt Delay",
.err = "using default of "
@@ -286,7 +286,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Receive Interrupt Delay */
- struct e1000_option opt = {
+ static struct e1000_option opt = {
.type = range_option,
.name = "Receive Interrupt Delay",
.err = "using default of "
@@ -305,7 +305,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Receive Absolute Interrupt Delay */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
.err = "using default of "
@@ -324,7 +324,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Interrupt Throttling Rate */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Throttling Rate (ints/sec)",
.err = "using default of "
@@ -351,6 +351,11 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
+ case 4:
+ e_info("%s set to simplified (2000-8000 ints) "
+ "mode\n", opt.name);
+ adapter->itr_setting = 4;
+ break;
default:
/*
* Save the setting, because the dynamic bits
@@ -381,7 +386,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Interrupt Mode */
- struct e1000_option opt = {
+ static struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Mode",
.err = "defaulting to 2 (MSI-X)",
@@ -399,7 +404,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Smart Power Down */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "PHY Smart Power Down",
.err = "defaulting to Disabled",
@@ -415,7 +420,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* CRC Stripping */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "CRC Stripping",
.err = "defaulting to enabled",
@@ -432,7 +437,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Kumeran Lock Loss Workaround */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "Kumeran Lock Loss Workaround",
.err = "defaulting to Enabled",
@@ -452,7 +457,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Write-protect NVM */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "Write-protect NVM",
.err = "defaulting to Enabled",
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 7f3ceb9..b4ac82d 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -3116,9 +3116,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
* e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
* @hw: pointer to the HW structure
*
- * Calls the PHY setup function to force speed and duplex. Clears the
- * auto-crossover to force MDI manually. Waits for link and returns
- * successful if link up is successful, else -E1000_ERR_PHY (-2).
+ * Calls the PHY setup function to force speed and duplex.
**/
s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
{
@@ -3137,23 +3135,6 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
if (ret_val)
goto out;
- /*
- * Clear Auto-Crossover to force MDI manually. 82577 requires MDI
- * forced whenever speed and duplex are forced.
- */
- ret_val = phy->ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
- if (ret_val)
- goto out;
-
- phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX;
- phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX;
-
- ret_val = phy->ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
- if (ret_val)
- goto out;
-
- e_dbg("I82577_PHY_CTRL_2: %X\n", phy_data);
-
udelay(1);
if (phy->autoneg_wait_to_complete) {
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index ca93c9a..06e72fb 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -328,7 +328,6 @@ e21_reset_8390(struct net_device *dev)
/* Set up the ASIC registers, just in case something changed them. */
if (ei_debug > 1) printk("reset done\n");
- return;
}
/* Grab the 8390 specific header. We put the 2k window so the header page
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 27c7bdb..8d97f16 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -645,7 +645,7 @@ static void __init printEEPROMInfo(struct net_device *dev)
if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE ");
if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC ");
if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI ");
- printk(KERN_DEBUG "port(s) \n");
+ printk(KERN_DEBUG "port(s)\n");
Word = lp->word[6];
printk(KERN_DEBUG "Word6:\n");
@@ -765,7 +765,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
/* Grab the region so we can find another board if autoIRQ fails. */
if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) {
if (!autoprobe)
- printk(KERN_WARNING "EEPRO: io-port 0x%04x in use \n",
+ printk(KERN_WARNING "EEPRO: io-port 0x%04x in use\n",
ioaddr);
return -EBUSY;
}
@@ -1161,8 +1161,7 @@ static netdev_tx_t eepro_send_packet(struct sk_buff *skb,
/* we won't wake queue here because we're out of space */
dev->stats.tx_dropped++;
else {
- dev->stats.tx_bytes+=skb->len;
- dev->trans_start = jiffies;
+ dev->stats.tx_bytes+=skb->len;
netif_wake_queue(dev);
}
@@ -1286,7 +1285,7 @@ set_multicast_list(struct net_device *dev)
struct eepro_local *lp = netdev_priv(dev);
short ioaddr = dev->base_addr;
unsigned short mode;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int mc_count = netdev_mc_count(dev);
if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63)
@@ -1331,8 +1330,8 @@ set_multicast_list(struct net_device *dev)
outw(0, ioaddr + IO_PORT);
outw(6 * (mc_count + 1), ioaddr + IO_PORT);
- netdev_for_each_mc_addr(dmi, dev) {
- eaddrs = (unsigned short *) dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (unsigned short *) ha->addr;
outw(*eaddrs++, ioaddr + IO_PORT);
outw(*eaddrs++, ioaddr + IO_PORT);
outw(*eaddrs++, ioaddr + IO_PORT);
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 1a7322b..12c37d2 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -543,7 +543,7 @@ static void unstick_cu(struct net_device *dev)
if (lp->started)
{
- if (time_after(jiffies, dev->trans_start + 50))
+ if (time_after(jiffies, dev_trans_start(dev) + HZ/2))
{
if (lp->tx_link==lp->last_tx_restart)
{
@@ -1018,7 +1018,7 @@ static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
outw(lp->tx_head+0x16, ioaddr + DATAPORT);
outw(0, ioaddr + DATAPORT);
- outsw(ioaddr + DATAPORT, buf, (len+1)>>1);
+ outsw(ioaddr + DATAPORT, buf, (len+1)>>1);
outw(lp->tx_tail+0xc, ioaddr + WRITE_PTR);
outw(lp->tx_head, ioaddr + DATAPORT);
@@ -1570,12 +1570,11 @@ static void eexp_hw_init586(struct net_device *dev)
#if NET_DEBUG > 6
printk("%s: leaving eexp_hw_init586()\n", dev->name);
#endif
- return;
}
static void eexp_setup_filter(struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned short ioaddr = dev->base_addr;
int count = netdev_mc_count(dev);
int i;
@@ -1588,8 +1587,8 @@ static void eexp_setup_filter(struct net_device *dev)
outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST));
i = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- unsigned short *data = (unsigned short *) dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned short *data = (unsigned short *) ha->addr;
if (i == count)
break;
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index fa311a9..0630980 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0102"
+#define DRV_VERSION "EHEA_0103"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 809ccc9..02698a1 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -791,11 +791,17 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
cqe_counter++;
rmb();
if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
- ehea_error("Send Completion Error: Resetting port");
+ ehea_error("Bad send completion status=0x%04X",
+ cqe->status);
+
if (netif_msg_tx_err(pr->port))
ehea_dump(cqe, sizeof(*cqe), "Send CQE");
- ehea_schedule_port_reset(pr->port);
- break;
+
+ if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
+ ehea_error("Resetting port");
+ ehea_schedule_port_reset(pr->port);
+ break;
+ }
}
if (netif_msg_tx_done(pr->port))
@@ -814,7 +820,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
quota--;
cqe = ehea_poll_cq(send_cq);
- };
+ }
ehea_update_feca(send_cq, cqe_counter);
atomic_add(swqe_av, &pr->swqe_avail);
@@ -901,6 +907,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
struct ehea_eqe *eqe;
struct ehea_qp *qp;
u32 qp_token;
+ u64 resource_type, aer, aerr;
+ int reset_port = 0;
eqe = ehea_poll_eq(port->qp_eq);
@@ -910,11 +918,24 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
eqe->entry, qp_token);
qp = port->port_res[qp_token].qp;
- ehea_error_data(port->adapter, qp->fw_handle);
+
+ resource_type = ehea_error_data(port->adapter, qp->fw_handle,
+ &aer, &aerr);
+
+ if (resource_type == EHEA_AER_RESTYPE_QP) {
+ if ((aer & EHEA_AER_RESET_MASK) ||
+ (aerr & EHEA_AERR_RESET_MASK))
+ reset_port = 1;
+ } else
+ reset_port = 1; /* Reset in case of CQ or EQ error */
+
eqe = ehea_poll_eq(port->qp_eq);
}
- ehea_schedule_port_reset(port);
+ if (reset_port) {
+ ehea_error("Resetting port");
+ ehea_schedule_port_reset(port);
+ }
return IRQ_HANDLED;
}
@@ -1618,7 +1639,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
{
struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
- int skb_data_size = skb->len - skb->data_len;
+ int skb_data_size = skb_headlen(skb);
int headersize;
/* Packet is TCP with TSO enabled */
@@ -1629,7 +1650,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
*/
headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
- skb_data_size = skb->len - skb->data_len;
+ skb_data_size = skb_headlen(skb);
if (skb_data_size >= headersize) {
/* copy immediate data */
@@ -1651,7 +1672,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
static void write_swqe2_nonTSO(struct sk_buff *skb,
struct ehea_swqe *swqe, u32 lkey)
{
- int skb_data_size = skb->len - skb->data_len;
+ int skb_data_size = skb_headlen(skb);
u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
@@ -1860,7 +1881,6 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
port->promisc = enable;
out:
free_page((unsigned long)cb7);
- return;
}
static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
@@ -1967,7 +1987,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
static void ehea_set_multicast_list(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
- struct dev_mc_list *k_mcl_entry;
+ struct netdev_hw_addr *ha;
int ret;
if (dev->flags & IFF_PROMISC) {
@@ -1998,13 +2018,12 @@ static void ehea_set_multicast_list(struct net_device *dev)
goto out;
}
- netdev_for_each_mc_addr(k_mcl_entry, dev)
- ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ ehea_add_multicast_entry(port, ha->addr);
}
out:
ehea_update_bcmc_registrations();
- return;
}
static int ehea_change_mtu(struct net_device *dev, int new_mtu)
@@ -2108,8 +2127,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
} else {
/* first copy data from the skb->data buffer ... */
skb_copy_from_linear_data(skb, imm_data,
- skb->len - skb->data_len);
- imm_data += skb->len - skb->data_len;
+ skb_headlen(skb));
+ imm_data += skb_headlen(skb);
/* ... then copy data from the fragments */
for (i = 0; i < nfrags; i++) {
@@ -2220,7 +2239,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
spin_unlock_irqrestore(&pr->netif_queue, flags);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
spin_unlock(&pr->xmit_lock);
return NETDEV_TX_OK;
@@ -2317,7 +2336,6 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
ehea_error("modify_ehea_port failed");
out:
free_page((unsigned long)cb1);
- return;
}
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
@@ -2860,7 +2878,6 @@ static void ehea_reset_port(struct work_struct *work)
netif_wake_queue(dev);
out:
mutex_unlock(&port->port_lock);
- return;
}
static void ehea_rereg_mrs(struct work_struct *work)
@@ -2868,7 +2885,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
int ret, i;
struct ehea_adapter *adapter;
- mutex_lock(&dlpar_mem_lock);
ehea_info("LPAR memory changed - re-initializing driver");
list_for_each_entry(adapter, &adapter_list, list)
@@ -2938,7 +2954,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
}
ehea_info("re-initializing driver complete");
out:
- mutex_unlock(&dlpar_mem_lock);
return;
}
@@ -3238,7 +3253,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
ehea_remove_adapter_mr(adapter);
i++;
- };
+ }
return 0;
}
@@ -3257,7 +3272,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
if (dn_log_port_id)
if (*dn_log_port_id == logical_port_id)
return eth_dn;
- };
+ }
return NULL;
}
@@ -3521,7 +3536,14 @@ void ehea_crash_handler(void)
static int ehea_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
+ int ret = NOTIFY_BAD;
struct memory_notify *arg = data;
+
+ if (!mutex_trylock(&dlpar_mem_lock)) {
+ ehea_info("ehea_mem_notifier must not be called parallelized");
+ goto out;
+ }
+
switch (action) {
case MEM_CANCEL_OFFLINE:
ehea_info("memory offlining canceled");
@@ -3530,14 +3552,14 @@ static int ehea_mem_notifier(struct notifier_block *nb,
ehea_info("memory is going online");
set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
- return NOTIFY_BAD;
+ goto out_unlock;
ehea_rereg_mrs(NULL);
break;
case MEM_GOING_OFFLINE:
ehea_info("memory is going offline");
set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
- return NOTIFY_BAD;
+ goto out_unlock;
ehea_rereg_mrs(NULL);
break;
default:
@@ -3545,8 +3567,12 @@ static int ehea_mem_notifier(struct notifier_block *nb,
}
ehea_update_firmware_handles();
+ ret = NOTIFY_OK;
- return NOTIFY_OK;
+out_unlock:
+ mutex_unlock(&dlpar_mem_lock);
+out:
+ return ret;
}
static struct notifier_block ehea_mem_nb = {
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index a1b4c7e..89128b63 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -229,14 +229,14 @@ u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
int ehea_destroy_cq(struct ehea_cq *cq)
{
- u64 hret;
+ u64 hret, aer, aerr;
if (!cq)
return 0;
hcp_epas_dtor(&cq->epas);
hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
if (hret == H_R_STATE) {
- ehea_error_data(cq->adapter, cq->fw_handle);
+ ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
hret = ehea_destroy_cq_res(cq, FORCE_FREE);
}
@@ -357,7 +357,7 @@ u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
int ehea_destroy_eq(struct ehea_eq *eq)
{
- u64 hret;
+ u64 hret, aer, aerr;
if (!eq)
return 0;
@@ -365,7 +365,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
if (hret == H_R_STATE) {
- ehea_error_data(eq->adapter, eq->fw_handle);
+ ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
hret = ehea_destroy_eq_res(eq, FORCE_FREE);
}
@@ -540,7 +540,7 @@ u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
int ehea_destroy_qp(struct ehea_qp *qp)
{
- u64 hret;
+ u64 hret, aer, aerr;
if (!qp)
return 0;
@@ -548,7 +548,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
if (hret == H_R_STATE) {
- ehea_error_data(qp->adapter, qp->fw_handle);
+ ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
hret = ehea_destroy_qp_res(qp, FORCE_FREE);
}
@@ -986,42 +986,45 @@ void print_error_data(u64 *data)
if (length > EHEA_PAGESIZE)
length = EHEA_PAGESIZE;
- if (type == 0x8) /* Queue Pair */
+ if (type == EHEA_AER_RESTYPE_QP)
ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, "
"port=%llX", resource, data[6], data[12], data[22]);
-
- if (type == 0x4) /* Completion Queue */
+ else if (type == EHEA_AER_RESTYPE_CQ)
ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource,
data[6]);
-
- if (type == 0x3) /* Event Queue */
+ else if (type == EHEA_AER_RESTYPE_EQ)
ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource,
data[6]);
ehea_dump(data, length, "error data");
}
-void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
+u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
+ u64 *aer, u64 *aerr)
{
unsigned long ret;
u64 *rblock;
+ u64 type = 0;
rblock = (void *)get_zeroed_page(GFP_KERNEL);
if (!rblock) {
ehea_error("Cannot allocate rblock memory.");
- return;
+ goto out;
}
- ret = ehea_h_error_data(adapter->handle,
- res_handle,
- rblock);
+ ret = ehea_h_error_data(adapter->handle, res_handle, rblock);
- if (ret == H_R_STATE)
- ehea_error("No error data is available: %llX.", res_handle);
- else if (ret == H_SUCCESS)
+ if (ret == H_SUCCESS) {
+ type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
+ *aer = rblock[6];
+ *aerr = rblock[12];
print_error_data(rblock);
- else
+ } else if (ret == H_R_STATE) {
+ ehea_error("No error data available: %llX.", res_handle);
+ } else
ehea_error("Error data could not be fetched: %llX", res_handle);
free_page((unsigned long)rblock);
+out:
+ return type;
}
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 0817c1e..882c50c 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -154,6 +154,9 @@ struct ehea_rwqe {
#define EHEA_CQE_STAT_ERR_IP 0x2000
#define EHEA_CQE_STAT_ERR_CRC 0x1000
+/* Defines which bad send cqe stati lead to a port reset */
+#define EHEA_CQE_STAT_RESET_MASK 0x0002
+
struct ehea_cqe {
u64 wr_id; /* work request ID from WQE */
u8 type;
@@ -187,6 +190,14 @@ struct ehea_cqe {
#define EHEA_EQE_SM_MECH_NUMBER EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
+#define EHEA_AER_RESTYPE_QP 0x8
+#define EHEA_AER_RESTYPE_CQ 0x4
+#define EHEA_AER_RESTYPE_EQ 0x3
+
+/* Defines which affiliated errors lead to a port reset */
+#define EHEA_AER_RESET_MASK 0xFFFFFFFFFEFFFFFFULL
+#define EHEA_AERR_RESET_MASK 0xFFFFFFFFFFFFFFFFULL
+
struct ehea_eqe {
u64 entry;
};
@@ -379,7 +390,8 @@ int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
int ehea_rem_mr(struct ehea_mr *mr);
-void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
+u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
+ u64 *aer, u64 *aerr);
int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
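With the resource-type codes and reset masks added above, ehea_error_data() now reports which resource failed and hands back the AER/AERR words, so the QP affinity interrupt handler resets the port only when those masks say so. A sketch of that decision, mirroring the ehea_main.c hunk earlier in this diff (names from the patch, body schematic only):

#include <linux/types.h>

/* Sketch only: port-reset decision built from the new ehea_qmr.h constants.
 * CQ and EQ errors always reset the port; QP errors reset it only if the
 * AER/AERR bits fall inside the reset masks.
 */
static int example_needs_port_reset(u64 type, u64 aer, u64 aerr)
{
	if (type != EHEA_AER_RESTYPE_QP)
		return 1;

	return (aer & EHEA_AER_RESET_MASK) || (aerr & EHEA_AERR_RESET_MASK);
}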
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index ff27f72..112c5aa 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1293,8 +1293,6 @@ static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb,
*/
netif_stop_queue(dev);
- /* save the timestamp */
- priv->netdev->trans_start = jiffies;
/* Remember the skb for deferred processing */
priv->tx_skb = skb;
schedule_work(&priv->tx_work);
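The enc28j60 hunk above, and the matching removals in epic100, eth16i, ethoc, ewrk3, fealnx and fec further down, drop the manual dev->trans_start = jiffies from the xmit path, since the core now stamps the tx queue itself. The assignment survives only in tx-timeout handlers (annotated "prevent tx timeout") and in the LLTX ehea driver, while readers such as eexpress switch to dev_trans_start(). A minimal sketch of the read side, assuming the 2.6.3x-era netdevice API:

#include <linux/netdevice.h>
#include <linux/jiffies.h>

/* Sketch only: testing for a stalled transmitter without touching
 * dev->trans_start directly.  dev_trans_start() reports the newest
 * per-queue trans_start timestamp.
 */
static bool example_tx_stalled(struct net_device *dev)
{
	return time_after(jiffies, dev_trans_start(dev) + HZ / 2);
}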
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index 391c3bc..e7b6c31 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
- enic_res.o vnic_dev.o vnic_rq.o
+ enic_res.o vnic_dev.o vnic_rq.o vnic_vic.o
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h
index 03dce9e..337d194 100644
--- a/drivers/net/enic/cq_enet_desc.h
+++ b/drivers/net/enic/cq_enet_desc.h
@@ -101,14 +101,18 @@ static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
{
- u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
- u16 q_number_rss_type_flags =
- le16_to_cpu(desc->q_number_rss_type_flags);
- u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+ u16 completed_index_flags;
+ u16 q_number_rss_type_flags;
+ u16 bytes_written_flags;
cq_desc_dec((struct cq_desc *)desc, type,
color, q_number, completed_index);
+ completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
*ingress_port = (completed_index_flags &
CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
*fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index ee01f5a..85f2a2e7 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -33,8 +33,8 @@
#include "vnic_rss.h"
#define DRV_NAME "enic"
-#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
-#define DRV_VERSION "1.1.0.241a"
+#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
+#define DRV_VERSION "1.3.1.1-pp"
#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc"
#define PFX DRV_NAME ": "
@@ -74,6 +74,13 @@ struct enic_msix_entry {
void *devid;
};
+struct enic_port_profile {
+ u8 request;
+ char name[PORT_PROFILE_MAX];
+ u8 instance_uuid[PORT_UUID_MAX];
+ u8 host_uuid[PORT_UUID_MAX];
+};
+
/* Per-instance private data structure */
struct enic {
struct net_device *netdev;
@@ -95,6 +102,7 @@ struct enic {
u32 port_mtu;
u32 rx_coalesce_usecs;
u32 tx_coalesce_usecs;
+ struct enic_port_profile pp;
/* work queue cache line section */
____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index cf098bb..e125113 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -29,6 +29,7 @@
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+#include <linux/if_link.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
@@ -40,6 +41,7 @@
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
+#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
@@ -49,10 +51,12 @@
#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
+#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
+ { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
{ 0, } /* end of table */
};
@@ -113,6 +117,11 @@ static const struct enic_stat enic_rx_stats[] = {
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+static int enic_is_dynamic(struct enic *enic)
+{
+ return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
+}
+
static int enic_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
@@ -810,26 +819,90 @@ static void enic_reset_mcaddrs(struct enic *enic)
static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
- if (!is_valid_ether_addr(addr))
- return -EADDRNOTAVAIL;
+ struct enic *enic = netdev_priv(netdev);
+
+ if (enic_is_dynamic(enic)) {
+ if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
+ return -EADDRNOTAVAIL;
+ } else {
+ if (!is_valid_ether_addr(addr))
+ return -EADDRNOTAVAIL;
+ }
memcpy(netdev->dev_addr, addr, netdev->addr_len);
return 0;
}
+static int enic_dev_add_station_addr(struct enic *enic)
+{
+ int err = 0;
+
+ if (is_valid_ether_addr(enic->netdev->dev_addr)) {
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
+ spin_unlock(&enic->devcmd_lock);
+ }
+
+ return err;
+}
+
+static int enic_dev_del_station_addr(struct enic *enic)
+{
+ int err = 0;
+
+ if (is_valid_ether_addr(enic->netdev->dev_addr)) {
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
+ spin_unlock(&enic->devcmd_lock);
+ }
+
+ return err;
+}
+
+static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct sockaddr *saddr = p;
+ char *addr = saddr->sa_data;
+ int err;
+
+ if (netif_running(enic->netdev)) {
+ err = enic_dev_del_station_addr(enic);
+ if (err)
+ return err;
+ }
+
+ err = enic_set_mac_addr(netdev, addr);
+ if (err)
+ return err;
+
+ if (netif_running(enic->netdev)) {
+ err = enic_dev_add_station_addr(enic);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int enic_set_mac_address(struct net_device *netdev, void *p)
+{
+ return -EOPNOTSUPP;
+}
+
/* netif_tx_lock held, BHs disabled */
static void enic_set_multicast_list(struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
int directed = 1;
int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
unsigned int mc_count = netdev_mc_count(netdev);
int allmulti = (netdev->flags & IFF_ALLMULTI) ||
- mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
+ mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
unsigned int i, j;
@@ -852,10 +925,10 @@ static void enic_set_multicast_list(struct net_device *netdev)
*/
i = 0;
- netdev_for_each_mc_addr(list, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
if (i == mc_count)
break;
- memcpy(mc_addr[i++], list->dmi_addr, ETH_ALEN);
+ memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
}
for (i = 0; i < enic->mc_count; i++) {
@@ -922,6 +995,213 @@ static void enic_tx_timeout(struct net_device *netdev)
schedule_work(&enic->reset);
}
+static int enic_vnic_dev_deinit(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_deinit(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_init_prov(enic->vdev,
+ (u8 *)vp, vic_provinfo_size(vp));
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+static int enic_dev_init_done(struct enic *enic, int *done, int *error)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_init_done(enic->vdev, done, error);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+static int enic_set_port_profile(struct enic *enic, u8 request, u8 *mac,
+ char *name, u8 *instance_uuid, u8 *host_uuid)
+{
+ struct vic_provinfo *vp;
+ u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
+ unsigned short *uuid;
+ char uuid_str[38];
+ static char *uuid_fmt = "%04X%04X-%04X-%04X-%04X-%04X%04X%04X";
+ int err;
+
+ if (!name)
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(mac))
+ return -EADDRNOTAVAIL;
+
+ vp = vic_provinfo_alloc(GFP_KERNEL, oui, VIC_PROVINFO_LINUX_TYPE);
+ if (!vp)
+ return -ENOMEM;
+
+ vic_provinfo_add_tlv(vp,
+ VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
+ strlen(name) + 1, name);
+
+ vic_provinfo_add_tlv(vp,
+ VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR,
+ ETH_ALEN, mac);
+
+ if (instance_uuid) {
+ uuid = (unsigned short *)instance_uuid;
+ sprintf(uuid_str, uuid_fmt,
+ uuid[0], uuid[1], uuid[2], uuid[3],
+ uuid[4], uuid[5], uuid[6], uuid[7]);
+ vic_provinfo_add_tlv(vp,
+ VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
+ sizeof(uuid_str), uuid_str);
+ }
+
+ if (host_uuid) {
+ uuid = (unsigned short *)host_uuid;
+ sprintf(uuid_str, uuid_fmt,
+ uuid[0], uuid[1], uuid[2], uuid[3],
+ uuid[4], uuid[5], uuid[6], uuid[7]);
+ vic_provinfo_add_tlv(vp,
+ VIC_LINUX_PROV_TLV_HOST_UUID_STR,
+ sizeof(uuid_str), uuid_str);
+ }
+
+ err = enic_vnic_dev_deinit(enic);
+ if (err)
+ goto err_out;
+
+ memset(&enic->pp, 0, sizeof(enic->pp));
+
+ err = enic_dev_init_prov(enic, vp);
+ if (err)
+ goto err_out;
+
+ enic->pp.request = request;
+ memcpy(enic->pp.name, name, PORT_PROFILE_MAX);
+ if (instance_uuid)
+ memcpy(enic->pp.instance_uuid,
+ instance_uuid, PORT_UUID_MAX);
+ if (host_uuid)
+ memcpy(enic->pp.host_uuid,
+ host_uuid, PORT_UUID_MAX);
+
+err_out:
+ vic_provinfo_free(vp);
+
+ return err;
+}
+
+static int enic_unset_port_profile(struct enic *enic)
+{
+ memset(&enic->pp, 0, sizeof(enic->pp));
+ return enic_vnic_dev_deinit(enic);
+}
+
+static int enic_set_vf_port(struct net_device *netdev, int vf,
+ struct nlattr *port[])
+{
+ struct enic *enic = netdev_priv(netdev);
+ char *name = NULL;
+ u8 *instance_uuid = NULL;
+ u8 *host_uuid = NULL;
+ u8 request = PORT_REQUEST_DISASSOCIATE;
+
+ /* don't support VFs, yet */
+ if (vf != PORT_SELF_VF)
+ return -EOPNOTSUPP;
+
+ if (port[IFLA_PORT_REQUEST])
+ request = nla_get_u8(port[IFLA_PORT_REQUEST]);
+
+ switch (request) {
+ case PORT_REQUEST_ASSOCIATE:
+
+ if (port[IFLA_PORT_PROFILE])
+ name = nla_data(port[IFLA_PORT_PROFILE]);
+
+ if (port[IFLA_PORT_INSTANCE_UUID])
+ instance_uuid =
+ nla_data(port[IFLA_PORT_INSTANCE_UUID]);
+
+ if (port[IFLA_PORT_HOST_UUID])
+ host_uuid = nla_data(port[IFLA_PORT_HOST_UUID]);
+
+ return enic_set_port_profile(enic, request,
+ netdev->dev_addr, name,
+ instance_uuid, host_uuid);
+
+ case PORT_REQUEST_DISASSOCIATE:
+
+ return enic_unset_port_profile(enic);
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int enic_get_vf_port(struct net_device *netdev, int vf,
+ struct sk_buff *skb)
+{
+ struct enic *enic = netdev_priv(netdev);
+ int err, error, done;
+ u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
+
+ /* don't support VFs, yet */
+ if (vf != PORT_SELF_VF)
+ return -EOPNOTSUPP;
+
+ err = enic_dev_init_done(enic, &done, &error);
+
+ if (err)
+ return err;
+
+ switch (error) {
+ case ERR_SUCCESS:
+ if (!done)
+ response = PORT_PROFILE_RESPONSE_INPROGRESS;
+ break;
+ case ERR_EINVAL:
+ response = PORT_PROFILE_RESPONSE_INVALID;
+ break;
+ case ERR_EBADSTATE:
+ response = PORT_PROFILE_RESPONSE_BADSTATE;
+ break;
+ case ERR_ENOMEM:
+ response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
+ break;
+ default:
+ response = PORT_PROFILE_RESPONSE_ERROR;
+ break;
+ }
+
+ NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
+ NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
+ NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
+ enic->pp.name);
+ NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
+ enic->pp.instance_uuid);
+ NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
+ enic->pp.host_uuid);
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
struct enic *enic = vnic_dev_priv(rq->vdev);
@@ -1440,9 +1720,7 @@ static int enic_open(struct net_device *netdev)
for (i = 0; i < enic->rq_count; i++)
vnic_rq_enable(&enic->rq[i]);
- spin_lock(&enic->devcmd_lock);
- enic_add_station_addr(enic);
- spin_unlock(&enic->devcmd_lock);
+ enic_dev_add_station_addr(enic);
enic_set_multicast_list(netdev);
netif_wake_queue(netdev);
@@ -1489,6 +1767,8 @@ static int enic_stop(struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
+ enic_dev_del_station_addr(enic);
+
for (i = 0; i < enic->wq_count; i++) {
err = vnic_wq_disable(&enic->wq[i]);
if (err)
@@ -1774,14 +2054,34 @@ static void enic_clear_intr_mode(struct enic *enic)
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
+static const struct net_device_ops enic_netdev_dynamic_ops = {
+ .ndo_open = enic_open,
+ .ndo_stop = enic_stop,
+ .ndo_start_xmit = enic_hard_start_xmit,
+ .ndo_get_stats = enic_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_multicast_list = enic_set_multicast_list,
+ .ndo_set_mac_address = enic_set_mac_address_dynamic,
+ .ndo_change_mtu = enic_change_mtu,
+ .ndo_vlan_rx_register = enic_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
+ .ndo_tx_timeout = enic_tx_timeout,
+ .ndo_set_vf_port = enic_set_vf_port,
+ .ndo_get_vf_port = enic_get_vf_port,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = enic_poll_controller,
+#endif
+};
+
static const struct net_device_ops enic_netdev_ops = {
.ndo_open = enic_open,
.ndo_stop = enic_stop,
.ndo_start_xmit = enic_hard_start_xmit,
.ndo_get_stats = enic_get_stats,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
.ndo_set_multicast_list = enic_set_multicast_list,
+ .ndo_set_mac_address = enic_set_mac_address,
.ndo_change_mtu = enic_change_mtu,
.ndo_vlan_rx_register = enic_vlan_rx_register,
.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
@@ -2010,11 +2310,13 @@ static int __devinit enic_probe(struct pci_dev *pdev,
netif_carrier_off(netdev);
- err = vnic_dev_init(enic->vdev, 0);
- if (err) {
- printk(KERN_ERR PFX
- "vNIC dev init failed, aborting.\n");
- goto err_out_dev_close;
+ if (!enic_is_dynamic(enic)) {
+ err = vnic_dev_init(enic->vdev, 0);
+ if (err) {
+ printk(KERN_ERR PFX
+ "vNIC dev init failed, aborting.\n");
+ goto err_out_dev_close;
+ }
}
err = enic_dev_init(enic);
@@ -2054,12 +2356,15 @@ static int __devinit enic_probe(struct pci_dev *pdev,
enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
- netdev->netdev_ops = &enic_netdev_ops;
+ if (enic_is_dynamic(enic))
+ netdev->netdev_ops = &enic_netdev_dynamic_ops;
+ else
+ netdev->netdev_ops = &enic_netdev_ops;
+
netdev->watchdog_timeo = 2 * HZ;
netdev->ethtool_ops = &enic_ethtool_ops;
- netdev->features |= NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
if (ENIC_SETTING(enic, TXCSUM))
netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
if (ENIC_SETTING(enic, TSO))
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 02839bf..9b18840 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -103,11 +103,6 @@ int enic_get_vnic_config(struct enic *enic)
return 0;
}
-void enic_add_station_addr(struct enic *enic)
-{
- vnic_dev_add_addr(enic->vdev, enic->mac_addr);
-}
-
void enic_add_multicast_addr(struct enic *enic, u8 *addr)
{
vnic_dev_add_addr(enic->vdev, addr);
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index abc1974..494664f 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -131,7 +131,6 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
struct enic;
int enic_get_vnic_config(struct enic *);
-void enic_add_station_addr(struct enic *enic);
void enic_add_multicast_addr(struct enic *enic, u8 *addr);
void enic_del_multicast_addr(struct enic *enic, u8 *addr);
void enic_add_vlan(struct enic *enic, u16 vlanid);
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index cf22de7..2b3e16d 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -530,7 +530,7 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
printk(KERN_ERR "Can't set packet filter\n");
}
-void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
+int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
@@ -543,9 +543,11 @@ void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
if (err)
printk(KERN_ERR "Can't add addr [%pM], %d\n", addr, err);
+
+ return err;
}
-void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
+int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
@@ -558,6 +560,8 @@ void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
if (err)
printk(KERN_ERR "Can't del addr [%pM], %d\n", addr, err);
+
+ return err;
}
int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
@@ -574,22 +578,18 @@ int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
return err;
}
-int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+ void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
u64 a0, a1;
int wait = 1000;
int r;
- if (!vdev->notify) {
- vdev->notify = pci_alloc_consistent(vdev->pdev,
- sizeof(struct vnic_devcmd_notify),
- &vdev->notify_pa);
- if (!vdev->notify)
- return -ENOMEM;
- memset(vdev->notify, 0, sizeof(struct vnic_devcmd_notify));
- }
+ memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
+ vdev->notify = notify_addr;
+ vdev->notify_pa = notify_pa;
- a0 = vdev->notify_pa;
+ a0 = (u64)notify_pa;
a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
a1 += sizeof(struct vnic_devcmd_notify);
@@ -598,7 +598,27 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
return r;
}
-void vnic_dev_notify_unset(struct vnic_dev *vdev)
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+{
+ void *notify_addr;
+ dma_addr_t notify_pa;
+
+ if (vdev->notify || vdev->notify_pa) {
+ printk(KERN_ERR "notify block %p still allocated",
+ vdev->notify);
+ return -EINVAL;
+ }
+
+ notify_addr = pci_alloc_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ &notify_pa);
+ if (!notify_addr)
+ return -ENOMEM;
+
+ return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
+}
+
+void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
u64 a0, a1;
int wait = 1000;
@@ -608,9 +628,23 @@ void vnic_dev_notify_unset(struct vnic_dev *vdev)
a1 += sizeof(struct vnic_devcmd_notify);
vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+ vdev->notify = NULL;
+ vdev->notify_pa = 0;
vdev->notify_sz = 0;
}
+void vnic_dev_notify_unset(struct vnic_dev *vdev)
+{
+ if (vdev->notify) {
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify,
+ vdev->notify_pa);
+ }
+
+ vnic_dev_notify_unsetcmd(vdev);
+}
+
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
u32 *words;
@@ -652,6 +686,56 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
return r;
}
+int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ *done = 0;
+
+ ret = vnic_dev_cmd(vdev, CMD_INIT_STATUS, &a0, &a1, wait);
+ if (ret)
+ return ret;
+
+ *done = (a0 == 0);
+
+ *err = (a0 == 0) ? a1 : 0;
+
+ return 0;
+}
+
+int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len)
+{
+ u64 a0, a1 = len;
+ int wait = 1000;
+ u64 prov_pa;
+ void *prov_buf;
+ int ret;
+
+ prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
+ if (!prov_buf)
+ return -ENOMEM;
+
+ memcpy(prov_buf, buf, len);
+
+ a0 = prov_pa;
+
+ ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO, &a0, &a1, wait);
+
+ pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);
+
+ return ret;
+}
+
+int vnic_dev_deinit(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
+}
+
int vnic_dev_link_status(struct vnic_dev *vdev)
{
if (vdev->linkstatus)
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index fc5e3eb..caccce3 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -103,11 +103,14 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
int vnic_dev_hang_notify(struct vnic_dev *vdev);
void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti);
-void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
-void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr);
+int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+ void *notify_addr, dma_addr_t notify_pa, u16 intr);
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
+void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
void vnic_dev_notify_unset(struct vnic_dev *vdev);
int vnic_dev_link_status(struct vnic_dev *vdev);
u32 vnic_dev_port_speed(struct vnic_dev *vdev);
@@ -121,6 +124,9 @@ int vnic_dev_disable(struct vnic_dev *vdev);
int vnic_dev_open(struct vnic_dev *vdev, int arg);
int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
int vnic_dev_init(struct vnic_dev *vdev, int arg);
+int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err);
+int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len);
+int vnic_dev_deinit(struct vnic_dev *vdev);
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
index e186efa..cc580cf 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/enic/vnic_rq.c
@@ -168,10 +168,10 @@ int vnic_rq_disable(struct vnic_rq *rq)
iowrite32(0, &rq->ctrl->enable);
/* Wait for HW to ACK disable request */
- for (wait = 0; wait < 100; wait++) {
+ for (wait = 0; wait < 1000; wait++) {
if (!(ioread32(&rq->ctrl->running)))
return 0;
- udelay(1);
+ udelay(10);
}
printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c
new file mode 100644
index 0000000..d769772
--- /dev/null
+++ b/drivers/net/enic/vnic_vic.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include "vnic_vic.h"
+
+struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type)
+{
+ struct vic_provinfo *vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags);
+
+ if (!vp || !oui)
+ return NULL;
+
+ memcpy(vp->oui, oui, sizeof(vp->oui));
+ vp->type = type;
+ vp->length = htonl(sizeof(vp->num_tlvs));
+
+ return vp;
+}
+
+void vic_provinfo_free(struct vic_provinfo *vp)
+{
+ kfree(vp);
+}
+
+int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
+ void *value)
+{
+ struct vic_provinfo_tlv *tlv;
+
+ if (!vp || !value)
+ return -EINVAL;
+
+ if (ntohl(vp->length) + sizeof(*tlv) + length >
+ VIC_PROVINFO_MAX_TLV_DATA)
+ return -ENOMEM;
+
+ tlv = (struct vic_provinfo_tlv *)((u8 *)vp->tlv +
+ ntohl(vp->length) - sizeof(vp->num_tlvs));
+
+ tlv->type = htons(type);
+ tlv->length = htons(length);
+ memcpy(tlv->value, value, length);
+
+ vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
+ vp->length = htonl(ntohl(vp->length) + sizeof(*tlv) + length);
+
+ return 0;
+}
+
+size_t vic_provinfo_size(struct vic_provinfo *vp)
+{
+ return vp ? ntohl(vp->length) + sizeof(*vp) - sizeof(vp->num_tlvs) : 0;
+}
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h
new file mode 100644
index 0000000..085c2a2
--- /dev/null
+++ b/drivers/net/enic/vnic_vic.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_VIC_H_
+#define _VNIC_VIC_H_
+
+/* Note: All integer fields in NETWORK byte order */
+
+/* Note: String field lengths include null char */
+
+#define VIC_PROVINFO_CISCO_OUI { 0x00, 0x00, 0x0c }
+#define VIC_PROVINFO_LINUX_TYPE 0x2
+
+enum vic_linux_prov_tlv_type {
+ VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR = 0,
+ VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR = 1, /* u8[6] */
+ VIC_LINUX_PROV_TLV_CLIENT_NAME_STR = 2,
+ VIC_LINUX_PROV_TLV_HOST_UUID_STR = 8,
+ VIC_LINUX_PROV_TLV_CLIENT_UUID_STR = 9,
+};
+
+struct vic_provinfo {
+ u8 oui[3]; /* OUI of data provider */
+ u8 type; /* provider-specific type */
+ u32 length; /* length of data below */
+ u32 num_tlvs; /* number of tlvs */
+ struct vic_provinfo_tlv {
+ u16 type;
+ u16 length;
+ u8 value[0];
+ } tlv[0];
+} __attribute__ ((packed));
+
+#define VIC_PROVINFO_MAX_DATA 1385
+#define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \
+ sizeof(struct vic_provinfo))
+
+struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type);
+void vic_provinfo_free(struct vic_provinfo *vp);
+int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
+ void *value);
+size_t vic_provinfo_size(struct vic_provinfo *vp);
+
+#endif /* _VNIC_VIC_H_ */
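The provisioning-info block declared here is a packed header followed by network-byte-order TLVs; enic_set_port_profile() above is the real consumer. A stripped-down sketch of the alloc/add/size/free cycle, assuming only the helpers declared in this header (error paths trimmed):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "vnic_vic.h"

/* Sketch only: build a two-TLV provisioning block.  The resulting
 * vic_provinfo_size(vp) bytes are what enic hands to the firmware via
 * vnic_dev_init_prov().
 */
static int example_build_provinfo(char *profile_name, u8 *mac)
{
	u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
	struct vic_provinfo *vp;
	int err;

	vp = vic_provinfo_alloc(GFP_KERNEL, oui, VIC_PROVINFO_LINUX_TYPE);
	if (!vp)
		return -ENOMEM;

	err = vic_provinfo_add_tlv(vp, VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
				   strlen(profile_name) + 1, profile_name);
	if (!err)
		err = vic_provinfo_add_tlv(vp, VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR,
					   ETH_ALEN, mac);

	vic_provinfo_free(vp);
	return err;
}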
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
index d5f9843..1378afb 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/enic/vnic_wq.c
@@ -161,10 +161,10 @@ int vnic_wq_disable(struct vnic_wq *wq)
iowrite32(0, &wq->ctrl->enable);
/* Wait for HW to ACK disable request */
- for (wait = 0; wait < 100; wait++) {
+ for (wait = 0; wait < 1000; wait++) {
if (!(ioread32(&wq->ctrl->running)))
return 0;
- udelay(1);
+ udelay(10);
}
printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 7a567201..6838dfc 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -652,7 +652,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
break;
}
- return;
}
@@ -840,7 +839,6 @@ static void epic_restart(struct net_device *dev)
" interrupt %4.4x.\n",
dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
(int)inl(ioaddr + INTSTAT));
- return;
}
static void check_media(struct net_device *dev)
@@ -908,7 +906,7 @@ static void epic_tx_timeout(struct net_device *dev)
outl(TxQueued, dev->base_addr + COMMAND);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
ep->stats.tx_errors++;
if (!ep->tx_full)
netif_wake_queue(dev);
@@ -958,7 +956,6 @@ static void epic_init_ring(struct net_device *dev)
(i+1)*sizeof(struct epic_tx_desc);
}
ep->tx_ring[i-1].next = ep->tx_ring_dma;
- return;
}
static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -1006,7 +1003,6 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Trigger an immediate transmit demand. */
outl(TxQueued, dev->base_addr + COMMAND);
- dev->trans_start = jiffies;
if (debug > 4)
printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
"flag %2.2x Tx status %8.8x.\n",
@@ -1399,12 +1395,12 @@ static void set_rx_mode(struct net_device *dev)
outl(0x0004, ioaddr + RxCtrl);
return;
} else { /* Never executed, for now. */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
unsigned int bit_nr =
- ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[bit_nr >> 3] |= (1 << bit_nr);
}
}
@@ -1414,7 +1410,6 @@ static void set_rx_mode(struct net_device *dev)
outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
}
- return;
}
static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index b34a2dd..dda2c79 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -288,7 +288,7 @@ static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return eql_s_master_cfg(dev, ifr->ifr_data);
default:
return -EOPNOTSUPP;
- };
+ }
}
/* queue->lock must be held */
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
index 5569f2f..0ba5e7b 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/es3210.c
@@ -319,8 +319,6 @@ static void es_reset_8390(struct net_device *dev)
ei_status.txing = 0;
outb(0x01, ioaddr + ES_RESET_PORT);
if (ei_debug > 1) printk("reset done\n");
-
- return;
}
/*
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index d4e24f0..874973f 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1027,7 +1027,7 @@ static void eth16i_timeout(struct net_device *dev)
inw(ioaddr + TX_STATUS_REG), (inb(ioaddr + TX_STATUS_REG) & TX_DONE) ?
"IRQ conflict" : "network cable problem");
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Let's dump all registers */
if(eth16i_debug > 0) {
@@ -1047,7 +1047,7 @@ static void eth16i_timeout(struct net_device *dev)
}
dev->stats.tx_errors++;
eth16i_reset(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
netif_wake_queue(dev);
}
@@ -1109,7 +1109,6 @@ static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev)
outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
lp->tx_queue = 0;
lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
lp->tx_started = 1;
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index a8d9250..14cbde5 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -756,7 +756,7 @@ static void ethoc_set_multicast_list(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
u32 mode = ethoc_read(priv, MODER);
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
u32 hash[2] = { 0, 0 };
/* set loopback mode if requested */
@@ -784,8 +784,8 @@ static void ethoc_set_multicast_list(struct net_device *dev)
hash[0] = 0xffffffff;
hash[1] = 0xffffffff;
} else {
- netdev_for_each_mc_addr(mc, dev) {
- u32 crc = ether_crc(ETH_ALEN, mc->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 crc = ether_crc(ETH_ALEN, ha->addr);
int bit = (crc >> 26) & 0x3f;
hash[bit >> 5] |= 1 << (bit & 0x1f);
}
@@ -851,7 +851,6 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
- dev->trans_start = jiffies;
spin_unlock_irq(&priv->lock);
out:
dev_kfree_skb(skb);
@@ -1040,7 +1039,6 @@ static int ethoc_probe(struct platform_device *pdev)
netdev->features |= 0;
/* setup NAPI */
- memset(&priv->napi, 0, sizeof(priv->napi));
netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
spin_lock_init(&priv->rx_lock);
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index 91e59f3..380d061 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -757,7 +757,7 @@ static void ewrk3_timeout(struct net_device *dev)
*/
ENABLE_IRQs;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
}
@@ -862,7 +862,6 @@ static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irq (&lp->hw_lock);
dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
/* Check for free resources: stop Tx queue if there are none */
@@ -1169,7 +1168,7 @@ static void set_multicast_list(struct net_device *dev)
static void SetMulticastFilter(struct net_device *dev)
{
struct ewrk3_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u_long iobase = dev->base_addr;
int i;
char *addrs, bit, byte;
@@ -1213,8 +1212,8 @@ static void SetMulticastFilter(struct net_device *dev)
}
/* Update table */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc_le(ETH_ALEN, addrs);
hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
@@ -1370,8 +1369,6 @@ static void __init EthwrkSignature(char *name, char *eeprom_image)
name[EWRK3_STRLEN] = '\0';
} else
name[0] = '\0';
-
- return;
}
/*
@@ -1776,8 +1773,7 @@ static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
break;
case EWRK3_SET_MCA: /* Set a multicast address */
if (capable(CAP_NET_ADMIN)) {
- if (ioc->len > 1024)
- {
+ if (ioc->len > HASH_TABLE_LEN) {
status = -EINVAL;
break;
}
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index d11ae51..15f4f8d 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1233,7 +1233,7 @@ static void fealnx_tx_timeout(struct net_device *dev)
spin_unlock_irqrestore(&np->lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
np->stats.tx_errors++;
netif_wake_queue(dev); /* or .._start_.. ?? */
}
@@ -1374,7 +1374,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
++np->really_tx_count;
iowrite32(0, np->mem + TXPDR);
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&np->lock, flags);
return NETDEV_TX_OK;
@@ -1791,12 +1790,12 @@ static void __set_rx_mode(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = CR_W_AB | CR_W_AM;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
unsigned int bit;
- bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
+ bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
mc_filter[bit >> 5] |= (1 << bit);
}
rx_mode = CR_W_AB | CR_W_AM;
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 9b4e8f7..42d9ac9 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -40,6 +40,7 @@
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/phy.h>
#include <asm/cacheflush.h>
@@ -61,7 +62,6 @@
* Define the fixed address of the FEC hardware.
*/
#if defined(CONFIG_M5272)
-#define HAVE_mii_link_interrupt
static unsigned char fec_mac_default[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -86,23 +86,6 @@ static unsigned char fec_mac_default[] = {
#endif
#endif /* CONFIG_M5272 */
-/* Forward declarations of some structures to support different PHYs */
-
-typedef struct {
- uint mii_data;
- void (*funct)(uint mii_reg, struct net_device *dev);
-} phy_cmd_t;
-
-typedef struct {
- uint id;
- char *name;
-
- const phy_cmd_t *config;
- const phy_cmd_t *startup;
- const phy_cmd_t *ack_int;
- const phy_cmd_t *shutdown;
-} phy_info_t;
-
/* The number of Tx and Rx buffers. These are allocated from the page
* pool. The code may assume these are power of two, so it it best
* to keep them that size.
@@ -189,29 +172,21 @@ struct fec_enet_private {
uint tx_full;
/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
spinlock_t hw_lock;
- /* hold while accessing the mii_list_t() elements */
- spinlock_t mii_lock;
-
- uint phy_id;
- uint phy_id_done;
- uint phy_status;
- uint phy_speed;
- phy_info_t const *phy;
- struct work_struct phy_task;
- uint sequence_done;
- uint mii_phy_task_queued;
+ struct platform_device *pdev;
- uint phy_addr;
+ int opened;
+ /* Phylib and MDIO interface */
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ int mii_timeout;
+ uint phy_speed;
int index;
- int opened;
int link;
- int old_link;
int full_duplex;
};
-static void fec_enet_mii(struct net_device *dev);
static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
@@ -219,67 +194,20 @@ static int fec_enet_close(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);
+/* FEC MII MMFR bits definition */
+#define FEC_MMFR_ST (1 << 30)
+#define FEC_MMFR_OP_READ (2 << 28)
+#define FEC_MMFR_OP_WRITE (1 << 28)
+#define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
+#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
+#define FEC_MMFR_TA (2 << 16)
+#define FEC_MMFR_DATA(v) (v & 0xffff)
-/* MII processing. We keep this as simple as possible. Requests are
- * placed on the list (if there is room). When the request is finished
- * by the MII, an optional function may be called.
- */
-typedef struct mii_list {
- uint mii_regval;
- void (*mii_func)(uint val, struct net_device *dev);
- struct mii_list *mii_next;
-} mii_list_t;
-
-#define NMII 20
-static mii_list_t mii_cmds[NMII];
-static mii_list_t *mii_free;
-static mii_list_t *mii_head;
-static mii_list_t *mii_tail;
-
-static int mii_queue(struct net_device *dev, int request,
- void (*func)(uint, struct net_device *));
-
-/* Make MII read/write commands for the FEC */
-#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
-#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \
- (VAL & 0xffff))
-#define mk_mii_end 0
+#define FEC_MII_TIMEOUT 10000
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
-/* Register definitions for the PHY */
-
-#define MII_REG_CR 0 /* Control Register */
-#define MII_REG_SR 1 /* Status Register */
-#define MII_REG_PHYIR1 2 /* PHY Identification Register 1 */
-#define MII_REG_PHYIR2 3 /* PHY Identification Register 2 */
-#define MII_REG_ANAR 4 /* A-N Advertisement Register */
-#define MII_REG_ANLPAR 5 /* A-N Link Partner Ability Register */
-#define MII_REG_ANER 6 /* A-N Expansion Register */
-#define MII_REG_ANNPTR 7 /* A-N Next Page Transmit Register */
-#define MII_REG_ANLPRNPR 8 /* A-N Link Partner Received Next Page Reg. */
-
-/* values for phy_status */
-
-#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */
-#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */
-#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */
-#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */
-#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */
-#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */
-#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */
-
-#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */
-#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */
-#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */
-#define PHY_STAT_SPMASK 0xf000 /* mask for speed */
-#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */
-#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */
-#define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */
-#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */
-
-
static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -347,8 +275,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
| BD_ENET_TX_LAST | BD_ENET_TX_TC);
bdp->cbd_sc = status;
- dev->trans_start = jiffies;
-
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
@@ -406,12 +332,6 @@ fec_enet_interrupt(int irq, void * dev_id)
ret = IRQ_HANDLED;
fec_enet_tx(dev);
}
-
- if (int_events & FEC_ENET_MII) {
- ret = IRQ_HANDLED;
- fec_enet_mii(dev);
- }
-
} while (int_events);
return ret;
@@ -607,827 +527,311 @@ rx_processing_done:
spin_unlock(&fep->hw_lock);
}
-/* called from interrupt context */
-static void
-fec_enet_mii(struct net_device *dev)
-{
- struct fec_enet_private *fep;
- mii_list_t *mip;
-
- fep = netdev_priv(dev);
- spin_lock(&fep->mii_lock);
-
- if ((mip = mii_head) == NULL) {
- printk("MII and no head!\n");
- goto unlock;
- }
-
- if (mip->mii_func != NULL)
- (*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev);
-
- mii_head = mip->mii_next;
- mip->mii_next = mii_free;
- mii_free = mip;
-
- if ((mip = mii_head) != NULL)
- writel(mip->mii_regval, fep->hwp + FEC_MII_DATA);
-
-unlock:
- spin_unlock(&fep->mii_lock);
-}
-
-static int
-mii_queue_unlocked(struct net_device *dev, int regval,
- void (*func)(uint, struct net_device *))
+/* ------------------------------------------------------------------------- */
+#ifdef CONFIG_M5272
+static void __inline__ fec_get_mac(struct net_device *dev)
{
- struct fec_enet_private *fep;
- mii_list_t *mip;
- int retval;
-
- /* Add PHY address to register command */
- fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(dev);
+ unsigned char *iap, tmpaddr[ETH_ALEN];
- regval |= fep->phy_addr << 23;
- retval = 0;
-
- if ((mip = mii_free) != NULL) {
- mii_free = mip->mii_next;
- mip->mii_regval = regval;
- mip->mii_func = func;
- mip->mii_next = NULL;
- if (mii_head) {
- mii_tail->mii_next = mip;
- mii_tail = mip;
- } else {
- mii_head = mii_tail = mip;
- writel(regval, fep->hwp + FEC_MII_DATA);
- }
+ if (FEC_FLASHMAC) {
+ /*
+ * Get MAC address from FLASH.
+ * If it is all 1's or 0's, use the default.
+ */
+ iap = (unsigned char *)FEC_FLASHMAC;
+ if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
+ (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
+ iap = fec_mac_default;
+ if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
+ (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
+ iap = fec_mac_default;
} else {
- retval = 1;
+ *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
+ *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+ iap = &tmpaddr[0];
}
- return retval;
-}
-
-static int
-mii_queue(struct net_device *dev, int regval,
- void (*func)(uint, struct net_device *))
-{
- struct fec_enet_private *fep;
- unsigned long flags;
- int retval;
- fep = netdev_priv(dev);
- spin_lock_irqsave(&fep->mii_lock, flags);
- retval = mii_queue_unlocked(dev, regval, func);
- spin_unlock_irqrestore(&fep->mii_lock, flags);
- return retval;
-}
-
-static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
-{
- if(!c)
- return;
+ memcpy(dev->dev_addr, iap, ETH_ALEN);
- for (; c->mii_data != mk_mii_end; c++)
- mii_queue(dev, c->mii_data, c->funct);
+ /* Adjust MAC if using default MAC address */
+ if (iap == fec_mac_default)
+ dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}
+#endif
-static void mii_parse_sr(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
-
- if (mii_reg & 0x0004)
- status |= PHY_STAT_LINK;
- if (mii_reg & 0x0010)
- status |= PHY_STAT_FAULT;
- if (mii_reg & 0x0020)
- status |= PHY_STAT_ANC;
- *s = status;
-}
+/* ------------------------------------------------------------------------- */
-static void mii_parse_cr(uint mii_reg, struct net_device *dev)
+/*
+ * Phy section
+ */
+static void fec_enet_adjust_link(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP);
-
- if (mii_reg & 0x1000)
- status |= PHY_CONF_ANE;
- if (mii_reg & 0x4000)
- status |= PHY_CONF_LOOP;
- *s = status;
-}
+ struct phy_device *phy_dev = fep->phy_dev;
+ unsigned long flags;
-static void mii_parse_anar(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_CONF_SPMASK);
-
- if (mii_reg & 0x0020)
- status |= PHY_CONF_10HDX;
- if (mii_reg & 0x0040)
- status |= PHY_CONF_10FDX;
- if (mii_reg & 0x0080)
- status |= PHY_CONF_100HDX;
- if (mii_reg & 0x00100)
- status |= PHY_CONF_100FDX;
- *s = status;
-}
+ int status_change = 0;
-/* ------------------------------------------------------------------------- */
-/* The Level one LXT970 is used by many boards */
+ spin_lock_irqsave(&fep->hw_lock, flags);
-#define MII_LXT970_MIRROR 16 /* Mirror register */
-#define MII_LXT970_IER 17 /* Interrupt Enable Register */
-#define MII_LXT970_ISR 18 /* Interrupt Status Register */
-#define MII_LXT970_CONFIG 19 /* Configuration Register */
-#define MII_LXT970_CSR 20 /* Chip Status Register */
+ /* Prevent a state halted on mii error */
+ if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
+ phy_dev->state = PHY_RESUMING;
+ goto spin_unlock;
+ }
-static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
+ /* Duplex link change */
+ if (phy_dev->link) {
+ if (fep->full_duplex != phy_dev->duplex) {
+ fec_restart(dev, phy_dev->duplex);
+ status_change = 1;
+ }
+ }
- status = *s & ~(PHY_STAT_SPMASK);
- if (mii_reg & 0x0800) {
- if (mii_reg & 0x1000)
- status |= PHY_STAT_100FDX;
- else
- status |= PHY_STAT_100HDX;
- } else {
- if (mii_reg & 0x1000)
- status |= PHY_STAT_10FDX;
+ /* Link on or off change */
+ if (phy_dev->link != fep->link) {
+ fep->link = phy_dev->link;
+ if (phy_dev->link)
+ fec_restart(dev, phy_dev->duplex);
else
- status |= PHY_STAT_10HDX;
+ fec_stop(dev);
+ status_change = 1;
}
- *s = status;
-}
-static phy_cmd_t const phy_cmd_lxt970_config[] = {
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt970_ack_int[] = {
- /* read SR and ISR to acknowledge */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_read(MII_LXT970_ISR), NULL },
-
- /* find out the current status */
- { mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_lxt970 = {
- .id = 0x07810000,
- .name = "LXT970",
- .config = phy_cmd_lxt970_config,
- .startup = phy_cmd_lxt970_startup,
- .ack_int = phy_cmd_lxt970_ack_int,
- .shutdown = phy_cmd_lxt970_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* The Level one LXT971 is used on some of my custom boards */
-
-/* register definitions for the 971 */
+spin_unlock:
+ spin_unlock_irqrestore(&fep->hw_lock, flags);
-#define MII_LXT971_PCR 16 /* Port Control Register */
-#define MII_LXT971_SR2 17 /* Status Register 2 */
-#define MII_LXT971_IER 18 /* Interrupt Enable Register */
-#define MII_LXT971_ISR 19 /* Interrupt Status Register */
-#define MII_LXT971_LCR 20 /* LED Control Register */
-#define MII_LXT971_TCR 30 /* Transmit Control Register */
+ if (status_change)
+ phy_print_status(phy_dev);
+}
/*
- * I had some nice ideas of running the MDIO faster...
- * The 971 should support 8MHz and I tried it, but things acted really
- * weird, so 2.5 MHz ought to be enough for anyone...
+ * NOTE: an MII transaction takes around 25 us, so we simply poll for completion.
*/
-
-static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
+static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
+ struct fec_enet_private *fep = bus->priv;
+ int timeout = FEC_MII_TIMEOUT;
- status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);
+ fep->mii_timeout = 0;
- if (mii_reg & 0x0400) {
- fep->link = 1;
- status |= PHY_STAT_LINK;
- } else {
- fep->link = 0;
- }
- if (mii_reg & 0x0080)
- status |= PHY_STAT_ANC;
- if (mii_reg & 0x4000) {
- if (mii_reg & 0x0200)
- status |= PHY_STAT_100FDX;
- else
- status |= PHY_STAT_100HDX;
- } else {
- if (mii_reg & 0x0200)
- status |= PHY_STAT_10FDX;
- else
- status |= PHY_STAT_10HDX;
+ /* clear MII end of transfer bit */
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
+
+ /* start a read op */
+ writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) {
+ cpu_relax();
+ if (timeout-- < 0) {
+ fep->mii_timeout = 1;
+ printk(KERN_ERR "FEC: MDIO read timeout\n");
+ return -ETIMEDOUT;
+ }
}
- if (mii_reg & 0x0008)
- status |= PHY_STAT_FAULT;
- *s = status;
+ /* return value */
+ return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
}
-static phy_cmd_t const phy_cmd_lxt971_config[] = {
- /* limit to 10MBit because my prototype board
- * doesn't work with 100. */
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt971_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
- /* Somehow does the 971 tell me that the link is down
- * the first read after power-up.
- * read here to get a valid value in ack_int */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt971_ack_int[] = {
- /* acknowledge the int before reading status ! */
- { mk_mii_read(MII_LXT971_ISR), NULL },
- /* find out the current status */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_lxt971 = {
- .id = 0x0001378e,
- .name = "LXT971",
- .config = phy_cmd_lxt971_config,
- .startup = phy_cmd_lxt971_startup,
- .ack_int = phy_cmd_lxt971_ack_int,
- .shutdown = phy_cmd_lxt971_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* The Quality Semiconductor QS6612 is used on the RPX CLLF */
-
-/* register definitions */
-
-#define MII_QS6612_MCR 17 /* Mode Control Register */
-#define MII_QS6612_FTR 27 /* Factory Test Register */
-#define MII_QS6612_MCO 28 /* Misc. Control Register */
-#define MII_QS6612_ISR 29 /* Interrupt Source Register */
-#define MII_QS6612_IMR 30 /* Interrupt Mask Register */
-#define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */
-
-static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
+static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_STAT_SPMASK);
+ struct fec_enet_private *fep = bus->priv;
+ int timeout = FEC_MII_TIMEOUT;
- switch((mii_reg >> 2) & 7) {
- case 1: status |= PHY_STAT_10HDX; break;
- case 2: status |= PHY_STAT_100HDX; break;
- case 5: status |= PHY_STAT_10FDX; break;
- case 6: status |= PHY_STAT_100FDX; break;
-}
-
- *s = status;
-}
+ fep->mii_timeout = 0;
-static phy_cmd_t const phy_cmd_qs6612_config[] = {
- /* The PHY powers up isolated on the RPX,
- * so send a command to allow operation.
- */
- { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },
-
- /* parse cr and anar to get some info */
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_qs6612_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_qs6612_ack_int[] = {
- /* we need to read ISR, SR and ANER to acknowledge */
- { mk_mii_read(MII_QS6612_ISR), NULL },
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_read(MII_REG_ANER), NULL },
-
- /* read pcr to get info */
- { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_qs6612 = {
- .id = 0x00181440,
- .name = "QS6612",
- .config = phy_cmd_qs6612_config,
- .startup = phy_cmd_qs6612_startup,
- .ack_int = phy_cmd_qs6612_ack_int,
- .shutdown = phy_cmd_qs6612_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* AMD AM79C874 phy */
+ /* clear MII end of transfer bit */
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
-/* register definitions for the 874 */
+ /* start a write op */
+ writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ FEC_MMFR_TA | FEC_MMFR_DATA(value),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) {
+ cpu_relax();
+ if (timeout-- < 0) {
+ fep->mii_timeout = 1;
+ printk(KERN_ERR "FEC: MDIO write timeout\n");
+ return -ETIMEDOUT;
+ }
+ }
-#define MII_AM79C874_MFR 16 /* Miscellaneous Feature Register */
-#define MII_AM79C874_ICSR 17 /* Interrupt/Status Register */
-#define MII_AM79C874_DR 18 /* Diagnostic Register */
-#define MII_AM79C874_PMLR 19 /* Power and Loopback Register */
-#define MII_AM79C874_MCR 21 /* ModeControl Register */
-#define MII_AM79C874_DC 23 /* Disconnect Counter */
-#define MII_AM79C874_REC 24 /* Recieve Error Counter */
+ return 0;
+}
-static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev)
+static int fec_enet_mdio_reset(struct mii_bus *bus)
{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC);
-
- if (mii_reg & 0x0080)
- status |= PHY_STAT_ANC;
- if (mii_reg & 0x0400)
- status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
- else
- status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);
-
- *s = status;
+ return 0;
}
-static phy_cmd_t const phy_cmd_am79c874_config[] = {
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_am79c874_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_am79c874_ack_int[] = {
- /* find out the current status */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
- /* we only need to read ISR to acknowledge */
- { mk_mii_read(MII_AM79C874_ICSR), NULL },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_am79c874 = {
- .id = 0x00022561,
- .name = "AM79C874",
- .config = phy_cmd_am79c874_config,
- .startup = phy_cmd_am79c874_startup,
- .ack_int = phy_cmd_am79c874_ack_int,
- .shutdown = phy_cmd_am79c874_shutdown
-};
-
-
-/* ------------------------------------------------------------------------- */
-/* Kendin KS8721BL phy */
-
-/* register definitions for the 8721 */
-
-#define MII_KS8721BL_RXERCR 21
-#define MII_KS8721BL_ICSR 27
-#define MII_KS8721BL_PHYCR 31
-
-static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_ks8721bl_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = {
- /* find out the current status */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- /* we only need to read ISR to acknowledge */
- { mk_mii_read(MII_KS8721BL_ICSR), NULL },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_ks8721bl = {
- .id = 0x00022161,
- .name = "KS8721BL",
- .config = phy_cmd_ks8721bl_config,
- .startup = phy_cmd_ks8721bl_startup,
- .ack_int = phy_cmd_ks8721bl_ack_int,
- .shutdown = phy_cmd_ks8721bl_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* register definitions for the DP83848 */
-
-#define MII_DP8384X_PHYSTST 16 /* PHY Status Register */
-
-static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev)
+static int fec_enet_mii_probe(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
-
- *s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);
-
- /* Link up */
- if (mii_reg & 0x0001) {
- fep->link = 1;
- *s |= PHY_STAT_LINK;
- } else
- fep->link = 0;
- /* Status of link */
- if (mii_reg & 0x0010) /* Autonegotioation complete */
- *s |= PHY_STAT_ANC;
- if (mii_reg & 0x0002) { /* 10MBps? */
- if (mii_reg & 0x0004) /* Full Duplex? */
- *s |= PHY_STAT_10FDX;
- else
- *s |= PHY_STAT_10HDX;
- } else { /* 100 Mbps? */
- if (mii_reg & 0x0004) /* Full Duplex? */
- *s |= PHY_STAT_100FDX;
- else
- *s |= PHY_STAT_100HDX;
- }
- if (mii_reg & 0x0008)
- *s |= PHY_STAT_FAULT;
-}
+ struct phy_device *phy_dev = NULL;
+ int phy_addr;
-static phy_info_t phy_info_dp83848= {
- 0x020005c9,
- "DP83848",
-
- (const phy_cmd_t []) { /* config */
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 },
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* startup - enable interrupts */
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* ack_int - never happens, no interrupt */
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* shutdown */
- { mk_mii_end, }
- },
-};
+ /* find the first phy */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ if (fep->mii_bus->phy_map[phy_addr]) {
+ phy_dev = fep->mii_bus->phy_map[phy_addr];
+ break;
+ }
+ }
-static phy_info_t phy_info_lan8700 = {
- 0x0007C0C,
- "LAN8700",
- (const phy_cmd_t []) { /* config */
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* startup */
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* act_int */
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* shutdown */
- { mk_mii_end, }
- },
-};
-/* ------------------------------------------------------------------------- */
+ if (!phy_dev) {
+ printk(KERN_ERR "%s: no PHY found\n", dev->name);
+ return -ENODEV;
+ }
-static phy_info_t const * const phy_info[] = {
- &phy_info_lxt970,
- &phy_info_lxt971,
- &phy_info_qs6612,
- &phy_info_am79c874,
- &phy_info_ks8721bl,
- &phy_info_dp83848,
- &phy_info_lan8700,
- NULL
-};
+ /* attach the mac to the phy */
+ phy_dev = phy_connect(dev, dev_name(&phy_dev->dev),
+ &fec_enet_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phy_dev)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phy_dev);
+ }
-/* ------------------------------------------------------------------------- */
-#ifdef HAVE_mii_link_interrupt
-static irqreturn_t
-mii_link_interrupt(int irq, void * dev_id);
+ /* mask with MAC supported features */
+ phy_dev->supported &= PHY_BASIC_FEATURES;
+ phy_dev->advertising = phy_dev->supported;
-/*
- * This is specific to the MII interrupt setup of the M5272EVB.
- */
-static void __inline__ fec_request_mii_intr(struct net_device *dev)
-{
- if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0)
- printk("FEC: Could not allocate fec(MII) IRQ(66)!\n");
-}
+ fep->phy_dev = phy_dev;
+ fep->link = 0;
+ fep->full_duplex = 0;
-static void __inline__ fec_disable_phy_intr(struct net_device *dev)
-{
- free_irq(66, dev);
+ return 0;
}
-#endif
-#ifdef CONFIG_M5272
-static void __inline__ fec_get_mac(struct net_device *dev)
+static int fec_enet_mii_init(struct platform_device *pdev)
{
+ struct net_device *dev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(dev);
- unsigned char *iap, tmpaddr[ETH_ALEN];
+ int err = -ENXIO, i;
- if (FEC_FLASHMAC) {
- /*
- * Get MAC address from FLASH.
- * If it is all 1's or 0's, use the default.
- */
- iap = (unsigned char *)FEC_FLASHMAC;
- if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
- (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
- iap = fec_mac_default;
- if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
- (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
- iap = fec_mac_default;
- } else {
- *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
- *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
- iap = &tmpaddr[0];
- }
+ fep->mii_timeout = 0;
- memcpy(dev->dev_addr, iap, ETH_ALEN);
-
- /* Adjust MAC if using default MAC address */
- if (iap == fec_mac_default)
- dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
-}
-#endif
-
-/* ------------------------------------------------------------------------- */
-
-static void mii_display_status(struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
+ /*
+ * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
+ */
+ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000) << 1;
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
- if (!fep->link && !fep->old_link) {
- /* Link is still down - don't print anything */
- return;
+ fep->mii_bus = mdiobus_alloc();
+ if (fep->mii_bus == NULL) {
+ err = -ENOMEM;
+ goto err_out;
}
- printk("%s: status: ", dev->name);
-
- if (!fep->link) {
- printk("link down");
- } else {
- printk("link up");
-
- switch(*s & PHY_STAT_SPMASK) {
- case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break;
- case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break;
- case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break;
- case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break;
- default:
- printk(", Unknown speed/duplex");
- }
-
- if (*s & PHY_STAT_ANC)
- printk(", auto-negotiation complete");
+ fep->mii_bus->name = "fec_enet_mii_bus";
+ fep->mii_bus->read = fec_enet_mdio_read;
+ fep->mii_bus->write = fec_enet_mdio_write;
+ fep->mii_bus->reset = fec_enet_mdio_reset;
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+ fep->mii_bus->priv = fep;
+ fep->mii_bus->parent = &pdev->dev;
+
+ fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!fep->mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_out_free_mdiobus;
}
- if (*s & PHY_STAT_FAULT)
- printk(", remote fault");
-
- printk(".\n");
-}
-
-static void mii_display_config(struct work_struct *work)
-{
- struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
- struct net_device *dev = fep->netdev;
- uint status = fep->phy_status;
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ fep->mii_bus->irq[i] = PHY_POLL;
- /*
- ** When we get here, phy_task is already removed from
- ** the workqueue. It is thus safe to allow to reuse it.
- */
- fep->mii_phy_task_queued = 0;
- printk("%s: config: auto-negotiation ", dev->name);
-
- if (status & PHY_CONF_ANE)
- printk("on");
- else
- printk("off");
+ platform_set_drvdata(dev, fep->mii_bus);
- if (status & PHY_CONF_100FDX)
- printk(", 100FDX");
- if (status & PHY_CONF_100HDX)
- printk(", 100HDX");
- if (status & PHY_CONF_10FDX)
- printk(", 10FDX");
- if (status & PHY_CONF_10HDX)
- printk(", 10HDX");
- if (!(status & PHY_CONF_SPMASK))
- printk(", No speed/duplex selected?");
+ if (mdiobus_register(fep->mii_bus))
+ goto err_out_free_mdio_irq;
- if (status & PHY_CONF_LOOP)
- printk(", loopback enabled");
+ if (fec_enet_mii_probe(dev) != 0)
+ goto err_out_unregister_bus;
- printk(".\n");
+ return 0;
- fep->sequence_done = 1;
+err_out_unregister_bus:
+ mdiobus_unregister(fep->mii_bus);
+err_out_free_mdio_irq:
+ kfree(fep->mii_bus->irq);
+err_out_free_mdiobus:
+ mdiobus_free(fep->mii_bus);
+err_out:
+ return err;
}
-static void mii_relink(struct work_struct *work)
+static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
- struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
- struct net_device *dev = fep->netdev;
- int duplex;
-
- /*
- ** When we get here, phy_task is already removed from
- ** the workqueue. It is thus safe to allow to reuse it.
- */
- fep->mii_phy_task_queued = 0;
- fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
- mii_display_status(dev);
- fep->old_link = fep->link;
-
- if (fep->link) {
- duplex = 0;
- if (fep->phy_status
- & (PHY_STAT_100FDX | PHY_STAT_10FDX))
- duplex = 1;
- fec_restart(dev, duplex);
- } else
- fec_stop(dev);
+ if (fep->phy_dev)
+ phy_disconnect(fep->phy_dev);
+ mdiobus_unregister(fep->mii_bus);
+ kfree(fep->mii_bus->irq);
+ mdiobus_free(fep->mii_bus);
}
-/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
-static void mii_queue_relink(uint mii_reg, struct net_device *dev)
+static int fec_enet_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phy_dev;
- /*
- * We cannot queue phy_task twice in the workqueue. It
- * would cause an endless loop in the workqueue.
- * Fortunately, if the last mii_relink entry has not yet been
- * executed now, it will do the job for the current interrupt,
- * which is just what we want.
- */
- if (fep->mii_phy_task_queued)
- return;
+ if (!phydev)
+ return -ENODEV;
- fep->mii_phy_task_queued = 1;
- INIT_WORK(&fep->phy_task, mii_relink);
- schedule_work(&fep->phy_task);
+ return phy_ethtool_gset(phydev, cmd);
}
-/* mii_queue_config is called in interrupt context from fec_enet_mii */
-static void mii_queue_config(uint mii_reg, struct net_device *dev)
+static int fec_enet_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phy_dev;
- if (fep->mii_phy_task_queued)
- return;
+ if (!phydev)
+ return -ENODEV;
- fep->mii_phy_task_queued = 1;
- INIT_WORK(&fep->phy_task, mii_display_config);
- schedule_work(&fep->phy_task);
+ return phy_ethtool_sset(phydev, cmd);
}
-phy_cmd_t const phy_cmd_relink[] = {
- { mk_mii_read(MII_REG_CR), mii_queue_relink },
- { mk_mii_end, }
- };
-phy_cmd_t const phy_cmd_config[] = {
- { mk_mii_read(MII_REG_CR), mii_queue_config },
- { mk_mii_end, }
- };
-
-/* Read remainder of PHY ID. */
-static void
-mii_discover_phy3(uint mii_reg, struct net_device *dev)
+static void fec_enet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
- struct fec_enet_private *fep;
- int i;
-
- fep = netdev_priv(dev);
- fep->phy_id |= (mii_reg & 0xffff);
- printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);
-
- for(i = 0; phy_info[i]; i++) {
- if(phy_info[i]->id == (fep->phy_id >> 4))
- break;
- }
-
- if (phy_info[i])
- printk(" -- %s\n", phy_info[i]->name);
- else
- printk(" -- unknown PHY!\n");
+ struct fec_enet_private *fep = netdev_priv(dev);
- fep->phy = phy_info[i];
- fep->phy_id_done = 1;
+ strcpy(info->driver, fep->pdev->dev.driver->name);
+ strcpy(info->version, "Revision: 1.0");
+ strcpy(info->bus_info, dev_name(&dev->dev));
}
-/* Scan all of the MII PHY addresses looking for someone to respond
- * with a valid ID. This usually happens quickly.
- */
-static void
-mii_discover_phy(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep;
- uint phytype;
-
- fep = netdev_priv(dev);
-
- if (fep->phy_addr < 32) {
- if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {
-
- /* Got first part of ID, now get remainder */
- fep->phy_id = phytype << 16;
- mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR2),
- mii_discover_phy3);
- } else {
- fep->phy_addr++;
- mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR1),
- mii_discover_phy);
- }
- } else {
- printk("FEC: No PHY device found.\n");
- /* Disable external MII interface */
- writel(0, fep->hwp + FEC_MII_SPEED);
- fep->phy_speed = 0;
-#ifdef HAVE_mii_link_interrupt
- fec_disable_phy_intr(dev);
-#endif
- }
-}
+static struct ethtool_ops fec_enet_ethtool_ops = {
+ .get_settings = fec_enet_get_settings,
+ .set_settings = fec_enet_set_settings,
+ .get_drvinfo = fec_enet_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
-/* This interrupt occurs when the PHY detects a link change */
-#ifdef HAVE_mii_link_interrupt
-static irqreturn_t
-mii_link_interrupt(int irq, void * dev_id)
+static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct net_device *dev = dev_id;
struct fec_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phy_dev;
+
+ if (!netif_running(dev))
+ return -EINVAL;
- mii_do_cmd(dev, fep->phy->ack_int);
- mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */
+ if (!phydev)
+ return -ENODEV;
- return IRQ_HANDLED;
+ return phy_mii_ioctl(phydev, if_mii(rq), cmd);
}
-#endif
static void fec_enet_free_buffers(struct net_device *dev)
{
@@ -1509,35 +913,8 @@ fec_enet_open(struct net_device *dev)
if (ret)
return ret;
- fep->sequence_done = 0;
- fep->link = 0;
-
- fec_restart(dev, 1);
-
- if (fep->phy) {
- mii_do_cmd(dev, fep->phy->ack_int);
- mii_do_cmd(dev, fep->phy->config);
- mii_do_cmd(dev, phy_cmd_config); /* display configuration */
-
- /* Poll until the PHY tells us its configuration
- * (not link state).
- * Request is initiated by mii_do_cmd above, but answer
- * comes by interrupt.
- * This should take about 25 usec per register at 2.5 MHz,
- * and we read approximately 5 registers.
- */
- while(!fep->sequence_done)
- schedule();
-
- mii_do_cmd(dev, fep->phy->startup);
- }
-
- /* Set the initial link state to true. A lot of hardware
- * based on this device does not implement a PHY interrupt,
- * so we are never notified of link change.
- */
- fep->link = 1;
-
+ /* schedule a link state check */
+ phy_start(fep->phy_dev);
netif_start_queue(dev);
fep->opened = 1;
return 0;
@@ -1550,6 +927,7 @@ fec_enet_close(struct net_device *dev)
/* Don't know what to do yet. */
fep->opened = 0;
+ phy_stop(fep->phy_dev);
netif_stop_queue(dev);
fec_stop(dev);
@@ -1574,7 +952,7 @@ fec_enet_close(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned int i, bit, data, crc, tmp;
unsigned char hash;
@@ -1604,16 +982,16 @@ static void set_multicast_list(struct net_device *dev)
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* Only support group multicast for now */
- if (!(dmi->dmi_addr[0] & 1))
+ if (!(ha->addr[0] & 1))
continue;
/* calculate crc32 value of mac address */
crc = 0xffffffff;
- for (i = 0; i < dmi->dmi_addrlen; i++) {
- data = dmi->dmi_addr[i];
+ for (i = 0; i < dev->addr_len; i++) {
+ data = ha->addr[i];
for (bit = 0; bit < 8; bit++, data >>= 1) {
crc = (crc >> 1) ^
(((crc ^ data) & 1) ? CRC32_POLY : 0);
@@ -1666,6 +1044,7 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
+ .ndo_do_ioctl = fec_enet_ioctl,
};
/*
@@ -1689,7 +1068,6 @@ static int fec_enet_init(struct net_device *dev, int index)
}
spin_lock_init(&fep->hw_lock);
- spin_lock_init(&fep->mii_lock);
fep->index = index;
fep->hwp = (void __iomem *)dev->base_addr;
@@ -1716,20 +1094,10 @@ static int fec_enet_init(struct net_device *dev, int index)
fep->rx_bd_base = cbd_base;
fep->tx_bd_base = cbd_base + RX_RING_SIZE;
-#ifdef HAVE_mii_link_interrupt
- fec_request_mii_intr(dev);
-#endif
/* The FEC Ethernet specific entries in the device structure */
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &fec_netdev_ops;
-
- for (i=0; i<NMII-1; i++)
- mii_cmds[i].mii_next = &mii_cmds[i+1];
- mii_free = mii_cmds;
-
- /* Set MII speed to 2.5 MHz */
- fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
- / 2500000) / 2) & 0x3F) << 1;
+ dev->ethtool_ops = &fec_enet_ethtool_ops;
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
@@ -1760,13 +1128,6 @@ static int fec_enet_init(struct net_device *dev, int index)
fec_restart(dev, 0);
- /* Queue up command to detect the PHY and initialize the
- * remainder of the interface.
- */
- fep->phy_id_done = 0;
- fep->phy_addr = 0;
- mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);
-
return 0;
}
@@ -1835,8 +1196,7 @@ fec_restart(struct net_device *dev, int duplex)
writel(0, fep->hwp + FEC_R_DES_ACTIVE);
/* Enable interrupts we wish to service */
- writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
- fep->hwp + FEC_IMASK);
+ writel(FEC_ENET_TXF | FEC_ENET_RXF, fep->hwp + FEC_IMASK);
}
static void
@@ -1859,7 +1219,6 @@ fec_stop(struct net_device *dev)
/* Clear outstanding MII command interrupts. */
writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
- writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
}
@@ -1891,6 +1250,7 @@ fec_probe(struct platform_device *pdev)
memset(fep, 0, sizeof(*fep));
ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
+ fep->pdev = pdev;
if (!ndev->base_addr) {
ret = -ENOMEM;
@@ -1926,13 +1286,24 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_init;
+ ret = fec_enet_mii_init(pdev);
+ if (ret)
+ goto failed_mii_init;
+
ret = register_netdev(ndev);
if (ret)
goto failed_register;
+ printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
+ fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
+ fep->phy_dev->irq);
+
return 0;
failed_register:
+ fec_enet_mii_remove(fep);
+failed_mii_init:
failed_init:
clk_disable(fep->clk);
clk_put(fep->clk);
@@ -1959,6 +1330,7 @@ fec_drv_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
fec_stop(ndev);
+ fec_enet_mii_remove(fep);
clk_disable(fep->clk);
clk_put(fep->clk);
iounmap((void __iomem *)ndev->base_addr);
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 4a43e56..221f440 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -327,7 +327,6 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
spin_lock_irqsave(&priv->lock, flags);
- dev->trans_start = jiffies;
bd = (struct bcom_fec_bd *)
bcom_prepare_next_buffer(priv->tx_dmatsk);
@@ -436,7 +435,6 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
DMA_FROM_DEVICE);
length = status & BCOM_FEC_RX_BD_LEN_MASK;
skb_put(rskb, length - 4); /* length without CRC32 */
- rskb->dev = dev;
rskb->protocol = eth_type_trans(rskb, dev);
netif_rx(rskb);
@@ -576,12 +574,12 @@ static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
out_be32(&fec->gaddr2, 0xffffffff);
} else {
u32 crc;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u32 gaddr1 = 0x00000000;
u32 gaddr2 = 0x00000000;
- netdev_for_each_mc_addr(dmi, dev) {
- crc = ether_crc_le(6, dmi->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr) >> 26;
if (crc >= 32)
gaddr1 |= 1 << (crc-32);
else
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5c98f7c..268ea4d 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1104,20 +1104,16 @@ static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
static void nv_napi_enable(struct net_device *dev)
{
-#ifdef CONFIG_FORCEDETH_NAPI
struct fe_priv *np = get_nvpriv(dev);
napi_enable(&np->napi);
-#endif
}
static void nv_napi_disable(struct net_device *dev)
{
-#ifdef CONFIG_FORCEDETH_NAPI
struct fe_priv *np = get_nvpriv(dev);
napi_disable(&np->napi);
-#endif
}
#define MII_READ (-1)
@@ -1810,7 +1806,6 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
}
/* If rx bufs are exhausted called after 50ms to attempt to refresh */
-#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
@@ -1819,41 +1814,6 @@ static void nv_do_rx_refill(unsigned long data)
/* Just reschedule NAPI rx processing */
napi_schedule(&np->napi);
}
-#else
-static void nv_do_rx_refill(unsigned long data)
-{
- struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
- int retcode;
-
- if (!using_multi_irqs(dev)) {
- if (np->msi_flags & NV_MSI_X_ENABLED)
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
- else
- disable_irq(np->pci_dev->irq);
- } else {
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
- }
- if (!nv_optimized(np))
- retcode = nv_alloc_rx(dev);
- else
- retcode = nv_alloc_rx_optimized(dev);
- if (retcode) {
- spin_lock_irq(&np->lock);
- if (!np->in_shutdown)
- mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock_irq(&np->lock);
- }
- if (!using_multi_irqs(dev)) {
- if (np->msi_flags & NV_MSI_X_ENABLED)
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
- else
- enable_irq(np->pci_dev->irq);
- } else {
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
- }
-}
-#endif
static void nv_init_rx(struct net_device *dev)
{
@@ -2148,7 +2108,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int i;
u32 offset = 0;
u32 bcnt;
- u32 size = skb->len-skb->data_len;
+ u32 size = skb_headlen(skb);
u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
u32 empty_slots;
struct ring_desc* put_tx;
@@ -2254,7 +2214,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
dprintk("\n");
}
- dev->trans_start = jiffies;
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
return NETDEV_TX_OK;
}
@@ -2269,7 +2228,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
unsigned int i;
u32 offset = 0;
u32 bcnt;
- u32 size = skb->len-skb->data_len;
+ u32 size = skb_headlen(skb);
u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
u32 empty_slots;
struct ring_desc_ex* put_tx;
@@ -2409,7 +2368,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
dprintk("\n");
}
- dev->trans_start = jiffies;
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
return NETDEV_TX_OK;
}
@@ -2816,11 +2774,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
dev->name, len, skb->protocol);
-#ifdef CONFIG_FORCEDETH_NAPI
- netif_receive_skb(skb);
-#else
- netif_rx(skb);
-#endif
+ napi_gro_receive(&np->napi, skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
next_pkt:
@@ -2909,27 +2863,14 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
dev->name, len, skb->protocol);
if (likely(!np->vlangrp)) {
-#ifdef CONFIG_FORCEDETH_NAPI
- netif_receive_skb(skb);
-#else
- netif_rx(skb);
-#endif
+ napi_gro_receive(&np->napi, skb);
} else {
vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
-#ifdef CONFIG_FORCEDETH_NAPI
- vlan_hwaccel_receive_skb(skb, np->vlangrp,
- vlanflags & NV_RX3_VLAN_TAG_MASK);
-#else
- vlan_hwaccel_rx(skb, np->vlangrp,
- vlanflags & NV_RX3_VLAN_TAG_MASK);
-#endif
+ vlan_gro_receive(&np->napi, np->vlangrp,
+ vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
} else {
-#ifdef CONFIG_FORCEDETH_NAPI
- netif_receive_skb(skb);
-#else
- netif_rx(skb);
-#endif
+ napi_gro_receive(&np->napi, skb);
}
}
@@ -3104,12 +3045,14 @@ static void nv_set_multicast(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
} else {
- struct dev_mc_list *walk;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(walk, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned char *addr = ha->addr;
u32 a, b;
- a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
- b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
+
+ a = le32_to_cpu(*(__le32 *) addr);
+ b = le16_to_cpu(*(__le16 *) (&addr[4]));
alwaysOn[0] &= a;
alwaysOff[0] &= ~a;
alwaysOn[1] &= b;
@@ -3494,10 +3437,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
-#ifndef CONFIG_FORCEDETH_NAPI
- int total_work = 0;
- int loop_count = 0;
-#endif
dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
@@ -3514,7 +3453,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
nv_msi_workaround(np);
-#ifdef CONFIG_FORCEDETH_NAPI
if (napi_schedule_prep(&np->napi)) {
/*
* Disable further irq's (msix not enabled with napi)
@@ -3523,65 +3461,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
__napi_schedule(&np->napi);
}
-#else
- do
- {
- int work = 0;
- if ((work = nv_rx_process(dev, RX_WORK_PER_LOOP))) {
- if (unlikely(nv_alloc_rx(dev))) {
- spin_lock(&np->lock);
- if (!np->in_shutdown)
- mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock(&np->lock);
- }
- }
-
- spin_lock(&np->lock);
- work += nv_tx_done(dev, TX_WORK_PER_LOOP);
- spin_unlock(&np->lock);
-
- if (!work)
- break;
-
- total_work += work;
-
- loop_count++;
- }
- while (loop_count < max_interrupt_work);
-
- if (nv_change_interrupt_mode(dev, total_work)) {
- /* setup new irq mask */
- writel(np->irqmask, base + NvRegIrqMask);
- }
-
- if (unlikely(np->events & NVREG_IRQ_LINK)) {
- spin_lock(&np->lock);
- nv_link_irq(dev);
- spin_unlock(&np->lock);
- }
- if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
- spin_lock(&np->lock);
- nv_linkchange(dev);
- spin_unlock(&np->lock);
- np->link_timeout = jiffies + LINK_TIMEOUT;
- }
- if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
- spin_lock(&np->lock);
- /* disable interrupts on the nic */
- if (!(np->msi_flags & NV_MSI_X_ENABLED))
- writel(0, base + NvRegIrqMask);
- else
- writel(np->irqmask, base + NvRegIrqMask);
- pci_push(base);
-
- if (!np->in_shutdown) {
- np->nic_poll_irq = np->irqmask;
- np->recover_error = 1;
- mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
- }
- spin_unlock(&np->lock);
- }
-#endif
dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
return IRQ_HANDLED;
@@ -3597,10 +3476,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
-#ifndef CONFIG_FORCEDETH_NAPI
- int total_work = 0;
- int loop_count = 0;
-#endif
dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
@@ -3617,7 +3492,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
nv_msi_workaround(np);
-#ifdef CONFIG_FORCEDETH_NAPI
if (napi_schedule_prep(&np->napi)) {
/*
* Disable further irq's (msix not enabled with napi)
@@ -3625,66 +3499,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
writel(0, base + NvRegIrqMask);
__napi_schedule(&np->napi);
}
-#else
- do
- {
- int work = 0;
- if ((work = nv_rx_process_optimized(dev, RX_WORK_PER_LOOP))) {
- if (unlikely(nv_alloc_rx_optimized(dev))) {
- spin_lock(&np->lock);
- if (!np->in_shutdown)
- mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock(&np->lock);
- }
- }
-
- spin_lock(&np->lock);
- work += nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
- spin_unlock(&np->lock);
-
- if (!work)
- break;
-
- total_work += work;
-
- loop_count++;
- }
- while (loop_count < max_interrupt_work);
-
- if (nv_change_interrupt_mode(dev, total_work)) {
- /* setup new irq mask */
- writel(np->irqmask, base + NvRegIrqMask);
- }
-
- if (unlikely(np->events & NVREG_IRQ_LINK)) {
- spin_lock(&np->lock);
- nv_link_irq(dev);
- spin_unlock(&np->lock);
- }
- if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
- spin_lock(&np->lock);
- nv_linkchange(dev);
- spin_unlock(&np->lock);
- np->link_timeout = jiffies + LINK_TIMEOUT;
- }
- if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
- spin_lock(&np->lock);
- /* disable interrupts on the nic */
- if (!(np->msi_flags & NV_MSI_X_ENABLED))
- writel(0, base + NvRegIrqMask);
- else
- writel(np->irqmask, base + NvRegIrqMask);
- pci_push(base);
-
- if (!np->in_shutdown) {
- np->nic_poll_irq = np->irqmask;
- np->recover_error = 1;
- mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
- }
- spin_unlock(&np->lock);
- }
-
-#endif
dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
return IRQ_HANDLED;
@@ -3733,7 +3547,6 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
return IRQ_RETVAL(i);
}
-#ifdef CONFIG_FORCEDETH_NAPI
static int nv_napi_poll(struct napi_struct *napi, int budget)
{
struct fe_priv *np = container_of(napi, struct fe_priv, napi);
@@ -3741,23 +3554,27 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
u8 __iomem *base = get_hwbase(dev);
unsigned long flags;
int retcode;
- int tx_work, rx_work;
+ int rx_count, tx_work=0, rx_work=0;
- if (!nv_optimized(np)) {
- spin_lock_irqsave(&np->lock, flags);
- tx_work = nv_tx_done(dev, np->tx_ring_size);
- spin_unlock_irqrestore(&np->lock, flags);
+ do {
+ if (!nv_optimized(np)) {
+ spin_lock_irqsave(&np->lock, flags);
+ tx_work += nv_tx_done(dev, np->tx_ring_size);
+ spin_unlock_irqrestore(&np->lock, flags);
- rx_work = nv_rx_process(dev, budget);
- retcode = nv_alloc_rx(dev);
- } else {
- spin_lock_irqsave(&np->lock, flags);
- tx_work = nv_tx_done_optimized(dev, np->tx_ring_size);
- spin_unlock_irqrestore(&np->lock, flags);
+ rx_count = nv_rx_process(dev, budget - rx_work);
+ retcode = nv_alloc_rx(dev);
+ } else {
+ spin_lock_irqsave(&np->lock, flags);
+ tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
+ spin_unlock_irqrestore(&np->lock, flags);
- rx_work = nv_rx_process_optimized(dev, budget);
- retcode = nv_alloc_rx_optimized(dev);
- }
+ rx_count = nv_rx_process_optimized(dev,
+ budget - rx_work);
+ retcode = nv_alloc_rx_optimized(dev);
+ }
+ } while (retcode == 0 &&
+ rx_count > 0 && (rx_work += rx_count) < budget);
if (retcode) {
spin_lock_irqsave(&np->lock, flags);
@@ -3800,7 +3617,6 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
}
return rx_work;
}
-#endif
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
@@ -5706,6 +5522,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
dev->features |= NETIF_F_TSO;
+ dev->features |= NETIF_F_GRO;
}
np->vlanctl_bits = 0;
@@ -5758,9 +5575,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
else
dev->netdev_ops = &nv_netdev_ops_optimized;
-#ifdef CONFIG_FORCEDETH_NAPI
netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
-#endif
SET_ETHTOOL_OPS(dev, &ops);
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
@@ -5863,7 +5678,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
/* msix has had reported issues when modifying irqmask
as in the case of napi, therefore, disable for now
*/
-#ifndef CONFIG_FORCEDETH_NAPI
+#if 0
np->msi_flags |= NV_MSI_X_CAPABLE;
#endif
}
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 0770e2f..0fb0fef 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -674,8 +674,6 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data, skb->len, DMA_TO_DEVICE));
CBDW_DATLEN(bdp, skb->len);
- dev->trans_start = jiffies;
-
/*
* If this was the last BD in the ring, start at the beginning again.
*/
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 0a973e7..714da96 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -231,12 +231,12 @@ static void set_multicast_finish(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *pmc;
+ struct netdev_hw_addr *ha;
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- netdev_for_each_mc_addr(pmc, dev)
- set_multicast_one(dev, pmc->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
set_multicast_finish(dev);
} else
set_promiscuous_mode(dev);
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index ec81f50..7eff92e 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -232,12 +232,12 @@ static void set_multicast_finish(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *pmc;
+ struct netdev_hw_addr *ha;
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- netdev_for_each_mc_addr(pmc, dev)
- set_multicast_one(dev, pmc->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
set_multicast_finish(dev);
} else
set_promiscuous_mode(dev);
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 34d3da7..7f0591e 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -223,12 +223,12 @@ static void set_multicast_finish(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *pmc;
+ struct netdev_hw_addr *ha;
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- netdev_for_each_mc_addr(pmc, dev)
- set_multicast_one(dev, pmc->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
set_multicast_finish(dev);
} else
set_promiscuous_mode(dev);
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 3acac5f..ff028f5 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -277,15 +277,17 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
int tbiaddr = -1;
const u32 *addrp;
u64 addr = 0, size = 0;
- int err = 0;
+ int err;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
new_bus = mdiobus_alloc();
- if (NULL == new_bus)
+ if (!new_bus) {
+ err = -ENOMEM;
goto err_free_priv;
+ }
new_bus->name = "Freescale PowerQUICC MII Bus",
new_bus->read = &fsl_pq_mdio_read,
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 5d3763f..c6791cd 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -82,6 +82,7 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
+#include <linux/net_tstamp.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -377,6 +378,13 @@ static void gfar_init_mac(struct net_device *ndev)
rctrl |= RCTRL_PADDING(priv->padding);
}
+ /* Insert receive time stamps into padding alignment bytes */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
+ rctrl &= ~RCTRL_PAL_MASK;
+ rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE | RCTRL_PADDING(8);
+ priv->padding = 8;
+ }
+
/* keep vlan related bits if it's enabled */
if (priv->vlgrp) {
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
@@ -501,7 +509,8 @@ void unlock_tx_qs(struct gfar_private *priv)
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
- return priv->vlgrp || priv->rx_csum_enable;
+ return priv->vlgrp || priv->rx_csum_enable ||
+ (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
static void free_tx_pointers(struct gfar_private *priv)
@@ -738,7 +747,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
FSL_GIANFAR_DEV_HAS_CSUM |
FSL_GIANFAR_DEV_HAS_VLAN |
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
- FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
+ FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+ FSL_GIANFAR_DEV_HAS_TIMER;
ctype = of_get_property(np, "phy-connection-type", NULL);
@@ -768,6 +778,48 @@ err_grp_init:
return err;
}
+static int gfar_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config config;
+ struct gfar_private *priv = netdev_priv(netdev);
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+ return -ERANGE;
+ priv->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->hwts_rx_en = 0;
+ break;
+ default:
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+ return -ERANGE;
+ priv->hwts_rx_en = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
@@ -776,6 +828,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
+ if (cmd == SIOCSHWTSTAMP)
+ return gfar_hwtstamp_ioctl(dev, rq, cmd);
+
if (!priv->phydev)
return -ENODEV;
@@ -978,7 +1033,8 @@ static int gfar_probe(struct of_device *ofdev,
else
priv->padding = 0;
- if (dev->features & NETIF_F_IP_CSUM)
+ if (dev->features & NETIF_F_IP_CSUM ||
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
dev->hard_header_len += GMAC_FCB_LEN;
/* Program the isrg regs only if number of grps > 1 */
@@ -1288,21 +1344,9 @@ static struct dev_pm_ops gfar_pm_ops = {
#define GFAR_PM_OPS (&gfar_pm_ops)
-static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
-{
- return gfar_suspend(&ofdev->dev);
-}
-
-static int gfar_legacy_resume(struct of_device *ofdev)
-{
- return gfar_resume(&ofdev->dev);
-}
-
#else
#define GFAR_PM_OPS NULL
-#define gfar_legacy_suspend NULL
-#define gfar_legacy_resume NULL
#endif
@@ -1683,7 +1727,7 @@ void gfar_start(struct net_device *dev)
gfar_write(&regs->imask, IMASK_DEFAULT);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
void gfar_configure_coalescing(struct gfar_private *priv,
@@ -1923,23 +1967,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct netdev_queue *txq;
struct gfar __iomem *regs = NULL;
struct txfcb *fcb = NULL;
- struct txbd8 *txbdp, *txbdp_start, *base;
+ struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
u32 lstatus;
- int i, rq = 0;
+ int i, rq = 0, do_tstamp = 0;
u32 bufaddr;
unsigned long flags;
- unsigned int nr_frags, length;
-
+ unsigned int nr_frags, nr_txbds, length;
+ union skb_shared_tx *shtx;
rq = skb->queue_mapping;
tx_queue = priv->tx_queue[rq];
txq = netdev_get_tx_queue(dev, rq);
base = tx_queue->tx_bd_base;
regs = tx_queue->grp->regs;
+ shtx = skb_tx(skb);
+
+ /* check if time stamp should be generated */
+ if (unlikely(shtx->hardware && priv->hwts_tx_en))
+ do_tstamp = 1;
/* make space for additional header when fcb is needed */
if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
- (priv->vlgrp && vlan_tx_tag_present(skb))) &&
+ (priv->vlgrp && vlan_tx_tag_present(skb)) ||
+ unlikely(do_tstamp)) &&
(skb_headroom(skb) < GMAC_FCB_LEN)) {
struct sk_buff *skb_new;
@@ -1956,8 +2006,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* total number of fragments in the SKB */
nr_frags = skb_shinfo(skb)->nr_frags;
+ /* calculate the required number of TxBDs for this skb */
+ if (unlikely(do_tstamp))
+ nr_txbds = nr_frags + 2;
+ else
+ nr_txbds = nr_frags + 1;
+
/* check if there is space to queue this packet */
- if ((nr_frags+1) > tx_queue->num_txbdfree) {
+ if (nr_txbds > tx_queue->num_txbdfree) {
/* no space, stop the queue */
netif_tx_stop_queue(txq);
dev->stats.tx_fifo_errors++;
@@ -1969,9 +2025,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
txq->tx_packets ++;
txbdp = txbdp_start = tx_queue->cur_tx;
+ lstatus = txbdp->lstatus;
+
+ /* Time stamp insertion requires one additional TxBD */
+ if (unlikely(do_tstamp))
+ txbdp_tstamp = txbdp = next_txbd(txbdp, base,
+ tx_queue->tx_ring_size);
if (nr_frags == 0) {
- lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ if (unlikely(do_tstamp))
+ txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
+ TXBD_INTERRUPT);
+ else
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
/* Place the fragment addresses and lengths into the TxBDs */
for (i = 0; i < nr_frags; i++) {
@@ -2017,11 +2083,32 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
gfar_tx_vlan(skb, fcb);
}
- /* setup the TxBD length and buffer pointer for the first BD */
+ /* Setup tx hardware time stamping if requested */
+ if (unlikely(do_tstamp)) {
+ shtx->in_progress = 1;
+ if (fcb == NULL)
+ fcb = gfar_add_fcb(skb);
+ fcb->ptp = 1;
+ lstatus |= BD_LFLAG(TXBD_TOE);
+ }
+
txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+ /*
+ * If time stamping is requested, one additional TxBD must be set up. The
+ * first TxBD points to the FCB and must have a data length of
+ * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
+ * the full frame length.
+ */
+ if (unlikely(do_tstamp)) {
+ txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
+ txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
+ (skb_headlen(skb) - GMAC_FCB_LEN);
+ lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
+ } else {
+ lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+ }
/*
* We can work in parallel with gfar_clean_tx_ring(), except
@@ -2061,9 +2148,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
/* reduce TxBD free count */
- tx_queue->num_txbdfree -= (nr_frags + 1);
-
- dev->trans_start = jiffies;
+ tx_queue->num_txbdfree -= (nr_txbds);
/* If the next BD still needs to be cleaned up, then the bds
are full. We need to tell the kernel to stop sending us stuff. */
@@ -2251,16 +2336,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
struct net_device *dev = tx_queue->dev;
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_rx_q *rx_queue = NULL;
- struct txbd8 *bdp;
+ struct txbd8 *bdp, *next = NULL;
struct txbd8 *lbdp = NULL;
struct txbd8 *base = tx_queue->tx_bd_base;
struct sk_buff *skb;
int skb_dirtytx;
int tx_ring_size = tx_queue->tx_ring_size;
- int frags = 0;
+ int frags = 0, nr_txbds = 0;
int i;
int howmany = 0;
u32 lstatus;
+ size_t buflen;
+ union skb_shared_tx *shtx;
rx_queue = priv->rx_queue[tx_queue->qindex];
bdp = tx_queue->dirty_tx;
@@ -2270,7 +2357,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
unsigned long flags;
frags = skb_shinfo(skb)->nr_frags;
- lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
+
+ /*
+ * When time stamping, one additional TxBD must be freed.
+ * Also, we need to dma_unmap_single() the TxPAL.
+ */
+ shtx = skb_tx(skb);
+ if (unlikely(shtx->in_progress))
+ nr_txbds = frags + 2;
+ else
+ nr_txbds = frags + 1;
+
+ lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
lstatus = lbdp->lstatus;
@@ -2279,10 +2377,24 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
(lstatus & BD_LENGTH_MASK))
break;
- dma_unmap_single(&priv->ofdev->dev,
- bdp->bufPtr,
- bdp->length,
- DMA_TO_DEVICE);
+ if (unlikely(shtx->in_progress)) {
+ next = next_txbd(bdp, base, tx_ring_size);
+ buflen = next->length + GMAC_FCB_LEN;
+ } else
+ buflen = bdp->length;
+
+ dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+ buflen, DMA_TO_DEVICE);
+
+ if (unlikely(shtx->in_progress)) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+ bdp = next;
+ }
bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
bdp = next_txbd(bdp, base, tx_ring_size);
@@ -2314,7 +2426,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
howmany++;
spin_lock_irqsave(&tx_queue->txlock, flags);
- tx_queue->num_txbdfree += frags + 1;
+ tx_queue->num_txbdfree += nr_txbds;
spin_unlock_irqrestore(&tx_queue->txlock, flags);
}
@@ -2470,6 +2582,17 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
skb_pull(skb, amount_pull);
}
+ /* Get receive timestamp from the skb */
+ if (priv->hwts_rx_en) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ u64 *ns = (u64 *) skb->data;
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(*ns);
+ }
+
+ if (priv->padding)
+ skb_pull(skb, priv->padding);
+
if (priv->rx_csum_enable)
gfar_rx_checksum(skb, fcb);
@@ -2506,8 +2629,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
bdp = rx_queue->cur_rx;
base = rx_queue->rx_bd_base;
- amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
- priv->padding;
+ amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
struct sk_buff *newskb;
@@ -2794,7 +2916,7 @@ static void adjust_link(struct net_device *dev)
* whenever dev->flags is changed */
static void gfar_set_multi(struct net_device *dev)
{
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
@@ -2867,17 +2989,14 @@ static void gfar_set_multi(struct net_device *dev)
return;
/* Parse the list, and set the appropriate bits */
- netdev_for_each_mc_addr(mc_ptr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (idx < em_num) {
- gfar_set_mac_for_addr(dev, idx,
- mc_ptr->dmi_addr);
+ gfar_set_mac_for_addr(dev, idx, ha->addr);
idx++;
} else
- gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
+ gfar_set_hash_for_addr(dev, ha->addr);
}
}
-
- return;
}
@@ -2918,8 +3037,6 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
tempval = gfar_read(priv->hash_regs[whichreg]);
tempval |= value;
gfar_write(priv->hash_regs[whichreg], tempval);
-
- return;
}
@@ -3055,8 +3172,6 @@ static struct of_platform_driver gfar_driver = {
.probe = gfar_probe,
.remove = gfar_remove,
- .suspend = gfar_legacy_suspend,
- .resume = gfar_legacy_resume,
.driver.pm = GFAR_PM_OPS,
};
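The gianfar hunks above carry hardware timestamps from the TxBD/RxBD path into the stack through struct skb_shared_hwtstamps. A minimal sketch of that pattern follows; the function and its arguments are illustrative names, not the driver's exact code, while skb_hwtstamps(), skb_tstamp_tx() and ns_to_ktime() are the real helpers the patch relies on.

#include <linux/skbuff.h>
#include <linux/ktime.h>
#include <linux/string.h>

/* Illustrative only: hand a raw nanosecond value captured by the MAC
 * to the networking core. */
static void example_report_hw_tstamp(struct sk_buff *skb, u64 ns, bool is_tx)
{
        struct skb_shared_hwtstamps hwts;

        memset(&hwts, 0, sizeof(hwts));
        hwts.hwtstamp = ns_to_ktime(ns);

        if (is_tx)
                /* TX completion: clone and deliver to the socket error queue */
                skb_tstamp_tx(skb, &hwts);
        else
                /* RX: store the stamp in the skb before netif_receive_skb() */
                *skb_hwtstamps(skb) = hwts;
}

On transmit the stamp is reported after completion via skb_tstamp_tx(); on receive it is simply filled into the skb, as gfar_process_frame() does above.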
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 17d25e7..ac4a92e 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -262,6 +262,7 @@ extern const char gfar_driver_version[];
#define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size)
+#define RCTRL_TS_ENABLE 0x01000000
#define RCTRL_PAL_MASK 0x001f0000
#define RCTRL_VLEX 0x00002000
#define RCTRL_FILREN 0x00001000
@@ -539,7 +540,7 @@ struct txbd8
struct txfcb {
u8 flags;
- u8 reserved;
+ u8 ptp; /* Flag to enable tx timestamping */
u8 l4os; /* Level 4 Header Offset */
u8 l3os; /* Level 3 Header Offset */
u16 phcs; /* Pseudo-header Checksum */
@@ -885,6 +886,7 @@ struct gfar {
#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
+#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
#if (MAXGROUPS == 2)
#define DEFAULT_MAPPING 0xAA
@@ -1100,6 +1102,10 @@ struct gfar_private {
/* Network Statistics */
struct gfar_extra_stats extra_stats;
+
+ /* HW time stamping enabled flag */
+ int hwts_rx_en;
+ int hwts_tx_en;
};
extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
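gianfar.h now grows hwts_rx_en/hwts_tx_en in struct gfar_private. Such flags are conventionally driven from the SIOCSHWTSTAMP ioctl; below is a hedged sketch of that handler shape. The function name, the absence of hardware programming, and the error handling are assumptions for illustration, not part of this patch.

#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/uaccess.h>

/* Illustrative sketch: derive the new hwts_*_en flags from a
 * userspace hwtstamp_config request. */
static int example_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
        struct hwtstamp_config config;
        struct gfar_private *priv = netdev_priv(dev);

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        priv->hwts_tx_en = (config.tx_type == HWTSTAMP_TX_ON);
        priv->hwts_rx_en = (config.rx_filter != HWTSTAMP_FILTER_NONE);

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
}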
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 3a90430..fd491e4 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -895,7 +895,6 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
else
skb->ip_summed = CHECKSUM_NONE;
- skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
netif_receive_skb(skb);
@@ -990,7 +989,7 @@ static u32 greth_hash_get_index(__u8 *addr)
static void greth_set_hash_filter(struct net_device *dev)
{
- struct dev_mc_list *curr;
+ struct netdev_hw_addr *ha;
struct greth_private *greth = netdev_priv(dev);
struct greth_regs *regs = (struct greth_regs *) greth->regs;
u32 mc_filter[2];
@@ -998,8 +997,8 @@ static void greth_set_hash_filter(struct net_device *dev)
mc_filter[0] = mc_filter[1] = 0;
- netdev_for_each_mc_addr(curr, dev) {
- bitnr = greth_hash_get_index(curr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = greth_hash_get_index(ha->addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
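greth is one of many drivers in this series converted from struct dev_mc_list/dmi_addr to netdev_for_each_mc_addr() over struct netdev_hw_addr (gianfar, hamachi, hp100, ibmlana, ibmveth, ibm_newemac and igb receive the same treatment). A minimal sketch of the new iteration, with an assumed helper that only copies the addresses out:

#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/string.h>

/* Illustrative: walk the multicast list with the new API; ha->addr
 * replaces the old mc_ptr->dmi_addr field. */
static void example_collect_mc(struct net_device *dev,
                               u8 list[][ETH_ALEN], int max)
{
        struct netdev_hw_addr *ha;
        int i = 0;

        netdev_for_each_mc_addr(ha, dev) {
                if (i == max)
                        break;
                memcpy(list[i++], ha->addr, ETH_ALEN);
        }
}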
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 5d6f13879..61f2b1c 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -859,7 +859,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
for (i = 10000; i >= 0; i--)
if ((readw(ioaddr + MII_Status) & 1) == 0)
break;
- return;
}
@@ -1225,8 +1224,6 @@ static void hamachi_init_ring(struct net_device *dev)
}
/* Mark the last entry of the ring */
hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
-
- return;
}
@@ -1857,12 +1854,12 @@ static void set_rx_mode(struct net_device *dev)
/* Too many to match, or accept all multicasts. */
writew(0x000B, ioaddr + AddrMode);
} else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8);
- writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]),
+ netdev_for_each_mc_addr(ha, dev) {
+ writel(*(u32 *)(ha->addr), ioaddr + 0x100 + i*8);
+ writel(0x20000 | (*(u16 *)&ha->addr[4]),
ioaddr + 0x104 + i*8);
i++;
}
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 0cab992..3e25f10 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -429,7 +429,7 @@ static int ser12_open(struct net_device *dev)
return -EINVAL;
}
if (!request_region(dev->base_addr, SER12_EXTENT, "baycom_ser_fdx")) {
- printk(KERN_WARNING "BAYCOM_SER_FSX: I/O port 0x%04lx busy \n",
+ printk(KERN_WARNING "BAYCOM_SER_FSX: I/O port 0x%04lx busy\n",
dev->base_addr);
return -EACCES;
}
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index f3a96b8..9f64c86 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1629,7 +1629,6 @@ static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb)
skb->protocol = ax25_type_trans(skb, scc->dev);
netif_rx(skb);
- return;
}
/* ----> transmit frame <---- */
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index efdbcad..82bffc3 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -351,7 +351,6 @@ hpp_reset_8390(struct net_device *dev)
printk("%s: hp_reset_8390() did not complete.\n", dev->name);
if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
- return;
}
/* The programmed-I/O version of reading the 4 byte 8390 specific header.
@@ -422,7 +421,6 @@ hpp_io_block_output(struct net_device *dev, int count,
int ioaddr = dev->base_addr - NIC_OFFSET;
outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2);
- return;
}
static void
@@ -436,8 +434,6 @@ hpp_mem_block_output(struct net_device *dev, int count,
outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
memcpy_toio(ei_status.mem, buf, (count + 3) & ~3);
outw(option_reg, ioaddr + HPP_OPTION);
-
- return;
}
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index 5c4d78c..86ececd 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -240,7 +240,6 @@ hp_reset_8390(struct net_device *dev)
printk("%s: hp_reset_8390() did not complete.\n", dev->name);
if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
- return;
}
static void
@@ -360,7 +359,6 @@ hp_block_output(struct net_device *dev, int count,
dev->name, (start_page << 8) + count, addr);
}
outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
- return;
}
/* This function resets the ethercard if something screws up. */
@@ -371,7 +369,6 @@ hp_init_card(struct net_device *dev)
NS8390p_init(dev, 0);
outb_p(irqmap[irq&0x0f] | HP_RUN,
dev->base_addr - NIC_OFFSET + HP_CONFIGURE);
- return;
}
#ifdef MODULE
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 4daad8c..68e5ac8 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1102,7 +1102,7 @@ static int hp100_open(struct net_device *dev)
return -EAGAIN;
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_start_queue(dev);
lp->lan_type = hp100_sense_lan(dev);
@@ -1510,7 +1510,7 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name);
#endif
/* not waited long enough since last tx? */
- if (time_before(jiffies, dev->trans_start + HZ))
+ if (time_before(jiffies, dev_trans_start(dev) + HZ))
goto drop;
if (hp100_check_lan(dev))
@@ -1547,7 +1547,6 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
}
}
- dev->trans_start = jiffies;
goto drop;
}
@@ -1585,7 +1584,6 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
/* Update statistics */
lp->stats.tx_packets++;
lp->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
@@ -1663,7 +1661,7 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i);
#endif
/* not waited long enough since last failed tx try? */
- if (time_before(jiffies, dev->trans_start + HZ)) {
+ if (time_before(jiffies, dev_trans_start(dev) + HZ)) {
#ifdef HP100_DEBUG
printk("hp100: %s: trans_start timing problem\n",
dev->name);
@@ -1701,7 +1699,6 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
mdelay(1);
}
}
- dev->trans_start = jiffies;
goto drop;
}
@@ -1745,7 +1742,6 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
lp->stats.tx_packets++;
lp->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
hp100_ints_on();
spin_unlock_irqrestore(&lp->lock, flags);
@@ -2099,15 +2095,15 @@ static void hp100_set_multicast_list(struct net_device *dev)
} else {
int i, idx;
u_char *addrs;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
memset(&lp->hash_bytes, 0x00, 8);
#ifdef HP100_DEBUG
printk("hp100: %s: computing hash filter - mc_count = %i\n",
dev->name, netdev_mc_count(dev));
#endif
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if ((*addrs & 0x01) == 0x01) { /* multicast address? */
#ifdef HP100_DEBUG
printk("hp100: %s: multicast = %pM, ",
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index dd873cc..2484e9e 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -389,18 +389,19 @@ static void emac_hash_mc(struct emac_instance *dev)
const int regs = EMAC_XAHT_REGS(dev);
u32 *gaht_base = emac_gaht_base(dev);
u32 gaht_temp[regs];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
memset(gaht_temp, 0, sizeof (gaht_temp));
- netdev_for_each_mc_addr(dmi, dev->ndev) {
+ netdev_for_each_mc_addr(ha, dev->ndev) {
int slot, reg, mask;
- DBG2(dev, "mc %pM" NL, dmi->dmi_addr);
+ DBG2(dev, "mc %pM" NL, ha->addr);
- slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
+ slot = EMAC_XAHT_CRC_TO_SLOT(dev,
+ ether_crc(ETH_ALEN, ha->addr));
reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
@@ -1177,7 +1178,7 @@ static int emac_open(struct net_device *ndev)
netif_carrier_on(dev->ndev);
/* Required for Pause packet support in EMAC */
- dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
+ dev_mc_add_global(ndev, default_mcast_addr);
emac_configure(dev);
mal_poll_add(dev->mal, &dev->commac);
@@ -1700,7 +1701,6 @@ static int emac_poll_rx(void *param, int budget)
skb_put(skb, len);
push_packet:
- skb->dev = dev->ndev;
skb->protocol = eth_type_trans(skb, dev->ndev);
emac_rx_csum(dev, skb, ctrl);
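ibm_newemac keeps its CRC-based group hash (EMAC_XAHT_CRC_TO_SLOT over ether_crc()) and only switches the walk to ha->addr; greth and gianfar use the same hash-one-bit-per-address scheme. A sketch of the general idea, assuming a 64-bit filter table purely for illustration:

#include <linux/crc32.h>
#include <linux/if_ether.h>

/* Illustrative: hash the MAC address and set the matching bit in a
 * two-word multicast filter table. */
static void example_hash_mc_addr(u32 hash_table[2], const u8 *addr)
{
        u32 crc = ether_crc(ETH_ALEN, addr);
        unsigned int bit = crc >> 26;           /* top 6 bits -> 0..63 */

        hash_table[bit >> 5] |= 1u << (bit & 31);
}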
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 7d6cf33..294ccfb 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -384,7 +384,7 @@ static void InitBoard(struct net_device *dev)
int camcnt;
camentry_t cams[16];
u32 cammask;
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
u16 rcrval;
/* reset the SONIC */
@@ -419,8 +419,8 @@ static void InitBoard(struct net_device *dev)
/* start putting the multicast addresses into the CAM list. Stop if
it is full. */
- netdev_for_each_mc_addr(mcptr, dev) {
- putcam(cams, &camcnt, mcptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ putcam(cams, &camcnt, ha->addr);
if (camcnt == 16)
break;
}
@@ -478,7 +478,7 @@ static void InitBoard(struct net_device *dev)
/* if still multicast addresses left or ALLMULTI is set, set the multicast
enable bit */
- if ((dev->flags & IFF_ALLMULTI) || (mcptr != NULL))
+ if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > camcnt)
rcrval |= RCREG_AMC;
/* promiscous mode ? */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index cd508a8..7acb3ed 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -45,6 +45,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
+#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
@@ -199,7 +200,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
return -1;
}
- pool->skbuff = kmalloc(sizeof(void*) * pool->size, GFP_KERNEL);
+ pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
if(!pool->skbuff) {
kfree(pool->dma_addr);
@@ -210,7 +211,6 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
return -1;
}
- memset(pool->skbuff, 0, sizeof(void*) * pool->size);
memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
for(i = 0; i < pool->size; ++i) {
@@ -957,7 +957,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
} else {
tx_packets++;
tx_bytes += skb->len;
- netdev->trans_start = jiffies;
+ netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
}
if (!used_bounce)
@@ -1073,7 +1073,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
}
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
/* clear the filter table & disable filtering */
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableRecv |
@@ -1084,10 +1084,10 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
}
/* add the addresses to the filter table */
- netdev_for_each_mc_addr(mclist, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
// add the multicast address to the filter table
unsigned long mcast_addr = 0;
- memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
+ memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
@@ -1421,7 +1421,6 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
if (!entry)
ibmveth_error_printk("Cannot create adapter proc entry");
}
- return;
}
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
@@ -1589,6 +1588,12 @@ static struct kobj_type ktype_veth_pool = {
.default_attrs = veth_pool_attrs,
};
+static int ibmveth_resume(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ ibmveth_interrupt(netdev->irq, netdev);
+ return 0;
+}
static struct vio_device_id ibmveth_device_table[] __devinitdata= {
{ "network", "IBM,l-lan"},
@@ -1596,6 +1601,10 @@ static struct vio_device_id ibmveth_device_table[] __devinitdata= {
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
+static struct dev_pm_ops ibmveth_pm_ops = {
+ .resume = ibmveth_resume
+};
+
static struct vio_driver ibmveth_driver = {
.id_table = ibmveth_device_table,
.probe = ibmveth_probe,
@@ -1604,6 +1613,7 @@ static struct vio_driver ibmveth_driver = {
.driver = {
.name = ibmveth_driver_name,
.owner = THIS_MODULE,
+ .pm = &ibmveth_pm_ops,
}
};
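Besides the new dev_pm_ops resume hook, ibmveth swaps kmalloc()+memset() for kcalloc() when sizing the skbuff pointer array; kcalloc() both zeroes the allocation and checks that count * size does not overflow. A sketch of the idiom (the helper name is made up):

#include <linux/slab.h>

/* Illustrative: allocate a zeroed array of pointers with overflow-safe
 * sizing. */
static void **example_alloc_ptr_array(size_t count)
{
        return kcalloc(count, sizeof(void *), GFP_KERNEL);
}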
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index f4081c0..ab9f675 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -182,7 +182,6 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
- dev->trans_start = jiffies;
skb_queue_tail(&dp->rq, skb);
if (!dp->tasklet_pending) {
dp->tasklet_pending = 1;
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 4a32bed..86438b5 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -104,6 +104,12 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
case E1000_DEV_ID_82580_COPPER_DUAL:
mac->type = e1000_82580;
break;
+ case E1000_DEV_ID_I350_COPPER:
+ case E1000_DEV_ID_I350_FIBER:
+ case E1000_DEV_ID_I350_SERDES:
+ case E1000_DEV_ID_I350_SGMII:
+ mac->type = e1000_i350;
+ break;
default:
return -E1000_ERR_MAC_INIT;
break;
@@ -153,8 +159,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
if (mac->type == e1000_82580)
mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+ if (mac->type == e1000_i350)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
/* reset */
- if (mac->type == e1000_82580)
+ if (mac->type >= e1000_82580)
mac->ops.reset_hw = igb_reset_hw_82580;
else
mac->ops.reset_hw = igb_reset_hw_82575;
@@ -225,7 +233,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
- } else if (hw->mac.type == e1000_82580) {
+ } else if (hw->mac.type >= e1000_82580) {
phy->ops.reset = igb_phy_hw_reset;
phy->ops.read_reg = igb_read_phy_reg_82580;
phy->ops.write_reg = igb_write_phy_reg_82580;
@@ -261,6 +269,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
break;
case I82580_I_PHY_ID:
+ case I350_I_PHY_ID:
phy->type = e1000_phy_82580;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
phy->ops.get_cable_length = igb_get_cable_length_82580;
@@ -1205,8 +1214,6 @@ void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
/* If the management interface is not enabled, then power down */
if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
igb_power_down_phy_copper(hw);
-
- return;
}
/**
@@ -1445,7 +1452,6 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
**/
static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
{
- u32 mdicnfg = 0;
s32 ret_val;
@@ -1453,15 +1459,6 @@ static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
if (ret_val)
goto out;
- /*
- * We config the phy address in MDICNFG register now. Same bits
- * as before. The values in MDIC can be written but will be
- * ignored. This allows us to call the old function after
- * configuring the PHY address in the new register
- */
- mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
- wr32(E1000_MDICNFG, mdicnfg);
-
ret_val = igb_read_phy_reg_mdic(hw, offset, data);
hw->phy.ops.release(hw);
@@ -1480,7 +1477,6 @@ out:
**/
static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
{
- u32 mdicnfg = 0;
s32 ret_val;
@@ -1488,15 +1484,6 @@ static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
if (ret_val)
goto out;
- /*
- * We config the phy address in MDICNFG register now. Same bits
- * as before. The values in MDIC can be written but will be
- * ignored. This allows us to call the old function after
- * configuring the PHY address in the new register
- */
- mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
- wr32(E1000_MDICNFG, mdicnfg);
-
ret_val = igb_write_phy_reg_mdic(hw, offset, data);
hw->phy.ops.release(hw);
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index fbe1c99..cbd1e12 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -38,9 +38,10 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_OFF1_ON2))
-#define E1000_RAR_ENTRIES_82575 16
-#define E1000_RAR_ENTRIES_82576 24
-#define E1000_RAR_ENTRIES_82580 24
+#define E1000_RAR_ENTRIES_82575 16
+#define E1000_RAR_ENTRIES_82576 24
+#define E1000_RAR_ENTRIES_82580 24
+#define E1000_RAR_ENTRIES_I350 32
#define E1000_SW_SYNCH_MB 0x00000100
#define E1000_STAT_DEV_RST_SET 0x00100000
@@ -52,6 +53,7 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
#define E1000_SRRCTL_DROP_EN 0x80000000
+#define E1000_SRRCTL_TIMESTAMP 0x40000000
#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
#define E1000_MRQC_ENABLE_VMDQ 0x00000003
@@ -108,6 +110,7 @@ union e1000_adv_rx_desc {
#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
+#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
/* Transmit Descriptor - Advanced */
union e1000_adv_tx_desc {
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index fe6cf1b..24d9be6 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -610,11 +610,7 @@
#define IGP_LED3_MODE 0x07000000
/* PCI/PCI-X/PCI-EX Config space */
-#define PCIE_LINK_STATUS 0x12
#define PCIE_DEVICE_CONTROL2 0x28
-
-#define PCIE_LINK_WIDTH_MASK 0x3F0
-#define PCIE_LINK_WIDTH_SHIFT 4
#define PCIE_DEVICE_CONTROL2_16ms 0x0005
#define PHY_REVISION_MASK 0xFFFFFFF0
@@ -629,6 +625,7 @@
#define M88E1111_I_PHY_ID 0x01410CC0
#define IGP03E1000_E_PHY_ID 0x02A80390
#define I82580_I_PHY_ID 0x015403A0
+#define I350_I_PHY_ID 0x015403B0
#define M88_VENDOR 0x0141
/* M88E1000 Specific Registers */
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 82a533f..cb8db78 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/netdevice.h>
#include "e1000_regs.h"
#include "e1000_defines.h"
@@ -53,6 +54,10 @@ struct e1000_hw;
#define E1000_DEV_ID_82580_SERDES 0x1510
#define E1000_DEV_ID_82580_SGMII 0x1511
#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_I350_COPPER 0x1521
+#define E1000_DEV_ID_I350_FIBER 0x1522
+#define E1000_DEV_ID_I350_SERDES 0x1523
+#define E1000_DEV_ID_I350_SGMII 0x1524
#define E1000_REVISION_2 2
#define E1000_REVISION_4 4
@@ -72,6 +77,7 @@ enum e1000_mac_type {
e1000_82575,
e1000_82576,
e1000_82580,
+ e1000_i350,
e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
};
@@ -502,14 +508,11 @@ struct e1000_hw {
u8 revision_id;
};
-#ifdef DEBUG
-extern char *igb_get_hw_dev_name(struct e1000_hw *hw);
+extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
#define hw_dbg(format, arg...) \
- printk(KERN_DEBUG "%s: " format, igb_get_hw_dev_name(hw), ##arg)
-#else
-#define hw_dbg(format, arg...)
-#endif
-#endif
+ netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
+
/* These functions must be implemented by drivers */
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index be8d010..90c5e01 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -53,17 +53,30 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
u16 pcie_link_status;
bus->type = e1000_bus_type_pci_express;
- bus->speed = e1000_bus_speed_2500;
ret_val = igb_read_pcie_cap_reg(hw,
- PCIE_LINK_STATUS,
- &pcie_link_status);
- if (ret_val)
+ PCI_EXP_LNKSTA,
+ &pcie_link_status);
+ if (ret_val) {
bus->width = e1000_bus_width_unknown;
- else
+ bus->speed = e1000_bus_speed_unknown;
+ } else {
+ switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
+ case PCI_EXP_LNKSTA_CLS_2_5GB:
+ bus->speed = e1000_bus_speed_2500;
+ break;
+ case PCI_EXP_LNKSTA_CLS_5_0GB:
+ bus->speed = e1000_bus_speed_5000;
+ break;
+ default:
+ bus->speed = e1000_bus_speed_unknown;
+ break;
+ }
+
bus->width = (enum e1000_bus_width)((pcie_link_status &
- PCIE_LINK_WIDTH_MASK) >>
- PCIE_LINK_WIDTH_SHIFT);
+ PCI_EXP_LNKSTA_NLW) >>
+ PCI_EXP_LNKSTA_NLW_SHIFT);
+ }
reg = rd32(E1000_STATUS);
bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 3b772b8..6e63d9a 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -107,6 +107,7 @@ struct vf_data_storage {
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
+#define IGB_RXBUFFER_64 64 /* Used for packet split */
#define IGB_RXBUFFER_128 128 /* Used for packet split */
#define IGB_RXBUFFER_1024 1024
#define IGB_RXBUFFER_2048 2048
@@ -140,8 +141,10 @@ struct igb_buffer {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
- u16 mapped_as_page;
+ unsigned int bytecount;
u16 gso_segs;
+ union skb_shared_tx shtx;
+ u8 mapped_as_page;
};
/* RX */
struct {
@@ -185,7 +188,7 @@ struct igb_q_vector {
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
- struct pci_dev *pdev; /* pci device for dma mapping */
+ struct device *dev; /* device pointer for dma mapping */
dma_addr_t dma; /* phys address of the ring */
void *desc; /* descriptor ring memory */
unsigned int size; /* length of desc. ring in bytes */
@@ -323,6 +326,7 @@ struct igb_adapter {
#define IGB_82576_TSYNC_SHIFT 19
#define IGB_82580_TSYNC_SHIFT 24
+#define IGB_TS_HDR_LEN 16
enum e1000_state_t {
__IGB_TESTING,
__IGB_RESETTING,
@@ -336,7 +340,6 @@ enum igb_boards {
extern char igb_driver_name[];
extern char igb_driver_version[];
-extern char *igb_get_hw_dev_name(struct e1000_hw *hw);
extern int igb_up(struct igb_adapter *);
extern void igb_down(struct igb_adapter *);
extern void igb_reinit_locked(struct igb_adapter *);
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 7430384..f2ebf927 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -902,6 +902,49 @@ struct igb_reg_test {
#define TABLE64_TEST_LO 5
#define TABLE64_TEST_HI 6
+/* i350 reg test */
+static struct igb_reg_test reg_test_i350[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* RDH is read-only for i350, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
/* 82580 reg test */
static struct igb_reg_test reg_test_82580[] = {
{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1077,6 +1120,10 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
u32 i, toggle;
switch (adapter->hw.mac.type) {
+ case e1000_i350:
+ test = reg_test_i350;
+ toggle = 0x7FEFF3FF;
+ break;
case e1000_82580:
test = reg_test_82580;
toggle = 0x7FEFF3FF;
@@ -1238,6 +1285,9 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
case e1000_82580:
ics_mask = 0x77DCFED5;
break;
+ case e1000_i350:
+ ics_mask = 0x77DCFED5;
+ break;
default:
ics_mask = 0x7FFFFFFF;
break;
@@ -1344,7 +1394,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
/* Setup Tx descriptor ring and Tx buffers */
tx_ring->count = IGB_DEFAULT_TXD;
- tx_ring->pdev = adapter->pdev;
+ tx_ring->dev = &adapter->pdev->dev;
tx_ring->netdev = adapter->netdev;
tx_ring->reg_idx = adapter->vfs_allocated_count;
@@ -1358,7 +1408,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
/* Setup Rx descriptor ring and Rx buffers */
rx_ring->count = IGB_DEFAULT_RXD;
- rx_ring->pdev = adapter->pdev;
+ rx_ring->dev = &adapter->pdev->dev;
rx_ring->netdev = adapter->netdev;
rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
rx_ring->reg_idx = adapter->vfs_allocated_count;
@@ -1554,10 +1604,10 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
buffer_info = &rx_ring->buffer_info[rx_ntc];
/* unmap rx buffer, will be remapped by alloc_rx_buffers */
- pci_unmap_single(rx_ring->pdev,
+ dma_unmap_single(rx_ring->dev,
buffer_info->dma,
rx_ring->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
/* verify contents of skb */
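reg_test_i350 plugs into the existing ethtool register self-test: each row names a register (or a strided block of them), how many copies to test, a test type, and the read/write masks. A generic sketch of the pattern-test step itself, using the classic alternating-bit patterns; this is an illustration of the technique, not igb's internal helper.

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* Illustrative: write known patterns through the write mask, read back
 * through the read mask, and fail if any bit does not stick. */
static int example_reg_pattern_test(void __iomem *reg, u32 mask, u32 write)
{
        static const u32 patterns[] = {
                0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
        };
        u32 before = readl(reg);
        int i;

        for (i = 0; i < ARRAY_SIZE(patterns); i++) {
                u32 val = patterns[i] & write;

                writel(val, reg);
                if ((readl(reg) & mask) != (val & mask)) {
                        writel(before, reg);
                        return -EIO;
                }
        }

        writel(before, reg);
        return 0;
}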
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index c9baa2a..3881918 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -62,6 +62,10 @@ static const struct e1000_info *igb_info_tbl[] = {
};
static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -197,6 +201,336 @@ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+struct igb_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+static const struct igb_reg_info igb_reg_info_tbl[] = {
+
+ /* General Registers */
+ {E1000_CTRL, "CTRL"},
+ {E1000_STATUS, "STATUS"},
+ {E1000_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {E1000_ICR, "ICR"},
+
+ /* RX Registers */
+ {E1000_RCTL, "RCTL"},
+ {E1000_RDLEN(0), "RDLEN"},
+ {E1000_RDH(0), "RDH"},
+ {E1000_RDT(0), "RDT"},
+ {E1000_RXDCTL(0), "RXDCTL"},
+ {E1000_RDBAL(0), "RDBAL"},
+ {E1000_RDBAH(0), "RDBAH"},
+
+ /* TX Registers */
+ {E1000_TCTL, "TCTL"},
+ {E1000_TDBAL(0), "TDBAL"},
+ {E1000_TDBAH(0), "TDBAH"},
+ {E1000_TDLEN(0), "TDLEN"},
+ {E1000_TDH(0), "TDH"},
+ {E1000_TDT(0), "TDT"},
+ {E1000_TXDCTL(0), "TXDCTL"},
+ {E1000_TDFH, "TDFH"},
+ {E1000_TDFT, "TDFT"},
+ {E1000_TDFHS, "TDFHS"},
+ {E1000_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {}
+};
+
+/*
+ * igb_regdump - register printout routine
+ */
+static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
+
+ switch (reginfo->ofs) {
+ case E1000_RDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDLEN(n));
+ break;
+ case E1000_RDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDH(n));
+ break;
+ case E1000_RDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDT(n));
+ break;
+ case E1000_RXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RXDCTL(n));
+ break;
+ case E1000_RDBAL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDBAL(n));
+ break;
+ case E1000_RDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDBAH(n));
+ break;
+ case E1000_TDBAL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDBAL(n));
+ break;
+ case E1000_TDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDBAH(n));
+ break;
+ case E1000_TDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDLEN(n));
+ break;
+ case E1000_TDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDH(n));
+ break;
+ case E1000_TDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDT(n));
+ break;
+ case E1000_TXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TXDCTL(n));
+ break;
+ default:
+ printk(KERN_INFO "%-15s %08x\n",
+ reginfo->name, rd32(reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
+ printk(KERN_INFO "%-15s ", rname);
+ for (n = 0; n < 4; n++)
+ printk(KERN_CONT "%08x ", regs[n]);
+ printk(KERN_CONT "\n");
+}
+
+/*
+ * igb_dump - Print registers, tx-rings and rx-rings
+ */
+static void igb_dump(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_reg_info *reginfo;
+ int n = 0;
+ struct igb_ring *tx_ring;
+ union e1000_adv_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct igb_buffer *buffer_info;
+ struct igb_ring *rx_ring;
+ union e1000_adv_rx_desc *rx_desc;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ printk(KERN_INFO "Device Name state "
+ "trans_start last_rx\n");
+ printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ printk(KERN_INFO " Register Name Value\n");
+ for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ igb_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
+ " leng ntw timestamp\n");
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
+ }
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats
+ *
+ * Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 38 36 35 32 31 24 15 0
+ */
+
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "T [desc] [address 63:0 ] "
+ "[PlPOCIStDDM Ln] [bi->dma ] "
+ "leng ntw timestamp bi->skb\n");
+
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
+ " %04X %3X %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp,
+ buffer_info->skb);
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ buffer_info->length, true);
+ }
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO " %5d %5X %5X\n", n,
+ rx_ring->next_to_use, rx_ring->next_to_clean);
+ }
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+ /* Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 17 16 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
+ * | Checksum Ident | | | | Type | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "R [desc] [ PktBuf A0] "
+ "[ HeadBuf DD] [bi->dma ] [bi->skb] "
+ "<-- Adv Rx Read format\n");
+ printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
+ "[vl er S cks ln] ---------------- [bi->skb] "
+ "<-- Adv Rx Write-Back format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
+ u0 = (struct my_u0 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (staterr & E1000_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX ---------------- %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)buffer_info->dma,
+ buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter)) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ phys_to_virt(buffer_info->dma),
+ rx_ring->rx_buffer_len, true);
+ if (rx_ring->rx_buffer_len
+ < IGB_RXBUFFER_1024)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ phys_to_virt(
+ buffer_info->page_dma +
+ buffer_info->page_offset),
+ PAGE_SIZE/2, true);
+ }
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ }
+ }
+
+exit:
+ return;
+}
+
+
/**
* igb_read_clock - read raw cycle counter (to be used by time counter)
*/
@@ -223,41 +557,15 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
return stamp;
}
-#ifdef DEBUG
/**
- * igb_get_hw_dev_name - return device name string
+ * igb_get_hw_dev - return device
* used by hardware layer to print debugging information
**/
-char *igb_get_hw_dev_name(struct e1000_hw *hw)
+struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
struct igb_adapter *adapter = hw->back;
- return adapter->netdev->name;
-}
-
-/**
- * igb_get_time_str - format current NIC and system time as string
- */
-static char *igb_get_time_str(struct igb_adapter *adapter,
- char buffer[160])
-{
- cycle_t hw = adapter->cycles.read(&adapter->cycles);
- struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
- struct timespec sys;
- struct timespec delta;
- getnstimeofday(&sys);
-
- delta = timespec_sub(nic, sys);
-
- sprintf(buffer,
- "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
- hw,
- (long)nic.tv_sec, nic.tv_nsec,
- (long)sys.tv_sec, sys.tv_nsec,
- (long)delta.tv_sec, delta.tv_nsec);
-
- return buffer;
+ return adapter->netdev;
}
-#endif
/**
* igb_init_module - Driver Registration Routine
@@ -328,6 +636,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
}
case e1000_82575:
case e1000_82580:
+ case e1000_i350:
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -371,7 +680,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
goto err;
ring->count = adapter->tx_ring_count;
ring->queue_index = i;
- ring->pdev = adapter->pdev;
+ ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
/* For 82575, context index must be unique per ring. */
if (adapter->hw.mac.type == e1000_82575)
@@ -385,7 +694,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
goto err;
ring->count = adapter->rx_ring_count;
ring->queue_index = i;
- ring->pdev = adapter->pdev;
+ ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
@@ -471,6 +780,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
q_vector->eims_value = 1 << msix_vector;
break;
case e1000_82580:
+ case e1000_i350:
/* 82580 uses the same table-based approach as 82576 but has fewer
entries as a result we carry over for queues greater than 4. */
if (rx_queue > IGB_N0_QUEUE) {
@@ -551,6 +861,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
case e1000_82576:
case e1000_82580:
+ case e1000_i350:
/* Turn on MSI-X capability first, or our settings
* won't stick. And it will take days to debug. */
wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
@@ -743,7 +1054,6 @@ msi_only:
out:
/* Notify the stack of the (possibly) reduced Tx Queue count. */
adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
- return;
}
/**
@@ -1253,6 +1563,7 @@ void igb_reset(struct igb_adapter *adapter)
* To take effect CTRL.RST is required.
*/
switch (mac->type) {
+ case e1000_i350:
case e1000_82580:
pba = rd32(E1000_RXPBS);
pba = igb_rxpbs_adjust_82580(pba);
@@ -1416,15 +1727,15 @@ static int __devinit igb_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -1656,6 +1967,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
netdev->name,
((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+ (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
"unknown"),
((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
(hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
@@ -1826,6 +2138,7 @@ static void igb_init_hw_timer(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
switch (hw->mac.type) {
+ case e1000_i350:
case e1000_82580:
memset(&adapter->cycles, 0, sizeof(adapter->cycles));
adapter->cycles.read = igb_read_clock;
@@ -2096,7 +2409,7 @@ static int igb_close(struct net_device *netdev)
**/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
- struct pci_dev *pdev = tx_ring->pdev;
+ struct device *dev = tx_ring->dev;
int size;
size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -2109,9 +2422,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev,
- tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(dev,
+ tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -2122,7 +2436,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
err:
vfree(tx_ring->buffer_info);
- dev_err(&pdev->dev,
+ dev_err(dev,
"Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM;
}
@@ -2246,7 +2560,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
**/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
- struct pci_dev *pdev = rx_ring->pdev;
+ struct device *dev = rx_ring->dev;
int size, desc_len;
size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2261,8 +2575,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(dev,
+ rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL);
if (!rx_ring->desc)
goto err;
@@ -2275,8 +2591,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
err:
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- dev_err(&pdev->dev, "Unable to allocate memory for "
- "the receive descriptor ring\n");
+ dev_err(dev, "Unable to allocate memory for the receive descriptor"
+ " ring\n");
return -ENOMEM;
}
@@ -2339,6 +2655,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
if (adapter->vfs_allocated_count) {
/* 82575 and 82576 supports 2 RSS queues for VMDq */
switch (hw->mac.type) {
+ case e1000_i350:
case e1000_82580:
num_rx_queues = 1;
shift = 0;
@@ -2590,6 +2907,8 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
}
+ if (hw->mac.type == e1000_82580)
+ srrctl |= E1000_SRRCTL_TIMESTAMP;
/* Only set Drop Enable if we are supporting multiple queues */
if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
srrctl |= E1000_SRRCTL_DROP_EN;
@@ -2649,8 +2968,8 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
if (!tx_ring->desc)
return;
- pci_free_consistent(tx_ring->pdev, tx_ring->size,
- tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -2674,15 +2993,15 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(tx_ring->pdev,
+ dma_unmap_page(tx_ring->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(tx_ring->pdev,
+ dma_unmap_single(tx_ring->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -2753,8 +3072,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
if (!rx_ring->desc)
return;
- pci_free_consistent(rx_ring->pdev, rx_ring->size,
- rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -2790,10 +3109,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_single(rx_ring->pdev,
+ dma_unmap_single(rx_ring->dev,
buffer_info->dma,
rx_ring->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
@@ -2802,10 +3121,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
buffer_info->skb = NULL;
}
if (buffer_info->page_dma) {
- pci_unmap_page(rx_ring->pdev,
+ dma_unmap_page(rx_ring->dev,
buffer_info->page_dma,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
}
if (buffer_info->page) {
@@ -2876,7 +3195,7 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 *mta_list;
int i;
@@ -2893,8 +3212,8 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
/* The shared function expects a packed array of only addresses. */
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
igb_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
@@ -3397,8 +3716,6 @@ set_itr_now:
q_vector->itr_val = new_itr;
q_vector->set_itr = 1;
}
-
- return;
}
#define IGB_TX_FLAGS_CSUM 0x00000001
@@ -3493,7 +3810,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags)
{
struct e1000_adv_tx_context_desc *context_desc;
- struct pci_dev *pdev = tx_ring->pdev;
+ struct device *dev = tx_ring->dev;
struct igb_buffer *buffer_info;
u32 info = 0, tu_cmd = 0;
unsigned int i;
@@ -3544,7 +3861,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
break;
default:
if (unlikely(net_ratelimit()))
- dev_warn(&pdev->dev,
+ dev_warn(dev,
"partial checksum but proto=%x!\n",
skb->protocol);
break;
@@ -3578,59 +3895,61 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
unsigned int first)
{
struct igb_buffer *buffer_info;
- struct pci_dev *pdev = tx_ring->pdev;
- unsigned int len = skb_headlen(skb);
+ struct device *dev = tx_ring->dev;
+ unsigned int hlen = skb_headlen(skb);
unsigned int count = 0, i;
unsigned int f;
+ u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
i = tx_ring->next_to_use;
buffer_info = &tx_ring->buffer_info[i];
- BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
- buffer_info->length = len;
+ BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
+ buffer_info->length = hlen;
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_single(pdev, skb->data, len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(dev, skb->data, hlen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, buffer_info->dma))
goto dma_error;
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
- struct skb_frag_struct *frag;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
+ unsigned int len = frag->size;
count++;
i++;
if (i == tx_ring->count)
i = 0;
- frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
-
buffer_info = &tx_ring->buffer_info[i];
BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
buffer_info->length = len;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(dev,
frag->page,
frag->page_offset,
len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, buffer_info->dma))
goto dma_error;
}
tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
+ tx_ring->buffer_info[i].shtx = skb_shinfo(skb)->tx_flags;
+ /* multiply data chunks by size of headers */
+ tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
+ tx_ring->buffer_info[i].gso_segs = gso_segs;
tx_ring->buffer_info[first].next_to_watch = i;
return ++count;
dma_error:
- dev_err(&pdev->dev, "TX DMA map failed\n");
+ dev_err(dev, "TX DMA map failed\n");
/* clear timestamp and dma mappings for failed buffer_info mapping */
buffer_info->dma = 0;
@@ -3868,6 +4187,8 @@ static void igb_reset_task(struct work_struct *work)
struct igb_adapter *adapter;
adapter = container_of(work, struct igb_adapter, reset_task);
+ igb_dump(adapter);
+ netdev_err(adapter->netdev, "Reset adapter\n");
igb_reinit_locked(adapter);
}
@@ -3920,6 +4241,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
* i.e. RXBUFFER_2048 --> size-4096 slab
*/
+ if (adapter->hw.mac.type == e1000_82580)
+ max_frame += IGB_TS_HDR_LEN;
+
if (max_frame <= IGB_RXBUFFER_1024)
rx_buffer_len = IGB_RXBUFFER_1024;
else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
@@ -3927,6 +4251,14 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
else
rx_buffer_len = IGB_RXBUFFER_128;
+ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
+ (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
+ rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;
+
+ if ((adapter->hw.mac.type == e1000_82580) &&
+ (rx_buffer_len == IGB_RXBUFFER_128))
+ rx_buffer_len += IGB_RXBUFFER_64;
+
if (netif_running(netdev))
igb_down(adapter);
@@ -4955,22 +5287,21 @@ static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
/**
* igb_tx_hwtstamp - utility function which checks for TX time stamp
* @q_vector: pointer to q_vector containing needed info
- * @skb: packet that was just sent
+ * @buffer: pointer to igb_buffer structure
*
* If we were asked to do hardware stamping and such a time stamp is
* available, then it must have been for this skb here because we only
* allow only one such packet into the queue.
*/
-static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
+static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
{
struct igb_adapter *adapter = q_vector->adapter;
- union skb_shared_tx *shtx = skb_tx(skb);
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval;
/* if skb does not support hw timestamp or TX stamp not valid exit */
- if (likely(!shtx->hardware) ||
+ if (likely(!buffer_info->shtx.hardware) ||
!(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
return;
@@ -4978,7 +5309,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
regval |= (u64)rd32(E1000_TXSTMPH) << 32;
igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
- skb_tstamp_tx(skb, &shhwtstamps);
+ skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
}
/**
@@ -4993,7 +5324,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
struct net_device *netdev = tx_ring->netdev;
struct e1000_hw *hw = &adapter->hw;
struct igb_buffer *buffer_info;
- struct sk_buff *skb;
union e1000_adv_tx_desc *tx_desc, *eop_desc;
unsigned int total_bytes = 0, total_packets = 0;
unsigned int i, eop, count = 0;
@@ -5009,19 +5339,12 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
- skb = buffer_info->skb;
- if (skb) {
- unsigned int segs, bytecount;
+ if (buffer_info->skb) {
+ total_bytes += buffer_info->bytecount;
/* gso_segs is currently only valid for tcp */
- segs = buffer_info->gso_segs;
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
- total_packets += segs;
- total_bytes += bytecount;
-
- igb_tx_hwtstamp(q_vector, skb);
+ total_packets += buffer_info->gso_segs;
+ igb_tx_hwtstamp(q_vector, buffer_info);
}
igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
@@ -5061,7 +5384,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
!(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
- dev_err(&tx_ring->pdev->dev,
+ dev_err(tx_ring->dev,
"Detected Tx Unit Hang\n"
" Tx Queue <%d>\n"
" TDH <%x>\n"
@@ -5140,10 +5463,10 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
+ dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
}
-static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
+static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
struct sk_buff *skb)
{
struct igb_adapter *adapter = q_vector->adapter;
@@ -5161,13 +5484,18 @@ static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
* If nothing went wrong, then it should have a skb_shared_tx that we
* can turn into a skb_shared_hwtstamps.
*/
- if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
- return;
- if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
- return;
+ if (staterr & E1000_RXDADV_STAT_TSIP) {
+ u32 *stamp = (u32 *)skb->data;
+ regval = le32_to_cpu(*(stamp + 2));
+ regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
+ skb_pull(skb, IGB_TS_HDR_LEN);
+ } else {
+ if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
- regval = rd32(E1000_RXSTMPL);
- regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+ regval = rd32(E1000_RXSTMPL);
+ regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+ }
igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
@@ -5190,7 +5518,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
{
struct igb_ring *rx_ring = q_vector->rx_ring;
struct net_device *netdev = rx_ring->netdev;
- struct pci_dev *pdev = rx_ring->pdev;
+ struct device *dev = rx_ring->dev;
union e1000_adv_rx_desc *rx_desc , *next_rxd;
struct igb_buffer *buffer_info , *next_buffer;
struct sk_buff *skb;
@@ -5230,9 +5558,9 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
cleaned_count++;
if (buffer_info->dma) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(dev, buffer_info->dma,
rx_ring->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
skb_put(skb, length);
@@ -5242,11 +5570,11 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
}
if (length) {
- pci_unmap_page(pdev, buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(dev, buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
buffer_info->page,
buffer_info->page_offset,
length);
@@ -5275,7 +5603,8 @@ send_up:
goto next_desc;
}
- igb_rx_hwtstamp(q_vector, staterr, skb);
+ if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
+ igb_rx_hwtstamp(q_vector, staterr, skb);
total_bytes += skb->len;
total_packets++;
@@ -5350,12 +5679,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
buffer_info->page_offset ^= PAGE_SIZE / 2;
}
buffer_info->page_dma =
- pci_map_page(rx_ring->pdev, buffer_info->page,
+ dma_map_page(rx_ring->dev, buffer_info->page,
buffer_info->page_offset,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rx_ring->pdev,
- buffer_info->page_dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ buffer_info->page_dma)) {
buffer_info->page_dma = 0;
rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
@@ -5373,12 +5702,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
buffer_info->skb = skb;
}
if (!buffer_info->dma) {
- buffer_info->dma = pci_map_single(rx_ring->pdev,
+ buffer_info->dma = dma_map_single(rx_ring->dev,
skb->data,
bufsz,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rx_ring->pdev,
- buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ buffer_info->dma)) {
buffer_info->dma = 0;
rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
@@ -5555,6 +5884,16 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
return 0;
}
+ /*
+ * Per-packet timestamping only works if all packets are
+ * timestamped, so enable timestamping in all packets as
+ * long as one rx filter was configured.
+ */
+ if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
+ tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ }
+
/* enable/disable TX */
regval = rd32(E1000_TSYNCTXCTL);
regval &= ~E1000_TSYNCTXCTL_ENABLED;
@@ -6131,19 +6470,25 @@ static void igb_vmm_control(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 reg;
- /* replication is not supported for 82575 */
- if (hw->mac.type == e1000_82575)
+ switch (hw->mac.type) {
+ case e1000_82575:
+ default:
+ /* replication is not supported for 82575 */
return;
-
- /* enable replication vlan tag stripping */
- reg = rd32(E1000_RPLOLR);
- reg |= E1000_RPLOLR_STRVLAN;
- wr32(E1000_RPLOLR, reg);
-
- /* notify HW that the MAC is adding vlan tags */
- reg = rd32(E1000_DTXCTL);
- reg |= E1000_DTXCTL_VLAN_ADDED;
- wr32(E1000_DTXCTL, reg);
+ case e1000_82576:
+ /* notify HW that the MAC is adding vlan tags */
+ reg = rd32(E1000_DTXCTL);
+ reg |= E1000_DTXCTL_VLAN_ADDED;
+ wr32(E1000_DTXCTL, reg);
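+ /* fall through */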
+ case e1000_82580:
+ /* enable replication vlan tag stripping */
+ reg = rd32(E1000_RPLOLR);
+ reg |= E1000_RPLOLR_STRVLAN;
+ wr32(E1000_RPLOLR, reg);
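+ /* fall through */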
+ case e1000_i350:
+ /* none of the above registers are supported by i350 */
+ break;
+ }
if (adapter->vfs_allocated_count) {
igb_vmdq_set_loopback_pf(hw, true);
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 8afff07..103b3aa 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -390,8 +390,6 @@ static void igbvf_get_wol(struct net_device *netdev,
{
wol->supported = 0;
wol->wolopts = 0;
-
- return;
}
static int igbvf_set_wol(struct net_device *netdev,
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index f16e981..5e2b2a8 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -165,10 +165,10 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
buffer_info->page_offset ^= PAGE_SIZE / 2;
}
buffer_info->page_dma =
- pci_map_page(pdev, buffer_info->page,
+ dma_map_page(&pdev->dev, buffer_info->page,
buffer_info->page_offset,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
if (!buffer_info->skb) {
@@ -179,9 +179,9 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
}
buffer_info->skb = skb;
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
bufsz,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -269,28 +269,28 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
prefetch(skb->data - NET_IP_ALIGN);
buffer_info->skb = NULL;
if (!adapter->rx_ps_hdr_size) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
skb_put(skb, length);
goto send_up;
}
if (!skb_shinfo(skb)->nr_frags) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_hdr_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_put(skb, hlen);
}
if (length) {
- pci_unmap_page(pdev, buffer_info->page_dma,
+ dma_unmap_page(&pdev->dev, buffer_info->page_dma,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
buffer_info->page,
buffer_info->page_offset,
length);
@@ -370,15 +370,15 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev,
+ dma_unmap_single(&adapter->pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -439,8 +439,8 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -481,8 +481,8 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc)
goto err;
@@ -550,7 +550,8 @@ void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
vfree(tx_ring->buffer_info);
tx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -575,13 +576,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
if (adapter->rx_ps_hdr_size){
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_hdr_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
} else {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
buffer_info->dma = 0;
}
@@ -593,9 +594,10 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
if (buffer_info->page) {
if (buffer_info->page_dma)
- pci_unmap_page(pdev, buffer_info->page_dma,
+ dma_unmap_page(&pdev->dev,
+ buffer_info->page_dma,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
put_page(buffer_info->page);
buffer_info->page = NULL;
buffer_info->page_dma = 0;
@@ -1399,7 +1401,7 @@ static void igbvf_set_multi(struct net_device *netdev)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 *mta_list = NULL;
int i;
@@ -1414,8 +1416,8 @@ static void igbvf_set_multi(struct net_device *netdev)
/* prepare a packed array of only addresses. */
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
kfree(mta_list);
@@ -2105,9 +2107,9 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = false;
- buffer_info->dma = pci_map_single(pdev, skb->data, len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
@@ -2128,12 +2130,12 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(&pdev->dev,
frag->page,
frag->page_offset,
len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
}
@@ -2645,16 +2647,16 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 8f6197d..e3b5e94 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1503,7 +1503,6 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
BARRIER();
- dev->trans_start = jiffies;
ip->tx_skbs[produce] = skb; /* Remember skb */
produce = (produce + 1) & 127;
ip->tx_pi = produce;
@@ -1665,7 +1664,7 @@ static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static void ioc3_set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
u64 ehar = 0;
@@ -1689,8 +1688,8 @@ static void ioc3_set_multicast_list(struct net_device *dev)
ip->ehar_h = 0xffffffff;
ip->ehar_l = 0xffffffff;
} else {
- netdev_for_each_mc_addr(dmi, dev) {
- char *addr = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ char *addr = ha->addr;
if (!(*addr & 1))
continue;
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 639bf9f..72e3d2d 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -570,7 +570,7 @@ static int ipg_config_autoneg(struct net_device *dev)
static void ipg_nic_set_multicast_list(struct net_device *dev)
{
void __iomem *ioaddr = ipg_ioaddr(dev);
- struct dev_mc_list *mc_list_ptr;
+ struct netdev_hw_addr *ha;
unsigned int hashindex;
u32 hashtable[2];
u8 receivemode;
@@ -609,9 +609,9 @@ static void ipg_nic_set_multicast_list(struct net_device *dev)
hashtable[1] = 0x00000000;
/* Cycle through all multicast addresses to filter. */
- netdev_for_each_mc_addr(mc_list_ptr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* Calculate CRC result for each multicast address. */
- hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr,
+ hashindex = crc32_le(0xffffffff, ha->addr,
ETH_ALEN);
/* Use only the least significant 6 bits. */
@@ -1548,8 +1548,6 @@ static void ipg_reset_after_host_error(struct work_struct *work)
container_of(work, struct ipg_nic_private, task.work);
struct net_device *dev = sp->dev;
- IPG_DDEBUG_MSG("DMACtrl = %8.8x\n", ioread32(sp->ioaddr + IPG_DMACTRL));
-
/*
* Acknowledge HostError interrupt by resetting
* IPG DMA and HOST.
@@ -1826,9 +1824,6 @@ static int ipg_nic_stop(struct net_device *dev)
netif_stop_queue(dev);
- IPG_DDEBUG_MSG("RFDlistendCount = %i\n", sp->RFDlistendCount);
- IPG_DDEBUG_MSG("RFDListCheckedCount = %i\n", sp->rxdCheckedCount);
- IPG_DDEBUG_MSG("EmptyRFDListCount = %i\n", sp->EmptyRFDListCount);
IPG_DUMPTFDLIST(dev);
do {
diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h
index dfc2541..6ce0273 100644
--- a/drivers/net/ipg.h
+++ b/drivers/net/ipg.h
@@ -29,7 +29,7 @@
/* GMII based PHY IDs */
#define NS 0x2000
#define MARVELL 0x0141
-#define ICPLUS_PHY 0x243
+#define ICPLUS_PHY 0x243
/* NIC Physical Layer Device MII register fields. */
#define MII_PHY_SELECTOR_IEEE8023 0x0001
@@ -96,31 +96,31 @@ enum ipg_regs {
};
/* Ethernet MIB statistic register offsets. */
-#define IPG_OCTETRCVOK 0xA8
+#define IPG_OCTETRCVOK 0xA8
#define IPG_MCSTOCTETRCVDOK 0xAC
#define IPG_BCSTOCTETRCVOK 0xB0
#define IPG_FRAMESRCVDOK 0xB4
#define IPG_MCSTFRAMESRCVDOK 0xB8
#define IPG_BCSTFRAMESRCVDOK 0xBE
#define IPG_MACCONTROLFRAMESRCVD 0xC6
-#define IPG_FRAMETOOLONGERRRORS 0xC8
-#define IPG_INRANGELENGTHERRORS 0xCA
-#define IPG_FRAMECHECKSEQERRORS 0xCC
-#define IPG_FRAMESLOSTRXERRORS 0xCE
-#define IPG_OCTETXMTOK 0xD0
+#define IPG_FRAMETOOLONGERRRORS 0xC8
+#define IPG_INRANGELENGTHERRORS 0xCA
+#define IPG_FRAMECHECKSEQERRORS 0xCC
+#define IPG_FRAMESLOSTRXERRORS 0xCE
+#define IPG_OCTETXMTOK 0xD0
#define IPG_MCSTOCTETXMTOK 0xD4
#define IPG_BCSTOCTETXMTOK 0xD8
#define IPG_FRAMESXMTDOK 0xDC
#define IPG_MCSTFRAMESXMTDOK 0xE0
-#define IPG_FRAMESWDEFERREDXMT 0xE4
+#define IPG_FRAMESWDEFERREDXMT 0xE4
#define IPG_LATECOLLISIONS 0xE8
#define IPG_MULTICOLFRAMES 0xEC
#define IPG_SINGLECOLFRAMES 0xF0
#define IPG_BCSTFRAMESXMTDOK 0xF6
-#define IPG_CARRIERSENSEERRORS 0xF8
+#define IPG_CARRIERSENSEERRORS 0xF8
#define IPG_MACCONTROLFRAMESXMTDOK 0xFA
-#define IPG_FRAMESABORTXSCOLLS 0xFC
-#define IPG_FRAMESWEXDEFERRAL 0xFE
+#define IPG_FRAMESABORTXSCOLLS 0xFC
+#define IPG_FRAMESWEXDEFERRAL 0xFE
/* RMON statistic register offsets. */
#define IPG_ETHERSTATSCOLLISIONS 0x100
@@ -134,8 +134,8 @@ enum ipg_regs {
#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120
#define IPG_ETHERSTATSCRCALIGNERRORS 0x124
#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128
-#define IPG_ETHERSTATSFRAGMENTS 0x12C
-#define IPG_ETHERSTATSJABBERS 0x130
+#define IPG_ETHERSTATSFRAGMENTS 0x12C
+#define IPG_ETHERSTATSJABBERS 0x130
#define IPG_ETHERSTATSOCTETS 0x134
#define IPG_ETHERSTATSPKTS 0x138
#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C
@@ -154,10 +154,10 @@ enum ipg_regs {
#define IPG_ETHERSTATSDROPEVENTS 0xCE
/* Serial EEPROM offsets */
-#define IPG_EEPROM_CONFIGPARAM 0x00
+#define IPG_EEPROM_CONFIGPARAM 0x00
#define IPG_EEPROM_ASICCTRL 0x01
#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02
-#define IPG_EEPROM_SUBSYSTEMID 0x03
+#define IPG_EEPROM_SUBSYSTEMID 0x03
#define IPG_EEPROM_STATIONADDRESS0 0x10
#define IPG_EEPROM_STATIONADDRESS1 0x11
#define IPG_EEPROM_STATIONADDRESS2 0x12
@@ -168,16 +168,16 @@ enum ipg_regs {
/* IOBaseAddress */
#define IPG_PIB_RSVD_MASK 0xFFFFFE01
-#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
-#define IPG_PIB_IOBASEADDRIND 0x00000001
+#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
+#define IPG_PIB_IOBASEADDRIND 0x00000001
/* MemBaseAddress */
#define IPG_PMB_RSVD_MASK 0xFFFFFE07
-#define IPG_PMB_MEMBASEADDRIND 0x00000001
+#define IPG_PMB_MEMBASEADDRIND 0x00000001
#define IPG_PMB_MEMMAPTYPE 0x00000006
#define IPG_PMB_MEMMAPTYPE0 0x00000002
#define IPG_PMB_MEMMAPTYPE1 0x00000004
-#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
+#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
/* ConfigStatus */
#define IPG_CS_RSVD_MASK 0xFFB0
@@ -196,20 +196,20 @@ enum ipg_regs {
/* TFDList, TFC */
#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF
-#define IPG_TFC_FRAMEID 0x000000000000FFFF
+#define IPG_TFC_FRAMEID 0x000000000000FFFF
#define IPG_TFC_WORDALIGN 0x0000000000030000
#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000
-#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000
+#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000
#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000
#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000
#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000
#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000
#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000
#define IPG_TFC_TXINDICATE 0x0000000000400000
-#define IPG_TFC_TXDMAINDICATE 0x0000000000800000
+#define IPG_TFC_TXDMAINDICATE 0x0000000000800000
#define IPG_TFC_FRAGCOUNT 0x000000000F000000
-#define IPG_TFC_VLANTAGINSERT 0x0000000010000000
-#define IPG_TFC_TFDDONE 0x0000000080000000
+#define IPG_TFC_VLANTAGINSERT 0x0000000010000000
+#define IPG_TFC_TFDDONE 0x0000000080000000
#define IPG_TFC_VID 0x00000FFF00000000
#define IPG_TFC_CFI 0x0000100000000000
#define IPG_TFC_USERPRIORITY 0x0000E00000000000
@@ -217,35 +217,35 @@ enum ipg_regs {
/* TFDList, FragInfo */
#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF
-#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL
+#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL
/* RFD data structure masks. */
/* RFDList, RFS */
#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF
#define IPG_RFS_RXFRAMELEN 0x000000000000FFFF
-#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000
+#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000
#define IPG_RFS_RXRUNTFRAME 0x0000000000020000
#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000
#define IPG_RFS_RXFCSERROR 0x0000000000080000
#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000
-#define IPG_RFS_RXLENGTHERROR 0x0000000000200000
+#define IPG_RFS_RXLENGTHERROR 0x0000000000200000
#define IPG_RFS_VLANDETECTED 0x0000000000400000
#define IPG_RFS_TCPDETECTED 0x0000000000800000
#define IPG_RFS_TCPERROR 0x0000000001000000
#define IPG_RFS_UDPDETECTED 0x0000000002000000
#define IPG_RFS_UDPERROR 0x0000000004000000
#define IPG_RFS_IPDETECTED 0x0000000008000000
-#define IPG_RFS_IPERROR 0x0000000010000000
+#define IPG_RFS_IPERROR 0x0000000010000000
#define IPG_RFS_FRAMESTART 0x0000000020000000
#define IPG_RFS_FRAMEEND 0x0000000040000000
-#define IPG_RFS_RFDDONE 0x0000000080000000
+#define IPG_RFS_RFDDONE 0x0000000080000000
#define IPG_RFS_TCI 0x0000FFFF00000000
/* RFDList, FragInfo */
#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF
-#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL
+#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL
/* I/O Register masks. */
@@ -254,37 +254,37 @@ enum ipg_regs {
/* Statistics Mask */
#define IPG_SM_ALL 0x0FFFFFFF
-#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
-#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
-#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
+#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
+#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
+#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
#define IPG_SM_RXJUMBOFRAMES 0x00000008
#define IPG_SM_TCPCHECKSUMERRORS 0x00000010
-#define IPG_SM_IPCHECKSUMERRORS 0x00000020
+#define IPG_SM_IPCHECKSUMERRORS 0x00000020
#define IPG_SM_UDPCHECKSUMERRORS 0x00000040
#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080
#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100
#define IPG_SM_INRANGELENGTHERRORS 0x00000200
#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400
#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800
-#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
-#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
-#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
+#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
+#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
+#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000
-#define IPG_SM_LATECOLLISIONS 0x00010000
-#define IPG_SM_MULTICOLFRAMES 0x00020000
-#define IPG_SM_SINGLECOLFRAMES 0x00040000
+#define IPG_SM_LATECOLLISIONS 0x00010000
+#define IPG_SM_MULTICOLFRAMES 0x00020000
+#define IPG_SM_SINGLECOLFRAMES 0x00040000
#define IPG_SM_TXJUMBOFRAMES 0x00080000
#define IPG_SM_CARRIERSENSEERRORS 0x00100000
#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000
#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000
-#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
+#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
/* Countdown */
#define IPG_CD_RSVD_MASK 0x0700FFFF
#define IPG_CD_COUNT 0x0000FFFF
-#define IPG_CD_COUNTDOWNSPEED 0x01000000
+#define IPG_CD_COUNTDOWNSPEED 0x01000000
#define IPG_CD_COUNTDOWNMODE 0x02000000
-#define IPG_CD_COUNTINTENABLED 0x04000000
+#define IPG_CD_COUNTINTENABLED 0x04000000
/* TxDMABurstThresh */
#define IPG_TB_RSVD_MASK 0xFF
@@ -653,15 +653,28 @@ enum ipg_regs {
* Miscellaneous macros.
*/
-/* Marco for printing debug statements. */
+/* Macros for printing debug statements. */
#ifdef IPG_DEBUG
-# define IPG_DEBUG_MSG(args...)
-# define IPG_DDEBUG_MSG(args...) printk(KERN_DEBUG "IPG: " args)
+# define IPG_DEBUG_MSG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args); \
+} while (0)
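+/*
+ * Note: even with IPG_DEBUG defined, IPG_DEBUG_MSG still expands to
+ * nothing at run time (as before); the "if (0)" form only keeps
+ * printf-style format checking of the arguments.
+ */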
+# define IPG_DDEBUG_MSG(fmt, args...) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args)
# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args)
# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args)
#else
-# define IPG_DEBUG_MSG(args...)
-# define IPG_DDEBUG_MSG(args...)
+# define IPG_DEBUG_MSG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args); \
+} while (0)
+# define IPG_DDEBUG_MSG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args); \
+} while (0)
# define IPG_DUMPRFDLIST(args)
# define IPG_DUMPTFDLIST(args)
#endif
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index af10e97..25bb2a0 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -397,5 +397,11 @@ config MCS_FIR
To compile it as a module, choose M here: the module will be called
mcs7780.
+config SH_IRDA
+ tristate "SuperH IrDA driver"
+ depends on IRDA && ARCH_SHMOBILE
+ help
+ Say Y here if you want to enable SuperH IrDA devices.
+
endmenu
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index e030d47..dfc6453 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_VIA_FIR) += via-ircc.o
obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o
obj-$(CONFIG_MCS_FIR) += mcs7780.o
obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
+obj-$(CONFIG_SH_IRDA) += sh_irda.o
# SIR drivers
obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 28992c8..a3cb109 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -753,18 +753,18 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
- IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******** \n", __func__);
+ IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ********\n", __func__);
}
if (ali_ircc_dma_receive_complete(self))
{
- IRDA_DEBUG(1, "%s(), ******* receive complete ******** \n", __func__);
+ IRDA_DEBUG(1, "%s(), ******* receive complete ********\n", __func__);
self->ier = IER_EOM;
}
else
{
- IRDA_DEBUG(1, "%s(), ******* Not receive complete ******** \n", __func__);
+ IRDA_DEBUG(1, "%s(), ******* Not receive complete ********\n", __func__);
self->ier = IER_EOM | IER_TIMER;
}
@@ -777,7 +777,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
- IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******* \n", __func__);
+ IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE *******\n", __func__);
}
/* Disable Timer */
switch_bank(iobase, BANK1);
@@ -942,7 +942,7 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
// benjamin 2000/11/10 06:32PM
if (self->io.speed > 115200)
{
- IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT \n", __func__ );
+ IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", __func__ );
self->ier = IER_EOM;
// SetCOMInterrupts(self, TRUE);
@@ -970,7 +970,7 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
- IRDA_DEBUG(2, "%s(), setting speed = %d \n", __func__ , baud);
+ IRDA_DEBUG(2, "%s(), setting speed = %d\n", __func__ , baud);
/* This function *must* be called with irq off and spin-lock.
* - Jean II */
@@ -1500,7 +1500,7 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
diff = self->now.tv_usec - self->stamp.tv_usec;
/* self->stamp is set from ali_ircc_dma_receive_complete() */
- IRDA_DEBUG(1, "%s(), ******* diff = %d ******* \n", __func__ , diff);
+ IRDA_DEBUG(1, "%s(), ******* diff = %d *******\n", __func__ , diff);
if (diff < 0)
diff += 1000000;
@@ -1641,7 +1641,7 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
tmp = inb(iobase+FIR_LCR_B);
tmp &= ~0x20; // Disable SIP
outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
- IRDA_DEBUG(1, "%s(), ******* Change to TX mode: FIR_LCR_B = 0x%x ******* \n", __func__ , inb(iobase+FIR_LCR_B));
+ IRDA_DEBUG(1, "%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
outb(0, iobase+FIR_LSR);
@@ -1768,7 +1768,7 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
//switch_bank(iobase, BANK0);
tmp = inb(iobase+FIR_LCR_B);
outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
- IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x *** \n", __func__ , inb(iobase+FIR_LCR_B));
+ IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
/* Set Rx Threshold */
switch_bank(iobase, BANK1);
@@ -1840,7 +1840,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
/* Check for errors */
if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
{
- IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* RX Errors ************\n", __func__ );
/* Skip frame */
self->netdev->stats.rx_errors++;
@@ -1850,29 +1850,29 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
if (status & LSR_FIFO_UR)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************\n", __func__ );
}
if (status & LSR_FRAME_ERROR)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************\n", __func__ );
}
if (status & LSR_CRC_ERROR)
{
self->netdev->stats.rx_crc_errors++;
- IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* CRC Errors ************\n", __func__ );
}
if(self->rcvFramesOverflow)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************\n", __func__ );
}
if(len == 0)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 *********\n", __func__ );
}
}
else
@@ -1884,7 +1884,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
val = inb(iobase+FIR_BSR);
if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
{
- IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************ \n", __func__ );
+ IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", __func__ );
/* Put this entry back in fifo */
st_fifo->head--;
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index b5cbd39..a3d696a 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -546,7 +546,6 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index b7e6625..48bd5ec 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1002,8 +1002,6 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
toshoboe_checkstuck (self);
- dev->trans_start = jiffies;
-
/* Check if we need to change the speed */
/* But not now. Wait after transmission if mtt not required */
speed=irda_get_next_speed(skb);
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 2c9b3af..4441fa3 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -839,7 +839,7 @@ static void irda_usb_receive(struct urb *urb)
/* Usually precursor to a hot-unplug on OHCI. */
default:
self->netdev->stats.rx_errors++;
- IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __func__, urb->status, urb->transfer_flags);
+ IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
break;
}
/* If we received an error, we don't want to resubmit the
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index c0e0bb9..5b1036a 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -434,8 +434,6 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
mcs->netdev->stats.rx_packets++;
mcs->netdev->stats.rx_bytes += new_len;
-
- return;
}
/* Unwrap received packets at FIR speed. A 32 bit crc_ccitt checksum is
@@ -487,8 +485,6 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
mcs->netdev->stats.rx_packets++;
mcs->netdev->stats.rx_bytes += new_len;
-
- return;
}
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 1a54f6b..c192c31 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -556,7 +556,6 @@ static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
}
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 1dcdce0..da27050 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -715,8 +715,6 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE;
}
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
new file mode 100644
index 0000000..9a828b0
--- /dev/null
+++ b/drivers/net/irda/sh_irda.c
@@ -0,0 +1,865 @@
+/*
+ * SuperH IrDA Driver
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on sh_sir.c
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * CAUTION
+ *
+ * This driver is very simple.
+ * So it does not yet support:
+ * - MIR/FIR support
+ * - DMA transfer support
+ * - FIFO mode support
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#define DRIVER_NAME "sh_irda"
+
+#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
+#define __IRDARAM_LEN 0x13FF
+#else
+#define __IRDARAM_LEN 0x1039
+#endif
+
+#define IRTMR 0x1F00 /* Transfer mode */
+#define IRCFR 0x1F02 /* Configuration */
+#define IRCTR 0x1F04 /* IR control */
+#define IRTFLR 0x1F20 /* Transmit frame length */
+#define IRTCTR 0x1F22 /* Transmit control */
+#define IRRFLR 0x1F40 /* Receive frame length */
+#define IRRCTR 0x1F42 /* Receive control */
+#define SIRISR 0x1F60 /* SIR-UART mode interrupt source */
+#define SIRIMR 0x1F62 /* SIR-UART mode interrupt mask */
+#define SIRICR 0x1F64 /* SIR-UART mode interrupt clear */
+#define SIRBCR 0x1F68 /* SIR-UART mode baud rate count */
+#define MFIRISR 0x1F70 /* MIR/FIR mode interrupt source */
+#define MFIRIMR 0x1F72 /* MIR/FIR mode interrupt mask */
+#define MFIRICR 0x1F74 /* MIR/FIR mode interrupt clear */
+#define CRCCTR 0x1F80 /* CRC engine control */
+#define CRCIR 0x1F86 /* CRC engine input data */
+#define CRCCR 0x1F8A /* CRC engine calculation */
+#define CRCOR 0x1F8E /* CRC engine output data */
+#define FIFOCP 0x1FC0 /* FIFO current pointer */
+#define FIFOFP 0x1FC2 /* FIFO follow pointer */
+#define FIFORSMSK 0x1FC4 /* FIFO receive status mask */
+#define FIFORSOR 0x1FC6 /* FIFO receive status OR */
+#define FIFOSEL 0x1FC8 /* FIFO select */
+#define FIFORS 0x1FCA /* FIFO receive status */
+#define FIFORFL 0x1FCC /* FIFO receive frame length */
+#define FIFORAMCP 0x1FCE /* FIFO RAM current pointer */
+#define FIFORAMFP 0x1FD0 /* FIFO RAM follow pointer */
+#define BIFCTL 0x1FD2 /* BUS interface control */
+#define IRDARAM 0x0000 /* IrDA buffer RAM */
+#define IRDARAM_LEN __IRDARAM_LEN /* - 8/16/32 (read-only for 32) */
+
+/* IRTMR */
+#define TMD_MASK (0x3 << 14) /* Transfer Mode */
+#define TMD_SIR (0x0 << 14)
+#define TMD_MIR (0x3 << 14)
+#define TMD_FIR (0x2 << 14)
+
+#define FIFORIM (1 << 8) /* FIFO receive interrupt mask */
+#define MIM (1 << 4) /* MIR/FIR Interrupt Mask */
+#define SIM (1 << 0) /* SIR Interrupt Mask */
+#define xIM_MASK (FIFORIM | MIM | SIM)
+
+/* IRCFR */
+#define RTO_SHIFT 8 /* shift for Receive Timeout */
+#define RTO (0x3 << RTO_SHIFT)
+
+/* IRTCTR */
+#define ARMOD (1 << 15) /* Auto-Receive Mode */
+#define TE (1 << 0) /* Transmit Enable */
+
+/* IRRFLR */
+#define RFL_MASK (0x1FFF) /* mask for Receive Frame Length */
+
+/* IRRCTR */
+#define RE (1 << 0) /* Receive Enable */
+
+/*
+ * SIRISR, SIRIMR, SIRICR,
+ * MFIRISR, MFIRIMR, MFIRICR
+ */
+#define FRE (1 << 15) /* Frame Receive End */
+#define TROV (1 << 11) /* Transfer Area Overflow */
+#define xIR_9 (1 << 9)
+#define TOT xIR_9 /* for SIR Timeout */
+#define ABTD xIR_9 /* for MIR/FIR Abort Detection */
+#define xIR_8 (1 << 8)
+#define FER xIR_8 /* for SIR Framing Error */
+#define CRCER xIR_8 /* for MIR/FIR CRC error */
+#define FTE (1 << 7) /* Frame Transmit End */
+#define xIR_MASK (FRE | TROV | xIR_9 | xIR_8 | FTE)
+
+/* SIRBCR */
+#define BRC_MASK (0x3F) /* mask for Baud Rate Count */
+
+/* CRCCTR */
+#define CRC_RST (1 << 15) /* CRC Engine Reset */
+#define CRC_CT_MASK 0x0FFF /* mask for CRC Engine Input Data Count */
+
+/* CRCIR */
+#define CRC_IN_MASK 0x0FFF /* mask for CRC Engine Input Data */
+
+/************************************************************************
+
+
+ enum / structure
+
+
+************************************************************************/
+enum sh_irda_mode {
+ SH_IRDA_NONE = 0,
+ SH_IRDA_SIR,
+ SH_IRDA_MIR,
+ SH_IRDA_FIR,
+};
+
+struct sh_irda_self;
+struct sh_irda_xir_func {
+ int (*xir_fre) (struct sh_irda_self *self);
+ int (*xir_trov) (struct sh_irda_self *self);
+ int (*xir_9) (struct sh_irda_self *self);
+ int (*xir_8) (struct sh_irda_self *self);
+ int (*xir_fte) (struct sh_irda_self *self);
+};
+
+struct sh_irda_self {
+ void __iomem *membase;
+ unsigned int irq;
+ struct clk *clk;
+
+ struct net_device *ndev;
+
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+
+ enum sh_irda_mode mode;
+ spinlock_t lock;
+
+ struct sh_irda_xir_func *xir_func;
+};
+
+/************************************************************************
+
+
+ common function
+
+
+************************************************************************/
+static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&self->lock, flags);
+ iowrite16(data, self->membase + offset);
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
+{
+ unsigned long flags;
+ u16 ret;
+
+ spin_lock_irqsave(&self->lock, flags);
+ ret = ioread16(self->membase + offset);
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ return ret;
+}
+
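+/*
+ * Read-modify-write helper: only the bits selected by mask are
+ * changed, e.g. sh_irda_update_bits(self, IRRCTR, RE, RE) enables
+ * the receiver.
+ */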
+static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
+ u16 mask, u16 data)
+{
+ unsigned long flags;
+ u16 old, new;
+
+ spin_lock_irqsave(&self->lock, flags);
+ old = ioread16(self->membase + offset);
+ new = (old & ~mask) | data;
+ if (old != new)
+ iowrite16(new, self->membase + offset);
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+/************************************************************************
+
+
+ mode function
+
+
+************************************************************************/
+/*=====================================
+ *
+ * common
+ *
+ *=====================================*/
+static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
+{
+ struct device *dev = &self->ndev->dev;
+
+ sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
+ dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
+}
+
+static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
+{
+ struct device *dev = &self->ndev->dev;
+
+ if (SH_IRDA_SIR != self->mode)
+ interval = 0;
+
+ if (interval < 0 || interval > 2) {
+ dev_err(dev, "unsupported timeout interval\n");
+ return -EINVAL;
+ }
+
+ sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
+ return 0;
+}
+
+static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
+{
+ struct device *dev = &self->ndev->dev;
+ u16 val;
+
+ if (baudrate < 0)
+ return 0;
+
+ if (SH_IRDA_SIR != self->mode) {
+ dev_err(dev, "it is not SIR mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Baud rate (bits/s) =
+ * (48 MHz / 26) / (baud rate counter value + 1) x 16
+ */
+ val = (48000000 / 26 / 16 / baudrate) - 1;
+ dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);
+
+ sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);
+
+ return 0;
+}
+
+static int xir_get_rcv_length(struct sh_irda_self *self)
+{
+ return RFL_MASK & sh_irda_read(self, IRRFLR);
+}
+
+/*=====================================
+ *
+ * NONE MODE
+ *
+ *=====================================*/
+static int xir_fre(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: frame recv\n");
+ return 0;
+}
+
+static int xir_trov(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: buffer ram over\n");
+ return 0;
+}
+
+static int xir_9(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: time over\n");
+ return 0;
+}
+
+static int xir_8(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: framing error\n");
+ return 0;
+}
+
+static int xir_fte(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: frame transmit end\n");
+ return 0;
+}
+
+static struct sh_irda_xir_func xir_func = {
+ .xir_fre = xir_fre,
+ .xir_trov = xir_trov,
+ .xir_9 = xir_9,
+ .xir_8 = xir_8,
+ .xir_fte = xir_fte,
+};
+
+/*=====================================
+ *
+ * MIR/FIR MODE
+ *
+ * MIR/FIR are not supported now
+ *=====================================*/
+static struct sh_irda_xir_func mfir_func = {
+ .xir_fre = xir_fre,
+ .xir_trov = xir_trov,
+ .xir_9 = xir_9,
+ .xir_8 = xir_8,
+ .xir_fte = xir_fte,
+};
+
+/*=====================================
+ *
+ * SIR MODE
+ *
+ *=====================================*/
+static int sir_fre(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ u16 data16;
+ u8 *data = (u8 *)&data16;
+ int len = xir_get_rcv_length(self);
+ int i, j;
+
+ if (len > IRDARAM_LEN)
+ len = IRDARAM_LEN;
+
+ dev_dbg(dev, "frame recv length = %d\n", len);
+
+ for (i = 0; i < len; i++) {
+ j = i % 2;
+ if (!j)
+ data16 = sh_irda_read(self, IRDARAM + i);
+
+ async_unwrap_char(self->ndev, &self->ndev->stats,
+ &self->rx_buff, data[j]);
+ }
+ self->ndev->last_rx = jiffies;
+
+ sh_irda_rcv_ctrl(self, 1);
+
+ return 0;
+}
+
+static int sir_trov(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+
+ dev_err(dev, "buffer ram over\n");
+ sh_irda_rcv_ctrl(self, 1);
+ return 0;
+}
+
+static int sir_tot(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+
+ dev_err(dev, "time over\n");
+ sh_irda_set_baudrate(self, 9600);
+ sh_irda_rcv_ctrl(self, 1);
+ return 0;
+}
+
+static int sir_fer(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+
+ dev_err(dev, "framing error\n");
+ sh_irda_rcv_ctrl(self, 1);
+ return 0;
+}
+
+static int sir_fte(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+
+ dev_dbg(dev, "frame transmit end\n");
+ netif_wake_queue(self->ndev);
+
+ return 0;
+}
+
+static struct sh_irda_xir_func sir_func = {
+ .xir_fre = sir_fre,
+ .xir_trov = sir_trov,
+ .xir_9 = sir_tot,
+ .xir_8 = sir_fer,
+ .xir_fte = sir_fte,
+};
+
+static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
+{
+ struct device *dev = &self->ndev->dev;
+ struct sh_irda_xir_func *func;
+ const char *name;
+ u16 data;
+
+ switch (mode) {
+ case SH_IRDA_SIR:
+ name = "SIR";
+ data = TMD_SIR;
+ func = &sir_func;
+ break;
+ case SH_IRDA_MIR:
+ name = "MIR";
+ data = TMD_MIR;
+ func = &mfir_func;
+ break;
+ case SH_IRDA_FIR:
+ name = "FIR";
+ data = TMD_FIR;
+ func = &mfir_func;
+ break;
+ default:
+ name = "NONE";
+ data = 0;
+ func = &xir_func;
+ break;
+ }
+
+ self->mode = mode;
+ self->xir_func = func;
+ sh_irda_update_bits(self, IRTMR, TMD_MASK, data);
+
+ dev_dbg(dev, "switch to %s mode", name);
+}
+
+/************************************************************************
+
+
+ irq function
+
+
+************************************************************************/
+static void sh_irda_set_irq_mask(struct sh_irda_self *self)
+{
+ u16 tmr_hole;
+ u16 xir_reg;
+
+ /* set all mask */
+ sh_irda_update_bits(self, IRTMR, xIM_MASK, xIM_MASK);
+ sh_irda_update_bits(self, SIRIMR, xIR_MASK, xIR_MASK);
+ sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);
+
+ /* clear irq */
+ sh_irda_update_bits(self, SIRICR, xIR_MASK, xIR_MASK);
+ sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);
+
+ switch (self->mode) {
+ case SH_IRDA_SIR:
+ tmr_hole = SIM;
+ xir_reg = SIRIMR;
+ break;
+ case SH_IRDA_MIR:
+ case SH_IRDA_FIR:
+ tmr_hole = MIM;
+ xir_reg = MFIRIMR;
+ break;
+ default:
+ tmr_hole = 0;
+ xir_reg = 0;
+ break;
+ }
+
+ /* open mask */
+ if (xir_reg) {
+ sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
+ sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
+ }
+}
+
+static irqreturn_t sh_irda_irq(int irq, void *dev_id)
+{
+ struct sh_irda_self *self = dev_id;
+ struct sh_irda_xir_func *func = self->xir_func;
+ u16 isr = sh_irda_read(self, SIRISR);
+
+ /* clear irq */
+ sh_irda_write(self, SIRICR, isr);
+
+ if (isr & FRE)
+ func->xir_fre(self);
+ if (isr & TROV)
+ func->xir_trov(self);
+ if (isr & xIR_9)
+ func->xir_9(self);
+ if (isr & xIR_8)
+ func->xir_8(self);
+ if (isr & FTE)
+ func->xir_fte(self);
+
+ return IRQ_HANDLED;
+}
+
+/************************************************************************
+
+
+ CRC function
+
+
+************************************************************************/
+static void sh_irda_crc_reset(struct sh_irda_self *self)
+{
+ sh_irda_write(self, CRCCTR, CRC_RST);
+}
+
+static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
+{
+ sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
+}
+
+static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
+{
+ return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
+}
+
+static u16 sh_irda_crc_out(struct sh_irda_self *self)
+{
+ return sh_irda_read(self, CRCOR);
+}
+
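+/*
+ * Known-answer self test: feed a fixed four-word pattern into the
+ * CRC engine and verify that the input counter reads 4 and the
+ * result is 0x51DF before the device is used.
+ */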
+static int sh_irda_crc_init(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ int ret = -EIO;
+ u16 val;
+
+ sh_irda_crc_reset(self);
+
+ sh_irda_crc_add(self, 0xCC);
+ sh_irda_crc_add(self, 0xF5);
+ sh_irda_crc_add(self, 0xF1);
+ sh_irda_crc_add(self, 0xA7);
+
+ val = sh_irda_crc_cnt(self);
+ if (4 != val) {
+ dev_err(dev, "CRC count error %x\n", val);
+ goto crc_init_out;
+ }
+
+ val = sh_irda_crc_out(self);
+ if (0x51DF != val) {
+ dev_err(dev, "CRC result error%x\n", val);
+ goto crc_init_out;
+ }
+
+ ret = 0;
+
+crc_init_out:
+
+ sh_irda_crc_reset(self);
+ return ret;
+}
+
+/************************************************************************
+
+
+ iobuf function
+
+
+************************************************************************/
+static void sh_irda_remove_iobuf(struct sh_irda_self *self)
+{
+ kfree(self->rx_buff.head);
+
+ self->tx_buff.head = NULL;
+ self->tx_buff.data = NULL;
+ self->rx_buff.head = NULL;
+ self->rx_buff.data = NULL;
+}
+
+static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
+{
+ if (self->rx_buff.head ||
+ self->tx_buff.head) {
+ dev_err(&self->ndev->dev, "iobuff has already been allocated\n");
+ return -EINVAL;
+ }
+
+ /* rx_buff */
+ self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
+ if (!self->rx_buff.head)
+ return -ENOMEM;
+
+ self->rx_buff.truesize = rxsize;
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* tx_buff */
+ self->tx_buff.head = self->membase + IRDARAM;
+ self->tx_buff.truesize = IRDARAM_LEN;
+
+ return 0;
+}
+
+/************************************************************************
+
+
+ net_device_ops function
+
+
+************************************************************************/
+static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct sh_irda_self *self = netdev_priv(ndev);
+ struct device *dev = &self->ndev->dev;
+ int speed = irda_get_next_speed(skb);
+ int ret;
+
+ dev_dbg(dev, "hard xmit\n");
+
+ netif_stop_queue(ndev);
+ sh_irda_rcv_ctrl(self, 0);
+
+ ret = sh_irda_set_baudrate(self, speed);
+ if (ret < 0)
+ return ret;
+
+ self->tx_buff.len = 0;
+ if (skb->len) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&self->lock, flags);
+ self->tx_buff.len = async_wrap_skb(skb,
+ self->tx_buff.head,
+ self->tx_buff.truesize);
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ if (self->tx_buff.len > self->tx_buff.truesize)
+ self->tx_buff.len = self->tx_buff.truesize;
+
+ sh_irda_write(self, IRTFLR, self->tx_buff.len);
+ sh_irda_write(self, IRTCTR, ARMOD | TE);
+ }
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
+{
+ /*
+ * FIXME
+ *
+ * This function is required by the IrDA framework,
+ * but there is nothing to do here yet.
+ */
+ return 0;
+}
+
+static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
+{
+ struct sh_irda_self *self = netdev_priv(ndev);
+
+ return &self->ndev->stats;
+}
+
+static int sh_irda_open(struct net_device *ndev)
+{
+ struct sh_irda_self *self = netdev_priv(ndev);
+ int err;
+
+ clk_enable(self->clk);
+ err = sh_irda_crc_init(self);
+ if (err)
+ goto open_err;
+
+ sh_irda_set_mode(self, SH_IRDA_SIR);
+ sh_irda_set_timeout(self, 2);
+ sh_irda_set_baudrate(self, 9600);
+
+ self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
+ if (!self->irlap) {
+ err = -ENODEV;
+ goto open_err;
+ }
+
+ netif_start_queue(ndev);
+ sh_irda_rcv_ctrl(self, 1);
+ sh_irda_set_irq_mask(self);
+
+ dev_info(&ndev->dev, "opened\n");
+
+ return 0;
+
+open_err:
+ clk_disable(self->clk);
+
+ return err;
+}
+
+static int sh_irda_stop(struct net_device *ndev)
+{
+ struct sh_irda_self *self = netdev_priv(ndev);
+
+ /* Stop IrLAP */
+ if (self->irlap) {
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+ }
+
+ netif_stop_queue(ndev);
+
+ dev_info(&ndev->dev, "stoped\n");
+
+ return 0;
+}
+
+static const struct net_device_ops sh_irda_ndo = {
+ .ndo_open = sh_irda_open,
+ .ndo_stop = sh_irda_stop,
+ .ndo_start_xmit = sh_irda_hard_xmit,
+ .ndo_do_ioctl = sh_irda_ioctl,
+ .ndo_get_stats = sh_irda_stats,
+};
+
+/************************************************************************
+
+
+ platform_driver function
+
+
+************************************************************************/
+static int __devinit sh_irda_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct sh_irda_self *self;
+ struct resource *res;
+ char clk_name[8];
+ int irq;
+ int err = -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || irq < 0) {
+ dev_err(&pdev->dev, "Not enough platform resources.\n");
+ goto exit;
+ }
+
+ ndev = alloc_irdadev(sizeof(*self));
+ if (!ndev)
+ goto exit;
+
+ self = netdev_priv(ndev);
+ self->membase = ioremap_nocache(res->start, resource_size(res));
+ if (!self->membase) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "Unable to ioremap.\n");
+ goto err_mem_1;
+ }
+
+ err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
+ if (err)
+ goto err_mem_2;
+
+ snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
+ self->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(self->clk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ goto err_mem_3;
+ }
+
+ irda_init_max_qos_capabilies(&self->qos);
+
+ ndev->netdev_ops = &sh_irda_ndo;
+ ndev->irq = irq;
+
+ self->ndev = ndev;
+ self->qos.baud_rate.bits &= IR_9600; /* FIXME */
+ self->qos.min_turn_time.bits = 1; /* 10 ms or more */
+ spin_lock_init(&self->lock);
+
+ irda_qos_bits_to_value(&self->qos);
+
+ err = register_netdev(ndev);
+ if (err)
+ goto err_mem_4;
+
+ platform_set_drvdata(pdev, ndev);
+
+ if (request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self)) {
+ dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
+ goto err_mem_4;
+ }
+
+ dev_info(&pdev->dev, "SuperH IrDA probed\n");
+
+ goto exit;
+
+err_mem_4:
+ clk_put(self->clk);
+err_mem_3:
+ sh_irda_remove_iobuf(self);
+err_mem_2:
+ iounmap(self->membase);
+err_mem_1:
+ free_netdev(ndev);
+exit:
+ return err;
+}
+
+static int __devexit sh_irda_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct sh_irda_self *self = netdev_priv(ndev);
+
+ if (!self)
+ return 0;
+
+ unregister_netdev(ndev);
+ clk_put(self->clk);
+ sh_irda_remove_iobuf(self);
+ iounmap(self->membase);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sh_irda_driver = {
+ .probe = sh_irda_probe,
+ .remove = __devexit_p(sh_irda_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init sh_irda_init(void)
+{
+ return platform_driver_register(&sh_irda_driver);
+}
+
+static void __exit sh_irda_exit(void)
+{
+ platform_driver_unregister(&sh_irda_driver);
+}
+
+module_init(sh_irda_init);
+module_exit(sh_irda_exit);
+
+MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_DESCRIPTION("SuperH IrDA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 0745581..5c5f99d 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -646,8 +646,10 @@ static int sh_sir_open(struct net_device *ndev)
sh_sir_set_baudrate(self, 9600);
self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
- if (!self->irlap)
+ if (!self->irlap) {
+ err = -ENODEV;
goto open_err;
+ }
/*
* Now enable the interrupt then start the queue
@@ -707,7 +709,6 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
struct sh_sir_self *self;
struct resource *res;
char clk_name[8];
- void __iomem *base;
unsigned int irq;
int err = -ENOMEM;
@@ -722,14 +723,14 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
if (!ndev)
goto exit;
- base = ioremap_nocache(res->start, resource_size(res));
- if (!base) {
+ self = netdev_priv(ndev);
+ self->membase = ioremap_nocache(res->start, resource_size(res));
+ if (!self->membase) {
err = -ENXIO;
dev_err(&pdev->dev, "Unable to ioremap.\n");
goto err_mem_1;
}
- self = netdev_priv(ndev);
err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
if (err)
goto err_mem_2;
@@ -746,7 +747,6 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
ndev->netdev_ops = &sh_sir_ndo;
ndev->irq = irq;
- self->membase = base;
self->ndev = ndev;
self->qos.baud_rate.bits &= IR_9600; /* FIXME */
self->qos.min_turn_time.bits = 1; /* 10 ms or more */
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index de91cd1..1b051da 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -655,7 +655,6 @@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
if (likely(actual > 0)) {
dev->tx_skb = skb;
- ndev->trans_start = jiffies;
dev->tx_buff.data += actual;
dev->tx_buff.len -= actual;
}
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 6af84d8..d67e484 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -868,7 +868,7 @@ static void smsc_ircc_timeout(struct net_device *dev)
spin_lock_irqsave(&self->lock, flags);
smsc_ircc_sir_start(self);
smsc_ircc_change_speed(self, self->io.speed);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
spin_unlock_irqrestore(&self->lock, flags);
}
@@ -2822,7 +2822,6 @@ static void __init preconfigure_ali_port(struct pci_dev *dev,
tmpbyte |= mask;
pci_write_config_byte(dev, reg, tmpbyte);
IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
- return;
}
static int __init preconfigure_through_ali(struct pci_dev *dev,
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index d9d1db0..5a84822 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -774,7 +774,7 @@ static void SetBaudRate(__u16 iobase, __u32 rate)
break;
default:
break;
- };
+ }
} else if (IsMIROn(iobase)) {
value = 0; // will automatically be fixed in 1.152M
} else if (IsFIROn(iobase)) {
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 209d4bc..c3d0738 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -1037,7 +1037,6 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
wmb();
outw(0, iobase+VLSI_PIO_PROMPT);
}
- ndev->trans_start = jiffies;
if (ring_put(r) == NULL) {
netif_stop_queue(ndev);
@@ -1742,7 +1741,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
vlsi_irda_dev_t *idev;
if (!ndev) {
- IRDA_ERROR("%s - %s: no netdevice \n",
+ IRDA_ERROR("%s - %s: no netdevice\n",
__func__, pci_name(pdev));
return 0;
}
@@ -1781,7 +1780,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
vlsi_irda_dev_t *idev;
if (!ndev) {
- IRDA_ERROR("%s - %s: no netdevice \n",
+ IRDA_ERROR("%s - %s: no netdevice\n",
__func__, pci_name(pdev));
return 0;
}
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index cb0cb75..1f9c3f08 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -515,7 +515,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
/* Check for empty frame */
if (!skb->len) {
w83977af_change_speed(self, speed);
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
} else
@@ -549,7 +548,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
switch_bank(iobase, SET0);
outb(ICR_ETXTHI, iobase+ICR);
}
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
/* Restore set register */
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 773c59c..ba1de59 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -962,15 +962,15 @@ static void veth_set_multicast_list(struct net_device *dev)
(netdev_mc_count(dev) > VETH_MAX_MCAST)) {
port->promiscuous = 1;
} else {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
port->promiscuous = 0;
/* Update table */
port->num_mcast = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- u8 *addr = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *addr = ha->addr;
u64 xaddr = 0;
if (addr[0] & 0x01) {/* multicast address? */
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 92d2e71..521c0c7 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -78,9 +78,13 @@ struct ixgb_adapter;
#define PFX "ixgb: "
#ifdef _DEBUG_DRIVER_
-#define IXGB_DBG(args...) printk(KERN_DEBUG PFX args)
+#define IXGB_DBG(fmt, args...) printk(KERN_DEBUG PFX fmt, ##args)
#else
-#define IXGB_DBG(args...)
+#define IXGB_DBG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG PFX fmt, ##args); \
+} while (0)
#endif
/* TX/RX descriptor defines */
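[Editorial sketch, not part of the patch] The new no-op form of IXGB_DBG above is worth a note: wrapping the disabled case in "if (0) printk(...)" keeps the compiler type-checking the format string and its arguments in every build, and the dead call is then optimized away, whereas the old empty macro silently accepted mismatched arguments. A generic version of the pattern, under hypothetical MYDRV_* names:

#include <linux/kernel.h>

#ifdef MYDRV_DEBUG
#define MYDRV_DBG(fmt, args...) printk(KERN_DEBUG "mydrv: " fmt, ##args)
#else
#define MYDRV_DBG(fmt, args...)					\
do {								\
	if (0)							\
		printk(KERN_DEBUG "mydrv: " fmt, ##args);	\
} while (0)
#endif

static void example_use(int rc)
{
	MYDRV_DBG("init returned %d\n", rc);	/* format checked in both builds */
}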
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 89ffa72..813993f 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ixgb_hw.h"
#include "ixgb_ee.h"
/* Local prototypes */
@@ -56,7 +58,6 @@ ixgb_raise_clock(struct ixgb_hw *hw,
*eecd_reg = *eecd_reg | IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, *eecd_reg);
udelay(50);
- return;
}
/******************************************************************************
@@ -75,7 +76,6 @@ ixgb_lower_clock(struct ixgb_hw *hw,
*eecd_reg = *eecd_reg & ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, *eecd_reg);
udelay(50);
- return;
}
/******************************************************************************
@@ -125,7 +125,6 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
/* We leave the "DI" bit set to "0" when we leave this routine. */
eecd_reg &= ~IXGB_EECD_DI;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
- return;
}
/******************************************************************************
@@ -190,7 +189,6 @@ ixgb_setup_eeprom(struct ixgb_hw *hw)
/* Set CS */
eecd_reg |= IXGB_EECD_CS;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
- return;
}
/******************************************************************************
@@ -224,7 +222,6 @@ ixgb_standby_eeprom(struct ixgb_hw *hw)
eecd_reg &= ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
udelay(50);
- return;
}
/******************************************************************************
@@ -248,7 +245,6 @@ ixgb_clock_eeprom(struct ixgb_hw *hw)
eecd_reg &= ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
udelay(50);
- return;
}
/******************************************************************************
@@ -268,7 +264,6 @@ ixgb_cleanup_eeprom(struct ixgb_hw *hw)
IXGB_WRITE_REG(hw, EECD, eecd_reg);
ixgb_clock_eeprom(hw);
- return;
}
/******************************************************************************
@@ -357,7 +352,6 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
checksum = (u16) EEPROM_SUM - checksum;
ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum);
- return;
}
/******************************************************************************
@@ -412,8 +406,6 @@ ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data)
/* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
-
- return;
}
/******************************************************************************
@@ -467,11 +459,11 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
u16 checksum = 0;
struct ixgb_ee_map_type *ee_map;
- DEBUGFUNC("ixgb_get_eeprom_data");
+ ENTER();
ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
- DEBUGOUT("ixgb_ee: Reading eeprom data\n");
+ pr_debug("Reading eeprom data\n");
for (i = 0; i < IXGB_EEPROM_SIZE ; i++) {
u16 ee_data;
ee_data = ixgb_read_eeprom(hw, i);
@@ -480,7 +472,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
}
if (checksum != (u16) EEPROM_SUM) {
- DEBUGOUT("ixgb_ee: Checksum invalid.\n");
+ pr_debug("Checksum invalid\n");
/* clear the init_ctrl_reg_1 to signify that the cache is
* invalidated */
ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
@@ -489,7 +481,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
!= cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
- DEBUGOUT("ixgb_ee: Signature invalid.\n");
+ pr_debug("Signature invalid\n");
return(false);
}
@@ -555,13 +547,13 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
int i;
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
- DEBUGFUNC("ixgb_get_ee_mac_addr");
+ ENTER();
if (ixgb_check_and_get_eeprom_data(hw) == true) {
for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) {
mac_addr[i] = ee_map->mac_addr[i];
- DEBUGOUT2("mac(%d) = %.2X\n", i, mac_addr[i]);
}
+ pr_debug("eeprom mac address = %pM\n", mac_addr);
}
}
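[Editorial sketch, not part of the patch] The %pM conversion applied in ixgb_get_ee_mac_addr() above relies on printk's pointer extension: %pM formats a 6-byte Ethernet address as colon-separated hex, replacing the per-byte DEBUGOUT2() loop. A minimal example (function name is illustrative):

#include <linux/kernel.h>
#include <linux/types.h>

static void example_print_mac(const u8 *mac_addr)
{
	/* prints e.g. "aa:bb:cc:dd:ee:ff" without a hand-rolled loop */
	pr_debug("eeprom mac address = %pM\n", mac_addr);
}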
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index ff67a84..397acab 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -30,9 +30,13 @@
* Shared functions for accessing and configuring the adapter
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ixgb_hw.h"
#include "ixgb_ids.h"
+#include <linux/etherdevice.h>
+
/* Local function prototypes */
static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr);
@@ -120,13 +124,13 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
u32 ctrl_reg;
u32 icr_reg;
- DEBUGFUNC("ixgb_adapter_stop");
+ ENTER();
/* If we are stopped or resetting exit gracefully and wait to be
* started again before accessing the hardware.
*/
if (hw->adapter_stopped) {
- DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
+ pr_debug("Exiting because the adapter is already stopped!!!\n");
return false;
}
@@ -136,7 +140,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
hw->adapter_stopped = true;
/* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
+ pr_debug("Masking off all interrupts\n");
IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);
/* Disable the Transmit and Receive units. Then delay to allow
@@ -152,12 +156,12 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
* the current PCI configuration. The global reset bit is self-
* clearing, and should clear within a microsecond.
*/
- DEBUGOUT("Issuing a global reset to MAC\n");
+ pr_debug("Issuing a global reset to MAC\n");
ctrl_reg = ixgb_mac_reset(hw);
/* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
+ pr_debug("Masking off all interrupts\n");
IXGB_WRITE_REG(hw, IMC, 0xffffffff);
/* Clear any pending interrupt events. */
@@ -183,7 +187,7 @@ ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
u16 vendor_name[5];
ixgb_xpak_vendor xpak_vendor;
- DEBUGFUNC("ixgb_identify_xpak_vendor");
+ ENTER();
/* Read the first few bytes of the vendor string from the XPAK NVR
* registers. These are standard XENPAK/XPAK registers, so all XPAK
@@ -222,12 +226,12 @@ ixgb_identify_phy(struct ixgb_hw *hw)
ixgb_phy_type phy_type;
ixgb_xpak_vendor xpak_vendor;
- DEBUGFUNC("ixgb_identify_phy");
+ ENTER();
/* Infer the transceiver/phy type from the device id */
switch (hw->device_id) {
case IXGB_DEVICE_ID_82597EX:
- DEBUGOUT("Identified TXN17401 optics\n");
+ pr_debug("Identified TXN17401 optics\n");
phy_type = ixgb_phy_type_txn17401;
break;
@@ -237,30 +241,30 @@ ixgb_identify_phy(struct ixgb_hw *hw)
* type of optics. */
xpak_vendor = ixgb_identify_xpak_vendor(hw);
if (xpak_vendor == ixgb_xpak_vendor_intel) {
- DEBUGOUT("Identified TXN17201 optics\n");
+ pr_debug("Identified TXN17201 optics\n");
phy_type = ixgb_phy_type_txn17201;
} else {
- DEBUGOUT("Identified G6005 optics\n");
+ pr_debug("Identified G6005 optics\n");
phy_type = ixgb_phy_type_g6005;
}
break;
case IXGB_DEVICE_ID_82597EX_LR:
- DEBUGOUT("Identified G6104 optics\n");
+ pr_debug("Identified G6104 optics\n");
phy_type = ixgb_phy_type_g6104;
break;
case IXGB_DEVICE_ID_82597EX_CX4:
- DEBUGOUT("Identified CX4\n");
+ pr_debug("Identified CX4\n");
xpak_vendor = ixgb_identify_xpak_vendor(hw);
if (xpak_vendor == ixgb_xpak_vendor_intel) {
- DEBUGOUT("Identified TXN17201 optics\n");
+ pr_debug("Identified TXN17201 optics\n");
phy_type = ixgb_phy_type_txn17201;
} else {
- DEBUGOUT("Identified G6005 optics\n");
+ pr_debug("Identified G6005 optics\n");
phy_type = ixgb_phy_type_g6005;
}
break;
default:
- DEBUGOUT("Unknown physical layer module\n");
+ pr_debug("Unknown physical layer module\n");
phy_type = ixgb_phy_type_unknown;
break;
}
@@ -296,18 +300,18 @@ ixgb_init_hw(struct ixgb_hw *hw)
u32 ctrl_reg;
bool status;
- DEBUGFUNC("ixgb_init_hw");
+ ENTER();
/* Issue a global reset to the MAC. This will reset the chip's
* transmit, receive, DMA, and link units. It will not effect
* the current PCI configuration. The global reset bit is self-
* clearing, and should clear within a microsecond.
*/
- DEBUGOUT("Issuing a global reset to MAC\n");
+ pr_debug("Issuing a global reset to MAC\n");
ctrl_reg = ixgb_mac_reset(hw);
- DEBUGOUT("Issuing an EE reset to MAC\n");
+ pr_debug("Issuing an EE reset to MAC\n");
#ifdef HP_ZX1
/* Workaround for 82597EX reset errata */
IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
@@ -335,7 +339,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
* If it is not valid, we fail hardware init.
*/
if (!mac_addr_valid(hw->curr_mac_addr)) {
- DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n");
+ pr_debug("MAC address invalid after ixgb_init_rx_addrs\n");
return(false);
}
@@ -346,7 +350,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
ixgb_get_bus_info(hw);
/* Zero out the Multicast HASH table */
- DEBUGOUT("Zeroing the MTA\n");
+ pr_debug("Zeroing the MTA\n");
for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
@@ -379,7 +383,7 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
{
u32 i;
- DEBUGFUNC("ixgb_init_rx_addrs");
+ ENTER();
/*
* If the current mac address is valid, assume it is a software override
@@ -391,35 +395,24 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
/* Get the MAC address from the eeprom for later reference */
ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr);
- DEBUGOUT3(" Keeping Permanent MAC Addr =%.2X %.2X %.2X ",
- hw->curr_mac_addr[0],
- hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
- DEBUGOUT3("%.2X %.2X %.2X\n",
- hw->curr_mac_addr[3],
- hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
+ pr_debug("Keeping Permanent MAC Addr = %pM\n",
+ hw->curr_mac_addr);
} else {
/* Setup the receive address. */
- DEBUGOUT("Overriding MAC Address in RAR[0]\n");
- DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
- hw->curr_mac_addr[0],
- hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
- DEBUGOUT3("%.2X %.2X %.2X\n",
- hw->curr_mac_addr[3],
- hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
+ pr_debug("Overriding MAC Address in RAR[0]\n");
+ pr_debug("New MAC Addr = %pM\n", hw->curr_mac_addr);
ixgb_rar_set(hw, hw->curr_mac_addr, 0);
}
/* Zero out the other 15 receive addresses. */
- DEBUGOUT("Clearing RAR[1-15]\n");
+ pr_debug("Clearing RAR[1-15]\n");
for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
/* Write high reg first to disable the AV bit first */
IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
}
-
- return;
}
/******************************************************************************
@@ -444,65 +437,50 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
u32 hash_value;
u32 i;
u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */
+ u8 *mca;
- DEBUGFUNC("ixgb_mc_addr_list_update");
+ ENTER();
/* Set the new number of MC addresses that we are being requested to use. */
hw->num_mc_addrs = mc_addr_count;
/* Clear RAR[1-15] */
- DEBUGOUT(" Clearing RAR[1-15]\n");
+ pr_debug("Clearing RAR[1-15]\n");
for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
}
/* Clear the MTA */
- DEBUGOUT(" Clearing MTA\n");
+ pr_debug("Clearing MTA\n");
for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
/* Add the new addresses */
+ mca = mc_addr_list;
for (i = 0; i < mc_addr_count; i++) {
- DEBUGOUT(" Adding the multicast addresses:\n");
- DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 1],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 2],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 3],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 4],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 5]);
+ pr_debug("Adding the multicast addresses:\n");
+ pr_debug("MC Addr #%d = %pM\n", i, mca);
/* Place this multicast address in the RAR if there is room, *
* else put it in the MTA
*/
if (rar_used_count < IXGB_RAR_ENTRIES) {
- ixgb_rar_set(hw,
- mc_addr_list +
- (i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)),
- rar_used_count);
- DEBUGOUT1("Added a multicast address to RAR[%d]\n", i);
+ ixgb_rar_set(hw, mca, rar_used_count);
+ pr_debug("Added a multicast address to RAR[%d]\n", i);
rar_used_count++;
} else {
- hash_value = ixgb_hash_mc_addr(hw,
- mc_addr_list +
- (i *
- (IXGB_ETH_LENGTH_OF_ADDRESS
- + pad)));
+ hash_value = ixgb_hash_mc_addr(hw, mca);
- DEBUGOUT1(" Hash value = 0x%03X\n", hash_value);
+ pr_debug("Hash value = 0x%03X\n", hash_value);
ixgb_mta_set(hw, hash_value);
}
+
+ mca += IXGB_ETH_LENGTH_OF_ADDRESS + pad;
}
- DEBUGOUT("MC Update Complete\n");
- return;
+ pr_debug("MC Update Complete\n");
}
/******************************************************************************
@@ -520,7 +498,7 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
{
u32 hash_value = 0;
- DEBUGFUNC("ixgb_hash_mc_addr");
+ ENTER();
/* The portion of the address that is used for the hash table is
* determined by the mc_filter_type setting.
@@ -547,7 +525,7 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
break;
default:
/* Invalid mc_filter_type, what should we do? */
- DEBUGOUT("MC filter type param set incorrectly\n");
+ pr_debug("MC filter type param set incorrectly\n");
ASSERT(0);
break;
}
@@ -585,8 +563,6 @@ ixgb_mta_set(struct ixgb_hw *hw,
mta_reg |= (1 << hash_bit);
IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg);
-
- return;
}
/******************************************************************************
@@ -603,7 +579,7 @@ ixgb_rar_set(struct ixgb_hw *hw,
{
u32 rar_low, rar_high;
- DEBUGFUNC("ixgb_rar_set");
+ ENTER();
/* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
@@ -619,7 +595,6 @@ ixgb_rar_set(struct ixgb_hw *hw,
IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
- return;
}
/******************************************************************************
@@ -635,7 +610,6 @@ ixgb_write_vfta(struct ixgb_hw *hw,
u32 value)
{
IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
- return;
}
/******************************************************************************
@@ -650,7 +624,6 @@ ixgb_clear_vfta(struct ixgb_hw *hw)
for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
- return;
}
/******************************************************************************
@@ -666,7 +639,7 @@ ixgb_setup_fc(struct ixgb_hw *hw)
u32 pap_reg = 0; /* by default, assume no pause time */
bool status = true;
- DEBUGFUNC("ixgb_setup_fc");
+ ENTER();
/* Get the current control reg 0 settings */
ctrl_reg = IXGB_READ_REG(hw, CTRL0);
@@ -710,7 +683,7 @@ ixgb_setup_fc(struct ixgb_hw *hw)
break;
default:
/* We should never get here. The value should be 0-3. */
- DEBUGOUT("Flow control param set incorrectly\n");
+ pr_debug("Flow control param set incorrectly\n");
ASSERT(0);
break;
}
@@ -940,7 +913,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
u32 status_reg;
u32 xpcss_reg;
- DEBUGFUNC("ixgb_check_for_link");
+ ENTER();
xpcss_reg = IXGB_READ_REG(hw, XPCSS);
status_reg = IXGB_READ_REG(hw, STATUS);
@@ -950,7 +923,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
hw->link_up = true;
} else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
(status_reg & IXGB_STATUS_LU)) {
- DEBUGOUT("XPCSS Not Aligned while Status:LU is set.\n");
+ pr_debug("XPCSS Not Aligned while Status:LU is set\n");
hw->link_up = ixgb_link_reset(hw);
} else {
/*
@@ -981,8 +954,7 @@ bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
newRFC = IXGB_READ_REG(hw, RFC);
if ((hw->lastLFC + 250 < newLFC)
|| (hw->lastRFC + 250 < newRFC)) {
- DEBUGOUT
- ("BAD LINK! too many LFC/RFC since last check\n");
+ pr_debug("BAD LINK! too many LFC/RFC since last check\n");
bad_link_returncode = true;
}
hw->lastLFC = newLFC;
@@ -1002,11 +974,11 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
{
volatile u32 temp_reg;
- DEBUGFUNC("ixgb_clear_hw_cntrs");
+ ENTER();
/* if we are stopped or resetting exit gracefully */
if (hw->adapter_stopped) {
- DEBUGOUT("Exiting because the adapter is stopped!!!\n");
+ pr_debug("Exiting because the adapter is stopped!!!\n");
return;
}
@@ -1070,7 +1042,6 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
temp_reg = IXGB_READ_REG(hw, XOFFRXC);
temp_reg = IXGB_READ_REG(hw, XOFFTXC);
temp_reg = IXGB_READ_REG(hw, RJC);
- return;
}
/******************************************************************************
@@ -1086,7 +1057,6 @@ ixgb_led_on(struct ixgb_hw *hw)
/* To turn on the LED, clear software-definable pin 0 (SDP0). */
ctrl0_reg &= ~IXGB_CTRL0_SDP0;
IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
- return;
}
/******************************************************************************
@@ -1102,7 +1072,6 @@ ixgb_led_off(struct ixgb_hw *hw)
/* To turn off the LED, set software-definable pin 0 (SDP0). */
ctrl0_reg |= IXGB_CTRL0_SDP0;
IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
- return;
}
/******************************************************************************
@@ -1142,8 +1111,6 @@ ixgb_get_bus_info(struct ixgb_hw *hw)
hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ?
ixgb_bus_width_64 : ixgb_bus_width_32;
-
- return;
}
/******************************************************************************
@@ -1156,26 +1123,21 @@ static bool
mac_addr_valid(u8 *mac_addr)
{
bool is_valid = true;
- DEBUGFUNC("mac_addr_valid");
+ ENTER();
/* Make sure it is not a multicast address */
- if (IS_MULTICAST(mac_addr)) {
- DEBUGOUT("MAC address is multicast\n");
+ if (is_multicast_ether_addr(mac_addr)) {
+ pr_debug("MAC address is multicast\n");
is_valid = false;
}
/* Not a broadcast address */
- else if (IS_BROADCAST(mac_addr)) {
- DEBUGOUT("MAC address is broadcast\n");
+ else if (is_broadcast_ether_addr(mac_addr)) {
+ pr_debug("MAC address is broadcast\n");
is_valid = false;
}
/* Reject the zero address */
- else if (mac_addr[0] == 0 &&
- mac_addr[1] == 0 &&
- mac_addr[2] == 0 &&
- mac_addr[3] == 0 &&
- mac_addr[4] == 0 &&
- mac_addr[5] == 0) {
- DEBUGOUT("MAC address is all zeros\n");
+ else if (is_zero_ether_addr(mac_addr)) {
+ pr_debug("MAC address is all zeros\n");
is_valid = false;
}
return (is_valid);
@@ -1235,8 +1197,6 @@ ixgb_optics_reset(struct ixgb_hw *hw)
IXGB_PHY_ADDRESS,
MDIO_MMD_PMAPMD);
}
-
- return;
}
/******************************************************************************
@@ -1297,6 +1257,4 @@ ixgb_optics_reset_bcm(struct ixgb_hw *hw)
/* SerDes needs extra delay */
msleep(IXGB_SUN_PHY_RESET_DELAY);
-
- return;
}
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index af6ca3a..873d32b 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -636,18 +636,6 @@ struct ixgb_flash_buffer {
u8 filler3[0xAAAA];
};
-/*
- * This is a little-endian specific check.
- */
-#define IS_MULTICAST(Address) \
- (bool)(((u8 *)(Address))[0] & ((u8)0x01))
-
-/*
- * Check whether an address is broadcast.
- */
-#define IS_BROADCAST(Address) \
- ((((u8 *)(Address))[0] == ((u8)0xff)) && (((u8 *)(Address))[1] == ((u8)0xff)))
-
/* Flow control parameters */
struct ixgb_fc {
u32 high_water; /* Flow Control High-water */
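[Editorial sketch, not part of the patch] The macro removal above pairs with the mac_addr_valid() rewrite in ixgb_hw.c: the hand-rolled IS_MULTICAST/IS_BROADCAST checks give way to the helpers in <linux/etherdevice.h>. A condensed version of that validation (the wrapper name is illustrative; the kernel also provides is_valid_ether_addr(), which folds the multicast and all-zero checks together):

#include <linux/etherdevice.h>
#include <linux/types.h>

static bool example_mac_usable(const u8 *addr)
{
	/* reject multicast (which includes broadcast) and all-zero addresses */
	return !is_multicast_ether_addr(addr) &&
	       !is_broadcast_ether_addr(addr) &&
	       !is_zero_ether_addr(addr);
}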
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index c9fef65..c6b75c8 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ixgb.h"
char ixgb_driver_name[] = "ixgb";
@@ -146,10 +148,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static int __init
ixgb_init_module(void)
{
- printk(KERN_INFO "%s - version %s\n",
- ixgb_driver_string, ixgb_driver_version);
-
- printk(KERN_INFO "%s\n", ixgb_copyright);
+ pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
+ pr_info("%s\n", ixgb_copyright);
return pci_register_driver(&ixgb_driver);
}
@@ -368,17 +368,22 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
- !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
- pci_using_dac = 1;
+ pci_using_dac = 0;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err)
+ pci_using_dac = 1;
} else {
- if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
- (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- printk(KERN_ERR
- "ixgb: No usable DMA configuration, aborting\n");
- goto err_dma_mask;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_dma_mask;
+ }
}
- pci_using_dac = 0;
}
err = pci_request_regions(pdev, ixgb_driver_name);
@@ -674,7 +679,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
vfree(txdr->buffer_info);
netif_err(adapter, probe, adapter->netdev,
@@ -763,7 +769,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
rxdr->size = ALIGN(rxdr->size, 4096);
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
vfree(rxdr->buffer_info);
@@ -884,8 +891,8 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter)
vfree(adapter->tx_ring.buffer_info);
adapter->tx_ring.buffer_info = NULL;
- pci_free_consistent(pdev, adapter->tx_ring.size,
- adapter->tx_ring.desc, adapter->tx_ring.dma);
+ dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
+ adapter->tx_ring.desc, adapter->tx_ring.dma);
adapter->tx_ring.desc = NULL;
}
@@ -896,12 +903,11 @@ ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev, buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
@@ -967,7 +973,8 @@ ixgb_free_rx_resources(struct ixgb_adapter *adapter)
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -991,10 +998,10 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
buffer_info->length = 0;
}
@@ -1058,7 +1065,7 @@ ixgb_set_multi(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 rctl;
int i;
@@ -1089,9 +1096,9 @@ ixgb_set_multi(struct net_device *netdev)
IXGB_WRITE_REG(hw, RCTL, rctl);
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
+ netdev_for_each_mc_addr(ha, netdev)
memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS],
- mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
+ ha->addr, IXGB_ETH_LENGTH_OF_ADDRESS);
ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
}
@@ -1118,15 +1125,14 @@ ixgb_watchdog(unsigned long data)
if (adapter->hw.link_up) {
if (!netif_carrier_ok(netdev)) {
- printk(KERN_INFO "ixgb: %s NIC Link is Up 10 Gbps "
- "Full Duplex, Flow Control: %s\n",
- netdev->name,
- (adapter->hw.fc.type == ixgb_fc_full) ?
- "RX/TX" :
- ((adapter->hw.fc.type == ixgb_fc_rx_pause) ?
- "RX" :
- ((adapter->hw.fc.type == ixgb_fc_tx_pause) ?
- "TX" : "None")));
+ netdev_info(netdev,
+ "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
+ (adapter->hw.fc.type == ixgb_fc_full) ?
+ "RX/TX" :
+ (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
+ "RX" :
+ (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
+ "TX" : "None");
adapter->link_speed = 10000;
adapter->link_duplex = FULL_DUPLEX;
netif_carrier_on(netdev);
@@ -1135,8 +1141,7 @@ ixgb_watchdog(unsigned long data)
if (netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
- printk(KERN_INFO "ixgb: %s NIC Link is Down\n",
- netdev->name);
+ netdev_info(netdev, "NIC Link is Down\n");
netif_carrier_off(netdev);
}
}
@@ -1303,9 +1308,10 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
WARN_ON(buffer_info->dma != 0);
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = false;
- buffer_info->dma = pci_map_single(pdev, skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = 0;
@@ -1344,10 +1350,9 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = true;
buffer_info->dma =
- pci_map_page(pdev, frag->page,
- offset, size,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ dma_map_page(&pdev->dev, frag->page,
+ offset, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = 0;
@@ -1916,6 +1921,31 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
}
}
+/*
+ * this should improve performance for small packets with large amounts
+ * of reassembly being done in the stack
+ */
+static void ixgb_check_copybreak(struct net_device *netdev,
+ struct ixgb_buffer *buffer_info,
+ u32 length, struct sk_buff **skb)
+{
+ struct sk_buff *new_skb;
+
+ if (length > copybreak)
+ return;
+
+ new_skb = netdev_alloc_skb_ip_align(netdev, length);
+ if (!new_skb)
+ return;
+
+ skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
+ (*skb)->data - NET_IP_ALIGN,
+ length + NET_IP_ALIGN);
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = *skb;
+ *skb = new_skb;
+}
+
/**
* ixgb_clean_rx_irq - Send received data up the network stack,
* @adapter: board private structure
@@ -1952,11 +1982,14 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
prefetch(skb->data - NET_IP_ALIGN);
- if (++i == rx_ring->count) i = 0;
+ if (++i == rx_ring->count)
+ i = 0;
next_rxd = IXGB_RX_DESC(*rx_ring, i);
prefetch(next_rxd);
- if ((j = i + 1) == rx_ring->count) j = 0;
+ j = i + 1;
+ if (j == rx_ring->count)
+ j = 0;
next2_buffer = &rx_ring->buffer_info[j];
prefetch(next2_buffer);
@@ -1965,10 +1998,10 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
cleaned = true;
cleaned_count++;
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -1992,25 +2025,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
goto rxdesc_done;
}
- /* code added for copybreak, this should improve
- * performance for small packets with large amounts
- * of reassembly being done in the stack */
- if (length < copybreak) {
- struct sk_buff *new_skb =
- netdev_alloc_skb_ip_align(netdev, length);
- if (new_skb) {
- skb_copy_to_linear_data_offset(new_skb,
- -NET_IP_ALIGN,
- (skb->data -
- NET_IP_ALIGN),
- (length +
- NET_IP_ALIGN));
- /* save the skb in buffer_info as good */
- buffer_info->skb = skb;
- skb = new_skb;
- }
- }
- /* end copybreak code */
+ ixgb_check_copybreak(netdev, buffer_info, length, &skb);
/* Good Receive */
skb_put(skb, length);
@@ -2091,10 +2106,10 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
map_skb:
- buffer_info->dma = pci_map_single(pdev,
+ buffer_info->dma = dma_map_single(&pdev->dev,
skb->data,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_desc = IXGB_RX_DESC(*rx_ring, i);
rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -2322,7 +2337,7 @@ static void ixgb_io_resume(struct pci_dev *pdev)
if (netif_running(netdev)) {
if (ixgb_up(adapter)) {
- printk ("ixgb: can't bring device back up after reset\n");
+ pr_err("can't bring device back up after reset\n");
return;
}
}
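[Editorial sketch, not part of the patch] The DMA changes throughout ixgb_main.c above are one mechanical conversion: the pci_map_*/pci_unmap_*/pci_alloc_consistent wrappers become the generic dma_* calls keyed on &pdev->dev, with PCI_DMA_TODEVICE/FROMDEVICE replaced by DMA_TO_DEVICE/DMA_FROM_DEVICE. A condensed sketch with placeholder buffer handling:

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>

static int example_map_tx_buffer(struct device *dev, void *buf, size_t len,
				 dma_addr_t *dma)
{
	*dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;		/* mapping failed; *dma must not be used */

	/* ... program *dma into a descriptor, and once the DMA has completed: */
	dma_unmap_single(dev, *dma, len, DMA_TO_DEVICE);
	return 0;
}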
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index 371a6be..e361185 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -41,20 +41,8 @@
#undef ASSERT
#define ASSERT(x) BUG_ON(!(x))
-#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
-
-#ifdef DBG
-#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
-#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
-#else
-#define DEBUGOUT(S)
-#define DEBUGOUT1(S, A...)
-#endif
-
-#define DEBUGFUNC(F) DEBUGOUT(F)
-#define DEBUGOUT2 DEBUGOUT1
-#define DEBUGOUT3 DEBUGOUT2
-#define DEBUGOUT7 DEBUGOUT3
+
+#define ENTER() pr_debug("%s\n", __func__);
#define IXGB_WRITE_REG(a, reg, value) ( \
writel((value), ((a)->hw_addr + IXGB_##reg)))
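[Editorial sketch, not part of the patch] What the new ENTER() and the pr_fmt() definitions added across the ixgb files amount to, assuming a hypothetical module built as "mydrv": pr_fmt() must be defined before the first include so every pr_*() call picks up the module prefix, and pr_debug() is compiled out (or routed through dynamic debug) without any driver-private DEBUGOUT macros.

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* KBUILD_MODNAME comes from the build system */

#include <linux/kernel.h>

static void example_trace(void)
{
	pr_debug("%s\n", __func__);	/* what the new ENTER() expands to */
	pr_info("link is up\n");	/* printed as "mydrv: link is up" */
}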
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index af35e1d..88a08f0 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ixgb.h"
/* This is the only thing that needs to be changed to adjust the
@@ -209,16 +211,16 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- printk(KERN_INFO "%s Enabled\n", opt->name);
+ pr_info("%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
- printk(KERN_INFO "%s Disabled\n", opt->name);
+ pr_info("%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- printk(KERN_INFO "%s set to %i\n", opt->name, *value);
+ pr_info("%s set to %i\n", opt->name, *value);
return 0;
}
break;
@@ -230,7 +232,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- printk(KERN_INFO "%s\n", ent->str);
+ pr_info("%s\n", ent->str);
return 0;
}
}
@@ -240,8 +242,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
BUG();
}
- printk(KERN_INFO "Invalid %s specified (%i) %s\n",
- opt->name, *value, opt->err);
+ pr_info("Invalid %s specified (%i) %s\n", opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
@@ -261,9 +262,8 @@ ixgb_check_options(struct ixgb_adapter *adapter)
{
int bd = adapter->bd_number;
if (bd >= IXGB_MAX_NIC) {
- printk(KERN_NOTICE
- "Warning: no configuration for board #%i\n", bd);
- printk(KERN_NOTICE "Using defaults for all values\n");
+ pr_notice("Warning: no configuration for board #%i\n", bd);
+ pr_notice("Using defaults for all values\n");
}
{ /* Transmit Descriptor Count */
@@ -363,8 +363,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
adapter->hw.fc.high_water = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
- printk(KERN_INFO
- "Ignoring RxFCHighThresh when no RxFC\n");
+ pr_info("Ignoring RxFCHighThresh when no RxFC\n");
}
{ /* Receive Flow Control Low Threshold */
const struct ixgb_option opt = {
@@ -383,8 +382,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
adapter->hw.fc.low_water = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
- printk(KERN_INFO
- "Ignoring RxFCLowThresh when no RxFC\n");
+ pr_info("Ignoring RxFCLowThresh when no RxFC\n");
}
{ /* Flow Control Pause Time Request*/
const struct ixgb_option opt = {
@@ -404,17 +402,14 @@ ixgb_check_options(struct ixgb_adapter *adapter)
adapter->hw.fc.pause_time = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
- printk(KERN_INFO
- "Ignoring FCReqTimeout when no RxFC\n");
+ pr_info("Ignoring FCReqTimeout when no RxFC\n");
}
/* high low and spacing check for rx flow control thresholds */
if (adapter->hw.fc.type & ixgb_fc_tx_pause) {
/* high must be greater than low */
if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
/* set defaults */
- printk(KERN_INFO
- "RxFCHighThresh must be >= (RxFCLowThresh + 8), "
- "Using Defaults\n");
+ pr_info("RxFCHighThresh must be >= (RxFCLowThresh + 8), Using Defaults\n");
adapter->hw.fc.high_water = DEFAULT_FCRTH;
adapter->hw.fc.low_water = DEFAULT_FCRTL;
}
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 79c35ae..d0ea3d6 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -111,7 +111,10 @@ struct vf_data_storage {
u16 default_vf_vlan_id;
u16 vlans_enabled;
bool clear_to_send;
+ bool pf_set_mac;
int rar;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
};
/* wrapper around a pointer to a socket buffer,
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 35a06b4..f2b7ff4 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -42,9 +42,9 @@ static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
@@ -1221,7 +1221,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.init_params = &ixgbe_init_eeprom_params_generic,
- .read = &ixgbe_read_eeprom_generic,
+ .read = &ixgbe_read_eerd_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 12fc0e7..e9706eb 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -133,27 +133,6 @@ setup_sfp_out:
return ret_val;
}
-/**
- * ixgbe_get_pcie_msix_count_82599 - Gets MSI-X vector count
- * @hw: pointer to hardware structure
- *
- * Read PCIe configuration space, and get the MSI-X vector count from
- * the capabilities table.
- **/
-static u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw)
-{
- struct ixgbe_adapter *adapter = hw->back;
- u16 msix_count;
- pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
- &msix_count);
- msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
-
- /* MSI-X count is zero-based in HW, so increment to give proper value */
- msix_count++;
-
- return msix_count;
-}
-
static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -165,7 +144,7 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
- mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw);
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
return 0;
}
@@ -642,6 +621,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
s32 i, j;
bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ struct ixgbe_adapter *adapter = hw->back;
hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
@@ -726,64 +706,14 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
autoneg_wait_to_complete);
out:
+ if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
+ netif_info(adapter, hw, adapter->netdev, "Smartspeed has"
+ " downgraded the link speed from the maximum"
+ " advertised\n");
return status;
}
/**
- * ixgbe_check_mac_link_82599 - Determine link and speed status
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @link_up: true when link is up
- * @link_up_wait_to_complete: bool used to wait for link up or not
- *
- * Reads the links register to determine if link is up and the current speed
- **/
-static s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up,
- bool link_up_wait_to_complete)
-{
- u32 links_reg;
- u32 i;
-
- links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
- if (link_up_wait_to_complete) {
- for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
- if (links_reg & IXGBE_LINKS_UP) {
- *link_up = true;
- break;
- } else {
- *link_up = false;
- }
- msleep(100);
- links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
- }
- } else {
- if (links_reg & IXGBE_LINKS_UP)
- *link_up = true;
- else
- *link_up = false;
- }
-
- if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_10G_82599)
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
- else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_1G_82599)
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- else
- *speed = IXGBE_LINK_SPEED_100_FULL;
-
- /* if link is down, zero out the current_mode */
- if (*link_up == false) {
- hw->fc.current_mode = ixgbe_fc_none;
- hw->fc.fc_was_autonegged = false;
- }
-
- return 0;
-}
-
-/**
* ixgbe_setup_mac_link_82599 - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -1045,243 +975,6 @@ reset_hw_out:
}
/**
- * ixgbe_clear_vmdq_82599 - Disassociate a VMDq pool index from a rx address
- * @hw: pointer to hardware struct
- * @rar: receive address register index to disassociate
- * @vmdq: VMDq pool index to remove from the rar
- **/
-static s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
- u32 mpsar_lo, mpsar_hi;
- u32 rar_entries = hw->mac.num_rar_entries;
-
- if (rar < rar_entries) {
- mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-
- if (!mpsar_lo && !mpsar_hi)
- goto done;
-
- if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
- if (mpsar_lo) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
- mpsar_lo = 0;
- }
- if (mpsar_hi) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
- mpsar_hi = 0;
- }
- } else if (vmdq < 32) {
- mpsar_lo &= ~(1 << vmdq);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
- } else {
- mpsar_hi &= ~(1 << (vmdq - 32));
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
- }
-
- /* was that the last pool using this rar? */
- if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
- hw->mac.ops.clear_rar(hw, rar);
- } else {
- hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- }
-
-done:
- return 0;
-}
-
-/**
- * ixgbe_set_vmdq_82599 - Associate a VMDq pool index with a rx address
- * @hw: pointer to hardware struct
- * @rar: receive address register index to associate with a VMDq index
- * @vmdq: VMDq pool index
- **/
-static s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
- u32 mpsar;
- u32 rar_entries = hw->mac.num_rar_entries;
-
- if (rar < rar_entries) {
- if (vmdq < 32) {
- mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar |= 1 << vmdq;
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
- } else {
- mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
- mpsar |= 1 << (vmdq - 32);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
- }
- } else {
- hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- }
- return 0;
-}
-
-/**
- * ixgbe_set_vfta_82599 - Set VLAN filter table
- * @hw: pointer to hardware structure
- * @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VFVFB
- * @vlan_on: boolean flag to turn on/off VLAN in VFVF
- *
- * Turn on/off specified VLAN in the VLAN filter table.
- **/
-static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
-{
- u32 regindex;
- u32 vlvf_index;
- u32 bitindex;
- u32 bits;
- u32 first_empty_slot;
- u32 vt_ctl;
-
- if (vlan > 4095)
- return IXGBE_ERR_PARAM;
-
- /*
- * this is a 2 part operation - first the VFTA, then the
- * VLVF and VLVFB if vind is set
- */
-
- /* Part 1
- * The VFTA is a bitstring made up of 128 32-bit registers
- * that enable the particular VLAN id, much like the MTA:
- * bits[11-5]: which register
- * bits[4-0]: which bit in the register
- */
- regindex = (vlan >> 5) & 0x7F;
- bitindex = vlan & 0x1F;
- bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
- if (vlan_on)
- bits |= (1 << bitindex);
- else
- bits &= ~(1 << bitindex);
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
-
-
- /* Part 2
- * If VT mode is set
- * Either vlan_on
- * make sure the vlan is in VLVF
- * set the vind bit in the matching VLVFB
- * Or !vlan_on
- * clear the pool bit and possibly the vind
- */
- vt_ctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- if (!(vt_ctl & IXGBE_VT_CTL_VT_ENABLE))
- goto out;
-
- /* find the vlanid or the first empty slot */
- first_empty_slot = 0;
-
- for (vlvf_index = 1; vlvf_index < IXGBE_VLVF_ENTRIES; vlvf_index++) {
- bits = IXGBE_READ_REG(hw, IXGBE_VLVF(vlvf_index));
- if (!bits && !first_empty_slot)
- first_empty_slot = vlvf_index;
- else if ((bits & 0x0FFF) == vlan)
- break;
- }
-
- if (vlvf_index >= IXGBE_VLVF_ENTRIES) {
- if (first_empty_slot)
- vlvf_index = first_empty_slot;
- else {
- hw_dbg(hw, "No space in VLVF.\n");
- goto out;
- }
- }
-
- if (vlan_on) {
- /* set the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2), bits);
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- bits |= (1 << (vind - 32));
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
- }
- } else {
- /* clear the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2), bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- bits &= ~(1 << (vind - 32));
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- }
- }
-
- if (bits) {
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
- (IXGBE_VLVF_VIEN | vlan));
- /* if bits is non-zero then some pools/VFs are still
- * using this VLAN ID. Force the VFTA entry to on */
- bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
- bits |= (1 << bitindex);
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
- }
- else
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
-
-out:
- return 0;
-}
-
-/**
- * ixgbe_clear_vfta_82599 - Clear VLAN filter table
- * @hw: pointer to hardware structure
- *
- * Clears the VLAN filer table, and the VMDq index associated with the filter
- **/
-static s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw)
-{
- u32 offset;
-
- for (offset = 0; offset < hw->mac.vft_size; offset++)
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
-
- for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
- }
-
- return 0;
-}
-
-/**
- * ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array
- * @hw: pointer to hardware structure
- **/
-static s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw)
-{
- int i;
- hw_dbg(hw, " Clearing UTA\n");
-
- for (i = 0; i < 128; i++)
- IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
-
- return 0;
-}
-
-/**
* ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
* @hw: pointer to hardware structure
**/
@@ -1303,7 +996,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
}
if (i >= IXGBE_FDIRCMD_CMD_POLL) {
hw_dbg(hw ,"Flow Director previous command isn't complete, "
- "aborting table re-initialization. \n");
+ "aborting table re-initialization.\n");
return IXGBE_ERR_FDIR_REINIT_FAILED;
}
@@ -2462,10 +2155,14 @@ sfp_check:
goto out;
switch (hw->phy.type) {
- case ixgbe_phy_tw_tyco:
- case ixgbe_phy_tw_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
break;
+ case ixgbe_phy_sfp_ftl_active:
+ case ixgbe_phy_sfp_active_unknown:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
+ break;
case ixgbe_phy_sfp_avago:
case ixgbe_phy_sfp_ftl:
case ixgbe_phy_sfp_intel:
@@ -2545,75 +2242,6 @@ static s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
}
/**
- * ixgbe_get_san_mac_addr_offset_82599 - SAN MAC address offset for 82599
- * @hw: pointer to hardware structure
- * @san_mac_offset: SAN MAC address offset
- *
- * This function will read the EEPROM location for the SAN MAC address
- * pointer, and returns the value at that location. This is used in both
- * get and set mac_addr routines.
- **/
-static s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw,
- u16 *san_mac_offset)
-{
- /*
- * First read the EEPROM pointer to see if the MAC addresses are
- * available.
- */
- hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
-
- return 0;
-}
-
-/**
- * ixgbe_get_san_mac_addr_82599 - SAN MAC address retrieval for 82599
- * @hw: pointer to hardware structure
- * @san_mac_addr: SAN MAC address
- *
- * Reads the SAN MAC address from the EEPROM, if it's available. This is
- * per-port, so set_lan_id() must be called before reading the addresses.
- * set_lan_id() is called by identify_sfp(), but this cannot be relied
- * upon for non-SFP connections, so we must call it here.
- **/
-static s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr)
-{
- u16 san_mac_data, san_mac_offset;
- u8 i;
-
- /*
- * First read the EEPROM pointer to see if the MAC addresses are
- * available. If they're not, no point in calling set_lan_id() here.
- */
- ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset);
-
- if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
- /*
- * No addresses available in this EEPROM. It's not an
- * error though, so just wipe the local address and return.
- */
- for (i = 0; i < 6; i++)
- san_mac_addr[i] = 0xFF;
-
- goto san_mac_addr_out;
- }
-
- /* make sure we know which port we need to program */
- hw->mac.ops.set_lan_id(hw);
- /* apply the port offset to the address offset */
- (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
- (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
- for (i = 0; i < 3; i++) {
- hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
- san_mac_addr[i * 2] = (u8)(san_mac_data);
- san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
- san_mac_offset++;
- }
-
-san_mac_addr_out:
- return 0;
-}
-
-/**
* ixgbe_verify_fw_version_82599 - verify fw version for 82599
* @hw: pointer to hardware structure
*
@@ -2715,7 +2343,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
.enable_rx_dma = &ixgbe_enable_rx_dma_82599,
.get_mac_addr = &ixgbe_get_mac_addr_generic,
- .get_san_mac_addr = &ixgbe_get_san_mac_addr_82599,
+ .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
.get_device_caps = &ixgbe_get_device_caps_82599,
.get_wwn_prefix = &ixgbe_get_wwn_prefix_82599,
.stop_adapter = &ixgbe_stop_adapter_generic,
@@ -2724,7 +2352,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.read_analog_reg8 = &ixgbe_read_analog_reg8_82599,
.write_analog_reg8 = &ixgbe_write_analog_reg8_82599,
.setup_link = &ixgbe_setup_mac_link_82599,
- .check_link = &ixgbe_check_mac_link_82599,
+ .check_link = &ixgbe_check_mac_link_generic,
.get_link_capabilities = &ixgbe_get_link_capabilities_82599,
.led_on = &ixgbe_led_on_generic,
.led_off = &ixgbe_led_off_generic,
@@ -2732,23 +2360,23 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.blink_led_stop = &ixgbe_blink_led_stop_generic,
.set_rar = &ixgbe_set_rar_generic,
.clear_rar = &ixgbe_clear_rar_generic,
- .set_vmdq = &ixgbe_set_vmdq_82599,
- .clear_vmdq = &ixgbe_clear_vmdq_82599,
+ .set_vmdq = &ixgbe_set_vmdq_generic,
+ .clear_vmdq = &ixgbe_clear_vmdq_generic,
.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
.enable_mc = &ixgbe_enable_mc_generic,
.disable_mc = &ixgbe_disable_mc_generic,
- .clear_vfta = &ixgbe_clear_vfta_82599,
- .set_vfta = &ixgbe_set_vfta_82599,
- .fc_enable = &ixgbe_fc_enable_generic,
- .init_uta_tables = &ixgbe_init_uta_tables_82599,
+ .clear_vfta = &ixgbe_clear_vfta_generic,
+ .set_vfta = &ixgbe_set_vfta_generic,
+ .fc_enable = &ixgbe_fc_enable_generic,
+ .init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = &ixgbe_setup_sfp_modules_82599,
};
static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.init_params = &ixgbe_init_eeprom_params_generic,
- .read = &ixgbe_read_eeprom_generic,
+ .read = &ixgbe_read_eerd_generic,
.write = &ixgbe_write_eeprom_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
@@ -2757,7 +2385,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
static struct ixgbe_phy_operations phy_ops_82599 = {
.identify = &ixgbe_identify_phy_82599,
.identify_sfp = &ixgbe_identify_sfp_module_generic,
- .init = &ixgbe_init_phy_ops_82599,
+ .init = &ixgbe_init_phy_ops_82599,
.reset = &ixgbe_reset_phy_generic,
.read_reg = &ixgbe_read_phy_reg_generic,
.write_reg = &ixgbe_write_phy_reg_generic,
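[Editorial sketch, not part of the patch] The large deletions above all make the same design move: per-MAC copies of the VMDq, VFTA, UTA, SAN-MAC and link-check code are dropped, and both operations tables now point at shared *_generic implementations in ixgbe_common.c. A toy version of that ops-table pattern (every name below is a stand-in, not an ixgbe symbol):

#include <linux/types.h>

struct example_hw { u32 vft_size; };

/* one shared implementation ... */
static s32 example_clear_vfta_generic(struct example_hw *hw)
{
	return 0;	/* clear the VLAN filter table here */
}

struct example_mac_ops {
	s32 (*clear_vfta)(struct example_hw *hw);
};

/* ... that every MAC family's ops table references instead of a private copy */
static const struct example_mac_ops mac_ops_family_a = {
	.clear_vfta = example_clear_vfta_generic,
};

static const struct example_mac_ops mac_ops_family_b = {
	.clear_vfta = example_clear_vfta_generic,
};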
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index eb49020..1159d91 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -34,7 +34,6 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw);
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
@@ -595,14 +594,14 @@ out:
}
/**
- * ixgbe_read_eeprom_generic - Read EEPROM word using EERD
+ * ixgbe_read_eerd_generic - Read EEPROM word using EERD
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
* @data: word read from the EEPROM
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
-s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
u32 eerd;
s32 status;
@@ -614,15 +613,15 @@ s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
goto out;
}
- eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) +
- IXGBE_EEPROM_READ_REG_START;
+ eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
+ IXGBE_EEPROM_RW_REG_START;
IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
- status = ixgbe_poll_eeprom_eerd_done(hw);
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
if (status == 0)
*data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
- IXGBE_EEPROM_READ_REG_DATA);
+ IXGBE_EEPROM_RW_REG_DATA);
else
hw_dbg(hw, "Eeprom read timed out\n");
@@ -631,20 +630,26 @@ out:
}
/**
- * ixgbe_poll_eeprom_eerd_done - Poll EERD status
+ * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
* @hw: pointer to hardware structure
+ * @ee_reg: EEPROM flag for polling
*
- * Polls the status bit (bit 1) of the EERD to determine when the read is done.
+ * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
+ * read or write is done respectively.
**/
-static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw)
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
u32 i;
u32 reg;
s32 status = IXGBE_ERR_EEPROM;
- for (i = 0; i < IXGBE_EERD_ATTEMPTS; i++) {
- reg = IXGBE_READ_REG(hw, IXGBE_EERD);
- if (reg & IXGBE_EEPROM_READ_REG_DONE) {
+ for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
+ if (ee_reg == IXGBE_NVM_POLL_READ)
+ reg = IXGBE_READ_REG(hw, IXGBE_EERD);
+ else
+ reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
+
+ if (reg & IXGBE_EEPROM_RW_REG_DONE) {
status = 0;
break;
}
@@ -1392,14 +1397,17 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
fctrl |= IXGBE_FCTRL_UPE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ hw->addr_ctrl.uc_set_promisc = true;
}
} else {
/* only disable if set by overflow, not by user */
- if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+ if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) &&
+ !(hw->addr_ctrl.user_set_promisc)) {
hw_dbg(hw, " Leaving address overflow promisc mode\n");
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
fctrl &= ~IXGBE_FCTRL_UPE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ hw->addr_ctrl.uc_set_promisc = false;
}
}
@@ -1484,26 +1492,24 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
/**
* ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
* @hw: pointer to hardware structure
- * @mc_addr_list: the list of new multicast addresses
- * @mc_addr_count: number of addresses
- * @next: iterator function to walk the multicast address list
+ * @netdev: pointer to net device structure
*
* The given list replaces any existing list. Clears the MC addrs from receive
* address registers and the multicast table. Uses unused receive address
* registers for the first multicast addresses, and hashes the rest into the
* multicast table.
**/
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, ixgbe_mc_addr_itr next)
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+ struct net_device *netdev)
{
+ struct netdev_hw_addr *ha;
u32 i;
- u32 vmdq;
/*
* Set the new number of MC addresses that we are being requested to
* use.
*/
- hw->addr_ctrl.num_mc_addrs = mc_addr_count;
+ hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
hw->addr_ctrl.mta_in_use = 0;
/* Clear the MTA */
@@ -1512,9 +1518,9 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
/* Add the new addresses */
- for (i = 0; i < mc_addr_count; i++) {
+ netdev_for_each_mc_addr(ha, netdev) {
hw_dbg(hw, " Adding the multicast addresses:\n");
- ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
+ ixgbe_set_mta(hw, ha->addr);
}
/* Enable mta */
@@ -2254,3 +2260,490 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
return 0;
}
+
+/**
+ * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_offset: SAN MAC address offset
+ *
+ * This function will read the EEPROM location for the SAN MAC address
+ * pointer, and returns the value at that location. This is used in both
+ * get and set mac_addr routines.
+ **/
+static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+ u16 *san_mac_offset)
+{
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available.
+ */
+ hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Reads the SAN MAC address from the EEPROM, if it's available. This is
+ * per-port, so set_lan_id() must be called before reading the addresses.
+ * set_lan_id() is called by identify_sfp(), but this cannot be relied
+ * upon for non-SFP connections, so we must call it here.
+ **/
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ u16 san_mac_data, san_mac_offset;
+ u8 i;
+
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available. If they're not, no point in calling set_lan_id() here.
+ */
+ ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+
+ if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
+ /*
+ * No addresses available in this EEPROM. It's not an
+ * error though, so just wipe the local address and return.
+ */
+ for (i = 0; i < 6; i++)
+ san_mac_addr[i] = 0xFF;
+
+ goto san_mac_addr_out;
+ }
+
+ /* make sure we know which port we need to program */
+ hw->mac.ops.set_lan_id(hw);
+ /* apply the port offset to the address offset */
+ (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+ for (i = 0; i < 3; i++) {
+ hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
+ san_mac_addr[i * 2] = (u8)(san_mac_data);
+ san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
+ san_mac_offset++;
+ }
+
+san_mac_addr_out:
+ return 0;
+}
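Each EEPROM read above returns one 16-bit word, and three consecutive words make up the 6-byte SAN MAC address, low byte first, with the port number selecting which block is read. A runnable sketch of just that unpacking; the word values and offsets are invented for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* pretend EEPROM contents: three little-endian words per port */
	uint16_t eeprom[] = { 0x1100, 0x3322, 0x5544,   /* port 0 block */
			      0xaa00, 0xccbb, 0xeedd }; /* port 1 block */
	unsigned int func = 1;                  /* which PCI function/port   */
	unsigned int offset = func ? 3 : 0;     /* analogue of PORTx offset  */
	uint8_t mac[6];
	int i;

	for (i = 0; i < 3; i++) {
		uint16_t word = eeprom[offset + i];
		mac[i * 2]     = (uint8_t)word;         /* low byte first */
		mac[i * 2 + 1] = (uint8_t)(word >> 8);  /* then high byte */
	}

	printf("SAN MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}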
+
+/**
+ * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
+ * @hw: pointer to hardware structure
+ *
+ * Read PCIe configuration space, and get the MSI-X vector count from
+ * the capabilities table.
+ **/
+u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ u16 msix_count;
+ pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
+ &msix_count);
+ msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
+
+ /* MSI-X count is zero-based in HW, so increment to give proper value */
+ msix_count++;
+
+ return msix_count;
+}
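The MSI-X table-size field read above is zero-based, so the usable vector count is the masked value plus one. A runnable check of that arithmetic; the 0x7FF mask mirrors the 11-bit Table Size field defined by the PCIe spec and stands in for IXGBE_PCIE_MSIX_TBL_SZ_MASK:

#include <stdio.h>
#include <stdint.h>

#define MSIX_TBL_SZ_MASK 0x7FF	/* MSI-X Table Size: low 11 bits of Message
				 * Control, zero-based per the PCIe spec */

int main(void)
{
	uint16_t msg_ctrl = 0x803F;	/* example config-space value */
	unsigned int vectors = (msg_ctrl & MSIX_TBL_SZ_MASK) + 1;

	printf("MSI-X vectors supported: %u\n", vectors);  /* 0x3F + 1 = 64 */
	return 0;
}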
+
+/**
+ * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to disassociate
+ * @vmdq: VMDq pool index to remove from the rar
+ **/
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 mpsar_lo, mpsar_hi;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ if (rar < rar_entries) {
+ mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+
+ if (!mpsar_lo && !mpsar_hi)
+ goto done;
+
+ if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+ if (mpsar_lo) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ mpsar_lo = 0;
+ }
+ if (mpsar_hi) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ mpsar_hi = 0;
+ }
+ } else if (vmdq < 32) {
+ mpsar_lo &= ~(1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
+ } else {
+ mpsar_hi &= ~(1 << (vmdq - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
+ }
+
+ /* was that the last pool using this rar? */
+ if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ hw->mac.ops.clear_rar(hw, rar);
+ } else {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ }
+
+done:
+ return 0;
+}
+
+/**
+ * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq pool index
+ **/
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 mpsar;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ if (rar < rar_entries) {
+ if (vmdq < 32) {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar |= 1 << vmdq;
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+ } else {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ mpsar |= 1 << (vmdq - 32);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
+ }
+ } else {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ }
+ return 0;
+}
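Together, ixgbe_set_vmdq_generic and ixgbe_clear_vmdq_generic treat MPSAR_LO/MPSAR_HI as one 64-bit pool bitmap per receive address: pools 0-31 land in the low register, 32-63 in the high one, and the address itself is released once the bitmap empties. A runnable model of that bit bookkeeping:

#include <stdio.h>
#include <stdint.h>

/* model of one RAR's pool bitmap: MPSAR_LO holds pools 0-31, MPSAR_HI 32-63 */
static uint32_t mpsar_lo, mpsar_hi;

static void set_vmdq(unsigned int vmdq)
{
	if (vmdq < 32)
		mpsar_lo |= 1u << vmdq;
	else
		mpsar_hi |= 1u << (vmdq - 32);
}

static void clear_vmdq(unsigned int vmdq)
{
	if (vmdq < 32)
		mpsar_lo &= ~(1u << vmdq);
	else
		mpsar_hi &= ~(1u << (vmdq - 32));
	if (!mpsar_lo && !mpsar_hi)
		printf("last pool gone - the driver would clear the RAR too\n");
}

int main(void)
{
	set_vmdq(5);
	set_vmdq(40);
	printf("lo=%08x hi=%08x\n", mpsar_lo, mpsar_hi); /* lo=00000020 hi=00000100 */
	clear_vmdq(5);
	clear_vmdq(40);
	return 0;
}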
+
+/**
+ * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < 128; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+
+ return 0;
+}
+
+/**
+ * ixgbe_find_vlvf_slot - find the VLAN id or the first empty slot
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ *
+ * return the VLVF index where this VLAN id should be placed
+ *
+ **/
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
+{
+ u32 bits = 0;
+ u32 first_empty_slot = 0;
+ s32 regindex;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /*
+ * Search for the vlan id in the VLVF entries. Save off the first empty
+ * slot found along the way
+ */
+ for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
+ if (!bits && !(first_empty_slot))
+ first_empty_slot = regindex;
+ else if ((bits & 0x0FFF) == vlan)
+ break;
+ }
+
+ /*
+ * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
+ * in the VLVF. Else use the first empty VLVF register for this
+ * vlan id.
+ */
+ if (regindex >= IXGBE_VLVF_ENTRIES) {
+ if (first_empty_slot)
+ regindex = first_empty_slot;
+ else {
+ hw_dbg(hw, "No space in VLVF.\n");
+ regindex = IXGBE_ERR_NO_SPACE;
+ }
+ }
+
+ return regindex;
+}
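The slot search above remembers the first empty VLVF entry as a fallback while looking for one whose low 12 bits already hold the VLAN id; index 0 is reserved, which is why the scan starts at 1. A runnable simulation with a small table (the entry count and VLVF layout are simplified):

#include <stdio.h>
#include <stdint.h>

#define VLVF_ENTRIES 8		/* real hardware has more; size is illustrative */
#define ERR_NO_SPACE (-1)

static int find_vlvf_slot(const uint32_t *vlvf, unsigned int vlan)
{
	int first_empty = 0;	/* 0 means "none found yet" (slot 0 is reserved) */
	int i;

	if (vlan == 0)
		return 0;
	for (i = 1; i < VLVF_ENTRIES; i++) {
		if (!vlvf[i] && !first_empty)
			first_empty = i;
		else if ((vlvf[i] & 0x0FFF) == vlan)
			return i;		/* already present */
	}
	return first_empty ? first_empty : ERR_NO_SPACE;
}

int main(void)
{
	uint32_t vlvf[VLVF_ENTRIES] = { 0, 0x80000064, 0, 0x800000c8, 0, 0, 0, 0 };

	printf("vlan 200 -> slot %d\n", find_vlvf_slot(vlvf, 200)); /* matches 0xc8 at 3 */
	printf("vlan 300 -> slot %d\n", find_vlvf_slot(vlvf, 300)); /* first empty: 2    */
	return 0;
}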
+
+/**
+ * ixgbe_set_vfta_generic - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ s32 regindex;
+ u32 bitindex;
+ u32 vfta;
+ u32 bits;
+ u32 vt;
+ u32 targetbit;
+ bool vfta_changed = false;
+
+ if (vlan > 4095)
+ return IXGBE_ERR_PARAM;
+
+ /*
+ * this is a 2 part operation - first the VFTA, then the
+ * VLVF and VLVFB if VT Mode is set
+ * We don't write the VFTA until we know the VLVF part succeeded.
+ */
+
+ /* Part 1
+ * The VFTA is a bitstring made up of 128 32-bit registers
+ * that enable the particular VLAN id, much like the MTA:
+ * bits[11-5]: which register
+ * bits[4-0]: which bit in the register
+ */
+ regindex = (vlan >> 5) & 0x7F;
+ bitindex = vlan & 0x1F;
+ targetbit = (1 << bitindex);
+ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+
+ if (vlan_on) {
+ if (!(vfta & targetbit)) {
+ vfta |= targetbit;
+ vfta_changed = true;
+ }
+ } else {
+ if ((vfta & targetbit)) {
+ vfta &= ~targetbit;
+ vfta_changed = true;
+ }
+ }
+
+ /* Part 2
+ * If VT Mode is set
+ * Either vlan_on
+ * make sure the vlan is in VLVF
+ * set the vind bit in the matching VLVFB
+ * Or !vlan_on
+ * clear the pool bit and possibly the vind
+ */
+ vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ if (vt & IXGBE_VT_CTL_VT_ENABLE) {
+ s32 vlvf_index;
+
+ vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
+ if (vlvf_index < 0)
+ return vlvf_index;
+
+ if (vlan_on) {
+ /* set the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2));
+ bits |= (1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2),
+ bits);
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1));
+ bits |= (1 << (vind-32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1),
+ bits);
+ }
+ } else {
+ /* clear the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2));
+ bits &= ~(1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2),
+ bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1));
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1));
+ bits &= ~(1 << (vind-32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1),
+ bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2));
+ }
+ }
+
+ /*
+ * If there are still bits set in the VLVFB registers
+ * for the VLAN ID indicated we need to see if the
+ * caller is requesting that we clear the VFTA entry bit.
+ * If the caller has requested that we clear the VFTA
+ * entry bit but there are still pools/VFs using this VLAN
+ * ID entry then ignore the request. We're not worried
+ * about the case where we're turning the VFTA VLAN ID
+ * entry bit on, only when requested to turn it off as
+ * there may be multiple pools and/or VFs using the
+ * VLAN ID entry. In that case we cannot clear the
+ * VFTA bit until all pools/VFs using that VLAN ID have also
+ * been cleared. This will be indicated by "bits" being
+ * zero.
+ */
+ if (bits) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+ (IXGBE_VLVF_VIEN | vlan));
+ if (!vlan_on) {
+ /* someone wants to clear the vfta entry
+ * but some pools/VFs are still using it.
+ * Ignore it. */
+ vfta_changed = false;
+ }
+ }
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ }
+
+ if (vfta_changed)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+
+ return 0;
+}
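Part 1 of ixgbe_set_vfta_generic is plain bit arithmetic: bits 11:5 of the VLAN id select one of 128 32-bit VFTA registers and bits 4:0 select the bit within it. A runnable worked example:

#include <stdio.h>

int main(void)
{
	unsigned int vlan = 100;                     /* any 12-bit VLAN id    */
	unsigned int regindex = (vlan >> 5) & 0x7F;  /* bits 11:5 -> register */
	unsigned int bitindex = vlan & 0x1F;         /* bits 4:0  -> bit      */
	unsigned int targetbit = 1u << bitindex;

	/* VLAN 100: register 3, bit 4, mask 0x00000010 */
	printf("VFTA[%u] bit %u (mask 0x%08x)\n", regindex, bitindex, targetbit);
	return 0;
}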
+
+/**
+ * ixgbe_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+{
+ u32 offset;
+
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+ for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_check_mac_link_generic - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ u32 links_reg;
+ u32 i;
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ if (links_reg & IXGBE_LINKS_UP) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msleep(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ }
+ } else {
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+ if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_10G_82599)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_1G_82599)
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+
+ /* if link is down, zero out the current_mode */
+ if (*link_up == false) {
+ hw->fc.current_mode = ixgbe_fc_none;
+ hw->fc.fc_was_autonegged = false;
+ }
+
+ return 0;
+}
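The link check boils down to a mask-and-compare on the LINKS register. The sketch below only shows the shape of that decode; the bit values are placeholders, not the real IXGBE_LINKS_* definitions from ixgbe_type.h:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* placeholder bit assignments - the real IXGBE_LINKS_* values live in ixgbe_type.h */
#define LINKS_UP	 0x40000000u
#define LINKS_SPEED_MASK 0x30000000u
#define LINKS_SPEED_10G	 0x30000000u
#define LINKS_SPEED_1G	 0x20000000u

static void decode_links(uint32_t links_reg)
{
	bool link_up = links_reg & LINKS_UP;
	const char *speed;

	if ((links_reg & LINKS_SPEED_MASK) == LINKS_SPEED_10G)
		speed = "10G";
	else if ((links_reg & LINKS_SPEED_MASK) == LINKS_SPEED_1G)
		speed = "1G";
	else
		speed = "100M";	/* like the driver, default to 100 full */

	printf("link %s, speed %s\n", link_up ? "up" : "down", speed);
}

int main(void)
{
	decode_links(0x70000000u);	/* up, 10G */
	decode_links(0x00000000u);	/* down    */
	return 0;
}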
+
+/**
+ * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
+ * the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function reads the alternative SAN MAC address block in the EEPROM
+ * to check whether the alternative WWNN/WWPN prefixes are supported.
+ **/
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
+{
+ u16 offset, caps;
+ u16 alt_san_mac_blk_offset;
+
+ /* clear output first */
+ *wwnn_prefix = 0xFFFF;
+ *wwpn_prefix = 0xFFFF;
+
+ /* check if alternative SAN MAC is supported */
+ hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+ &alt_san_mac_blk_offset);
+
+ if ((alt_san_mac_blk_offset == 0) ||
+ (alt_san_mac_blk_offset == 0xFFFF))
+ goto wwn_prefix_out;
+
+ /* check capability in alternative san mac address block */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+ hw->eeprom.ops.read(hw, offset, &caps);
+ if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+ goto wwn_prefix_out;
+
+ /* get the corresponding prefix for WWNN/WWPN */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+
+wwn_prefix_out:
+ return 0;
+}
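The WWN prefix lookup is two levels of EEPROM indirection: a block pointer, a capability word inside the block, then the two prefix words, with 0xFFFF left in place when anything is missing. A runnable simulation over a flat array; every offset and the capability bit are illustrative placeholders, not the IXGBE_ALT_SAN_MAC_* values:

#include <stdio.h>
#include <stdint.h>

#define BLK_PTR_WORD	0x04	/* where the block pointer lives (placeholder)   */
#define CAPS_OFFSET	0x00	/* offsets within the block (placeholders)       */
#define WWNN_OFFSET	0x01
#define WWPN_OFFSET	0x02
#define CAPS_ALTWWN	0x0001	/* "alternative WWN supported" bit (placeholder) */

int main(void)
{
	uint16_t eeprom[32] = { 0 };
	uint16_t wwnn_prefix = 0xFFFF, wwpn_prefix = 0xFFFF;	/* defaults */
	uint16_t blk;

	/* fake EEPROM image: pointer -> block at word 0x10 with caps + prefixes */
	eeprom[BLK_PTR_WORD] = 0x10;
	eeprom[0x10 + CAPS_OFFSET] = CAPS_ALTWWN;
	eeprom[0x10 + WWNN_OFFSET] = 0x2000;
	eeprom[0x10 + WWPN_OFFSET] = 0x2001;

	blk = eeprom[BLK_PTR_WORD];
	if (blk != 0 && blk != 0xFFFF && (eeprom[blk + CAPS_OFFSET] & CAPS_ALTWWN)) {
		wwnn_prefix = eeprom[blk + WWNN_OFFSET];
		wwpn_prefix = eeprom[blk + WWPN_OFFSET];
	}

	printf("wwnn prefix 0x%04x, wwpn prefix 0x%04x\n", wwnn_prefix, wwpn_prefix);
	return 0;
}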
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 13606d4..3080afb 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -30,6 +30,7 @@
#include "ixgbe_type.h"
+u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
@@ -45,20 +46,20 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count,
- ixgbe_mc_addr_itr func);
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+ struct net_device *netdev);
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
@@ -71,9 +72,16 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
-
-s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
+ u32 vind, bool vlan_on);
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index dd4883f..71da325 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -488,7 +488,6 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
if (adapter->temp_dcb_cfg.pfc_mode_enable !=
adapter->dcb_cfg.pfc_mode_enable)
adapter->dcb_set_bitmap |= BIT_PFC;
- return;
}
/**
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 8f461d5..c50a754 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -212,8 +212,8 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->port = PORT_FIBRE;
break;
case ixgbe_phy_nl:
- case ixgbe_phy_tw_tyco:
- case ixgbe_phy_tw_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
case ixgbe_phy_sfp_ftl:
case ixgbe_phy_sfp_avago:
case ixgbe_phy_sfp_intel:
@@ -365,7 +365,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
else
fc.disable_fc_autoneg = false;
- if (pause->rx_pause && pause->tx_pause)
+ if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
fc.requested_mode = ixgbe_fc_full;
else if (pause->rx_pause && !pause->tx_pause)
fc.requested_mode = ixgbe_fc_rx_pause;
@@ -1458,8 +1458,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
struct ixgbe_tx_buffer *buf =
&(tx_ring->tx_buffer_info[i]);
if (buf->dma)
- pci_unmap_single(pdev, buf->dma, buf->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, buf->dma,
+ buf->length, DMA_TO_DEVICE);
if (buf->skb)
dev_kfree_skb(buf->skb);
}
@@ -1470,22 +1470,22 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
struct ixgbe_rx_buffer *buf =
&(rx_ring->rx_buffer_info[i]);
if (buf->dma)
- pci_unmap_single(pdev, buf->dma,
+ dma_unmap_single(&pdev->dev, buf->dma,
IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (buf->skb)
dev_kfree_skb(buf->skb);
}
}
if (tx_ring->desc) {
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
if (rx_ring->desc) {
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -1493,8 +1493,6 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
tx_ring->tx_buffer_info = NULL;
kfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
-
- return;
}
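This and the following hunks are a mechanical conversion from the legacy pci_* DMA wrappers (pci_map_single, pci_unmap_single, pci_alloc_consistent, ...) to the generic dma_* API operating on &pdev->dev. A minimal sketch of the new-style calls; this is kernel-only code meant to illustrate the pattern, not a standalone program or part of the patch:

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative only: map one buffer for transmit the "new" way. */
static int example_map_tx(struct pci_dev *pdev, void *buf, size_t len,
			  dma_addr_t *dma)
{
	*dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *dma))	/* replaces pci_dma_mapping_error() */
		return -ENOMEM;
	return 0;
}

static void example_unmap_tx(struct pci_dev *pdev, dma_addr_t dma, size_t len)
{
	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);	/* was pci_unmap_single() */
}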
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
@@ -1520,8 +1518,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma))) {
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!(tx_ring->desc)) {
ret_val = 2;
goto err_nomem;
}
@@ -1563,8 +1562,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[i].length = skb->len;
tx_ring->tx_buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
desc->read.buffer_addr =
cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
desc->read.cmd_type_len = cpu_to_le32(skb->len);
@@ -1593,8 +1592,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma))) {
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ if (!(rx_ring->desc)) {
ret_val = 5;
goto err_nomem;
}
@@ -1661,8 +1661,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->rx_buffer_info[i].skb = skb;
rx_ring->rx_buffer_info[i].dma =
- pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ dma_map_single(&pdev->dev, skb->data,
+ IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
rx_desc->read.pkt_addr =
cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
memset(skb->data, 0x00, skb->len);
@@ -1775,10 +1775,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
ixgbe_create_lbtest_frame(
tx_ring->tx_buffer_info[k].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
+ dma_sync_single_for_device(&pdev->dev,
tx_ring->tx_buffer_info[k].dma,
tx_ring->tx_buffer_info[k].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (unlikely(++k == tx_ring->count))
k = 0;
}
@@ -1789,10 +1789,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
good_cnt = 0;
do {
/* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
+ dma_sync_single_for_cpu(&pdev->dev,
rx_ring->rx_buffer_info[l].dma,
IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
ret_val = ixgbe_check_lbtest_frame(
rx_ring->rx_buffer_info[l].skb, 1024);
if (!ret_val)
@@ -1971,8 +1971,6 @@ static void ixgbe_get_wol(struct net_device *netdev,
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & IXGBE_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
-
- return;
}
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2079,12 +2077,32 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
return 0;
}
+/*
+ * this function must be called before setting the new value of
+ * rx_itr_setting
+ */
+static bool ixgbe_reenable_rsc(struct ixgbe_adapter *adapter,
+ struct ethtool_coalesce *ec)
+{
+ /* check the old value and enable RSC if necessary */
+ if ((adapter->rx_itr_setting == 0) &&
+ (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ adapter->netdev->features |= NETIF_F_LRO;
+ DPRINTK(PROBE, INFO, "rx-usecs set to %d, re-enabling RSC\n",
+ ec->rx_coalesce_usecs);
+ return true;
+ }
+ return false;
+}
+
static int ixgbe_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
int i;
+ bool need_reset = false;
/* don't accept tx specific changes if we've got mixed RxTx vectors */
if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
@@ -2095,11 +2113,20 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
if (ec->rx_coalesce_usecs > 1) {
+ u32 max_int;
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ max_int = IXGBE_MAX_RSC_INT_RATE;
+ else
+ max_int = IXGBE_MAX_INT_RATE;
+
/* check the limits */
- if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
+ if ((1000000/ec->rx_coalesce_usecs > max_int) ||
(1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
return -EINVAL;
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_reenable_rsc(adapter, ec);
+
/* store the value in ints/second */
adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
@@ -2108,6 +2135,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
/* clear the lower bit as its used for dynamic state */
adapter->rx_itr_setting &= ~1;
} else if (ec->rx_coalesce_usecs == 1) {
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_reenable_rsc(adapter, ec);
+
/* 1 means dynamic mode */
adapter->rx_eitr_param = 20000;
adapter->rx_itr_setting = 1;
@@ -2116,14 +2146,30 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
* any other value means disable eitr, which is best
* served by setting the interrupt rate very high
*/
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
- adapter->rx_eitr_param = IXGBE_MAX_RSC_INT_RATE;
- else
- adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
+ adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
adapter->rx_itr_setting = 0;
+
+ /*
+ * if hardware RSC is enabled, disable it when
+ * setting low latency mode, to avoid an erratum,
+ * assuming that when the user sets low latency mode
+ * they want it at the cost of anything else
+ */
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+ netdev->features &= ~NETIF_F_LRO;
+ DPRINTK(PROBE, INFO,
+ "rx-usecs set to 0, disabling RSC\n");
+
+ need_reset = true;
+ }
}
if (ec->tx_coalesce_usecs > 1) {
+ /*
+ * don't have to worry about max_int as above because
+ * tx vectors don't do hardware RSC (an rx function)
+ */
/* check the limits */
if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
(1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
@@ -2167,6 +2213,18 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
ixgbe_write_eitr(q_vector);
}
+ /*
+ * do reset here at the end to make sure EITR==0 case is handled
+ * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
+ * also locks in RSC enable/disable which requires reset
+ */
+ if (need_reset) {
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+ }
+
return 0;
}
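ethtool supplies microseconds per interrupt while the driver stores interrupts per second, so the conversion above is 1000000/usecs with a range check, and values 0 and 1 are treated as the special "off" and "dynamic" modes. A runnable sketch of that arithmetic; the rate limits are placeholders for IXGBE_MIN_INT_RATE and IXGBE_MAX_INT_RATE:

#include <stdio.h>

#define MIN_INT_RATE 1000	/* placeholder lower bound (ints/sec) */
#define MAX_INT_RATE 500000	/* placeholder upper bound (ints/sec) */

static int usecs_to_rate(unsigned int usecs, unsigned int *rate)
{
	if (usecs < 2)
		return -1;		/* 0 and 1 are handled as special modes */
	*rate = 1000000u / usecs;
	if (*rate > MAX_INT_RATE || *rate < MIN_INT_RATE)
		return -1;		/* -EINVAL in the driver */
	return 0;
}

int main(void)
{
	unsigned int rate;

	if (usecs_to_rate(125, &rate) == 0)
		printf("rx-usecs 125 -> %u interrupts/sec\n", rate);	/* 8000 */
	if (usecs_to_rate(1000000, &rate) != 0)
		printf("rx-usecs 1000000 rejected (below minimum rate)\n");
	return 0;
}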
@@ -2178,10 +2236,26 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
ethtool_op_set_flags(netdev, data);
/* if state changes we need to update adapter->flags and reset */
- if ((!!(data & ETH_FLAG_LRO)) !=
- (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
- adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
- need_reset = true;
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
+ /*
+ * cast both to bool and verify if they are set the same
+ * but only enable RSC if itr is non-zero, as
+ * itr=0 and RSC are mutually exclusive
+ */
+ if (((!!(data & ETH_FLAG_LRO)) !=
+ (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) &&
+ adapter->rx_itr_setting) {
+ adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ need_reset = true;
+ break;
+ default:
+ break;
+ }
+ } else if (!adapter->rx_itr_setting) {
+ netdev->features &= ~NETIF_F_LRO;
+ }
}
/*
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 6493049..45182ab 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -32,6 +32,7 @@
#endif /* CONFIG_IXGBE_DCB */
#include <linux/if_ether.h>
#include <linux/gfp.h>
+#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
@@ -312,10 +313,12 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
if (fcerr == IXGBE_FCERR_BADCRC)
skb->ip_summed = CHECKSUM_NONE;
- skb_reset_network_header(skb);
- skb_set_transport_header(skb, skb_network_offset(skb) +
- sizeof(struct fcoe_hdr));
- fh = (struct fc_frame_header *)skb_transport_header(skb);
+ if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
+ fh = (struct fc_frame_header *)(skb->data +
+ sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
+ else
+ fh = (struct fc_frame_header *)(skb->data +
+ sizeof(struct fcoe_hdr));
fctl = ntoh24(fh->fh_f_ctl);
if (fctl & FC_FC_EX_CTX)
xid = be16_to_cpu(fh->fh_ox_id);
@@ -536,12 +539,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
}
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
- fcoe_i = f->mask;
- fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
- fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
- IXGBE_ETQS_QUEUE_EN |
- (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
} else {
/* Use single rx queue for FCoE */
fcoe_i = f->mask;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 6c00ee4..9551cbb 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -175,6 +175,345 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}
+struct ixgbe_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
+
+ /* General Registers */
+ {IXGBE_CTRL, "CTRL"},
+ {IXGBE_STATUS, "STATUS"},
+ {IXGBE_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {IXGBE_EICR, "EICR"},
+
+ /* RX Registers */
+ {IXGBE_SRRCTL(0), "SRRCTL"},
+ {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
+ {IXGBE_RDLEN(0), "RDLEN"},
+ {IXGBE_RDH(0), "RDH"},
+ {IXGBE_RDT(0), "RDT"},
+ {IXGBE_RXDCTL(0), "RXDCTL"},
+ {IXGBE_RDBAL(0), "RDBAL"},
+ {IXGBE_RDBAH(0), "RDBAH"},
+
+ /* TX Registers */
+ {IXGBE_TDBAL(0), "TDBAL"},
+ {IXGBE_TDBAH(0), "TDBAH"},
+ {IXGBE_TDLEN(0), "TDLEN"},
+ {IXGBE_TDH(0), "TDH"},
+ {IXGBE_TDT(0), "TDT"},
+ {IXGBE_TXDCTL(0), "TXDCTL"},
+
+ /* List Terminator */
+ {}
+};
+
+
+/*
+ * ixgbe_regdump - register printout routine
+ */
+static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
+{
+ int i = 0, j = 0;
+ char rname[16];
+ u32 regs[64];
+
+ switch (reginfo->ofs) {
+ case IXGBE_SRRCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+ break;
+ case IXGBE_DCA_RXCTRL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ break;
+ case IXGBE_RDLEN(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
+ break;
+ case IXGBE_RDH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
+ break;
+ case IXGBE_RDT(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
+ break;
+ case IXGBE_RXDCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ break;
+ case IXGBE_RDBAL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
+ break;
+ case IXGBE_RDBAH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
+ break;
+ case IXGBE_TDBAL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
+ break;
+ case IXGBE_TDBAH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
+ break;
+ case IXGBE_TDLEN(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
+ break;
+ case IXGBE_TDH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
+ break;
+ case IXGBE_TDT(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
+ break;
+ case IXGBE_TXDCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+ break;
+ default:
+ printk(KERN_INFO "%-15s %08x\n", reginfo->name,
+ IXGBE_READ_REG(hw, reginfo->ofs));
+ return;
+ }
+
+ for (i = 0; i < 8; i++) {
+ snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
+ printk(KERN_ERR "%-15s ", rname);
+ for (j = 0; j < 8; j++)
+ printk(KERN_CONT "%08x ", regs[i*8+j]);
+ printk(KERN_CONT "\n");
+ }
+
+}
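For per-queue registers the dump reads 64 instances and prints them as eight rows of eight, labelling each row with the index range it covers. A runnable model of just that formatting loop, using fabricated register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t regs[64];
	char rname[16];
	int i, j;

	for (i = 0; i < 64; i++)		/* fake per-queue register values */
		regs[i] = 0x01000000u + i;

	for (i = 0; i < 8; i++) {
		snprintf(rname, sizeof(rname), "RDT[%d-%d]", i * 8, i * 8 + 7);
		printf("%-15s ", rname);
		for (j = 0; j < 8; j++)
			printf("%08x ", regs[i * 8 + j]);
		printf("\n");
	}
	return 0;
}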
+
+/*
+ * ixgbe_dump - Print registers, tx-rings and rx-rings
+ */
+static void ixgbe_dump(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_reg_info *reginfo;
+ int n = 0;
+ struct ixgbe_ring *tx_ring;
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct ixgbe_ring *rx_ring;
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *rx_buffer_info;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ printk(KERN_INFO "Device Name state "
+ "trans_start last_rx\n");
+ printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ printk(KERN_INFO " Register Name Value\n");
+ for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ ixgbe_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ] "
+ "leng ntw timestamp\n");
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ tx_buffer_info =
+ &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)tx_buffer_info->dma,
+ tx_buffer_info->length,
+ tx_buffer_info->next_to_watch,
+ (u64)tx_buffer_info->time_stamp);
+ }
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats
+ *
+ * Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 36 35 32 31 24 23 20 19 0
+ */
+
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "T [desc] [address 63:0 ] "
+ "[PlPOIdStDDt Ln] [bi->dma ] "
+ "leng ntw timestamp bi->skb\n");
+
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
+ " %04X %3X %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)tx_buffer_info->dma,
+ tx_buffer_info->length,
+ tx_buffer_info->next_to_watch,
+ (u64)tx_buffer_info->time_stamp,
+ tx_buffer_info->skb);
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter) &&
+ tx_buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(tx_buffer_info->dma),
+ tx_buffer_info->length, true);
+ }
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO "%5d %5X %5X\n", n,
+ rx_ring->next_to_use, rx_ring->next_to_clean);
+ }
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+ /* Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 16 15 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
+ * | Checksum Ident | | | | Type | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "R [desc] [ PktBuf A0] "
+ "[ HeadBuf DD] [bi->dma ] [bi->skb] "
+ "<-- Adv Rx Read format\n");
+ printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
+ "[vl er S cks ln] ---------------- [bi->skb] "
+ "<-- Adv Rx Write-Back format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ u0 = (struct my_u0 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (staterr & IXGBE_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX ---------------- %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ rx_buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)rx_buffer_info->dma,
+ rx_buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter)) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(rx_buffer_info->dma),
+ rx_ring->rx_buf_len, true);
+
+ if (rx_ring->rx_buf_len
+ < IXGBE_RXBUFFER_2048)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(
+ rx_buffer_info->page_dma +
+ rx_buffer_info->page_offset
+ ),
+ PAGE_SIZE/2, true);
+ }
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ }
+ }
+
+exit:
+ return;
+}
+
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
u32 ctrl_ext;
@@ -266,15 +605,15 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev,
+ dma_unmap_single(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->dma = 0;
}
if (tx_buffer_info->skb) {
@@ -286,16 +625,16 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
}
/**
- * ixgbe_tx_is_paused - check if the tx ring is paused
+ * ixgbe_tx_xon_state - check the tx ring xon state
* @adapter: the ixgbe adapter
* @tx_ring: the corresponding tx_ring
*
* If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
* corresponding TC of this tx_ring when checking TFCS.
*
- * Returns : true if paused
+ * Returns : true if in xon state (currently not paused)
*/
-static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
+static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
{
u32 txoff = IXGBE_TFCS_TXOFF;
@@ -351,7 +690,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
adapter->detect_tx_hung = false;
if (tx_ring->tx_buffer_info[eop].time_stamp &&
time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
- !ixgbe_tx_is_paused(adapter, tx_ring)) {
+ ixgbe_tx_xon_state(adapter, tx_ring)) {
/* detected Tx unit hang */
union ixgbe_adv_tx_desc *tx_desc;
tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -721,10 +1060,10 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
bi->page_offset ^= (PAGE_SIZE / 2);
}
- bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_dma = dma_map_page(&pdev->dev, bi->page,
bi->page_offset,
(PAGE_SIZE / 2),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
if (!bi->skb) {
@@ -743,9 +1082,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
- skb->data));
bi->skb = skb;
- bi->dma = pci_map_single(pdev, skb->data,
+ bi->dma = dma_map_single(&pdev->dev, skb->data,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -821,6 +1160,7 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
struct ixgbe_rsc_cb {
dma_addr_t dma;
+ bool delay_unmap;
};
#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
@@ -861,9 +1201,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- if (len > IXGBE_RX_HDR_SIZE)
- len = IXGBE_RX_HDR_SIZE;
upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ if ((len > IXGBE_RX_HDR_SIZE) ||
+ (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
+ len = IXGBE_RX_HDR_SIZE;
} else {
len = le16_to_cpu(rx_desc->wb.upper.length);
}
@@ -876,7 +1217,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (rx_buffer_info->dma) {
if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
(!(staterr & IXGBE_RXD_STAT_EOP)) &&
- (!(skb->prev)))
+ (!(skb->prev))) {
/*
* When HWRSC is enabled, delay unmapping
* of the first packet. It carries the
@@ -884,18 +1225,21 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
* access the header after the writeback.
* Only unmap it when EOP is reached
*/
+ IXGBE_RSC_CB(skb)->delay_unmap = true;
IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
- else
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ } else {
+ dma_unmap_single(&pdev->dev,
+ rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
+ }
rx_buffer_info->dma = 0;
skb_put(skb, len);
}
if (upper_len) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer_info->page,
@@ -936,11 +1280,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (skb->prev)
skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- if (IXGBE_RSC_CB(skb)->dma) {
- pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
+ if (IXGBE_RSC_CB(skb)->delay_unmap) {
+ dma_unmap_single(&pdev->dev,
+ IXGBE_RSC_CB(skb)->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
IXGBE_RSC_CB(skb)->dma = 0;
+ IXGBE_RSC_CB(skb)->delay_unmap = false;
}
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
@@ -1190,6 +1536,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
itr_reg |= (itr_reg << 16);
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
/*
+ * 82599 can support a value of zero, so allow it for
+ * max interrupt rate, but there is an erratum: it
+ * cannot be zero with RSC
+ */
+ if (itr_reg == 8 &&
+ !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+ itr_reg = 0;
+
+ /*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
*/
@@ -1261,8 +1616,6 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
ixgbe_write_eitr(q_vector);
}
-
- return;
}
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
@@ -1826,8 +2179,6 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
ixgbe_write_eitr(q_vector);
}
-
- return;
}
/**
@@ -2372,7 +2723,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
- ixgbe_set_vmolr(hw, adapter->num_vfs);
+ ixgbe_set_vmolr(hw, adapter->num_vfs, true);
}
/* Program MRQC for the distribution of queues */
@@ -2482,12 +2833,82 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
}
+/**
+ * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ int i, j;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+#ifdef CONFIG_IXGBE_DCB
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ vlnctrl &= ~IXGBE_VLNCTRL_VME;
+#endif
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+#ifdef CONFIG_IXGBE_DCB
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+ break;
+#endif
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i]->reg_idx;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+ vlnctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ int i, j;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i]->reg_idx;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+ vlnctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
static void ixgbe_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u32 ctrl;
- int i, j;
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_disable(adapter);
@@ -2498,25 +2919,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
* still receive traffic from a DCB-enabled host even if we're
* not in DCB mode.
*/
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
-
- /* Disable CFI check */
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
-
- /* enable VLAN tag stripping */
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- ctrl |= IXGBE_VLNCTRL_VME;
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- for (i = 0; i < adapter->num_rx_queues; i++) {
- u32 ctrl;
- j = adapter->rx_ring[i]->reg_idx;
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
- }
- }
-
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+ ixgbe_vlan_filter_enable(adapter);
ixgbe_vlan_rx_add_vid(netdev, 0);
@@ -2538,21 +2941,6 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
}
}
-static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
-{
- struct dev_mc_list *mc_ptr;
- u8 *addr = *mc_addr_ptr;
- *vmdq = 0;
-
- mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
- if (mc_ptr->next)
- *mc_addr_ptr = mc_ptr->next->dmi_addr;
- else
- *mc_addr_ptr = NULL;
-
- return addr;
-}
-
/**
* ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
@@ -2566,42 +2954,36 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- u32 fctrl, vlnctrl;
- u8 *addr_list = NULL;
- int addr_count = 0;
+ u32 fctrl;
/* Check for Promiscuous and All Multicast modes */
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
if (netdev->flags & IFF_PROMISC) {
- hw->addr_ctrl.user_set_promisc = 1;
+ hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+ /* don't hardware filter vlans in promisc mode */
+ ixgbe_vlan_filter_disable(adapter);
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
fctrl &= ~IXGBE_FCTRL_UPE;
- } else {
+ } else if (!hw->addr_ctrl.uc_set_promisc) {
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
}
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- hw->addr_ctrl.user_set_promisc = 0;
+ ixgbe_vlan_filter_enable(adapter);
+ hw->addr_ctrl.user_set_promisc = false;
}
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
/* reprogram secondary unicast list */
hw->mac.ops.update_uc_addr_list(hw, netdev);
/* reprogram multicast list */
- addr_count = netdev_mc_count(netdev);
- if (addr_count)
- addr_list = netdev->mc_list->dmi_addr;
- hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
- ixgbe_addr_list_itr);
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+
if (adapter->num_vfs)
ixgbe_restore_vf_multicasts(adapter);
}
@@ -2661,7 +3043,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 txdctl, vlnctrl;
+ u32 txdctl;
int i, j;
ixgbe_dcb_check_config(&adapter->dcb_cfg);
@@ -2679,22 +3061,8 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
}
/* Enable VLAN tag insert/strip */
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- if (hw->mac.type == ixgbe_mac_82598EB) {
- vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
- } else if (hw->mac.type == ixgbe_mac_82599EB) {
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
- for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i]->reg_idx;
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
- vlnctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
- }
- }
+ ixgbe_vlan_filter_enable(adapter);
+
hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
}
@@ -2750,8 +3118,10 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
case ixgbe_phy_sfp_ftl:
case ixgbe_phy_sfp_intel:
case ixgbe_phy_sfp_unknown:
- case ixgbe_phy_tw_tyco:
- case ixgbe_phy_tw_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ case ixgbe_phy_sfp_active_unknown:
+ case ixgbe_phy_sfp_ftl_active:
return true;
default:
return false;
@@ -2927,8 +3297,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- /* enable WTHRESH=8 descriptors, to encourage burst writeback */
- txdctl |= (8 << 16);
+ if (adapter->rx_itr_setting == 0) {
+ /* cannot set wthresh when itr==0 */
+ txdctl &= ~0x007F0000;
+ } else {
+ /* enable WTHRESH=8 descriptors, to encourage burst writeback */
+ txdctl |= (8 << 16);
+ }
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
}
@@ -3131,9 +3506,9 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
}
if (rx_buffer_info->skb) {
@@ -3141,11 +3516,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_buffer_info->skb = NULL;
do {
struct sk_buff *this = skb;
- if (IXGBE_RSC_CB(this)->dma) {
- pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma,
+ if (IXGBE_RSC_CB(this)->delay_unmap) {
+ dma_unmap_single(&pdev->dev,
+ IXGBE_RSC_CB(this)->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
IXGBE_RSC_CB(this)->dma = 0;
+ IXGBE_RSC_CB(skb)->delay_unmap = false;
}
skb = skb->prev;
dev_kfree_skb(this);
@@ -3154,8 +3531,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
if (!rx_buffer_info->page)
continue;
if (rx_buffer_info->page_dma) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
}
put_page(rx_buffer_info->page);
@@ -3268,22 +3645,23 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
- netif_tx_disable(netdev);
-
IXGBE_WRITE_FLUSH(hw);
msleep(10);
netif_tx_stop_all_queues(netdev);
- ixgbe_irq_disable(adapter);
-
- ixgbe_napi_disable_all(adapter);
-
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
del_timer_sync(&adapter->sfp_timer);
del_timer_sync(&adapter->watchdog_timer);
cancel_work_sync(&adapter->watchdog_task);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ ixgbe_irq_disable(adapter);
+
+ ixgbe_napi_disable_all(adapter);
+
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
cancel_work_sync(&adapter->fdir_reinit_task);
@@ -3301,8 +3679,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
- netif_carrier_off(netdev);
-
/* clear n-tuple filters that are cached */
ethtool_ntuple_flush(netdev);
@@ -3379,6 +3755,8 @@ static void ixgbe_reset_task(struct work_struct *work)
adapter->tx_timeout_count++;
+ ixgbe_dump(adapter);
+ netdev_err(adapter->netdev, "Reset adapter\n");
ixgbe_reinit_locked(adapter);
}
@@ -3479,12 +3857,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
adapter->num_tx_queues = 1;
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- DPRINTK(PROBE, INFO, "FCoE enabled with DCB \n");
+ DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
ixgbe_set_dcb_queues(adapter);
}
#endif
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n");
+ DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
ixgbe_set_fdir_queues(adapter);
@@ -4095,7 +4473,6 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
pci_disable_msi(adapter->pdev);
}
- return;
}
/**
@@ -4381,8 +4758,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -4452,7 +4829,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
DPRINTK(PROBE, ERR,
@@ -4513,7 +4891,8 @@ void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -4550,7 +4929,8 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -5100,7 +5480,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
&(adapter->tx_ring[i]->reinit_state));
} else {
DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
- "ignored adding FDIR ATR filters \n");
+ "ignored adding FDIR ATR filters\n");
}
/* Done FDIR Re-initialization, enable transmits */
netif_tx_start_all_queues(adapter->netdev);
@@ -5420,10 +5800,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = pci_map_single(pdev,
+ tx_buffer_info->dma = dma_map_single(&pdev->dev,
skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -5456,12 +5836,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
frag->page,
offset, size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -5697,7 +6077,8 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
}
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
- } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
+ skb->priority != TC_PRIO_CONTROL) {
tx_flags |= ((skb->queue_mapping & 0x7) << 13);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
@@ -5942,6 +6323,10 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
.ndo_do_ioctl = ixgbe_ioctl,
+ .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
+ .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
+ .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
@@ -6039,13 +6424,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 1c1efd3..22d21af 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -475,7 +475,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
msleep(edata);
break;
case IXGBE_DATA_NL:
- hw_dbg(hw, "DATA: \n");
+ hw_dbg(hw, "DATA:\n");
data_offset++;
hw->eeprom.ops.read(hw, data_offset++,
&phy_offset);
@@ -491,7 +491,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
break;
case IXGBE_CONTROL_NL:
data_offset++;
- hw_dbg(hw, "CONTROL: \n");
+ hw_dbg(hw, "CONTROL:\n");
if (edata == IXGBE_CONTROL_EOL_NL) {
hw_dbg(hw, "EOL\n");
end_data = true;
@@ -531,6 +531,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
u8 comp_codes_10g = 0;
u8 oui_bytes[3] = {0, 0, 0};
u8 cable_tech = 0;
+ u8 cable_spec = 0;
u16 enforce_sfp = 0;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
@@ -580,14 +581,30 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
} else if (hw->mac.type == ixgbe_mac_82599EB) {
- if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_da_cu_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_da_cu_core1;
- else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
+ hw->phy.ops.read_i2c_eeprom(
+ hw, IXGBE_SFF_CABLE_SPEC_COMP,
+ &cable_spec);
+ if (cable_spec &
+ IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core1;
+ } else {
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_unknown;
+ }
+ } else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core0;
@@ -637,10 +654,14 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
switch (vendor_oui) {
case IXGBE_SFF_VENDOR_OUI_TYCO:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
- hw->phy.type = ixgbe_phy_tw_tyco;
+ hw->phy.type =
+ ixgbe_phy_sfp_passive_tyco;
break;
case IXGBE_SFF_VENDOR_OUI_FTL:
- hw->phy.type = ixgbe_phy_sfp_ftl;
+ if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type = ixgbe_phy_sfp_ftl_active;
+ else
+ hw->phy.type = ixgbe_phy_sfp_ftl;
break;
case IXGBE_SFF_VENDOR_OUI_AVAGO:
hw->phy.type = ixgbe_phy_sfp_avago;
@@ -650,7 +671,11 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
break;
default:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
- hw->phy.type = ixgbe_phy_tw_unknown;
+ hw->phy.type =
+ ixgbe_phy_sfp_passive_unknown;
+ else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type =
+ ixgbe_phy_sfp_active_unknown;
else
hw->phy.type = ixgbe_phy_sfp_unknown;
break;
@@ -658,7 +683,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
}
/* All passive DA cables are supported */
- if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
+ if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
+ IXGBE_SFF_DA_ACTIVE_CABLE)) {
status = 0;
goto out;
}
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index 9cf5f3b..c9c5459 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -40,9 +40,12 @@
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index d4cd20f..f6cee94 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -48,7 +48,11 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
int entries, u16 *hash_list, u32 vf)
{
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+ struct ixgbe_hw *hw = &adapter->hw;
int i;
+ u32 vector_bit;
+ u32 vector_reg;
+ u32 mta_reg;
/* only so many hash values supported */
entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -68,8 +72,13 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
vfinfo->vf_mc_hashes[i] = hash_list[i];
}
- /* Flush and reset the mta with the new values */
- ixgbe_set_rx_mode(adapter->netdev);
+ for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
+ vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
+ vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
+ mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
+ mta_reg |= (1 << vector_bit);
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+ }
return 0;
}
@@ -98,38 +107,51 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
{
- u32 ctrl;
-
- /* Check if global VLAN already set, if not set it */
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
- if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
- /* enable VLAN tag insert/strip */
- ctrl |= IXGBE_VLNCTRL_VFE;
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
- }
-
return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}
-void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf)
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
- vmolr |= (IXGBE_VMOLR_AUPE |
- IXGBE_VMOLR_ROMPE |
+ vmolr |= (IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_ROPE |
IXGBE_VMOLR_BAM);
+ if (aupe)
+ vmolr |= IXGBE_VMOLR_AUPE;
+ else
+ vmolr &= ~IXGBE_VMOLR_AUPE;
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}
+static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (vid)
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
+ (vid | IXGBE_VMVIR_VLANA_DEFAULT));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+}
+
inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
/* reset offloads to defaults */
- ixgbe_set_vmolr(hw, vf);
-
+ if (adapter->vfinfo[vf].pf_vlan) {
+ ixgbe_set_vf_vlan(adapter, true,
+ adapter->vfinfo[vf].pf_vlan, vf);
+ ixgbe_set_vmvir(adapter,
+ (adapter->vfinfo[vf].pf_vlan |
+ (adapter->vfinfo[vf].pf_qos <<
+ VLAN_PRIO_SHIFT)), vf);
+ ixgbe_set_vmolr(hw, vf, false);
+ } else {
+ ixgbe_set_vmvir(adapter, 0, vf);
+ ixgbe_set_vmolr(hw, vf, true);
+ }
/* reset multicast table array for vf */
adapter->vfinfo[vf].num_vf_mc_hashes = 0;
@@ -263,10 +285,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
case IXGBE_VF_SET_MAC_ADDR:
{
u8 *new_mac = ((u8 *)(&msgbuf[1]));
- if (is_valid_ether_addr(new_mac))
+ if (is_valid_ether_addr(new_mac) &&
+ !adapter->vfinfo[vf].pf_set_mac)
ixgbe_set_vf_mac(adapter, vf, new_mac);
else
- retval = -1;
+ ixgbe_set_vf_mac(adapter,
+ vf, adapter->vfinfo[vf].vf_mac_addresses);
}
break;
case IXGBE_VF_SET_MULTICAST:
@@ -360,3 +384,76 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
}
}
+int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
+ return -EINVAL;
+ adapter->vfinfo[vf].pf_set_mac = true;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
+ dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
+ " change effective.\n");
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ return ixgbe_set_vf_mac(adapter, vf, mac);
+}
+
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+{
+ int err = 0;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
+ return -EINVAL;
+ if (vlan || qos) {
+ err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
+ if (err)
+ goto out;
+ ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ ixgbe_set_vmolr(&adapter->hw, vf, false);
+ adapter->vfinfo[vf].pf_vlan = vlan;
+ adapter->vfinfo[vf].pf_qos = qos;
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ } else {
+ err = ixgbe_set_vf_vlan(adapter, false,
+ adapter->vfinfo[vf].pf_vlan, vf);
+ ixgbe_set_vmvir(adapter, vlan, vf);
+ ixgbe_set_vmolr(&adapter->hw, vf, true);
+ adapter->vfinfo[vf].pf_vlan = 0;
+ adapter->vfinfo[vf].pf_qos = 0;
+ }
+out:
+ return err;
+}
+
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+int ixgbe_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+ ivi->vf = vf;
+ memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
+ ivi->tx_rate = 0;
+ ivi->vlan = adapter->vfinfo[vf].pf_vlan;
+ ivi->qos = adapter->vfinfo[vf].pf_qos;
+ return 0;
+}
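The per-VF multicast path in ixgbe_set_vf_multicasts() above now programs the MTA directly from the cached hash values instead of re-running ixgbe_set_rx_mode(): bits 5-11 of each 12-bit hash select one of the 128 32-bit MTA registers, and bits 0-4 select the bit within that register. A minimal sketch of that mapping (illustrative only, not part of the patch):

static inline void example_mta_set(struct ixgbe_hw *hw, u16 hash)
{
	u32 vector_reg = (hash >> 5) & 0x7F;	/* one of the 128 MTA registers */
	u32 vector_bit = hash & 0x1F;		/* bit inside that register */
	u32 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));

	mta_reg |= (1 << vector_bit);		/* set, never clear, as above */
	IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}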
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
index 51d1106..184730e 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -32,7 +32,7 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
int entries, u16 *hash_list, u32 vf);
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
-void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe);
void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
@@ -42,6 +42,12 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
+int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
+ u8 qos);
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+int ixgbe_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi);
#endif /* _IXGBE_SRIOV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 534affc..39b9be8 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -73,6 +73,7 @@
/* NVM Registers */
#define IXGBE_EEC 0x10010
#define IXGBE_EERD 0x10014
+#define IXGBE_EEWR 0x10018
#define IXGBE_FLA 0x1001C
#define IXGBE_EEMNGCTL 0x10110
#define IXGBE_EEMNGDATA 0x10114
@@ -219,6 +220,7 @@
#define IXGBE_MTQC 0x08120
#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
+#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_VT_CTL 0x051B0
#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
@@ -698,6 +700,7 @@
#define IXGBE_MREVID 0x11064
#define IXGBE_DCA_ID 0x11070
#define IXGBE_DCA_CTRL 0x11074
+#define IXGBE_SWFW_SYNC IXGBE_GSSR
/* PCIe registers 82599-specific */
#define IXGBE_GCR_EXT 0x11050
@@ -1311,6 +1314,10 @@
#define IXGBE_VLVF_ENTRIES 64
#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
+/* Per VF Port VLAN insertion rules */
+#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
/* STATUS Bit Masks */
@@ -1458,8 +1465,9 @@
#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
-/* GSSR definitions */
+/* SW_FW_SYNC/GSSR definitions */
#define IXGBE_GSSR_EEP_SM 0x0001
#define IXGBE_GSSR_PHY0_SM 0x0002
#define IXGBE_GSSR_PHY1_SM 0x0004
@@ -1479,6 +1487,8 @@
#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */
#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
+#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
+#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
/* EEPROM Addressing bits based on type (0-small, 1-large) */
#define IXGBE_EEC_ADDR_SIZE 0x00000400
#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
@@ -1534,10 +1544,12 @@
#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
/* EEPROM Read Register */
-#define IXGBE_EEPROM_READ_REG_DATA 16 /* data offset in EEPROM read reg */
-#define IXGBE_EEPROM_READ_REG_DONE 2 /* Offset to READ done bit */
-#define IXGBE_EEPROM_READ_REG_START 1 /* First bit to start operation */
-#define IXGBE_EEPROM_READ_ADDR_SHIFT 2 /* Shift to the address bits */
+#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */
+#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */
+#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */
+#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */
#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
@@ -1545,9 +1557,15 @@
#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
#endif
-#ifndef IXGBE_EERD_ATTEMPTS
-/* Number of 5 microseconds we wait for EERD read to complete */
-#define IXGBE_EERD_ATTEMPTS 100000
+#ifndef IXGBE_EERD_EEWR_ATTEMPTS
+/* Number of 5-microsecond iterations we wait for an EERD read or
+ * EEWR write to complete */
+#define IXGBE_EERD_EEWR_ATTEMPTS 100000
+#endif
+
+#ifndef IXGBE_FLUDONE_ATTEMPTS
+/* # attempts we wait for flash update to complete */
+#define IXGBE_FLUDONE_ATTEMPTS 20000
#endif
#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
@@ -2090,6 +2108,7 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
/* Software ATR hash keys */
#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
@@ -2159,10 +2178,12 @@ enum ixgbe_phy_type {
ixgbe_phy_qt,
ixgbe_phy_xaui,
ixgbe_phy_nl,
- ixgbe_phy_tw_tyco,
- ixgbe_phy_tw_unknown,
+ ixgbe_phy_sfp_passive_tyco,
+ ixgbe_phy_sfp_passive_unknown,
+ ixgbe_phy_sfp_active_unknown,
ixgbe_phy_sfp_avago,
ixgbe_phy_sfp_ftl,
+ ixgbe_phy_sfp_ftl_active,
ixgbe_phy_sfp_unknown,
ixgbe_phy_sfp_intel,
ixgbe_phy_sfp_unsupported,
@@ -2190,6 +2211,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_da_cu_core1 = 4,
ixgbe_sfp_type_srlr_core0 = 5,
ixgbe_sfp_type_srlr_core1 = 6,
+ ixgbe_sfp_type_da_act_lmt_core0 = 7,
+ ixgbe_sfp_type_da_act_lmt_core1 = 8,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@@ -2263,6 +2286,7 @@ struct ixgbe_addr_filter_info {
u32 mc_addr_in_rar_count;
u32 mta_in_use;
u32 overflow_promisc;
+ bool uc_set_promisc;
bool user_set_promisc;
};
@@ -2419,8 +2443,7 @@ struct ixgbe_mac_operations {
s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
- s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
- ixgbe_mc_addr_itr);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
@@ -2471,6 +2494,7 @@ struct ixgbe_mac_info {
u32 mcft_size;
u32 vft_size;
u32 num_rar_entries;
+ u32 rar_highwater;
u32 max_tx_queues;
u32 max_rx_queues;
u32 max_msix_vectors;
@@ -2577,8 +2601,10 @@ struct ixgbe_info {
#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
#define IXGBE_ERR_SFP_NOT_PRESENT -20
#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
+#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
#define IXGBE_ERR_FDIR_REINIT_FAILED -23
#define IXGBE_ERR_EEPROM_VERSION -24
+#define IXGBE_ERR_NO_SPACE -25
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index c44fdb0..ca2c81f 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -41,11 +41,13 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
-#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
-#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
-#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
-#define IXGBE_LINKS_UP 0x40000000
-#define IXGBE_LINKS_SPEED 0x20000000
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED_82599 0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 0cd6202..a16cff7 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -139,15 +139,15 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev,
+ dma_unmap_single(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->dma = 0;
}
if (tx_buffer_info->skb) {
@@ -416,10 +416,10 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
bi->page_offset ^= (PAGE_SIZE / 2);
}
- bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_dma = dma_map_page(&pdev->dev, bi->page,
bi->page_offset,
(PAGE_SIZE / 2),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
skb = bi->skb;
@@ -442,9 +442,9 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
bi->skb = skb;
}
if (!bi->dma) {
- bi->dma = pci_map_single(pdev, skb->data,
+ bi->dma = dma_map_single(&pdev->dev, skb->data,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -536,16 +536,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
rx_buffer_info->skb = NULL;
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
skb_put(skb, len);
}
if (upper_len) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer_info->page,
@@ -604,14 +604,13 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
* packets not getting split correctly
*/
if (staterr & IXGBE_RXD_STAT_LB) {
- u32 header_fixup_len = skb->len - skb->data_len;
+ u32 header_fixup_len = skb_headlen(skb);
if (header_fixup_len < 14)
skb_push(skb, header_fixup_len);
}
skb->protocol = eth_type_trans(skb, adapter->netdev);
ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
- adapter->netdev->last_rx = jiffies;
next_desc:
rx_desc->wb.upper.status_error = 0;
@@ -947,8 +946,6 @@ static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
ixgbevf_write_eitr(adapter, v_idx, itr_reg);
}
-
- return;
}
static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
@@ -962,12 +959,28 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
+ if (!hw->mbx.ops.check_for_ack(hw)) {
+ /*
+ * checking for the ack clears the PFACK bit. Place
+ * it back in the v2p_mailbox cache so that anyone
+ * polling for an ack will not miss it. Also
+ * avoid the read below because the code to read
+ * the mailbox will also clear the ack bit. This was
+ * causing lost acks. Just cache the bit and exit
+ * the IRQ handler.
+ */
+ hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+ goto out;
+ }
+
+ /* Not an ack interrupt, go ahead and read the message */
hw->mbx.ops.read(hw, &msg, 1);
if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
mod_timer(&adapter->watchdog_timer,
round_jiffies(jiffies + 1));
+out:
return IRQ_HANDLED;
}
@@ -1496,22 +1509,6 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
}
}
-static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
- u32 *vmdq)
-{
- struct dev_mc_list *mc_ptr;
- u8 *addr = *mc_addr_ptr;
- *vmdq = 0;
-
- mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
- if (mc_ptr->next)
- *mc_addr_ptr = mc_ptr->next->dmi_addr;
- else
- *mc_addr_ptr = NULL;
-
- return addr;
-}
-
/**
* ixgbevf_set_rx_mode - Multicast set
* @netdev: network interface device structure
@@ -1524,16 +1521,10 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- u8 *addr_list = NULL;
- int addr_count = 0;
/* reprogram multicast list */
- addr_count = netdev_mc_count(netdev);
- if (addr_count)
- addr_list = netdev->mc_list->dmi_addr;
if (hw->mac.ops.update_mc_addr_list)
- hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
- ixgbevf_addr_list_itr);
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1744,9 +1735,9 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
}
if (rx_buffer_info->skb) {
@@ -1760,8 +1751,8 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
}
if (!rx_buffer_info->page)
continue;
- pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
put_page(rx_buffer_info->page);
rx_buffer_info->page = NULL;
@@ -2158,8 +2149,6 @@ static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
-
- return;
}
/**
@@ -2418,9 +2407,9 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
if (link_up) {
if (!netif_carrier_ok(netdev)) {
- hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
- ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
- "10 Gbps\n" : "1 Gbps\n"));
+ hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
+ (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ 10 : 1);
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
} else {
@@ -2468,7 +2457,8 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -2513,8 +2503,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -2584,8 +2574,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
hw_dbg(&adapter->hw,
@@ -2646,7 +2636,8 @@ void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -2958,10 +2949,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = pci_map_single(adapter->pdev,
+ tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -2987,13 +2978,13 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
frag->page,
offset,
size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -3189,8 +3180,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
skb->len, hdr_len);
- netdev->trans_start = jiffies;
-
ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
@@ -3334,14 +3323,14 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -3482,7 +3471,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
hw_dbg(hw, "MAC: %d\n", hw->mac.type);
- hw_dbg(hw, "LRO is disabled \n");
+ hw_dbg(hw, "LRO is disabled\n");
hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
cards_found++;
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index 4b5dec0..f6f9299 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -252,22 +252,18 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
/**
* ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
* @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
- * @next: caller supplied function to return next address in list
+ * @netdev: pointer to net device structure
*
* Updates the Multicast Table Array.
**/
-static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count,
- ixgbe_mc_addr_itr next)
+static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
+ struct net_device *netdev)
{
+ struct netdev_hw_addr *ha;
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
u16 *vector_list = (u16 *)&msgbuf[1];
- u32 vector;
u32 cnt, i;
- u32 vmdq;
/* Each entry in the list uses 1 16 bit word. We have 30
* 16 bit words available in our HW msg buffer (minus 1 for the
@@ -278,13 +274,17 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
* addresses except for in large enterprise network environments.
*/
- cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+ cnt = netdev_mc_count(netdev);
+ if (cnt > 30)
+ cnt = 30;
msgbuf[0] = IXGBE_VF_SET_MULTICAST;
msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
- for (i = 0; i < cnt; i++) {
- vector = ixgbevf_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
- vector_list[i] = vector;
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == cnt)
+ break;
+ vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
}
mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
@@ -359,7 +359,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
else
*link_up = false;
- if (links_reg & IXGBE_LINKS_SPEED)
+ if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_10G_82599)
*speed = IXGBE_LINK_SPEED_10GB_FULL;
else
*speed = IXGBE_LINK_SPEED_1GB_FULL;
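Several conversions in this patch (ixgbevf_update_mc_addr_list_vf() above, and the jme, korina and ks8851 hunks below) replace the old struct dev_mc_list walk and caller-supplied iterator callbacks with netdev_for_each_mc_addr() over struct netdev_hw_addr. A minimal sketch of the new idiom; my_mc_hash() is a hypothetical stand-in for the device-specific CRC/hash function:

/* Sketch only: rebuild a 128-bit multicast hash filter. */
static void example_build_mc_filter(struct net_device *netdev, u32 hash[4])
{
	struct netdev_hw_addr *ha;

	memset(hash, 0, 4 * sizeof(u32));
	netdev_for_each_mc_addr(ha, netdev) {
		u32 bit = my_mc_hash(ha->addr) & 0x7f;	/* hypothetical helper */

		hash[bit >> 5] |= 1 << (bit & 0x1f);
	}
}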
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 1f31b05..94b750b 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -32,6 +32,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
+#include <linux/netdevice.h>
#include "defines.h"
#include "regs.h"
@@ -62,8 +63,7 @@ struct ixgbe_mac_operations {
/* RAR, Multicast, VLAN */
s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
- s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
- ixgbe_mc_addr_itr);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index d5932ca..78ddd8b 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -64,8 +64,6 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
ixp2000_reg_write(RING_TX_PENDING,
TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc)));
- dev->trans_start = jiffies;
-
local_irq_save(flags);
ip->tx_queue_entries++;
if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index b705ad3..99f24f5 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -103,8 +103,6 @@ jme_mdio_write(struct net_device *netdev,
if (i == 0)
jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg);
-
- return;
}
static inline void
@@ -130,8 +128,6 @@ jme_reset_phy_processor(struct jme_adapter *jme)
jme_mdio_write(jme->dev,
jme->mii_if.phy_id,
MII_BMCR, val | BMCR_RESET);
-
- return;
}
static void
@@ -2010,12 +2006,12 @@ jme_set_multi(struct net_device *netdev)
} else if (netdev->flags & IFF_ALLMULTI) {
jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
} else if (netdev->flags & IFF_MULTICAST) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int bit_nr;
jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
- netdev_for_each_mc_addr(mclist, netdev) {
- bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
+ netdev_for_each_mc_addr(ha, netdev) {
+ bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F;
mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
}
@@ -2839,7 +2835,7 @@ jme_init_one(struct pci_dev *pdev,
default:
jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
break;
- };
+ }
/*
* Must check before reset_mac_processor
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 300c224..26bf1b7 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -482,7 +482,7 @@ static void korina_multicast_list(struct net_device *dev)
{
struct korina_private *lp = netdev_priv(dev);
unsigned long flags;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u32 recognise = ETH_ARC_AB; /* always accept broadcasts */
int i;
@@ -502,8 +502,8 @@ static void korina_multicast_list(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- char *addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ char *addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -1135,7 +1135,7 @@ static int korina_probe(struct platform_device *pdev)
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
dev->base_addr = r->start;
- lp->eth_regs = ioremap_nocache(r->start, r->end - r->start);
+ lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
if (!lp->eth_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
rc = -ENXIO;
@@ -1143,7 +1143,7 @@ static int korina_probe(struct platform_device *pdev)
}
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
- lp->rx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
+ lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
if (!lp->rx_dma_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
rc = -ENXIO;
@@ -1151,7 +1151,7 @@ static int korina_probe(struct platform_device *pdev)
}
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
- lp->tx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
+ lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
if (!lp->tx_dma_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
rc = -ENXIO;
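The korina ioremap_nocache() calls above switch from the open-coded r->end - r->start to resource_size(); the resource range is inclusive, so the open-coded form maps one byte too few. For reference, resource_size() from linux/ioport.h is simply:

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}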
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 5c45cb5..f852ab3 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -1,5 +1,5 @@
/*
- * ks8842_main.c timberdale KS8842 ethernet driver
+ * ks8842.c timberdale KS8842 ethernet driver
* Copyright (c) 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
@@ -20,12 +20,15 @@
* The Micrel KS8842 behind the timberdale FPGA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/ks8842.h>
#define DRV_NAME "ks8842"
@@ -302,6 +305,20 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
ks8842_write16(adapter, 39, mac, REG_MACAR3);
}
+static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
+{
+ unsigned long flags;
+ unsigned i;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ for (i = 0; i < ETH_ALEN; i++) {
+ ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
+ ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
+ REG_MACAR1 + i);
+ }
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
{
return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
@@ -520,13 +537,14 @@ static int ks8842_open(struct net_device *netdev)
/* reset the HW */
ks8842_reset_hw(adapter);
+ ks8842_write_mac_addr(adapter, netdev->dev_addr);
+
ks8842_update_link_status(netdev, adapter);
err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
adapter);
if (err) {
- printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
- adapter->irq, err);
+ pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
return err;
}
@@ -567,10 +585,8 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
static int ks8842_set_mac(struct net_device *netdev, void *p)
{
struct ks8842_adapter *adapter = netdev_priv(netdev);
- unsigned long flags;
struct sockaddr *addr = p;
char *mac = (u8 *)addr->sa_data;
- int i;
dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
@@ -579,13 +595,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, mac, netdev->addr_len);
- spin_lock_irqsave(&adapter->lock, flags);
- for (i = 0; i < ETH_ALEN; i++) {
- ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
- ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
- REG_MACAR1 + i);
- }
- spin_unlock_irqrestore(&adapter->lock, flags);
+ ks8842_write_mac_addr(adapter, mac);
return 0;
}
@@ -604,6 +614,8 @@ static void ks8842_tx_timeout(struct net_device *netdev)
ks8842_reset_hw(adapter);
+ ks8842_write_mac_addr(adapter, netdev->dev_addr);
+
ks8842_update_link_status(netdev, adapter);
}
@@ -626,7 +638,9 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
struct resource *iomem;
struct net_device *netdev;
struct ks8842_adapter *adapter;
+ struct ks8842_platform_data *pdata = pdev->dev.platform_data;
u16 id;
+ unsigned i;
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
@@ -657,7 +671,25 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
netdev->netdev_ops = &ks8842_netdev_ops;
netdev->ethtool_ops = &ks8842_ethtool_ops;
- ks8842_read_mac_addr(adapter, netdev->dev_addr);
+ /* Check if a mac address was given */
+ i = netdev->addr_len;
+ if (pdata) {
+ for (i = 0; i < netdev->addr_len; i++)
+ if (pdata->macaddr[i] != 0)
+ break;
+
+ if (i < netdev->addr_len)
+ /* an address was passed, use it */
+ memcpy(netdev->dev_addr, pdata->macaddr,
+ netdev->addr_len);
+ }
+
+ if (i == netdev->addr_len) {
+ ks8842_read_mac_addr(adapter, netdev->dev_addr);
+
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ random_ether_addr(netdev->dev_addr);
+ }
id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);
@@ -668,8 +700,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, netdev);
- printk(KERN_INFO DRV_NAME
- " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
+ pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
(id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
return 0;
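The probe change above picks the MAC address with a sentinel on i: a non-zero address from platform data wins, otherwise the address is read from the chip, and a random one is generated if that read yields an invalid address. An equivalent, more explicit sketch of the same precedence (illustrative only):

	if (pdata && !is_zero_ether_addr(pdata->macaddr)) {
		memcpy(netdev->dev_addr, pdata->macaddr, netdev->addr_len);
	} else {
		ks8842_read_mac_addr(adapter, netdev->dev_addr);
		if (!is_valid_ether_addr(netdev->dev_addr))
			random_ether_addr(netdev->dev_addr);
	}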
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 9e9f9b3..b4fb07a 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -9,6 +9,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DEBUG
#include <linux/module.h>
@@ -76,7 +78,9 @@ union ks8851_tx_hdr {
* @msg_enable: The message flags controlling driver output (see ethtool).
* @fid: Incrementing frame id tag.
* @rc_ier: Cached copy of KS_IER.
+ * @rc_ccr: Cached copy of KS_CCR.
* @rc_rxqcr: Cached copy of KS_RXQCR.
+ * @eeprom_size: Companion eeprom size in bytes, 0 if no eeprom
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@@ -107,6 +111,8 @@ struct ks8851_net {
u16 rc_ier;
u16 rc_rxqcr;
+ u16 rc_ccr;
+ u16 eeprom_size;
struct mii_if_info mii;
struct ks8851_rxctrl rxctrl;
@@ -125,11 +131,6 @@ struct ks8851_net {
static int msg_enable;
-#define ks_info(_ks, _msg...) dev_info(&(_ks)->spidev->dev, _msg)
-#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->spidev->dev, _msg)
-#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->spidev->dev, _msg)
-#define ks_err(_ks, _msg...) dev_err(&(_ks)->spidev->dev, _msg)
-
/* shift for byte-enable data */
#define BYTE_EN(_x) ((_x) << 2)
@@ -167,7 +168,7 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "spi_sync() failed\n");
+ netdev_err(ks->netdev, "spi_sync() failed\n");
}
/**
@@ -197,7 +198,7 @@ static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "spi_sync() failed\n");
+ netdev_err(ks->netdev, "spi_sync() failed\n");
}
/**
@@ -263,7 +264,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "read: spi_sync() failed\n");
+ netdev_err(ks->netdev, "read: spi_sync() failed\n");
else if (ks8851_rx_1msg(ks))
memcpy(rxb, trx + 2, rxl);
else
@@ -417,8 +418,8 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
u8 txb[1];
int ret;
- if (netif_msg_rx_status(ks))
- ks_dbg(ks, "%s: %d@%p\n", __func__, len, buff);
+ netif_dbg(ks, rx_status, ks->netdev,
+ "%s: %d@%p\n", __func__, len, buff);
/* set the operation we're issuing */
txb[0] = KS_SPIOP_RXFIFO;
@@ -434,7 +435,7 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "%s: spi_sync() failed\n", __func__);
+ netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
}
/**
@@ -446,10 +447,11 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
*/
static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
{
- ks_dbg(ks, "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
- rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
- rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
- rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
+ netdev_dbg(ks->netdev,
+ "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
+ rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
+ rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
+ rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
}
/**
@@ -471,8 +473,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
rxfc = ks8851_rdreg8(ks, KS_RXFC);
- if (netif_msg_rx_status(ks))
- ks_dbg(ks, "%s: %d packets\n", __func__, rxfc);
+ netif_dbg(ks, rx_status, ks->netdev,
+ "%s: %d packets\n", __func__, rxfc);
/* Currently we're issuing a read per packet, but we could possibly
* improve the code by issuing a single read, getting the receive
@@ -489,9 +491,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
rxstat = rxh & 0xffff;
rxlen = rxh >> 16;
- if (netif_msg_rx_status(ks))
- ks_dbg(ks, "rx: stat 0x%04x, len 0x%04x\n",
- rxstat, rxlen);
+ netif_dbg(ks, rx_status, ks->netdev,
+ "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
/* the length of the packet includes the 32bit CRC */
@@ -553,9 +554,8 @@ static void ks8851_irq_work(struct work_struct *work)
status = ks8851_rdreg16(ks, KS_ISR);
- if (netif_msg_intr(ks))
- dev_dbg(&ks->spidev->dev, "%s: status 0x%04x\n",
- __func__, status);
+ netif_dbg(ks, intr, ks->netdev,
+ "%s: status 0x%04x\n", __func__, status);
if (status & IRQ_LCI) {
/* should do something about checking link status */
@@ -582,8 +582,8 @@ static void ks8851_irq_work(struct work_struct *work)
* system */
ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
- if (netif_msg_intr(ks))
- ks_dbg(ks, "%s: txspace %d\n", __func__, ks->tx_space);
+ netif_dbg(ks, intr, ks->netdev,
+ "%s: txspace %d\n", __func__, ks->tx_space);
}
if (status & IRQ_RXI)
@@ -659,9 +659,8 @@ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
unsigned fid = 0;
int ret;
- if (netif_msg_tx_queued(ks))
- dev_dbg(&ks->spidev->dev, "%s: skb %p, %d@%p, irq %d\n",
- __func__, txp, txp->len, txp->data, irq);
+ netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n",
+ __func__, txp, txp->len, txp->data, irq);
fid = ks->fid++;
fid &= TXFR_TXFID_MASK;
@@ -685,7 +684,7 @@ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "%s: spi_sync() failed\n", __func__);
+ netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
}
/**
@@ -746,8 +745,7 @@ static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
{
unsigned pmecr;
- if (netif_msg_hw(ks))
- ks_dbg(ks, "setting power mode %d\n", pwrmode);
+ netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
pmecr = ks8851_rdreg16(ks, KS_PMECR);
pmecr &= ~PMECR_PM_MASK;
@@ -771,8 +769,7 @@ static int ks8851_net_open(struct net_device *dev)
* else at the moment */
mutex_lock(&ks->lock);
- if (netif_msg_ifup(ks))
- ks_dbg(ks, "opening %s\n", dev->name);
+ netif_dbg(ks, ifup, ks->netdev, "opening\n");
/* bring chip out of any power saving mode it was in */
ks8851_set_powermode(ks, PMECR_PM_NORMAL);
@@ -828,8 +825,7 @@ static int ks8851_net_open(struct net_device *dev)
netif_start_queue(ks->netdev);
- if (netif_msg_ifup(ks))
- ks_dbg(ks, "network device %s up\n", dev->name);
+ netif_dbg(ks, ifup, ks->netdev, "network device up\n");
mutex_unlock(&ks->lock);
return 0;
@@ -847,8 +843,7 @@ static int ks8851_net_stop(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
- if (netif_msg_ifdown(ks))
- ks_info(ks, "%s: shutting down\n", dev->name);
+ netif_info(ks, ifdown, dev, "shutting down\n");
netif_stop_queue(dev);
@@ -876,8 +871,8 @@ static int ks8851_net_stop(struct net_device *dev)
while (!skb_queue_empty(&ks->txq)) {
struct sk_buff *txb = skb_dequeue(&ks->txq);
- if (netif_msg_ifdown(ks))
- ks_dbg(ks, "%s: freeing txb %p\n", __func__, txb);
+ netif_dbg(ks, ifdown, ks->netdev,
+ "%s: freeing txb %p\n", __func__, txb);
dev_kfree_skb(txb);
}
@@ -906,9 +901,8 @@ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb,
unsigned needed = calc_txlen(skb->len);
netdev_tx_t ret = NETDEV_TX_OK;
- if (netif_msg_tx_queued(ks))
- ks_dbg(ks, "%s: skb %p, %d@%p\n", __func__,
- skb, skb->len, skb->data);
+ netif_dbg(ks, tx_queued, ks->netdev,
+ "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
spin_lock(&ks->statelock);
@@ -968,13 +962,13 @@ static void ks8851_set_rx_mode(struct net_device *dev)
rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
RXCR1_RXPAFMA | RXCR1_RXMAFMA);
} else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) {
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
u32 crc;
/* accept some multicast */
- netdev_for_each_mc_addr(mcptr, dev) {
- crc = ether_crc(ETH_ALEN, mcptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc(ETH_ALEN, ha->addr);
crc >>= (32 - 6); /* get top six bits */
rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf));
@@ -1040,6 +1034,234 @@ static const struct net_device_ops ks8851_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+/* Companion eeprom access */
+
+enum { /* EEPROM programming states */
+ EEPROM_CONTROL,
+ EEPROM_ADDRESS,
+ EEPROM_DATA,
+ EEPROM_COMPLETE
+};
+
+/**
+ * ks8851_eeprom_read - read a 16-bit word from the ks8851 companion EEPROM
+ * @dev: The network device the PHY is on.
+ * @addr: EEPROM address to read
+ *
+ * eeprom_size: used to define the data coding length. Can be changed
+ * through debug-fs.
+ *
+ * Programs a read on the EEPROM using the ks8851 EEPROM SW access feature.
+ * Warning: The READ feature is not supported on ks8851 revision 0.
+ *
+ * Rough programming model:
+ * - on period start: set clock high and read value on bus
+ * - on period / 2: set clock low and program value on bus
+ * - start on period / 2
+ */
+unsigned int ks8851_eeprom_read(struct net_device *dev, unsigned int addr)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int eepcr;
+ int ctrl = EEPROM_OP_READ;
+ int state = EEPROM_CONTROL;
+ int bit_count = EEPROM_OP_LEN - 1;
+ unsigned int data = 0;
+ int dummy;
+ unsigned int addr_len;
+
+ addr_len = (ks->eeprom_size == 128) ? 6 : 8;
+
+ /* start transaction: chip select high, authorize write */
+ mutex_lock(&ks->lock);
+ eepcr = EEPCR_EESA | EEPCR_EESRWA;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr |= EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ while (state != EEPROM_COMPLETE) {
+ /* falling clock period starts... */
+ /* set EED_IO pin for control and address */
+ eepcr &= ~EEPCR_EEDO;
+ switch (state) {
+ case EEPROM_CONTROL:
+ eepcr |= ((ctrl >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0) {
+ bit_count = addr_len - 1;
+ state = EEPROM_ADDRESS;
+ }
+ break;
+ case EEPROM_ADDRESS:
+ eepcr |= ((addr >> bit_count) & 1) << 2;
+ bit_count--;
+ break;
+ case EEPROM_DATA:
+ /* Change to receive mode */
+ eepcr &= ~EEPCR_EESRWA;
+ break;
+ }
+
+ /* lower clock */
+ eepcr &= ~EEPCR_EESCK;
+
+ mutex_lock(&ks->lock);
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* wait read period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+
+ /* rising clock period starts... */
+
+ /* raise clock */
+ mutex_lock(&ks->lock);
+ eepcr |= EEPCR_EESCK;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* Manage read */
+ switch (state) {
+ case EEPROM_ADDRESS:
+ if (bit_count < 0) {
+ bit_count = EEPROM_DATA_LEN - 1;
+ state = EEPROM_DATA;
+ }
+ break;
+ case EEPROM_DATA:
+ mutex_lock(&ks->lock);
+ dummy = ks8851_rdreg16(ks, KS_EEPCR);
+ mutex_unlock(&ks->lock);
+ data |= ((dummy >> EEPCR_EESB_OFFSET) & 1) << bit_count;
+ if (bit_count-- <= 0)
+ state = EEPROM_COMPLETE;
+ break;
+ }
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+ }
+
+ /* close transaction */
+ mutex_lock(&ks->lock);
+ eepcr &= ~EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr = 0;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ return data;
+}
+
+/**
+ * ks8851_eeprom_write - write a 16-bit word to the ks8851 companion EEPROM
+ * @dev: The network device the PHY is on.
+ * @op: operand (can be WRITE, EWEN, EWDS)
+ * @addr: EEPROM address to write
+ * @data: data to write
+ *
+ * eeprom_size: used to define the data coding length. Can be changed
+ * through debugfs.
+ *
+ * Programs a write on the EEPROM using the ks8851 EEPROM SW access feature.
+ *
+ * Note that a write enable is required before writing data.
+ *
+ * Rough programming model:
+ * - on period start: set clock high
+ * - on period / 2: set clock low and program value on bus
+ * - start on period / 2
+ */
+void ks8851_eeprom_write(struct net_device *dev, unsigned int op,
+ unsigned int addr, unsigned int data)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int eepcr;
+ int state = EEPROM_CONTROL;
+ int bit_count = EEPROM_OP_LEN - 1;
+ unsigned int addr_len;
+
+ addr_len = (ks->eeprom_size == 128) ? 6 : 8;
+
+ switch (op) {
+ case EEPROM_OP_EWEN:
+ addr = 0x30;
+ break;
+ case EEPROM_OP_EWDS:
+ addr = 0;
+ break;
+ }
+
+ /* start transaction: chip select high, authorize write */
+ mutex_lock(&ks->lock);
+ eepcr = EEPCR_EESA | EEPCR_EESRWA;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr |= EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ while (state != EEPROM_COMPLETE) {
+ /* falling clock period starts... */
+ /* set EED_IO pin for control and address */
+ eepcr &= ~EEPCR_EEDO;
+ switch (state) {
+ case EEPROM_CONTROL:
+ eepcr |= ((op >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0) {
+ bit_count = addr_len - 1;
+ state = EEPROM_ADDRESS;
+ }
+ break;
+ case EEPROM_ADDRESS:
+ eepcr |= ((addr >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0) {
+ if (op == EEPROM_OP_WRITE) {
+ bit_count = EEPROM_DATA_LEN - 1;
+ state = EEPROM_DATA;
+ } else {
+ state = EEPROM_COMPLETE;
+ }
+ }
+ break;
+ case EEPROM_DATA:
+ eepcr |= ((data >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0)
+ state = EEPROM_COMPLETE;
+ break;
+ }
+
+ /* lower clock */
+ eepcr &= ~EEPCR_EESCK;
+
+ mutex_lock(&ks->lock);
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+
+ /* rising clock period starts... */
+
+ /* raise clock */
+ eepcr |= EEPCR_EESCK;
+ mutex_lock(&ks->lock);
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+ }
+
+ /* close transaction */
+ mutex_lock(&ks->lock);
+ eepcr &= ~EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr = 0;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+}
+
/* ethtool support */
static void ks8851_get_drvinfo(struct net_device *dev,
@@ -1086,6 +1308,117 @@ static int ks8851_nway_reset(struct net_device *dev)
return mii_nway_restart(&ks->mii);
}
+static int ks8851_get_eeprom_len(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ return ks->eeprom_size;
+}
+
+static int ks8851_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ u16 *eeprom_buff;
+ int first_word;
+ int last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (eeprom->len > ks->eeprom_size)
+ return -EINVAL;
+
+ eeprom->magic = ks8851_rdreg16(ks, KS_CIDER);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) *
+ (last_word - first_word + 1), GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = ks8851_eeprom_read(dev, first_word + i);
+
+ /* Device's eeprom is little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int ks8851_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len;
+ int first_word;
+ int last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->len > ks->eeprom_size)
+ return -EINVAL;
+
+ if (eeprom->magic != ks8851_rdreg16(ks, KS_CIDER))
+ return -EFAULT;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ max_len = (last_word - first_word + 1) * 2;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ eeprom_buff[0] = ks8851_eeprom_read(dev, first_word);
+ ptr++;
+ }
+ if ((eeprom->offset + eeprom->len) & 1)
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ eeprom_buff[last_word - first_word] =
+ ks8851_eeprom_read(dev, last_word);
+
+
+ /* Device's eeprom is little-endian, word addressable */
+ le16_to_cpus(&eeprom_buff[0]);
+ le16_to_cpus(&eeprom_buff[last_word - first_word]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+ ks8851_eeprom_write(dev, EEPROM_OP_EWEN, 0, 0);
+
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ ks8851_eeprom_write(dev, EEPROM_OP_WRITE, first_word + i,
+ eeprom_buff[i]);
+ mdelay(EEPROM_WRITE_TIME);
+ }
+
+ ks8851_eeprom_write(dev, EEPROM_OP_EWDS, 0, 0);
+
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
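The get_eeprom/set_eeprom handlers above convert the byte-oriented ethtool offset and length into 16-bit word accesses; an odd offset or length forces a read-modify-write of the boundary words. A small worked example (illustrative only), assuming a request with offset = 3 and len = 4:

	/* offset = 3, len = 4  ->  user bytes 3..6 are written */
	first_word = 3 >> 1;            /* word 1 (bytes 2-3) */
	last_word  = (3 + 4 - 1) >> 1;  /* word 3 (bytes 6-7) */
	/* words 1..3 are rewritten; byte 2 of word 1 and byte 7 of word 3
	 * must be preserved, so both boundary words are read back first. */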
static const struct ethtool_ops ks8851_ethtool_ops = {
.get_drvinfo = ks8851_get_drvinfo,
.get_msglevel = ks8851_get_msglevel,
@@ -1094,6 +1427,9 @@ static const struct ethtool_ops ks8851_ethtool_ops = {
.set_settings = ks8851_set_settings,
.get_link = ks8851_get_link,
.nway_reset = ks8851_nway_reset,
+ .get_eeprom_len = ks8851_get_eeprom_len,
+ .get_eeprom = ks8851_get_eeprom,
+ .set_eeprom = ks8851_set_eeprom,
};
/* MII interface controls */
@@ -1187,17 +1523,17 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
rd = ks8851_rdreg16(ks, KS_MBIR);
if ((rd & both_done) != both_done) {
- ks_warn(ks, "Memory selftest not finished\n");
+ netdev_warn(ks->netdev, "Memory selftest not finished\n");
return 0;
}
if (rd & MBIR_TXMBFA) {
- ks_err(ks, "TX memory selftest fail\n");
+ netdev_err(ks->netdev, "TX memory selftest fail\n");
ret |= 1;
}
if (rd & MBIR_RXMBFA) {
- ks_err(ks, "RX memory selftest fail\n");
+ netdev_err(ks->netdev, "RX memory selftest fail\n");
ret |= 2;
}
@@ -1279,6 +1615,14 @@ static int __devinit ks8851_probe(struct spi_device *spi)
goto err_id;
}
+ /* cache the contents of the CCR register for EEPROM, etc. */
+ ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR);
+
+ if (ks->rc_ccr & CCR_EEPROM)
+ ks->eeprom_size = 128;
+ else
+ ks->eeprom_size = 0;
+
ks8851_read_selftest(ks);
ks8851_init_mac(ks);
@@ -1295,9 +1639,9 @@ static int __devinit ks8851_probe(struct spi_device *spi)
goto err_netdev;
}
- dev_info(&spi->dev, "revision %d, MAC %pM, IRQ %d\n",
- CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
- ndev->dev_addr, ndev->irq);
+ netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n",
+ CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
+ ndev->dev_addr, ndev->irq);
return 0;
@@ -1316,7 +1660,7 @@ static int __devexit ks8851_remove(struct spi_device *spi)
struct ks8851_net *priv = dev_get_drvdata(&spi->dev);
if (netif_msg_drv(priv))
- dev_info(&spi->dev, "remove");
+ dev_info(&spi->dev, "remove\n");
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h
index f52c312..537fb06e 100644
--- a/drivers/net/ks8851.h
+++ b/drivers/net/ks8851.h
@@ -25,12 +25,24 @@
#define OBCR_ODS_16mA (1 << 6)
#define KS_EEPCR 0x22
+#define EEPCR_EESRWA (1 << 5)
#define EEPCR_EESA (1 << 4)
-#define EEPCR_EESB (1 << 3)
+#define EEPCR_EESB_OFFSET 3
+#define EEPCR_EESB (1 << EEPCR_EESB_OFFSET)
#define EEPCR_EEDO (1 << 2)
#define EEPCR_EESCK (1 << 1)
#define EEPCR_EECS (1 << 0)
+#define EEPROM_OP_LEN 3 /* bits:*/
+#define EEPROM_OP_READ 0x06
+#define EEPROM_OP_EWEN 0x04
+#define EEPROM_OP_WRITE 0x05
+#define EEPROM_OP_EWDS 0x14
+
+#define EEPROM_DATA_LEN 16 /* 16 bits EEPROM */
+#define EEPROM_WRITE_TIME 4 /* wrt ack time in ms */
+#define EEPROM_SK_PERIOD 400 /* in us */
+
#define KS_MBIR 0x24
#define MBIR_TXMBF (1 << 12)
#define MBIR_TXMBFA (1 << 11)
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index 6354ab3..2e2c69b 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -21,6 +21,8 @@
* KS8851 16bit MLL chip from Micrel Inc.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -361,7 +363,6 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
#define MAX_MCAST_LST 32
#define HW_MCAST_SIZE 8
-#define MAC_ADDR_LEN 6
/**
* union ks_tx_hdr - tx header data
@@ -449,7 +450,7 @@ struct ks_net {
u16 promiscuous;
u16 all_mcast;
u16 mcast_lst_size;
- u8 mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN];
+ u8 mcast_lst[MAX_MCAST_LST][ETH_ALEN];
u8 mcast_bits[HW_MCAST_SIZE];
u8 mac_addr[6];
u8 fid;
@@ -459,11 +460,6 @@ struct ks_net {
static int msg_enable;
-#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
-#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
-#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
-#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
-
#define BE3 0x8000 /* Byte Enable 3 */
#define BE2 0x4000 /* Byte Enable 2 */
#define BE1 0x2000 /* Byte Enable 1 */
@@ -625,8 +621,7 @@ static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
{
unsigned pmecr;
- if (netif_msg_hw(ks))
- ks_dbg(ks, "setting power mode %d\n", pwrmode);
+ netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
ks_rdreg16(ks, KS_GRR);
pmecr = ks_rdreg16(ks, KS_PMECR);
@@ -806,11 +801,10 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
/* read data block including CRC 4 bytes */
ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
skb_put(skb, frame_hdr->len);
- skb->dev = netdev;
skb->protocol = eth_type_trans(skb, netdev);
netif_rx(skb);
} else {
- printk(KERN_ERR "%s: err:skb alloc\n", __func__);
+ pr_err("%s: err:skb alloc\n", __func__);
ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
if (skb)
dev_kfree_skb_irq(skb);
@@ -837,9 +831,8 @@ static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
netif_carrier_off(netdev);
link_up_status = false;
}
- if (netif_msg_link(ks))
- ks_dbg(ks, "%s: %s\n",
- __func__, link_up_status ? "UP" : "DOWN");
+ netif_dbg(ks, link, ks->netdev,
+ "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
}
/**
@@ -909,15 +902,13 @@ static int ks_net_open(struct net_device *netdev)
* else at the moment.
*/
- if (netif_msg_ifup(ks))
- ks_dbg(ks, "%s - entry\n", __func__);
+ netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);
/* reset the HW */
err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
if (err) {
- printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
- ks->irq, err);
+ pr_err("Failed to request IRQ: %d: %d\n", ks->irq, err);
return err;
}
@@ -930,8 +921,7 @@ static int ks_net_open(struct net_device *netdev)
ks_enable_qmu(ks);
netif_start_queue(ks->netdev);
- if (netif_msg_ifup(ks))
- ks_dbg(ks, "network device %s up\n", netdev->name);
+ netif_dbg(ks, ifup, ks->netdev, "network device up\n");
return 0;
}
@@ -948,8 +938,7 @@ static int ks_net_stop(struct net_device *netdev)
{
struct ks_net *ks = netdev_priv(netdev);
- if (netif_msg_ifdown(ks))
- ks_info(ks, "%s: shutting down\n", netdev->name);
+ netif_info(ks, ifdown, netdev, "shutting down\n");
netif_stop_queue(netdev);
@@ -1181,7 +1170,7 @@ static void ks_set_mcast(struct ks_net *ks, u16 mcast)
static void ks_set_rx_mode(struct net_device *netdev)
{
struct ks_net *ks = netdev_priv(netdev);
- struct dev_mc_list *ptr;
+ struct netdev_hw_addr *ha;
/* Turn on/off promiscuous mode. */
if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
@@ -1198,13 +1187,12 @@ static void ks_set_rx_mode(struct net_device *netdev)
if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
int i = 0;
- netdev_for_each_mc_addr(ptr, netdev) {
- if (!(*ptr->dmi_addr & 1))
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (!(*ha->addr & 1))
continue;
if (i >= MAX_MCAST_LST)
break;
- memcpy(ks->mcast_lst[i++], ptr->dmi_addr,
- MAC_ADDR_LEN);
+ memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
}
ks->mcast_lst_size = (u8)i;
ks_set_grpaddr(ks);
@@ -1430,21 +1418,21 @@ static int ks_read_selftest(struct ks_net *ks)
rd = ks_rdreg16(ks, KS_MBIR);
if ((rd & both_done) != both_done) {
- ks_warn(ks, "Memory selftest not finished\n");
+ netdev_warn(ks->netdev, "Memory selftest not finished\n");
return 0;
}
if (rd & MBIR_TXMBFA) {
- ks_err(ks, "TX memory selftest fails\n");
+ netdev_err(ks->netdev, "TX memory selftest fails\n");
ret |= 1;
}
if (rd & MBIR_RXMBFA) {
- ks_err(ks, "RX memory selftest fails\n");
+ netdev_err(ks->netdev, "RX memory selftest fails\n");
ret |= 2;
}
- ks_info(ks, "the selftest passes\n");
+ netdev_info(ks->netdev, "the selftest passes\n");
return ret;
}
@@ -1515,7 +1503,7 @@ static int ks_hw_init(struct ks_net *ks)
ks->frame_head_info = (struct type_frame_head *) \
kmalloc(MHEADER_SIZE, GFP_KERNEL);
if (!ks->frame_head_info) {
- printk(KERN_ERR "Error: Fail to allocate frame memory\n");
+ pr_err("Error: Fail to allocate frame memory\n");
return false;
}
@@ -1581,7 +1569,7 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
ks->mii.mdio_read = ks_phy_read;
ks->mii.mdio_write = ks_phy_write;
- ks_info(ks, "message enable is %d\n", msg_enable);
+ netdev_info(netdev, "message enable is %d\n", msg_enable);
/* set the default message enable */
ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
NETIF_MSG_PROBE |
@@ -1590,13 +1578,13 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
/* simple check for a valid chip being connected to the bus */
if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
- ks_err(ks, "failed to read device ID\n");
+ netdev_err(netdev, "failed to read device ID\n");
err = -ENODEV;
goto err_register;
}
if (ks_read_selftest(ks)) {
- ks_err(ks, "failed to read device ID\n");
+ netdev_err(netdev, "failed to read device ID\n");
err = -ENODEV;
goto err_register;
}
@@ -1627,9 +1615,8 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
id = ks_rdreg16(ks, KS_CIDER);
- printk(KERN_INFO DRV_NAME
- " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
- (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
+ netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
+ (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
return 0;
err_register:
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 0606a1f..c80ca64 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -14,10 +14,11 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
@@ -1484,11 +1485,6 @@ struct dev_priv {
int promiscuous;
};
-#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
-#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
-#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
-#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
-
#define DRV_NAME "KSZ884X PCI"
#define DEVICE_NAME "KSZ884x PCI"
#define DRV_VERSION "1.0.0"
@@ -3835,7 +3831,7 @@ static void ksz_check_desc_num(struct ksz_desc_info *info)
alloc >>= 1;
}
if (alloc != 1 || shift < MIN_DESC_SHIFT) {
- printk(KERN_ALERT "Hardware descriptor numbers not right!\n");
+ pr_alert("Hardware descriptor numbers not right!\n");
while (alloc) {
shift++;
alloc >>= 1;
@@ -4546,8 +4542,7 @@ static int ksz_alloc_mem(struct dev_info *adapter)
(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
DESC_ALIGNMENT) * DESC_ALIGNMENT);
if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
- printk(KERN_ALERT
- "Hardware descriptor size not right!\n");
+ pr_alert("Hardware descriptor size not right!\n");
ksz_check_desc_num(&hw->rx_desc_info);
ksz_check_desc_num(&hw->tx_desc_info);
@@ -4689,7 +4684,7 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
int frag;
skb_frag_t *this_frag;
- dma_buf->len = skb->len - skb->data_len;
+ dma_buf->len = skb_headlen(skb);
dma_buf->dma = pci_map_single(
hw_priv->pdev, skb->data, dma_buf->len,
@@ -5049,8 +5044,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
dma_buf->skb->data, packet_len);
} while (0);
- skb->dev = dev;
-
skb->protocol = eth_type_trans(skb, dev);
if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
@@ -5061,8 +5054,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
priv->stats.rx_bytes += packet_len;
/* Notify upper layer for received packet. */
- dev->last_rx = jiffies;
-
rx_status = netif_rx(skb);
return 0;
@@ -5320,10 +5311,10 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
u32 data;
hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
- printk(KERN_INFO "Tx stopped\n");
+ pr_info("Tx stopped\n");
data = readl(hw->io + KS_DMA_TX_CTRL);
if (!(data & DMA_TX_ENABLE))
- printk(KERN_INFO "Tx disabled\n");
+ pr_info("Tx disabled\n");
break;
}
} while (0);
@@ -5496,6 +5487,18 @@ static int prepare_hardware(struct net_device *dev)
return 0;
}
+static void set_media_state(struct net_device *dev, int media_state)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ if (media_state == priv->media_state)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ netif_info(priv, link, dev, "link %s\n",
+ media_state == priv->media_state ? "on" : "off");
+}
+
/**
* netdev_open - open network device
* @dev: Network device.
@@ -5585,15 +5588,7 @@ static int netdev_open(struct net_device *dev)
priv->media_state = port->linked->state;
- if (media_connected == priv->media_state)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
- if (netif_msg_link(priv))
- printk(KERN_INFO "%s link %s\n", dev->name,
- (media_connected == priv->media_state ?
- "on" : "off"));
-
+ set_media_state(dev, media_connected);
netif_start_queue(dev);
return 0;
@@ -5767,7 +5762,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
int multicast = (dev->flags & IFF_ALLMULTI);
dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));
@@ -5784,7 +5779,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
int i = 0;
/* List too big to support so turn on all multicast mode. */
- if (dev->mc_count > MAX_MULTICAST_LIST) {
+ if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
if (MAX_MULTICAST_LIST != hw->multi_list_size) {
hw->multi_list_size = MAX_MULTICAST_LIST;
++hw->all_multi;
@@ -5793,13 +5788,12 @@ static void netdev_set_rx_mode(struct net_device *dev)
return;
}
- netdev_for_each_mc_addr(mc_ptr, dev) {
- if (!(*mc_ptr->dmi_addr & 1))
+ netdev_for_each_mc_addr(ha, dev) {
+ if (!(*ha->addr & 1))
continue;
if (i >= MAX_MULTICAST_LIST)
break;
- memcpy(hw->multi_list[i++], mc_ptr->dmi_addr,
- MAC_ADDR_LEN);
+ memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
}
hw->multi_list_size = (u8) i;
hw_set_grp_addr(hw);
@@ -6683,16 +6677,8 @@ static void update_link(struct net_device *dev, struct dev_priv *priv,
{
if (priv->media_state != port->linked->state) {
priv->media_state = port->linked->state;
- if (netif_running(dev)) {
- if (media_connected == priv->media_state)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
- if (netif_msg_link(priv))
- printk(KERN_INFO "%s link %s\n", dev->name,
- (media_connected == priv->media_state ?
- "on" : "off"));
- }
+ if (netif_running(dev))
+ set_media_state(dev, media_connected);
}
}
@@ -6986,7 +6972,7 @@ static int __init pcidev_init(struct pci_dev *pdev,
int pi;
int port_count;
int result;
- char banner[80];
+ char banner[sizeof(version)];
struct ksz_switch *sw = NULL;
result = pci_enable_device(pdev);
@@ -7010,10 +6996,9 @@ static int __init pcidev_init(struct pci_dev *pdev,
result = -ENOMEM;
- info = kmalloc(sizeof(struct platform_info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
if (!info)
goto pcidev_init_dev_err;
- memset(info, 0, sizeof(struct platform_info));
hw_priv = &info->dev_info;
hw_priv->pdev = pdev;
@@ -7027,15 +7012,15 @@ static int __init pcidev_init(struct pci_dev *pdev,
cnt = hw_init(hw);
if (!cnt) {
if (msg_enable & NETIF_MSG_PROBE)
- printk(KERN_ALERT "chip not detected\n");
+ pr_alert("chip not detected\n");
result = -ENODEV;
goto pcidev_init_alloc_err;
}
- sprintf(banner, "%s\n", version);
- banner[13] = cnt + '0';
- ks_info(hw_priv, "%s", banner);
- ks_dbg(hw_priv, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
+ snprintf(banner, sizeof(banner), "%s", version);
+ banner[13] = cnt + '0'; /* Replace x in "Micrel KSZ884x" */
+ dev_info(&hw_priv->pdev->dev, "%s\n", banner);
+ dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
/* Assume device is KSZ8841. */
hw->dev_count = 1;
@@ -7064,10 +7049,9 @@ static int __init pcidev_init(struct pci_dev *pdev,
mib_port_count = SWITCH_PORT_NUM;
}
hw->mib_port_cnt = TOTAL_PORT_NUM;
- hw->ksz_switch = kmalloc(sizeof(struct ksz_switch), GFP_KERNEL);
+ hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL);
if (!hw->ksz_switch)
goto pcidev_init_alloc_err;
- memset(hw->ksz_switch, 0, sizeof(struct ksz_switch));
sw = hw->ksz_switch;
}
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 7b94476..21f8ada 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -945,7 +945,7 @@ static void lance_tx_timeout (struct net_device *dev)
#endif
lance_restart (dev, 0x0043, 1);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -1011,8 +1011,6 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
outw(0x0000, ioaddr+LANCE_ADDR);
outw(0x0048, ioaddr+LANCE_DATA);
- dev->trans_start = jiffies;
-
if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
netif_stop_queue(dev);
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index 973390b..ce5d6e9 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -963,7 +963,7 @@ static void i596_tx_timeout (struct net_device *dev)
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -974,7 +974,6 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tx_cmd *tx_cmd;
struct i596_tbd *tbd;
short length = skb->len;
- dev->trans_start = jiffies;
DEB(DEB_STARTTX, printk(KERN_DEBUG
"%s: i596_start_xmit(%x,%p) called\n",
@@ -1092,7 +1091,7 @@ static int __devinit i82596_probe(struct net_device *dev)
DMA_FREE(dev->dev.parent, sizeof(struct i596_dma),
(void *)dma, lp->dma_addr);
return i;
- };
+ }
DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
dev->name, dev->base_addr, dev->dev_addr,
@@ -1388,7 +1387,7 @@ static void set_multicast_list(struct net_device *dev)
}
if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char *cp;
struct mc_cmd *cmd;
@@ -1396,10 +1395,10 @@ static void set_multicast_list(struct net_device *dev)
cmd->cmd.command = SWAP16(CmdMulticastList);
cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
cp = cmd->mc_addrs;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (!cnt--)
break;
- memcpy(cp, dmi->dmi_addr, 6);
+ memcpy(cp, ha->addr, 6);
if (i596_debug > 1)
DEB(DEB_MULTI,
printk(KERN_DEBUG
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 56f66f4..316bb70 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -257,7 +257,7 @@ static void __ei_tx_timeout(struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- int txsr, isr, tickssofar = jiffies - dev->trans_start;
+ int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
unsigned long flags;
dev->stats.tx_errors++;
@@ -386,7 +386,6 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
{
ei_local->txing = 1;
NS8390_trigger_send(dev, send_length, output_page);
- dev->trans_start = jiffies;
if (output_page == ei_local->tx_start_page)
{
ei_local->tx1 = -1;
@@ -445,14 +444,14 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
if (ei_local->irqlock)
{
-#if 1 /* This might just be an interrupt for a PCI device sharing this line */
- /* The "irqlock" check is only for testing. */
- printk(ei_local->irqlock
- ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
- : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
+ /*
+ * This might just be an interrupt for a PCI device sharing
+ * this line
+ */
+ printk("%s: Interrupted while interrupts are masked!"
+ " isr=%#2x imr=%#2x.\n",
dev->name, ei_inb_p(e8390_base + EN0_ISR),
ei_inb_p(e8390_base + EN0_IMR));
-#endif
spin_unlock(&ei_local->page_lock);
return IRQ_NONE;
}
@@ -792,7 +791,6 @@ static void ei_receive(struct net_device *dev)
/* We used to also ack ENISR_OVER here, but that would sometimes mask
a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
- return;
}
/**
@@ -905,10 +903,10 @@ static struct net_device_stats *__ei_get_stats(struct net_device *dev)
static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(dmi, dev) {
- u32 crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 crc = ether_crc(ETH_ALEN, ha->addr);
/*
* The 8390 uses the 6 most significant bits of the
* CRC to index the multicast table.
diff --git a/drivers/net/ll_temac.h b/drivers/net/ll_temac.h
index 1af66a1..c033584 100644
--- a/drivers/net/ll_temac.h
+++ b/drivers/net/ll_temac.h
@@ -5,8 +5,11 @@
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/spinlock.h>
+
+#ifdef CONFIG_PPC_DCR
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
+#endif
/* packet size info */
#define XTE_HDR_SIZE 14 /* size of Ethernet header */
@@ -290,9 +293,6 @@ This option defaults to enabled (set) */
#define TX_CONTROL_CALC_CSUM_MASK 1
-#define XTE_ALIGN 32
-#define BUFFER_ALIGN(adr) ((XTE_ALIGN - ((u32) adr)) % XTE_ALIGN)
-
#define MULTICAST_CAM_TABLE_NUM 4
/* TX/RX CURDESC_PTR points to first descriptor */
@@ -335,9 +335,15 @@ struct temac_local {
struct mii_bus *mii_bus; /* MII bus reference */
int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */
- /* IO registers and IRQs */
+ /* IO registers, dma functions and IRQs */
void __iomem *regs;
+ void __iomem *sdma_regs;
+#ifdef CONFIG_PPC_DCR
dcr_host_t sdma_dcrs;
+#endif
+ u32 (*dma_in)(struct temac_local *, int);
+ void (*dma_out)(struct temac_local *, int, u32);
+
int tx_irq;
int rx_irq;
int emac_num;
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index ba617e3c..b59b24d 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -20,9 +20,6 @@
* or rx, so this should be okay.
*
* TODO:
- * - Fix driver to work on more than just Virtex5. Right now the driver
- * assumes that the locallink DMA registers are accessed via DCR
- * instructions.
* - Factor out locallink DMA code into separate driver
* - Fix multicast assignment.
* - Fix support for hardware checksumming.
@@ -116,17 +113,86 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
}
+/**
+ * temac_dma_in32 - Memory mapped DMA read, this function expects a
+ * register input that is based on DCR word addresses which
+ * are then converted to memory mapped byte addresses
+ */
static u32 temac_dma_in32(struct temac_local *lp, int reg)
{
- return dcr_read(lp->sdma_dcrs, reg);
+ return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
}
+/**
+ * temac_dma_out32 - Memory mapped DMA write, this function expects a
+ * register input that is based on DCR word addresses which
+ * are then converted to memory mapped byte addresses
+ */
static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
{
+ out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
+}
+
+/* DMA register access functions can be DCR based or memory mapped.
+ * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
+ * memory mapped.
+ */
+#ifdef CONFIG_PPC_DCR
+
+/**
+ * temac_dma_dcr_in - DCR based DMA read
+ */
+static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
+{
+ return dcr_read(lp->sdma_dcrs, reg);
+}
+
+/**
+ * temac_dma_dcr_out - DCR based DMA write
+ */
+static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
+{
dcr_write(lp->sdma_dcrs, reg, value);
}
/**
+ * temac_dcr_setup - If the DMA is DCR based, then setup the address and
+ * I/O functions
+ */
+static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
+ struct device_node *np)
+{
+ unsigned int dcrs;
+
+ /* setup the dcr address mapping if it's in the device tree */
+
+ dcrs = dcr_resource_start(np, 0);
+ if (dcrs != 0) {
+ lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
+ lp->dma_in = temac_dma_dcr_in;
+ lp->dma_out = temac_dma_dcr_out;
+ dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
+ return 0;
+ }
+ /* no DCR in the device tree, indicate a failure */
+ return -1;
+}
+
+#else
+
+/*
+ * temac_dcr_setup - This is a stub for when DCR is not supported,
+ * such as with MicroBlaze
+ */
+static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
+ struct device_node *np)
+{
+ return -1;
+}
+
+#endif
+
+/**
* temac_dma_bd_init - Setup buffer descriptor rings
*/
static int temac_dma_bd_init(struct net_device *ndev)
@@ -156,14 +222,14 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
- skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE
- + XTE_ALIGN, GFP_ATOMIC);
+ skb = netdev_alloc_skb_ip_align(ndev,
+ XTE_MAX_JUMBO_FRAME_SIZE);
+
if (skb == 0) {
dev_err(&ndev->dev, "alloc_skb error %d\n", i);
return -1;
}
lp->rx_skb[i] = skb;
- skb_reserve(skb, BUFFER_ALIGN(skb->data));
/* returns physical address of skb->data */
lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
skb->data,
@@ -173,23 +239,23 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
}
- temac_dma_out32(lp, TX_CHNL_CTRL, 0x10220400 |
+ lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
CHNL_CTRL_IRQ_EN |
CHNL_CTRL_IRQ_DLY_EN |
CHNL_CTRL_IRQ_COAL_EN);
/* 0x10220483 */
/* 0x00100483 */
- temac_dma_out32(lp, RX_CHNL_CTRL, 0xff010000 |
+ lp->dma_out(lp, RX_CHNL_CTRL, 0xff010000 |
CHNL_CTRL_IRQ_EN |
CHNL_CTRL_IRQ_DLY_EN |
CHNL_CTRL_IRQ_COAL_EN |
CHNL_CTRL_IRQ_IOE);
/* 0xff010283 */
- temac_dma_out32(lp, RX_CURDESC_PTR, lp->rx_bd_p);
- temac_dma_out32(lp, RX_TAILDESC_PTR,
+ lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
+ lp->dma_out(lp, RX_TAILDESC_PTR,
lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
- temac_dma_out32(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+ lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
return 0;
}
@@ -251,20 +317,20 @@ static void temac_set_multicast_list(struct net_device *ndev)
temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
} else if (!netdev_mc_empty(ndev)) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
i = 0;
- netdev_for_each_mc_addr(mclist, ndev) {
+ netdev_for_each_mc_addr(ha, ndev) {
if (i >= MULTICAST_CAM_TABLE_NUM)
break;
- multi_addr_msw = ((mclist->dmi_addr[3] << 24) |
- (mclist->dmi_addr[2] << 16) |
- (mclist->dmi_addr[1] << 8) |
- (mclist->dmi_addr[0]));
+ multi_addr_msw = ((ha->addr[3] << 24) |
+ (ha->addr[2] << 16) |
+ (ha->addr[1] << 8) |
+ (ha->addr[0]));
temac_indirect_out32(lp, XTE_MAW0_OFFSET,
multi_addr_msw);
- multi_addr_lsw = ((mclist->dmi_addr[5] << 8) |
- (mclist->dmi_addr[4]) | (i << 16));
+ multi_addr_lsw = ((ha->addr[5] << 8) |
+ (ha->addr[4]) | (i << 16));
temac_indirect_out32(lp, XTE_MAW1_OFFSET,
multi_addr_lsw);
i++;
@@ -427,9 +493,9 @@ static void temac_device_reset(struct net_device *ndev)
temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);
/* Reset Local Link (DMA) */
- temac_dma_out32(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
+ lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
timeout = 1000;
- while (temac_dma_in32(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
+ while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
@@ -437,7 +503,7 @@ static void temac_device_reset(struct net_device *ndev)
break;
}
}
- temac_dma_out32(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
+ lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
temac_dma_bd_init(ndev);
@@ -461,7 +527,7 @@ static void temac_device_reset(struct net_device *ndev)
dev_err(&ndev->dev, "Error setting TEMAC options\n");
/* Init Driver variable */
- ndev->trans_start = 0;
+ ndev->trans_start = jiffies; /* prevent tx timeout */
}
void temac_adjust_link(struct net_device *ndev)
@@ -598,7 +664,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
lp->tx_bd_tail = 0;
/* Kick off the transfer */
- temac_dma_out32(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
+ lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
return NETDEV_TX_OK;
}
@@ -612,7 +678,6 @@ static void ll_temac_recv(struct net_device *ndev)
struct cdmac_bd *cur_p;
dma_addr_t tail_p;
int length;
- unsigned long skb_vaddr;
unsigned long flags;
spin_lock_irqsave(&lp->rx_lock, flags);
@@ -626,8 +691,7 @@ static void ll_temac_recv(struct net_device *ndev)
skb = lp->rx_skb[lp->rx_bd_ci];
length = cur_p->app4 & 0x3FFF;
- skb_vaddr = virt_to_bus(skb->data);
- dma_unmap_single(ndev->dev.parent, skb_vaddr, length,
+ dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
DMA_FROM_DEVICE);
skb_put(skb, length);
@@ -640,16 +704,15 @@ static void ll_temac_recv(struct net_device *ndev)
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += length;
- new_skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE + XTE_ALIGN,
- GFP_ATOMIC);
+ new_skb = netdev_alloc_skb_ip_align(ndev,
+ XTE_MAX_JUMBO_FRAME_SIZE);
+
if (new_skb == 0) {
dev_err(&ndev->dev, "no memory for new sk_buff\n");
spin_unlock_irqrestore(&lp->rx_lock, flags);
return;
}
- skb_reserve(new_skb, BUFFER_ALIGN(new_skb->data));
-
cur_p->app0 = STS_CTRL_APP0_IRQONEND;
cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
XTE_MAX_JUMBO_FRAME_SIZE,
@@ -664,7 +727,7 @@ static void ll_temac_recv(struct net_device *ndev)
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
bdstat = cur_p->app0;
}
- temac_dma_out32(lp, RX_TAILDESC_PTR, tail_p);
+ lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
spin_unlock_irqrestore(&lp->rx_lock, flags);
}
@@ -675,8 +738,8 @@ static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
struct temac_local *lp = netdev_priv(ndev);
unsigned int status;
- status = temac_dma_in32(lp, TX_IRQ_REG);
- temac_dma_out32(lp, TX_IRQ_REG, status);
+ status = lp->dma_in(lp, TX_IRQ_REG);
+ lp->dma_out(lp, TX_IRQ_REG, status);
if (status & (IRQ_COAL | IRQ_DLY))
temac_start_xmit_done(lp->ndev);
@@ -693,8 +756,8 @@ static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
unsigned int status;
/* Read and clear the status registers */
- status = temac_dma_in32(lp, RX_IRQ_REG);
- temac_dma_out32(lp, RX_IRQ_REG, status);
+ status = lp->dma_in(lp, RX_IRQ_REG);
+ lp->dma_out(lp, RX_IRQ_REG, status);
if (status & (IRQ_COAL | IRQ_DLY))
ll_temac_recv(lp->ndev);
@@ -795,7 +858,7 @@ static ssize_t temac_show_llink_regs(struct device *dev,
int i, len = 0;
for (i = 0; i < 0x11; i++)
- len += sprintf(buf + len, "%.8x%s", temac_dma_in32(lp, i),
+ len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
(i % 8) == 7 ? "\n" : " ");
len += sprintf(buf + len, "\n");
@@ -821,7 +884,6 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
struct net_device *ndev;
const void *addr;
int size, rc = 0;
- unsigned int dcrs;
/* Init network device structure */
ndev = alloc_etherdev(sizeof(*lp));
@@ -871,13 +933,20 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
goto nodev;
}
- dcrs = dcr_resource_start(np, 0);
- if (dcrs == 0) {
- dev_err(&op->dev, "could not get DMA register address\n");
- goto nodev;
+ /* Setup the DMA register accesses, could be DCR or memory mapped */
+ if (temac_dcr_setup(lp, op, np)) {
+
+ /* no DCR in the device tree, try non-DCR */
+ lp->sdma_regs = of_iomap(np, 0);
+ if (lp->sdma_regs) {
+ lp->dma_in = temac_dma_in32;
+ lp->dma_out = temac_dma_out32;
+ dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
+ } else {
+ dev_err(&op->dev, "unable to map DMA registers\n");
+ goto nodev;
+ }
}
- lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
- dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
lp->rx_irq = irq_of_parse_and_map(np, 0);
lp->tx_irq = irq_of_parse_and_map(np, 1);
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index 41cbaae..8a1097c 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -307,8 +307,6 @@ static void lne390_reset_8390(struct net_device *dev)
ei_status.txing = 0;
outb(0x01, ioaddr + LNE390_RESET_PORT);
if (ei_debug > 1) printk("reset done\n");
-
- return;
}
/*
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 3e3cc04..3df046a 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -875,8 +875,6 @@ static netdev_tx_t i596_start_xmit (struct sk_buff *skb, struct net_device *dev)
length = ETH_ZLEN;
}
- dev->trans_start = jiffies;
-
tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
if (tx_cmd == NULL) {
printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
@@ -1256,7 +1254,7 @@ static void set_multicast_list(struct net_device *dev) {
dev->name, netdev_mc_count(dev));
if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *cp;
cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
netdev_mc_count(dev) * 6, GFP_ATOMIC);
@@ -1267,8 +1265,8 @@ static void set_multicast_list(struct net_device *dev) {
cmd->command = CmdMulticastList;
*((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
cp = ((char *)(cmd + 1))+2;
- netdev_for_each_mc_addr(dmi, dev) {
- memcpy(cp, dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(cp, ha->addr, 6);
cp += 6;
}
if (i596_debug & LOG_SRCDST)
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index c8e68fd..1136c9a 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -661,7 +661,6 @@ static void mac8390_no_reset(struct net_device *dev)
ei_status.txing = 0;
if (ei_debug > 1)
pr_info("reset not supported\n");
- return;
}
static void interlan_reset(struct net_device *dev)
@@ -673,7 +672,6 @@ static void interlan_reset(struct net_device *dev)
target[0xC0000] = 0;
if (ei_debug > 1)
pr_cont("reset complete\n");
- return;
}
/* dayna_memcpy_fromio/dayna_memcpy_toio */
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index c0876e9..69fa4ef6 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -408,7 +408,6 @@ net_send_packet(struct sk_buff *skb, struct net_device *dev)
skb->len+1);
local_irq_restore(flags);
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index c8a18a6..40797fb 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -666,8 +666,6 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -793,6 +791,7 @@ static void macb_init_hw(struct macb *bp)
config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
+ config |= MACB_BIT(BIG); /* Receive oversized frames */
if (bp->dev->flags & IFF_PROMISC)
config |= MACB_BIT(CAF); /* Copy All Frames */
if (!(bp->dev->flags & IFF_BROADCAST))
@@ -882,15 +881,15 @@ static int hash_get_index(__u8 *addr)
*/
static void macb_sethashtable(struct net_device *dev)
{
- struct dev_mc_list *curr;
+ struct netdev_hw_addr *ha;
unsigned long mc_filter[2];
unsigned int bitnr;
struct macb *bp = netdev_priv(dev);
mc_filter[0] = mc_filter[1] = 0;
- netdev_for_each_mc_addr(curr, dev) {
- bitnr = hash_get_index(curr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = hash_get_index(ha->addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 962c41d..b6855a6 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -599,7 +599,7 @@ static void mace_set_multicast(struct net_device *dev)
mp->maccc |= PROM;
} else {
unsigned char multicast_filter[8];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 8; i++)
@@ -607,8 +607,8 @@ static void mace_set_multicast(struct net_device *dev)
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- crc = ether_crc_le(6, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
i = crc >> 26; /* bit number in multicast_filter */
multicast_filter[i >> 3] |= 1 << (i & 7);
}
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 52e9a51..c685a46 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -488,7 +488,6 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -509,7 +508,7 @@ static void mace_set_multicast(struct net_device *dev)
mb->maccc |= PROM;
} else {
unsigned char multicast_filter[8];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 8; i++) {
@@ -518,8 +517,8 @@ static void mace_set_multicast(struct net_device *dev)
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- crc = ether_crc_le(6, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
/* bit number in multicast_filter */
i = crc >> 26;
multicast_filter[i >> 3] |= 1 << (i & 7);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 40faa36..4e238af 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -145,19 +145,15 @@ static void macvlan_broadcast(struct sk_buff *skb,
}
/* called under rcu_read_lock() from netif_receive_skb */
-static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
+static struct sk_buff *macvlan_handle_frame(struct macvlan_port *port,
+ struct sk_buff *skb)
{
const struct ethhdr *eth = eth_hdr(skb);
- const struct macvlan_port *port;
const struct macvlan_dev *vlan;
const struct macvlan_dev *src;
struct net_device *dev;
unsigned int len;
- port = rcu_dereference(skb->dev->macvlan_port);
- if (port == NULL)
- return skb;
-
if (is_multicast_ether_addr(eth->h_dest)) {
src = macvlan_hash_lookup(port, eth->h_source);
if (!src)
@@ -243,7 +239,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
int ret;
ret = macvlan_queue_xmit(skb, dev);
- if (likely(ret == NET_XMIT_SUCCESS)) {
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
txq->tx_packets++;
txq->tx_bytes += len;
} else
@@ -282,7 +278,7 @@ static int macvlan_open(struct net_device *dev)
if (macvlan_addr_busy(vlan->port, dev->dev_addr))
goto out;
- err = dev_unicast_add(lowerdev, dev->dev_addr);
+ err = dev_uc_add(lowerdev, dev->dev_addr);
if (err < 0)
goto out;
if (dev->flags & IFF_ALLMULTI) {
@@ -294,7 +290,7 @@ static int macvlan_open(struct net_device *dev)
return 0;
del_unicast:
- dev_unicast_delete(lowerdev, dev->dev_addr);
+ dev_uc_del(lowerdev, dev->dev_addr);
out:
return err;
}
@@ -308,7 +304,7 @@ static int macvlan_stop(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, -1);
- dev_unicast_delete(lowerdev, dev->dev_addr);
+ dev_uc_del(lowerdev, dev->dev_addr);
macvlan_hash_del(vlan);
return 0;
@@ -332,11 +328,11 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
if (macvlan_addr_busy(vlan->port, addr->sa_data))
return -EBUSY;
- err = dev_unicast_add(lowerdev, addr->sa_data);
+ err = dev_uc_add(lowerdev, addr->sa_data);
if (err)
return err;
- dev_unicast_delete(lowerdev, dev->dev_addr);
+ dev_uc_del(lowerdev, dev->dev_addr);
macvlan_hash_change_addr(vlan, addr->sa_data);
}
@@ -748,6 +744,9 @@ static int macvlan_device_event(struct notifier_block *unused,
list_for_each_entry_safe(vlan, next, &port->vlans, list)
vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL);
break;
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* Forbid underlaying device to change its type. */
+ return NOTIFY_BAD;
}
return NOTIFY_DONE;
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index abba3cc..a8a94e2 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -37,6 +37,8 @@
struct macvtap_queue {
struct sock sk;
struct socket sock;
+ struct socket_wq wq;
+ int vnet_hdr_sz;
struct macvlan_dev *vlan;
struct file *file;
unsigned int flags;
@@ -181,7 +183,7 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
return -ENOLINK;
skb_queue_tail(&q->sk.sk_receive_queue, skb);
- wake_up_interruptible_poll(q->sk.sk_sleep, POLLIN | POLLRDNORM | POLLRDBAND);
+ wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
return 0;
}
@@ -242,12 +244,15 @@ static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
static void macvtap_sock_write_space(struct sock *sk)
{
+ wait_queue_head_t *wqueue;
+
if (!sock_writeable(sk) ||
!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
return;
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | POLLWRNORM | POLLWRBAND);
+ wqueue = sk_sleep(sk);
+ if (wqueue && waitqueue_active(wqueue))
+ wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}
static int macvtap_open(struct inode *inode, struct file *file)
@@ -272,7 +277,8 @@ static int macvtap_open(struct inode *inode, struct file *file)
if (!q)
goto out;
- init_waitqueue_head(&q->sock.wait);
+ q->sock.wq = &q->wq;
+ init_waitqueue_head(&q->wq.wait);
q->sock.type = SOCK_RAW;
q->sock.state = SS_CONNECTED;
q->sock.file = file;
@@ -280,6 +286,7 @@ static int macvtap_open(struct inode *inode, struct file *file)
sock_init_data(&q->sock, &q->sk);
q->sk.sk_write_space = macvtap_sock_write_space;
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
+ q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
err = macvtap_set_queue(dev, file, q);
if (err)
@@ -308,7 +315,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait)
goto out;
mask = 0;
- poll_wait(file, &q->sock.wait, wait);
+ poll_wait(file, &q->wq.wait, wait);
if (!skb_queue_empty(&q->sk.sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
@@ -440,14 +447,14 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
int vnet_hdr_len = 0;
if (q->flags & IFF_VNET_HDR) {
- vnet_hdr_len = sizeof(vnet_hdr);
+ vnet_hdr_len = q->vnet_hdr_sz;
err = -EINVAL;
if ((len -= vnet_hdr_len) < 0)
goto err;
err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
- vnet_hdr_len);
+ sizeof(vnet_hdr));
if (err < 0)
goto err;
if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
@@ -529,7 +536,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if (q->flags & IFF_VNET_HDR) {
struct virtio_net_hdr vnet_hdr;
- vnet_hdr_len = sizeof (vnet_hdr);
+ vnet_hdr_len = q->vnet_hdr_sz;
if ((len -= vnet_hdr_len) < 0)
return -EINVAL;
@@ -537,7 +544,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if (ret)
return ret;
- if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, vnet_hdr_len))
+ if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
return -EFAULT;
}
@@ -562,7 +569,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
struct sk_buff *skb;
ssize_t ret = 0;
- add_wait_queue(q->sk.sk_sleep, &wait);
+ add_wait_queue(sk_sleep(&q->sk), &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
@@ -587,7 +594,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
}
current->state = TASK_RUNNING;
- remove_wait_queue(q->sk.sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(&q->sk), &wait);
return ret;
}
@@ -622,6 +629,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
struct ifreq __user *ifr = argp;
unsigned int __user *up = argp;
unsigned int u;
+ int __user *sp = argp;
+ int s;
int ret;
switch (cmd) {
@@ -667,6 +676,21 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
q->sk.sk_sndbuf = u;
return 0;
+ case TUNGETVNETHDRSZ:
+ s = q->vnet_hdr_sz;
+ if (put_user(s, sp))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETVNETHDRSZ:
+ if (get_user(s, sp))
+ return -EFAULT;
+ if (s < (int)sizeof(struct virtio_net_hdr))
+ return -EINVAL;
+
+ q->vnet_hdr_sz = s;
+ return 0;
+
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 9f72cb4..42e3294 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -746,10 +746,8 @@ static void meth_tx_timeout(struct net_device *dev)
/* Enable interrupt */
spin_unlock_irqrestore(&priv->meth_lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
-
- return;
}
/*
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index 86467b4..d5afd03 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -140,8 +140,6 @@ static void mlx4_en_get_wol(struct net_device *netdev,
{
wol->supported = 0;
wol->wolopts = 0;
-
- return;
}
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 73c3d20..96180c0 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -161,39 +161,29 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
static void mlx4_en_clear_list(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct dev_mc_list *plist = priv->mc_list;
- struct dev_mc_list *next;
- while (plist) {
- next = plist->next;
- kfree(plist);
- plist = next;
- }
- priv->mc_list = NULL;
+ kfree(priv->mc_addrs);
+ priv->mc_addrs_cnt = 0;
}
static void mlx4_en_cache_mclist(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct dev_mc_list *mclist;
- struct dev_mc_list *tmp;
- struct dev_mc_list *plist = NULL;
-
- for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
- tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
- if (!tmp) {
- en_err(priv, "failed to allocate multicast list\n");
- mlx4_en_clear_list(dev);
- return;
- }
- memcpy(tmp, mclist, sizeof(struct dev_mc_list));
- tmp->next = NULL;
- if (plist)
- plist->next = tmp;
- else
- priv->mc_list = tmp;
- plist = tmp;
+ struct netdev_hw_addr *ha;
+ char *mc_addrs;
+ int mc_addrs_cnt = netdev_mc_count(dev);
+ int i;
+
+ mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
+ if (!mc_addrs) {
+ en_err(priv, "failed to allocate multicast list\n");
+ return;
}
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
+ priv->mc_addrs = mc_addrs;
+ priv->mc_addrs_cnt = mc_addrs_cnt;
}
@@ -213,7 +203,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
mcast_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
- struct dev_mc_list *mclist;
u64 mcast_addr = 0;
int err;
@@ -289,6 +278,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
if (err)
en_err(priv, "Failed disabling multicast filter\n");
} else {
+ int i;
+
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_DISABLE);
if (err)
@@ -303,8 +294,9 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
netif_tx_lock_bh(dev);
mlx4_en_cache_mclist(dev);
netif_tx_unlock_bh(dev);
- for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
- mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
+ for (i = 0; i < priv->mc_addrs_cnt; i++) {
+ mcast_addr =
+ mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
@@ -512,7 +504,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
if (err)
- en_dbg(HW, priv, "Could not update stats \n");
+ en_dbg(HW, priv, "Could not update stats\n");
mutex_lock(&mdev->state_lock);
if (mdev->device_up) {
@@ -985,7 +977,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->flags = prof->flags;
priv->tx_ring_num = prof->tx_ring_num;
priv->rx_ring_num = prof->rx_ring_num;
- priv->mc_list = NULL;
priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL;
spin_lock_init(&priv->stats_lock);
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 7365bf4..4230534 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -239,7 +239,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
break;
- };
+ }
++eq->cons_index;
eqes_found = 1;
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index bc72d6e..13343e8 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -40,6 +40,7 @@
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/timer.h>
+#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <linux/mlx4/device.h>
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 82c3ebc..b55e46c 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -492,7 +492,8 @@ struct mlx4_en_priv {
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_port_stats port_stats;
- struct dev_mc_list *mc_list;
+ char *mc_addrs;
+ int mc_addrs_cnt;
struct mlx4_en_stat_out_mbox hw_stats;
};
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 8613a52..e345ec8 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -882,7 +882,6 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
txq->tx_bytes += skb->len;
txq->tx_packets++;
- dev->trans_start = jiffies;
entries_left = txq->tx_ring_size - txq->tx_desc_count;
if (entries_left < MAX_SKB_FRAGS + 1)
@@ -1770,7 +1769,7 @@ static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
struct mv643xx_eth_private *mp = netdev_priv(dev);
u32 *mc_spec;
u32 *mc_other;
- struct dev_addr_list *addr;
+ struct netdev_hw_addr *ha;
int i;
if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
@@ -1795,8 +1794,8 @@ oom:
memset(mc_spec, 0, 0x100);
memset(mc_other, 0, 0x100);
- netdev_for_each_mc_addr(addr, dev) {
- u8 *a = addr->da_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *a = ha->addr;
u32 *table;
int entry;
@@ -2609,10 +2608,9 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
goto out;
ret = -ENOMEM;
- msp = kmalloc(sizeof(*msp), GFP_KERNEL);
+ msp = kzalloc(sizeof(*msp), GFP_KERNEL);
if (msp == NULL)
goto out;
- memset(msp, 0, sizeof(*msp));
msp->base = ioremap(res->start, res->end - res->start + 1);
if (msp->base == NULL)
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ecde087..e0b47cc 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -110,15 +110,15 @@ MODULE_LICENSE("Dual BSD/GPL");
struct myri10ge_rx_buffer_state {
struct page *page;
int page_offset;
- DECLARE_PCI_UNMAP_ADDR(bus)
- DECLARE_PCI_UNMAP_LEN(len)
+ DEFINE_DMA_UNMAP_ADDR(bus);
+ DEFINE_DMA_UNMAP_LEN(len);
};
struct myri10ge_tx_buffer_state {
struct sk_buff *skb;
int last;
- DECLARE_PCI_UNMAP_ADDR(bus)
- DECLARE_PCI_UNMAP_LEN(len)
+ DEFINE_DMA_UNMAP_ADDR(bus);
+ DEFINE_DMA_UNMAP_LEN(len);
};
struct myri10ge_cmd {
@@ -1234,7 +1234,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
rx->info[idx].page_offset = rx->page_offset;
/* note that this is the address of the start of the
* page */
- pci_unmap_addr_set(&rx->info[idx], bus, rx->bus);
+ dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
rx->shadow[idx].addr_low =
htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
rx->shadow[idx].addr_high =
@@ -1266,7 +1266,7 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev,
/* unmap the recvd page if we're the only or last user of it */
if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
(info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
- pci_unmap_page(pdev, (pci_unmap_addr(info, bus)
+ pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
& ~(MYRI10GE_ALLOC_SIZE - 1)),
MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
}
@@ -1373,21 +1373,21 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
tx->info[idx].last = 0;
}
tx->done++;
- len = pci_unmap_len(&tx->info[idx], len);
- pci_unmap_len_set(&tx->info[idx], len, 0);
+ len = dma_unmap_len(&tx->info[idx], len);
+ dma_unmap_len_set(&tx->info[idx], len, 0);
if (skb) {
ss->stats.tx_bytes += skb->len;
ss->stats.tx_packets++;
dev_kfree_skb_irq(skb);
if (len)
pci_unmap_single(pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
} else {
if (len)
pci_unmap_page(pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
}
@@ -2094,20 +2094,20 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
/* Mark as free */
tx->info[idx].skb = NULL;
tx->done++;
- len = pci_unmap_len(&tx->info[idx], len);
- pci_unmap_len_set(&tx->info[idx], len, 0);
+ len = dma_unmap_len(&tx->info[idx], len);
+ dma_unmap_len_set(&tx->info[idx], len, 0);
if (skb) {
ss->stats.tx_dropped++;
dev_kfree_skb_any(skb);
if (len)
pci_unmap_single(mgp->pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
} else {
if (len)
pci_unmap_page(mgp->pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
}
@@ -2757,12 +2757,12 @@ again:
}
/* map the skb for DMA */
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
idx = tx->req & tx->mask;
tx->info[idx].skb = skb;
bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&tx->info[idx], bus, bus);
- pci_unmap_len_set(&tx->info[idx], len, len);
+ dma_unmap_addr_set(&tx->info[idx], bus, bus);
+ dma_unmap_len_set(&tx->info[idx], len, len);
frag_cnt = skb_shinfo(skb)->nr_frags;
frag_idx = 0;
@@ -2865,8 +2865,8 @@ again:
len = frag->size;
bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset,
len, PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&tx->info[idx], bus, bus);
- pci_unmap_len_set(&tx->info[idx], len, len);
+ dma_unmap_addr_set(&tx->info[idx], bus, bus);
+ dma_unmap_len_set(&tx->info[idx], len, len);
}
(req - rdma_count)->rdma_count = rdma_count;
@@ -2903,19 +2903,19 @@ abort_linearize:
idx = tx->req & tx->mask;
tx->info[idx].skb = NULL;
do {
- len = pci_unmap_len(&tx->info[idx], len);
+ len = dma_unmap_len(&tx->info[idx], len);
if (len) {
if (tx->info[idx].skb != NULL)
pci_unmap_single(mgp->pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
else
pci_unmap_page(mgp->pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
- pci_unmap_len_set(&tx->info[idx], len, 0);
+ dma_unmap_len_set(&tx->info[idx], len, 0);
tx->info[idx].skb = NULL;
}
idx = (idx + 1) & tx->mask;
@@ -3002,7 +3002,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_cmd cmd;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
__be32 data[2] = { 0, 0 };
int err;
@@ -3039,8 +3039,8 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
}
/* Walk the multicast list, and add each address */
- netdev_for_each_mc_addr(mc_list, dev) {
- memcpy(data, &mc_list->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(data, &ha->addr, 6);
cmd.data0 = ntohl(data[0]);
cmd.data1 = ntohl(data[1]);
err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
@@ -3048,7 +3048,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
if (err != 0) {
netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
- err, mc_list->dmi_addr);
+ err, ha->addr);
goto abort;
}
}
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index b72e749..3898108 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -865,7 +865,7 @@ static inline void determine_reg_space_size(struct myri_eth *mp)
printk("myricom: AIEEE weird cpu version %04x assuming pre4.0\n",
mp->eeprom.cpuvers);
mp->reg_size = (3 * 128 * 1024) + 4096;
- };
+ }
}
#ifdef DEBUG_DETECT
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index e520387..2a17b50 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -1905,7 +1905,7 @@ static void ns_tx_timeout(struct net_device *dev)
spin_unlock_irq(&np->lock);
enable_irq(dev->irq);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
np->stats.tx_errors++;
netif_wake_queue(dev);
}
@@ -2119,8 +2119,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
}
spin_unlock_irqrestore(&np->lock, flags);
- dev->trans_start = jiffies;
-
if (netif_msg_tx_queued(np)) {
printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
dev->name, np->cur_tx, entry);
@@ -2493,12 +2491,12 @@ static void __set_rx_mode(struct net_device *dev)
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptAllMulticast | AcceptMyPhys;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
+ netdev_for_each_mc_addr(ha, dev) {
+ int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;
mc_filter[b/8] |= (1 << (b & 0x07));
}
rx_mode = RxFilterEnable | AcceptBroadcast
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index 7bd6662..e0b0ef1 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -608,7 +608,6 @@ retry:
outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index f4347f8..b8e2923 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -785,7 +785,6 @@ retry:
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
static int __init ne_drv_probe(struct platform_device *pdev)
diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c
index ff3c4c8..70cdc69 100644
--- a/drivers/net/ne2.c
+++ b/drivers/net/ne2.c
@@ -730,7 +730,6 @@ retry:
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 85aec4f..3c333cb 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -631,7 +631,6 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
static void ne2k_pci_get_drvinfo(struct net_device *dev,
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index a00bbfb..243ed2a 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -255,8 +255,6 @@ static void ne3210_reset_8390(struct net_device *dev)
ei_status.txing = 0;
outb(0x01, ioaddr + NE3210_RESET_PORT);
if (ei_debug > 1) printk("reset done\n");
-
- return;
}
/*
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index a361dea..ca142c4 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -665,7 +665,8 @@ static int netconsole_netdev_event(struct notifier_block *this,
struct netconsole_target *nt;
struct net_device *dev = ptr;
- if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER))
+ if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER ||
+ event == NETDEV_BONDING_DESLAVE || event == NETDEV_GOING_DOWN))
goto done;
spin_lock_irqsave(&target_list_lock, flags);
@@ -677,19 +678,21 @@ static int netconsole_netdev_event(struct notifier_block *this,
strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ);
break;
case NETDEV_UNREGISTER:
- if (!nt->enabled)
- break;
netpoll_cleanup(&nt->np);
+ /* Fall through */
+ case NETDEV_GOING_DOWN:
+ case NETDEV_BONDING_DESLAVE:
nt->enabled = 0;
- printk(KERN_INFO "netconsole: network logging stopped"
- ", interface %s unregistered\n",
- dev->name);
break;
}
}
netconsole_target_put(nt);
}
spin_unlock_irqrestore(&target_list_lock, flags);
+ if (event == NETDEV_UNREGISTER || event == NETDEV_BONDING_DESLAVE)
+ printk(KERN_INFO "netconsole: network logging stopped, "
+ "interface %s %s\n", dev->name,
+ event == NETDEV_UNREGISTER ? "unregistered" : "released slaves");
done:
return NOTIFY_DONE;
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 6477029..2e4b421 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -126,7 +126,6 @@ netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
FIFO_PTR_FRAMENO(1) |
FIFO_PTR_FRAMELEN(len));
- ndev->trans_start = jiffies;
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 0f70383..ffa1b9c 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -95,6 +95,9 @@
#define ADDR_IN_WINDOW1(off) \
((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
+
/*
* normalize a 64MB crb address to 32MB PCI window
* To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1
@@ -420,7 +423,6 @@ struct status_desc {
} __attribute__ ((aligned(16)));
/* UNIFIED ROMIMAGE *************************/
-#define NX_UNI_FW_MIN_SIZE 0xc8000
#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
#define NX_UNI_DIR_SECT_BOOTLD 0x6
#define NX_UNI_DIR_SECT_FW 0x7
@@ -1353,6 +1355,8 @@ int netxen_config_rss(struct netxen_adapter *adapter, int enable);
int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd);
int netxen_linkevent_request(struct netxen_adapter *adapter, int enable);
void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup);
+void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *);
+void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64);
int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index f8499e5..20f7c58 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -632,6 +632,9 @@ static int netxen_nic_reg_test(struct net_device *dev)
if ((data_read & 0xffff) != adapter->pdev->vendor)
return 1;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return 0;
+
data_written = (u32)0xa5a5a5a5;
NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
@@ -703,6 +706,11 @@ netxen_nic_get_ethtool_stats(struct net_device *dev,
}
}
+static u32 netxen_nic_get_tx_csum(struct net_device *dev)
+{
+ return dev->features & NETIF_F_IP_CSUM;
+}
+
static u32 netxen_nic_get_rx_csum(struct net_device *dev)
{
struct netxen_adapter *adapter = netdev_priv(dev);
@@ -909,6 +917,7 @@ const struct ethtool_ops netxen_nic_ethtool_ops = {
.set_ringparam = netxen_nic_set_ringparam,
.get_pauseparam = netxen_nic_get_pauseparam,
.set_pauseparam = netxen_nic_set_pauseparam,
+ .get_tx_csum = netxen_nic_get_tx_csum,
.set_tx_csum = ethtool_op_set_tx_csum,
.set_sg = ethtool_op_set_sg,
.get_tso = netxen_nic_get_tso,
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 622e4c8..d8bd73d 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -681,14 +681,8 @@ enum {
#define MIU_TEST_AGT_ADDR_HI (0x08)
#define MIU_TEST_AGT_WRDATA_LO (0x10)
#define MIU_TEST_AGT_WRDATA_HI (0x14)
-#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
-#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
-#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
#define MIU_TEST_AGT_RDDATA_LO (0x18)
#define MIU_TEST_AGT_RDDATA_HI (0x1c)
-#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
-#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
-#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
@@ -789,9 +783,7 @@ enum {
* for backward compability
*/
#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8)
-#define CRB_NIC_CAPABILITIES_FW NETXEN_NIC_REG(0x1dc)
#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270)
-#define CRB_NIC_MSI_MODE_FW NETXEN_NIC_REG(0x274)
#define INTR_SCHEME_PERPORT 0x1
#define MSI_MODE_MULTIFUNC 0x1
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index b1cf46a..5c496f8 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -32,7 +32,6 @@
#define MASK(n) ((1ULL<<(n))-1)
#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
-#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
#define MS_WIN(addr) (addr & 0x0ffc0000)
#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
@@ -63,9 +62,6 @@ static inline void writeq(u64 val, void __iomem *addr)
}
#endif
-#define ADDR_IN_RANGE(addr, low, high) \
- (((addr) < (high)) && ((addr) >= (low)))
-
#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
((adapter)->ahw.pci_base0 + (off))
#define PCI_OFFSET_SECOND_RANGE(adapter, off) \
@@ -538,7 +534,7 @@ netxen_nic_set_mcast_addr(struct netxen_adapter *adapter,
void netxen_p2_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 null_addr[6];
int i;
@@ -572,8 +568,8 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
netxen_nic_enable_mcast_filter(adapter);
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- netxen_nic_set_mcast_addr(adapter, i++, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev)
+ netxen_nic_set_mcast_addr(adapter, i++, ha->addr);
/* Clear out remaining addresses */
while (i < adapter->max_mc_count)
@@ -681,7 +677,7 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
void netxen_p3_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
u32 mode = VPORT_MISS_MODE_DROP;
LIST_HEAD(del_list);
@@ -708,8 +704,8 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
}
if (!netdev_mc_empty(netdev)) {
- netdev_for_each_mc_addr(mc_ptr, netdev)
- nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list);
+ netdev_for_each_mc_addr(ha, netdev)
+ nx_p3_nic_add_mac(adapter, ha->addr, &del_list);
}
send_fw_cmd:
@@ -1391,18 +1387,8 @@ netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
u64 addr, u32 *start)
{
u32 window;
- struct pci_dev *pdev = adapter->pdev;
- if ((addr & 0x00ff800) == 0xff800) {
- if (printk_ratelimit())
- dev_warn(&pdev->dev, "QM access not handled\n");
- return -EIO;
- }
-
- if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
- window = OCM_WIN_P3P(addr);
- else
- window = OCM_WIN(addr);
+ window = OCM_WIN(addr);
writel(window, adapter->ahw.ocm_win_crb);
/* read back to flush */
@@ -1419,7 +1405,7 @@ netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off,
{
void __iomem *addr, *mem_ptr = NULL;
resource_size_t mem_base;
- int ret = -EIO;
+ int ret;
u32 start;
spin_lock(&adapter->ahw.mem_lock);
@@ -1428,20 +1414,23 @@ netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off,
if (ret != 0)
goto unlock;
- addr = pci_base_offset(adapter, start);
- if (addr)
- goto noremap;
-
- mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ addr = adapter->ahw.pci_base0 + start;
+ } else {
+ addr = pci_base_offset(adapter, start);
+ if (addr)
+ goto noremap;
+
+ mem_base = pci_resource_start(adapter->pdev, 0) +
+ (start & PAGE_MASK);
+ mem_ptr = ioremap(mem_base, PAGE_SIZE);
+ if (mem_ptr == NULL) {
+ ret = -EIO;
+ goto unlock;
+ }
- mem_ptr = ioremap(mem_base, PAGE_SIZE);
- if (mem_ptr == NULL) {
- ret = -EIO;
- goto unlock;
+ addr = mem_ptr + (start & (PAGE_SIZE-1));
}
-
- addr = mem_ptr + (start & (PAGE_SIZE - 1));
-
noremap:
if (op == 0) /* read */
*data = readq(addr);
@@ -1456,6 +1445,28 @@ unlock:
return ret;
}
+void
+netxen_pci_camqm_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);
+
+ spin_lock(&adapter->ahw.mem_lock);
+ *data = readq(addr);
+ spin_unlock(&adapter->ahw.mem_lock);
+}
+
+void
+netxen_pci_camqm_write_2M(struct netxen_adapter *adapter, u64 off, u64 data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);
+
+ spin_lock(&adapter->ahw.mem_lock);
+ writeq(data, addr);
+ spin_unlock(&adapter->ahw.mem_lock);
+}
+
#define MAX_CTL_CHECK 1000
static int
@@ -1621,9 +1632,8 @@ static int
netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
u64 off, u64 data)
{
- int i, j, ret;
+ int j, ret;
u32 temp, off8;
- u64 stride;
void __iomem *mem_crb;
/* Only 64-bit aligned access */
@@ -1650,44 +1660,17 @@ netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
return -EIO;
correct:
- stride = NX_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
-
- off8 = off & ~(stride-1);
+ off8 = off & 0xfffffff8;
spin_lock(&adapter->ahw.mem_lock);
writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
- i = 0;
- if (stride == 16) {
- writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
- writel((TA_CTL_START | TA_CTL_ENABLE),
- (mem_crb + TEST_AGT_CTRL));
-
- for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = readl(mem_crb + TEST_AGT_CTRL);
- if ((temp & TA_CTL_BUSY) == 0)
- break;
- }
-
- if (j >= MAX_CTL_CHECK) {
- ret = -EIO;
- goto done;
- }
-
- i = (off & 0xf) ? 0 : 2;
- writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
- mem_crb + MIU_TEST_AGT_WRDATA(i));
- writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
- mem_crb + MIU_TEST_AGT_WRDATA(i+1));
- i = (off & 0xf) ? 2 : 0;
- }
-
writel(data & 0xffffffff,
- mem_crb + MIU_TEST_AGT_WRDATA(i));
+ mem_crb + MIU_TEST_AGT_WRDATA_LO);
writel((data >> 32) & 0xffffffff,
- mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+ mem_crb + MIU_TEST_AGT_WRDATA_HI);
writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
@@ -1707,7 +1690,6 @@ correct:
} else
ret = 0;
-done:
spin_unlock(&adapter->ahw.mem_lock);
return ret;
@@ -1719,7 +1701,7 @@ netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
{
int j, ret;
u32 temp, off8;
- u64 val, stride;
+ u64 val;
void __iomem *mem_crb;
/* Only 64-bit aligned access */
@@ -1748,9 +1730,7 @@ netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
return -EIO;
correct:
- stride = NX_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
-
- off8 = off & ~(stride-1);
+ off8 = off & 0xfffffff8;
spin_lock(&adapter->ahw.mem_lock);
@@ -1771,13 +1751,8 @@ correct:
"failed to read through agent\n");
ret = -EIO;
} else {
- off8 = MIU_TEST_AGT_RDDATA_LO;
- if ((stride == 16) && (off & 0xf))
- off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
-
- temp = readl(mem_crb + off8 + 4);
- val = (u64)temp << 32;
- val |= readl(mem_crb + off8);
+ val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32;
+ val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO);
*data = val;
ret = 0;
}
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 02876f5..045a7c8 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -614,22 +614,123 @@ static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
return NULL;
}
+#define QLCNIC_FILEHEADER_SIZE (14 * 4)
+
static int
-nx_set_product_offs(struct netxen_adapter *adapter)
-{
- struct uni_table_desc *ptab_descr;
+netxen_nic_validate_header(struct netxen_adapter *adapter)
+ {
const u8 *unirom = adapter->fw->data;
- uint32_t i;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ u32 fw_file_size = adapter->fw->size;
+ u32 tab_size;
__le32 entries;
+ __le32 entry_size;
+
+ if (fw_file_size < QLCNIC_FILEHEADER_SIZE)
+ return -EINVAL;
+
+ entries = cpu_to_le32(directory->num_entries);
+ entry_size = cpu_to_le32(directory->entry_size);
+ tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
+
+ if (fw_file_size < tab_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+netxen_nic_validate_bootld(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ NX_UNI_BOOTLD_IDX_OFF));
+ u32 offs;
+ u32 tab_size;
+ u32 data_size;
+
+ tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+netxen_nic_validate_fw(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ NX_UNI_FIRMWARE_IDX_OFF));
+ u32 offs;
+ u32 tab_size;
+ u32 data_size;
+
+ tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+ return 0;
+}
+
+
+static int
+netxen_nic_validate_product_offs(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *ptab_descr;
+ const u8 *unirom = adapter->fw->data;
int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ?
1 : netxen_p3_has_mn(adapter);
+ __le32 entries;
+ __le32 entry_size;
+ u32 tab_size;
+ u32 i;
ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
if (ptab_descr == NULL)
- return -1;
+ return -EINVAL;
entries = cpu_to_le32(ptab_descr->num_entries);
+ entry_size = cpu_to_le32(ptab_descr->entry_size);
+ tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
nomn:
for (i = 0; i < entries; i++) {
@@ -658,9 +759,38 @@ nomn:
goto nomn;
}
- return -1;
+ return -EINVAL;
}
+static int
+netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter)
+{
+ if (netxen_nic_validate_header(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: header validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_product_offs(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: product validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_bootld(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: bootld validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_fw(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: firmware validation failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
u32 section, u32 idx_offset)
@@ -890,6 +1020,16 @@ netxen_load_firmware(struct netxen_adapter *adapter)
flashaddr += 8;
}
+
+ size = (__force u32)nx_get_fw_size(adapter) % 8;
+ if (size) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (adapter->pci_mem_write(adapter,
+ flashaddr, data))
+ return -EIO;
+ }
+
} else {
u64 data;
u32 hi, lo;
@@ -934,27 +1074,23 @@ static int
netxen_validate_firmware(struct netxen_adapter *adapter)
{
__le32 val;
- u32 ver, min_ver, bios, min_size;
+ u32 ver, min_ver, bios;
struct pci_dev *pdev = adapter->pdev;
const struct firmware *fw = adapter->fw;
u8 fw_type = adapter->fw_type;
if (fw_type == NX_UNIFIED_ROMIMAGE) {
- if (nx_set_product_offs(adapter))
+ if (netxen_nic_validate_unified_romimage(adapter))
return -EINVAL;
-
- min_size = NX_UNI_FW_MIN_SIZE;
} else {
val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
if ((__force u32)val != NETXEN_BDINFO_MAGIC)
return -EINVAL;
- min_size = NX_FW_MIN_SIZE;
+ if (fw->size < NX_FW_MIN_SIZE)
+ return -EINVAL;
}
- if (fw->size < min_size)
- return -EINVAL;
-
val = nx_get_fw_version(adapter);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
@@ -1225,10 +1361,12 @@ int netxen_init_firmware(struct netxen_adapter *adapter)
return err;
NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
- NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
+
return err;
}
@@ -1763,6 +1901,5 @@ netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
void netxen_nic_clear_stats(struct netxen_adapter *adapter)
{
memset(&adapter->stats, 0, sizeof(adapter->stats));
- return;
}
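The validators added above (netxen_nic_validate_header/_bootld/_fw/_product_offs) all reduce to the same bounds check: a section table is only trusted if findex + num_entries * entry_size — and, for a data descriptor, findex + size — fits inside adapter->fw->size. A stand-alone sketch of that arithmetic, not part of the patch, with a placeholder descriptor type and assuming the fields are already in host byte order:

#include <linux/types.h>
#include <linux/errno.h>

/* Placeholder mirroring the shape of struct uni_table_desc: a file
 * offset plus a count of fixed-size entries. */
struct example_table_desc {
        u32 findex;
        u32 num_entries;
        u32 entry_size;
};

/* Reject the table unless every entry lies inside the firmware blob. */
static int example_validate_table(const struct example_table_desc *tab,
                                  u32 fw_file_size)
{
        u32 tab_size = tab->findex + tab->num_entries * tab->entry_size;

        if (fw_file_size < tab_size)
                return -EINVAL;
        return 0;
}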
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index ce838f7..6ce6ce1 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -782,15 +782,22 @@ netxen_check_options(struct netxen_adapter *adapter)
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
adapter->msix_supported = !!use_msi_x;
adapter->rss_supported = !!use_msi_x;
- } else if (adapter->fw_version >= NETXEN_VERSION_CODE(3, 4, 336)) {
- switch (adapter->ahw.board_type) {
- case NETXEN_BRDTYPE_P2_SB31_10G:
- case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
- adapter->msix_supported = !!use_msi_x;
- adapter->rss_supported = !!use_msi_x;
- break;
- default:
- break;
+ } else {
+ u32 flashed_ver = 0;
+ netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
+
+ if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) {
+ switch (adapter->ahw.board_type) {
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ adapter->msix_supported = !!use_msi_x;
+ adapter->rss_supported = !!use_msi_x;
+ break;
+ default:
+ break;
+ }
}
}
@@ -2304,6 +2311,7 @@ netxen_fwinit_work(struct work_struct *work)
}
break;
+ case NX_DEV_NEED_RESET:
case NX_DEV_INITALIZING:
if (++adapter->fw_wait_cnt < FW_POLL_THRESH) {
netxen_schedule_work(adapter,
@@ -2347,6 +2355,9 @@ netxen_detach_work(struct work_struct *work)
ref_cnt = nx_decr_dev_ref_cnt(adapter);
+ if (ref_cnt == -EIO)
+ goto err_ret;
+
delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY);
adapter->fw_wait_cnt = 0;
@@ -2526,51 +2537,81 @@ static int
netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
loff_t offset, size_t size)
{
+ size_t crb_size = 4;
+
if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
return -EIO;
- if ((size != 4) || (offset & 0x3))
- return -EINVAL;
+ if (offset < NETXEN_PCI_CRBSPACE) {
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return -EINVAL;
- if (offset < NETXEN_PCI_CRBSPACE)
- return -EINVAL;
+ if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END))
+ crb_size = 8;
+ else
+ return -EINVAL;
+ }
+
+ if ((size != crb_size) || (offset & (crb_size-1)))
+ return -EINVAL;
return 0;
}
static ssize_t
-netxen_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
+netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u32 data;
+ u64 qmdata;
int ret;
ret = netxen_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- data = NXRD32(adapter, offset);
- memcpy(buf, &data, size);
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
+ ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END)) {
+ netxen_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = NXRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ }
+
return size;
}
static ssize_t
-netxen_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
+netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u32 data;
+ u64 qmdata;
int ret;
ret = netxen_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- memcpy(&data, buf, size);
- NXWR32(adapter, offset, data);
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
+ ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END)) {
+ memcpy(&qmdata, buf, size);
+ netxen_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ NXWR32(adapter, offset, data);
+ }
+
return size;
}
@@ -2588,7 +2629,8 @@ netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
}
static ssize_t
-netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
+netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -2608,7 +2650,7 @@ netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
return size;
}
-static ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
+static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
@@ -2742,7 +2784,6 @@ netxen_config_indev_addr(struct net_device *dev, unsigned long event)
} endfor_ifa(indev);
in_dev_put(indev);
- return;
}
static int netxen_netdev_event(struct notifier_block *this,
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index 3892330..4d3f2e2 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -444,7 +444,7 @@ static void ni5010_timeout(struct net_device *dev)
/* Try to restart the adaptor. */
/* FIXME: Give it a real kick here */
chipset_init(dev, 1);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -460,7 +460,6 @@ static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
hardware_send_packet(dev, (unsigned char *)skb->data, skb->len, length-skb->len);
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
return NETDEV_TX_OK;
}
@@ -515,8 +514,6 @@ static void dump_packet(void *buf, int len)
if (i % 16 == 15) printk("\n");
}
printk("\n");
-
- return;
}
/* We have a good packet, get it out of the buffer. */
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index f7a8f70..9bddb5f 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -595,7 +595,7 @@ static int init586(struct net_device *dev)
struct iasetup_cmd_struct __iomem *ias_cmd;
struct tdr_cmd_struct __iomem *tdr_cmd;
struct mcsetup_cmd_struct __iomem *mc_cmd;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int num_addrs = netdev_mc_count(dev);
ptr = p->scb + 1;
@@ -724,8 +724,8 @@ static int init586(struct net_device *dev)
writew(num_addrs * 6, &mc_cmd->mc_cnt);
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
- memcpy_toio(mc_cmd->mc_list[i++], dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy_toio(mc_cmd->mc_list[i++], ha->addr, 6);
writew(make16(mc_cmd), &p->scb->cbl_offset);
writeb(CUC_START, &p->scb->cmd_cuc);
@@ -1147,7 +1147,7 @@ static void ni52_timeout(struct net_device *dev)
writeb(CUC_START, &p->scb->cmd_cuc);
ni_attn586();
wait_for_scb_cmd(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
return 0;
}
#endif
@@ -1165,7 +1165,7 @@ static void ni52_timeout(struct net_device *dev)
ni52_close(dev);
ni52_open(dev);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
/******************************************************
@@ -1218,7 +1218,6 @@ static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
writeb(CUC_START, &p->scb->cmd_cuc);
}
ni_attn586();
- dev->trans_start = jiffies;
if (!i)
dev_kfree_skb(skb);
wait_for_scb_cmd(dev);
@@ -1240,7 +1239,6 @@ static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
writew(0, &p->nop_cmds[next_nop]->cmd_status);
writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link);
- dev->trans_start = jiffies;
p->nop_point = next_nop;
dev_kfree_skb(skb);
# endif
@@ -1256,7 +1254,6 @@ static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
writew(0, &p->nop_cmds[next_nop]->cmd_status);
writew(make16(p->xmit_cmds[p->xmit_count]),
&p->nop_cmds[p->xmit_count]->cmd_link);
- dev->trans_start = jiffies;
p->xmit_count = next_nop;
{
unsigned long flags;
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 9225c76..da228a0 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -784,7 +784,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
if(!p->lock)
if (p->tmdnum || !p->xmit_queued)
netif_wake_queue(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
else
writedatareg(CSR0_STRT | csr0);
@@ -1150,7 +1150,7 @@ static void ni65_timeout(struct net_device *dev)
printk("%02x ",p->tmdhead[i].u.s.status);
printk("\n");
ni65_lance_reinit(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1213,7 +1213,6 @@ static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
netif_wake_queue(dev);
p->lock = 0;
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&p->ring_lock, flags);
}
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index d5cd16b..30abb4e 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -36,8 +36,8 @@
#include "niu.h"
#define DRV_MODULE_NAME "niu"
-#define DRV_MODULE_VERSION "1.0"
-#define DRV_MODULE_RELDATE "Nov 14, 2008"
+#define DRV_MODULE_VERSION "1.1"
+#define DRV_MODULE_RELDATE "Apr 22, 2010"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -3444,6 +3444,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
struct rx_ring_info *rp)
{
unsigned int index = rp->rcr_index;
+ struct rx_pkt_hdr1 *rh;
struct sk_buff *skb;
int len, num_rcr;
@@ -3477,9 +3478,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
if (num_rcr == 1) {
int ptype;
- off += 2;
- append_size -= 2;
-
ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
if ((ptype == RCR_PKT_TYPE_TCP ||
ptype == RCR_PKT_TYPE_UDP) &&
@@ -3488,8 +3486,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
- }
- if (!(val & RCR_ENTRY_MULTI))
+ } else if (!(val & RCR_ENTRY_MULTI))
append_size = len - skb->len;
niu_rx_skb_append(skb, page, off, append_size);
@@ -3510,8 +3507,17 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
}
rp->rcr_index = index;
- skb_reserve(skb, NET_IP_ALIGN);
- __pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));
+ len += sizeof(*rh);
+ len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
+ __pskb_pull_tail(skb, len);
+
+ rh = (struct rx_pkt_hdr1 *) skb->data;
+ if (np->dev->features & NETIF_F_RXHASH)
+ skb->rxhash = ((u32)rh->hashval2_0 << 24 |
+ (u32)rh->hashval2_1 << 16 |
+ (u32)rh->hashval1_1 << 8 |
+ (u32)rh->hashval1_2 << 0);
+ skb_pull(skb, sizeof(*rh));
rp->rx_packets++;
rp->rx_bytes += skb->len;
@@ -4946,7 +4952,9 @@ static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
RX_DMA_CTL_STAT_RCRTO |
RX_DMA_CTL_STAT_RBR_EMPTY));
nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
- nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0));
+ nw64(RXDMA_CFIG2(channel),
+ ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
+ RXDMA_CFIG2_FULL_HDR));
nw64(RBR_CFIG_A(channel),
((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
(rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
@@ -6314,7 +6322,6 @@ static void niu_set_rx_mode(struct net_device *dev)
{
struct niu *np = netdev_priv(dev);
int i, alt_cnt, err;
- struct dev_addr_list *addr;
struct netdev_hw_addr *ha;
unsigned long flags;
u16 hash[16] = { 0, };
@@ -6366,8 +6373,8 @@ static void niu_set_rx_mode(struct net_device *dev)
for (i = 0; i < 16; i++)
hash[i] = 0xffff;
} else if (!netdev_mc_empty(dev)) {
- netdev_for_each_mc_addr(addr, dev) {
- u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
crc >>= 24;
hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
@@ -7911,6 +7918,18 @@ static int niu_phys_id(struct net_device *dev, u32 data)
return 0;
}
+static int niu_set_flags(struct net_device *dev, u32 data)
+{
+ if (data & (ETH_FLAG_LRO | ETH_FLAG_NTUPLE))
+ return -EOPNOTSUPP;
+
+ if (data & ETH_FLAG_RXHASH)
+ dev->features |= NETIF_F_RXHASH;
+ else
+ dev->features &= ~NETIF_F_RXHASH;
+ return 0;
+}
+
static const struct ethtool_ops niu_ethtool_ops = {
.get_drvinfo = niu_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -7927,6 +7946,8 @@ static const struct ethtool_ops niu_ethtool_ops = {
.phys_id = niu_phys_id,
.get_rxnfc = niu_get_nfc,
.set_rxnfc = niu_set_nfc,
+ .set_flags = niu_set_flags,
+ .get_flags = ethtool_op_get_flags,
};
static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
@@ -9755,6 +9776,12 @@ static void __devinit niu_device_announce(struct niu *np)
}
}
+static void __devinit niu_set_basic_features(struct net_device *dev)
+{
+ dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_GRO | NETIF_F_RXHASH);
+}
+
static int __devinit niu_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -9839,7 +9866,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
}
}
- dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
+ niu_set_basic_features(dev);
np->regs = pci_ioremap_bar(pdev, 0);
if (!np->regs) {
@@ -10081,7 +10108,7 @@ static int __devinit niu_of_probe(struct of_device *op,
goto err_out_free_dev;
}
- dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
+ niu_set_basic_features(dev);
np->regs = of_ioremap(&op->resource[1], 0,
resource_size(&op->resource[1]),
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 3bd0b59..d671546 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -2706,7 +2706,7 @@ struct rx_pkt_hdr0 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 inputport:2,
maccheck:1,
- class:4;
+ class:5;
u8 vlan:1,
llcsnap:1,
noport:1,
@@ -2715,7 +2715,7 @@ struct rx_pkt_hdr0 {
tres:2,
tzfvld:1;
#elif defined(__BIG_ENDIAN_BITFIELD)
- u8 class:4,
+ u8 class:5,
maccheck:1,
inputport:2;
u8 tzfvld:1,
@@ -2775,6 +2775,9 @@ struct rx_pkt_hdr1 {
/* Bits 7:0 of hash value, H1. */
u8 hashval1_2;
+ u8 hwrsvd5;
+ u8 hwrsvd6;
+
u8 usrdata_0; /* Bits 39:32 of user data. */
u8 usrdata_1; /* Bits 31:24 of user data. */
u8 usrdata_2; /* Bits 23:16 of user data. */
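With the niu.c change above, the driver keeps struct rx_pkt_hdr1 at the head of the skb long enough to recover the hardware RSS hash: the four hash bytes are folded into a single 32-bit skb->rxhash, gated by NETIF_F_RXHASH / ETH_FLAG_RXHASH. A sketch of just the byte-assembly step, not part of the patch, using a placeholder struct for the four fields involved:

#include <linux/types.h>

/* Placeholder for the hash bytes of struct rx_pkt_hdr1. */
struct example_rx_hash_bytes {
        u8 hashval2_0;  /* bits 31:24 */
        u8 hashval2_1;  /* bits 23:16 */
        u8 hashval1_1;  /* bits 15:8  */
        u8 hashval1_2;  /* bits 7:0   */
};

static u32 example_rxhash(const struct example_rx_hash_bytes *rh)
{
        return ((u32)rh->hashval2_0 << 24) |
               ((u32)rh->hashval2_1 << 16) |
               ((u32)rh->hashval1_1 << 8) |
               ((u32)rh->hashval1_2 << 0);
}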
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 8aadc8e..000e792 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -189,12 +189,19 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
while (mix_orcnt.s.orcnt) {
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+
+ mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+
+ if (mix_orcnt.s.orcnt == 0) {
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+ break;
+ }
+
dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
DMA_BIDIRECTIONAL);
- spin_lock_irqsave(&p->tx_list.lock, flags);
-
re.d64 = p->tx_ring[p->tx_next_clean];
p->tx_next_clean =
(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
@@ -317,7 +324,6 @@ good:
skb->protocol = eth_type_trans(skb, netdev);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += skb->len;
- netdev->last_rx = jiffies;
netif_receive_skb(skb);
rc = 0;
} else if (re.s.code == RING_ENTRY_CODE_MORE) {
@@ -374,7 +380,6 @@ done:
mix_ircnt.s.ircnt = 1;
cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
return rc;
-
}
static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
@@ -384,7 +389,6 @@ static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
union cvmx_mixx_ircnt mix_ircnt;
int rc;
-
mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
while (work_done < budget && mix_ircnt.s.ircnt) {
@@ -475,13 +479,12 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
struct octeon_mgmt_cam_state cam_state;
- struct dev_addr_list *list;
- struct list_head *pos;
+ struct netdev_hw_addr *ha;
int available_cam_entries;
memset(&cam_state, 0, sizeof(cam_state));
- if ((netdev->flags & IFF_PROMISC) || netdev->dev_addrs.count > 7) {
+ if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
cam_mode = 0;
available_cam_entries = 8;
} else {
@@ -489,13 +492,13 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
* One CAM entry for the primary address, leaves seven
* for the secondary addresses.
*/
- available_cam_entries = 7 - netdev->dev_addrs.count;
+ available_cam_entries = 7 - netdev->uc.count;
}
if (netdev->flags & IFF_MULTICAST) {
if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
netdev_mc_count(netdev) > available_cam_entries)
- multicast_mode = 2; /* 1 - Accept all multicast. */
+ multicast_mode = 2; /* 2 - Accept all multicast. */
else
multicast_mode = 0; /* 0 - Use CAM. */
}
@@ -503,19 +506,14 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
if (cam_mode == 1) {
/* Add primary address. */
octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
- list_for_each(pos, &netdev->dev_addrs.list) {
- struct netdev_hw_addr *hw_addr;
- hw_addr = list_entry(pos, struct netdev_hw_addr, list);
- octeon_mgmt_cam_state_add(&cam_state, hw_addr->addr);
- list = list->next;
- }
+ netdev_for_each_uc_addr(ha, netdev)
+ octeon_mgmt_cam_state_add(&cam_state, ha->addr);
}
if (multicast_mode == 0) {
- netdev_for_each_mc_addr(list, netdev)
- octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
+ netdev_for_each_mc_addr(ha, netdev)
+ octeon_mgmt_cam_state_add(&cam_state, ha->addr);
}
-
spin_lock_irqsave(&p->lock, flags);
/* Disable packet I/O. */
@@ -524,7 +522,6 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
agl_gmx_prtx.s.en = 0;
cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
-
adr_ctl.u64 = 0;
adr_ctl.s.cam_mode = cam_mode;
adr_ctl.s.mcst = multicast_mode;
@@ -597,8 +594,7 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));
/* Clear any pending interrupts */
- cvmx_write_csr(CVMX_MIXX_ISR(port),
- cvmx_read_csr(CVMX_MIXX_ISR(port)));
+ cvmx_write_csr(CVMX_MIXX_ISR(port), mixx_isr.u64);
cvmx_read_csr(CVMX_MIXX_ISR(port));
if (mixx_isr.s.irthresh) {
@@ -832,9 +828,9 @@ static int octeon_mgmt_open(struct net_device *netdev)
mix_irhwm.s.irhwm = 0;
cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);
- /* Interrupt when we have 5 or more packets to clean. */
+ /* Interrupt when we have 1 or more packets to clean. */
mix_orhwm.u64 = 0;
- mix_orhwm.s.orhwm = 5;
+ mix_orhwm.s.orhwm = 1;
cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);
/* Enable receive and transmit interrupts */
@@ -928,7 +924,6 @@ static int octeon_mgmt_stop(struct net_device *netdev)
octeon_mgmt_reset_hw(p);
-
free_irq(p->irq, netdev);
/* dma_unmap is a nop on Octeon, so just free everything. */
@@ -945,7 +940,6 @@ static int octeon_mgmt_stop(struct net_device *netdev)
DMA_BIDIRECTIONAL);
kfree(p->tx_ring);
-
return 0;
}
@@ -955,6 +949,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
int port = p->port;
union mgmt_port_ring_entry re;
unsigned long flags;
+ int rv = NETDEV_TX_BUSY;
re.d64 = 0;
re.s.len = skb->len;
@@ -964,15 +959,18 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
spin_lock_irqsave(&p->tx_list.lock, flags);
+ if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+ netif_stop_queue(netdev);
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+ }
+
if (unlikely(p->tx_current_fill >=
ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
spin_unlock_irqrestore(&p->tx_list.lock, flags);
-
dma_unmap_single(p->dev, re.s.addr, re.s.len,
DMA_TO_DEVICE);
-
- netif_stop_queue(netdev);
- return NETDEV_TX_BUSY;
+ goto out;
}
__skb_queue_tail(&p->tx_list, skb);
@@ -994,10 +992,10 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Ring the bell. */
cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
- netdev->trans_start = jiffies;
- octeon_mgmt_clean_tx_buffers(p);
+ rv = NETDEV_TX_OK;
+out:
octeon_mgmt_update_tx_stats(netdev);
- return NETDEV_TX_OK;
+ return rv;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1007,7 +1005,6 @@ static void octeon_mgmt_poll_controller(struct net_device *netdev)
octeon_mgmt_receive_packets(p, 16);
octeon_mgmt_update_rx_stats(netdev);
- return;
}
#endif
@@ -1107,7 +1104,6 @@ static int __init octeon_mgmt_probe(struct platform_device *pdev)
netdev->netdev_ops = &octeon_mgmt_ops;
netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
-
/* The mgmt ports get the first N MACs. */
for (i = 0; i < 6; i++)
netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
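The octeon_mgmt_xmit rework above changes when the queue is stopped: it is stopped as soon as only one ring slot remains (while still accepting the packet into that slot), and NETDEV_TX_BUSY is returned only if the ring turns out to be full on the re-check under the lock. A sketch of that admission pattern with a hypothetical ring structure, not part of the patch; the real driver also handles DMA mapping and statistics around this:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

/* Hypothetical ring state standing in for struct octeon_mgmt. */
struct example_txq {
        spinlock_t lock;
        unsigned int fill;      /* entries currently queued */
        unsigned int max;       /* usable ring capacity */
};

/* Stop the queue when one slot is left but still accept the packet;
 * refuse with NETDEV_TX_BUSY only when the ring is actually full. */
static netdev_tx_t example_xmit_admit(struct example_txq *q,
                                      struct net_device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        if (unlikely(q->fill >= q->max - 1)) {
                spin_unlock_irqrestore(&q->lock, flags);
                netif_stop_queue(dev);
                spin_lock_irqsave(&q->lock, flags);
        }
        if (unlikely(q->fill >= q->max)) {
                spin_unlock_irqrestore(&q->lock, flags);
                return NETDEV_TX_BUSY;
        }
        q->fill++;
        spin_unlock_irqrestore(&q->lock, flags);
        return NETDEV_TX_OK;
}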
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 370c147..8ab6ae0 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -1472,8 +1472,6 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
txring->next_to_fill = fill;
write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
-
- return;
}
static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 3678585..56f3fc4 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1354,7 +1354,6 @@ static int netdrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
NETDRV_W32(TxStatus0 + (entry * sizeof(u32)),
tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
- dev->trans_start = jiffies;
atomic_inc(&tp->cur_tx);
if ((atomic_read(&tp->cur_tx) - atomic_read(&tp->dirty_tx)) >= NUM_TX_DESC)
netif_stop_queue(dev);
@@ -1813,12 +1812,12 @@ static void netdrv_set_rx_mode(struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 30b7cf7..10ee106 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -613,8 +613,6 @@ static void mdio_write(unsigned int ioaddr, int phy_id, int location, int value)
outw(MDIO_ENB_IN, mdio_addr);
outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
}
-
- return;
}
/* Reset and restore all of the 3c574 registers. */
@@ -730,7 +728,7 @@ static void el3_tx_timeout(struct net_device *dev)
printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name);
dump_status(dev);
dev->stats.tx_errors++;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
tc574_wait_for_completion(dev, TxReset);
outw(TxEnable, ioaddr + EL3_CMD);
@@ -781,8 +779,6 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
/* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2);
- dev->trans_start = jiffies;
-
/* TxFree appears only in Window 1, not offset 0x1c. */
if (inw(ioaddr + TxFree) <= 1536) {
netif_stop_queue(dev);
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 5ab589d..ce63c37 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -1,20 +1,20 @@
/*======================================================================
A PCMCIA ethernet driver for the 3com 3c589 card.
-
+
Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
3c589_cs.c 1.162 2001/10/13 00:08:50
The network driver code is based on Donald Becker's 3c589 code:
-
+
Written 1994 by Donald Becker.
Copyright 1993 United States Government as represented by the
Director, National Security Agency. This software may be used and
distributed according to the terms of the GNU General Public License,
incorporated herein by reference.
Donald Becker may be reached at becker@scyld.com
-
+
Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
======================================================================*/
@@ -69,31 +69,54 @@
/* The top five bits written to EL3_CMD are a command, the lower
11 bits are the parameter, if applicable. */
enum c509cmd {
- TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
- RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
- TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
- FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
- SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
- SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
- StatsDisable = 22<<11, StopCoax = 23<<11,
+ TotalReset = 0<<11,
+ SelectWindow = 1<<11,
+ StartCoax = 2<<11,
+ RxDisable = 3<<11,
+ RxEnable = 4<<11,
+ RxReset = 5<<11,
+ RxDiscard = 8<<11,
+ TxEnable = 9<<11,
+ TxDisable = 10<<11,
+ TxReset = 11<<11,
+ FakeIntr = 12<<11,
+ AckIntr = 13<<11,
+ SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11,
+ SetRxFilter = 16<<11,
+ SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11,
+ SetTxStart = 19<<11,
+ StatsEnable = 21<<11,
+ StatsDisable = 22<<11,
+ StopCoax = 23<<11
};
enum c509status {
- IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
- TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
- IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000
+ IntLatch = 0x0001,
+ AdapterFailure = 0x0002,
+ TxComplete = 0x0004,
+ TxAvailable = 0x0008,
+ RxComplete = 0x0010,
+ RxEarly = 0x0020,
+ IntReq = 0x0040,
+ StatsFull = 0x0080,
+ CmdBusy = 0x1000
};
/* The SetRxFilter command accepts the following classes: */
enum RxFilter {
- RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8
+ RxStation = 1,
+ RxMulticast = 2,
+ RxBroadcast = 4,
+ RxProm = 8
};
/* Register window 1 offsets, the window used in normal operation. */
#define TX_FIFO 0x00
#define RX_FIFO 0x00
-#define RX_STATUS 0x08
-#define TX_STATUS 0x0B
+#define RX_STATUS 0x08
+#define TX_STATUS 0x0B
#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
@@ -106,12 +129,12 @@ enum RxFilter {
struct el3_private {
struct pcmcia_device *p_dev;
- /* For transceiver monitoring */
- struct timer_list media;
- u16 media_status;
- u16 fast_poll;
- unsigned long last_irq;
- spinlock_t lock;
+ /* For transceiver monitoring */
+ struct timer_list media;
+ u16 media_status;
+ u16 fast_poll;
+ unsigned long last_irq;
+ spinlock_t lock;
};
static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
@@ -163,15 +186,15 @@ static void tc589_detach(struct pcmcia_device *p_dev);
======================================================================*/
static const struct net_device_ops el3_netdev_ops = {
- .ndo_open = el3_open,
- .ndo_stop = el3_close,
+ .ndo_open = el3_open,
+ .ndo_stop = el3_close,
.ndo_start_xmit = el3_start_xmit,
- .ndo_tx_timeout = el3_tx_timeout,
+ .ndo_tx_timeout = el3_tx_timeout,
.ndo_set_config = el3_config,
.ndo_get_stats = el3_get_stats,
.ndo_set_multicast_list = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -233,7 +256,7 @@ static void tc589_detach(struct pcmcia_device *link)
tc589_config() is scheduled to run after a CARD_INSERTION event
is received, to configure the PCMCIA socket, and to make the
ethernet device available to the system.
-
+
======================================================================*/
static int tc589_config(struct pcmcia_device *link)
@@ -245,7 +268,7 @@ static int tc589_config(struct pcmcia_device *link)
char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
u8 *buf;
size_t len;
-
+
dev_dbg(&link->dev, "3c589_config\n");
phys_addr = (__be16 *)dev->dev_addr;
@@ -274,7 +297,7 @@ static int tc589_config(struct pcmcia_device *link)
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
-
+
dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
ioaddr = dev->base_addr;
@@ -308,7 +331,7 @@ static int tc589_config(struct pcmcia_device *link)
dev->if_port = if_port;
else
printk(KERN_ERR "3c589_cs: invalid if_port requested\n");
-
+
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
@@ -316,13 +339,12 @@ static int tc589_config(struct pcmcia_device *link)
goto failed;
}
- printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, "
- "hw_addr %pM\n",
- dev->name, (multi ? "562" : "589"), dev->base_addr, dev->irq,
- dev->dev_addr);
- printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n",
- (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
- if_names[dev->if_port]);
+ netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
+ (multi ? "562" : "589"), dev->base_addr, dev->irq,
+ dev->dev_addr);
+ netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
+ (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
+ if_names[dev->if_port]);
return 0;
failed:
@@ -335,7 +357,7 @@ failed:
After a card is removed, tc589_release() will unregister the net
device, and release the PCMCIA configuration. If the device is
still open, this will be postponed until it is closed.
-
+
======================================================================*/
static void tc589_release(struct pcmcia_device *link)
@@ -357,7 +379,7 @@ static int tc589_resume(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- if (link->open) {
+ if (link->open) {
tc589_reset(dev);
netif_device_attach(dev);
}
@@ -377,8 +399,7 @@ static void tc589_wait_for_completion(struct net_device *dev, int cmd)
while (--i > 0)
if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
if (i == 0)
- printk(KERN_WARNING "%s: command 0x%04x did not complete!\n",
- dev->name, cmd);
+ netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
}
/*
@@ -404,7 +425,7 @@ static void tc589_set_xcvr(struct net_device *dev, int if_port)
{
struct el3_private *lp = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
-
+
EL3WINDOW(0);
switch (if_port) {
case 0: case 1: outw(0, ioaddr + 6); break;
@@ -427,14 +448,13 @@ static void dump_status(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
EL3WINDOW(1);
- printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
- "%02x tx free %04x\n", inw(ioaddr+EL3_STATUS),
- inw(ioaddr+RX_STATUS), inb(ioaddr+TX_STATUS),
- inw(ioaddr+TX_FREE));
+ netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
+ inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
EL3WINDOW(4);
- printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x"
- " media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06),
- inw(ioaddr+0x08), inw(ioaddr+0x0a));
+ netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
+ inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
+ inw(ioaddr+0x0a));
EL3WINDOW(1);
}
@@ -443,18 +463,18 @@ static void tc589_reset(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
int i;
-
+
EL3WINDOW(0);
- outw(0x0001, ioaddr + 4); /* Activate board. */
+ outw(0x0001, ioaddr + 4); /* Activate board. */
outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
-
+
/* Set the station address in window 2. */
EL3WINDOW(2);
for (i = 0; i < 6; i++)
outb(dev->dev_addr[i], ioaddr + i);
tc589_set_xcvr(dev, dev->if_port);
-
+
/* Switch to the stats window, and clear all stats by reading. */
outw(StatsDisable, ioaddr + EL3_CMD);
EL3WINDOW(6);
@@ -462,7 +482,7 @@ static void tc589_reset(struct net_device *dev)
inb(ioaddr+i);
inw(ioaddr + 10);
inw(ioaddr + 12);
-
+
/* Switch to register set 1 for normal use. */
EL3WINDOW(1);
@@ -496,8 +516,7 @@ static int el3_config(struct net_device *dev, struct ifmap *map)
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
if (map->port <= 3) {
dev->if_port = map->port;
- printk(KERN_INFO "%s: switched to %s port\n",
- dev->name, if_names[dev->if_port]);
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
tc589_set_xcvr(dev, dev->if_port);
} else
return -EINVAL;
@@ -509,13 +528,13 @@ static int el3_open(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
-
+
if (!pcmcia_dev_present(link))
return -ENODEV;
link->open++;
netif_start_queue(dev);
-
+
tc589_reset(dev);
init_timer(&lp->media);
lp->media.function = &media_check;
@@ -525,18 +544,18 @@ static int el3_open(struct net_device *dev)
dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
dev->name, inw(dev->base_addr + EL3_STATUS));
-
+
return 0;
}
static void el3_tx_timeout(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
-
- printk(KERN_WARNING "%s: Transmit timed out!\n", dev->name);
+
+ netdev_warn(dev, "Transmit timed out!\n");
dump_status(dev);
dev->stats.tx_errors++;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
tc589_wait_for_completion(dev, TxReset);
outw(TxEnable, ioaddr + EL3_CMD);
@@ -547,19 +566,18 @@ static void pop_tx_status(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
int i;
-
+
/* Clear the Tx status stack. */
for (i = 32; i > 0; i--) {
u_char tx_status = inb(ioaddr + TX_STATUS);
if (!(tx_status & 0x84)) break;
/* reset transmitter on jabber error or underrun */
if (tx_status & 0x30)
- tc589_wait_for_completion(dev, TxReset);
+ tc589_wait_for_completion(dev, TxReset);
if (tx_status & 0x38) {
- pr_debug("%s: transmit error: status 0x%02x\n",
- dev->name, tx_status);
- outw(TxEnable, ioaddr + EL3_CMD);
- dev->stats.tx_aborted_errors++;
+ netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->stats.tx_aborted_errors++;
}
outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
}
@@ -572,11 +590,10 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
struct el3_private *priv = netdev_priv(dev);
unsigned long flags;
- pr_debug("%s: el3_start_xmit(length = %ld) called, "
- "status %4.4x.\n", dev->name, (long)skb->len,
- inw(ioaddr + EL3_STATUS));
+ netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
+ (long)skb->len, inw(ioaddr + EL3_STATUS));
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
dev->stats.tx_bytes += skb->len;
@@ -586,7 +603,6 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
/* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
- dev->trans_start = jiffies;
if (inw(ioaddr + TX_FREE) <= 1536) {
netif_stop_queue(dev);
/* Interrupt us when the FIFO has room for max-sized packet. */
@@ -594,9 +610,9 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
}
pop_tx_status(dev);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
dev_kfree_skb(skb);
-
+
return NETDEV_TX_OK;
}
@@ -608,37 +624,32 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
unsigned int ioaddr;
__u16 status;
int i = 0, handled = 1;
-
+
if (!netif_device_present(dev))
return IRQ_NONE;
ioaddr = dev->base_addr;
- pr_debug("%s: interrupt, status %4.4x.\n",
- dev->name, inw(ioaddr + EL3_STATUS));
+ netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
- spin_lock(&lp->lock);
+ spin_lock(&lp->lock);
while ((status = inw(ioaddr + EL3_STATUS)) &
(IntLatch | RxComplete | StatsFull)) {
if ((status & 0xe000) != 0x2000) {
- pr_debug("%s: interrupt from dead card\n", dev->name);
- handled = 0;
- break;
+ netdev_dbg(dev, "interrupt from dead card\n");
+ handled = 0;
+ break;
}
-
if (status & RxComplete)
- el3_rx(dev);
-
+ el3_rx(dev);
if (status & TxAvailable) {
- pr_debug(" TX room bit was handled.\n");
- /* There's room in the FIFO for a full-sized packet. */
- outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
- netif_wake_queue(dev);
+ netdev_dbg(dev, " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
}
-
if (status & TxComplete)
- pop_tx_status(dev);
-
+ pop_tx_status(dev);
if (status & (AdapterFailure | RxEarly | StatsFull)) {
/* Handle all uncommon interrupts. */
if (status & StatsFull) /* Empty statistics. */
@@ -652,8 +663,8 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
EL3WINDOW(4);
fifo_diag = inw(ioaddr + 4);
EL3WINDOW(1);
- printk(KERN_WARNING "%s: adapter failure, FIFO diagnostic"
- " register %04x.\n", dev->name, fifo_diag);
+ netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
+ fifo_diag);
if (fifo_diag & 0x0400) {
/* Tx overrun */
tc589_wait_for_completion(dev, TxReset);
@@ -668,22 +679,20 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
}
}
-
if (++i > 10) {
- printk(KERN_ERR "%s: infinite loop in interrupt, "
- "status %4.4x.\n", dev->name, status);
- /* Clear all interrupts */
- outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
- break;
+ netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
+ status);
+ /* Clear all interrupts */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
}
/* Acknowledge the IRQ. */
outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
}
-
lp->last_irq = jiffies;
- spin_unlock(&lp->lock);
- pr_debug("%s: exiting interrupt, status %4.4x.\n",
- dev->name, inw(ioaddr + EL3_STATUS));
+ spin_unlock(&lp->lock);
+ netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
+ inw(ioaddr + EL3_STATUS));
return IRQ_RETVAL(handled);
}
@@ -702,7 +711,7 @@ static void media_check(unsigned long arg)
if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
(inb(ioaddr + EL3_TIMER) == 0xff)) {
if (!lp->fast_poll)
- printk(KERN_WARNING "%s: interrupt(s) dropped!\n", dev->name);
+ netdev_warn(dev, "interrupt(s) dropped!\n");
local_irq_save(flags);
el3_interrupt(dev->irq, dev);
@@ -719,7 +728,7 @@ static void media_check(unsigned long arg)
/* lp->lock guards the EL3 window. Window should always be 1 except
when the lock is held */
- spin_lock_irqsave(&lp->lock, flags);
+ spin_lock_irqsave(&lp->lock, flags);
EL3WINDOW(4);
media = inw(ioaddr+WN4_MEDIA) & 0xc810;
@@ -739,32 +748,30 @@ static void media_check(unsigned long arg)
if (media != lp->media_status) {
if ((media & lp->media_status & 0x8000) &&
((lp->media_status ^ media) & 0x0800))
- printk(KERN_INFO "%s: %s link beat\n", dev->name,
- (lp->media_status & 0x0800 ? "lost" : "found"));
+ netdev_info(dev, "%s link beat\n",
+ (lp->media_status & 0x0800 ? "lost" : "found"));
else if ((media & lp->media_status & 0x4000) &&
((lp->media_status ^ media) & 0x0010))
- printk(KERN_INFO "%s: coax cable %s\n", dev->name,
- (lp->media_status & 0x0010 ? "ok" : "problem"));
+ netdev_info(dev, "coax cable %s\n",
+ (lp->media_status & 0x0010 ? "ok" : "problem"));
if (dev->if_port == 0) {
if (media & 0x8000) {
if (media & 0x0800)
- printk(KERN_INFO "%s: flipped to 10baseT\n",
- dev->name);
+ netdev_info(dev, "flipped to 10baseT\n");
else
- tc589_set_xcvr(dev, 2);
+ tc589_set_xcvr(dev, 2);
} else if (media & 0x4000) {
if (media & 0x0010)
tc589_set_xcvr(dev, 1);
else
- printk(KERN_INFO "%s: flipped to 10base2\n",
- dev->name);
+ netdev_info(dev, "flipped to 10base2\n");
}
}
lp->media_status = media;
}
-
+
EL3WINDOW(1);
- spin_unlock_irqrestore(&lp->lock, flags);
+ spin_unlock_irqrestore(&lp->lock, flags);
reschedule:
lp->media.expires = jiffies + HZ;
@@ -778,7 +785,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
struct pcmcia_device *link = lp->p_dev;
if (pcmcia_dev_present(link)) {
- spin_lock_irqsave(&lp->lock, flags);
+ spin_lock_irqsave(&lp->lock, flags);
update_stats(dev);
spin_unlock_irqrestore(&lp->lock, flags);
}
@@ -790,21 +797,21 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
single-threaded if the device is active. This is expected to be a rare
operation, and it's simpler for the rest of the driver to assume that
window 1 is always valid rather than use a special window-state variable.
-
+
Caller must hold the lock for this
*/
static void update_stats(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
- pr_debug("%s: updating the statistics.\n", dev->name);
+ netdev_dbg(dev, "updating the statistics.\n");
/* Turn off statistics updates while reading. */
outw(StatsDisable, ioaddr + EL3_CMD);
/* Switch to the stats window, and read everything. */
EL3WINDOW(6);
- dev->stats.tx_carrier_errors += inb(ioaddr + 0);
+ dev->stats.tx_carrier_errors += inb(ioaddr + 0);
dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
- /* Multiple collisions. */ inb(ioaddr + 2);
+ /* Multiple collisions. */ inb(ioaddr + 2);
dev->stats.collisions += inb(ioaddr + 3);
dev->stats.tx_window_errors += inb(ioaddr + 4);
dev->stats.rx_fifo_errors += inb(ioaddr + 5);
@@ -813,7 +820,7 @@ static void update_stats(struct net_device *dev)
/* Tx deferrals */ inb(ioaddr + 8);
/* Rx octets */ inw(ioaddr + 10);
/* Tx octets */ inw(ioaddr + 12);
-
+
/* Back to window 1, and turn statistics back on. */
EL3WINDOW(1);
outw(StatsEnable, ioaddr + EL3_CMD);
@@ -824,9 +831,9 @@ static int el3_rx(struct net_device *dev)
unsigned int ioaddr = dev->base_addr;
int worklimit = 32;
short rx_status;
-
- pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
- dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
+
+ netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
worklimit > 0) {
worklimit--;
@@ -844,11 +851,11 @@ static int el3_rx(struct net_device *dev)
} else {
short pkt_len = rx_status & 0x7ff;
struct sk_buff *skb;
-
+
skb = dev_alloc_skb(pkt_len+5);
-
- pr_debug(" Receiving packet size %d status %4.4x.\n",
- pkt_len, rx_status);
+
+ netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
if (skb != NULL) {
skb_reserve(skb, 2);
insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
@@ -858,8 +865,8 @@ static int el3_rx(struct net_device *dev)
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
} else {
- pr_debug("%s: couldn't allocate a sk_buff of"
- " size %d.\n", dev->name, pkt_len);
+ netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
+ pkt_len);
dev->stats.rx_dropped++;
}
}
@@ -867,7 +874,7 @@ static int el3_rx(struct net_device *dev)
tc589_wait_for_completion(dev, RxDiscard);
}
if (worklimit == 0)
- printk(KERN_WARNING "%s: too much work in el3_rx!\n", dev->name);
+ netdev_warn(dev, "too much work in el3_rx!\n");
return 0;
}
@@ -898,17 +905,17 @@ static int el3_close(struct net_device *dev)
struct el3_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
unsigned int ioaddr = dev->base_addr;
-
+
dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
if (pcmcia_dev_present(link)) {
/* Turn off statistics ASAP. We update dev->stats below. */
outw(StatsDisable, ioaddr + EL3_CMD);
-
+
/* Disable the receiver and transmitter. */
outw(RxDisable, ioaddr + EL3_CMD);
outw(TxDisable, ioaddr + EL3_CMD);
-
+
if (dev->if_port == 2)
/* Turn off thinnet power. Green! */
outw(StopCoax, ioaddr + EL3_CMD);
@@ -917,12 +924,12 @@ static int el3_close(struct net_device *dev)
EL3WINDOW(4);
outw(0, ioaddr + WN4_MEDIA);
}
-
+
/* Switching back to window 0 disables the IRQ. */
EL3WINDOW(0);
/* But we explicitly zero the IRQ line select anyway. */
outw(0x0f00, ioaddr + WN0_IRQ);
-
+
/* Check if the card still exists */
if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
update_stats(dev);
@@ -931,7 +938,7 @@ static int el3_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
del_timer_sync(&lp->media);
-
+
return 0;
}
@@ -953,7 +960,7 @@ static struct pcmcia_driver tc589_driver = {
},
.probe = tc589_probe,
.remove = tc589_detach,
- .id_table = tc589_ids,
+ .id_table = tc589_ids,
.suspend = tc589_suspend,
.resume = tc589_resume,
};
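(The logging conversions in this file all follow one mechanical pattern: the netdev_* helpers derive the message prefix from the struct net_device, so the explicit dev->name argument disappears. A minimal sketch of the pattern, paraphrasing one of the hunks above rather than adding anything new:)

#include <linux/netdevice.h>

/* Illustration only: the same warning written both ways.  The netdev_*
 * helpers take the struct net_device and build the "driver ethX: "
 * prefix themselves, so dev->name is no longer passed explicitly. */
static void warn_fifo(struct net_device *dev, u16 fifo_diag)
{
	/* old style */
	printk(KERN_WARNING "%s: adapter failure, FIFO diagnostic register %04x.\n",
	       dev->name, fifo_diag);
	/* new style */
	netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
		    fifo_diag);
}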
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 59f6fa3..5b3dfb4 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -994,7 +994,7 @@ static void axnet_tx_timeout(struct net_device *dev)
{
long e8390_base = dev->base_addr;
struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- int txsr, isr, tickssofar = jiffies - dev->trans_start;
+ int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
unsigned long flags;
dev->stats.tx_errors++;
@@ -1499,8 +1499,6 @@ static void ei_receive(struct net_device *dev)
ei_local->current_page = next_frame;
outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
}
-
- return;
}
/**
@@ -1611,11 +1609,11 @@ static struct net_device_stats *get_stats(struct net_device *dev)
static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u32 crc;
- netdev_for_each_mc_addr(dmi, dev) {
- crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc(ETH_ALEN, ha->addr);
/*
* The 8390 uses the 6 most significant bits of the
* CRC to index the multicast table.
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 6580d78..7c27c50 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -878,7 +878,6 @@ static netdev_tx_t fjn_start_xmit(struct sk_buff *skb,
lp->sent = lp->tx_queue ;
lp->tx_queue = 0;
lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
lp->tx_started = 1;
netif_start_queue(dev);
} else {
@@ -1070,8 +1069,6 @@ static void fjn_rx(struct net_device *dev)
"%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i);
}
*/
-
- return;
} /* fjn_rx */
/*====================================================================*/
@@ -1184,11 +1181,11 @@ static void set_rx_mode(struct net_device *dev)
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- unsigned int bit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit >> 3] |= (1 << (bit & 7));
}
outb(2, ioaddr + RX_MODE); /* Use normal mode. */
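(The dev_mc_list -> netdev_hw_addr conversions above only swap the list type; the filter math is unchanged, and each multicast address is still hashed into a 64-bit logical address filter. A minimal sketch of that mapping, assuming the ether_crc_le() helper from <linux/crc32.h> and a filter stored as eight bytes:)

#include <linux/crc32.h>	/* ether_crc_le() */
#include <linux/etherdevice.h>	/* ETH_ALEN */

/* Illustration only: place one multicast address into a 64-bit
 * logical address filter held in mc_filter[8].  The top six bits of
 * the little-endian CRC select one of the 64 filter bits. */
static void set_filter_bit(u8 mc_filter[8], const u8 *addr)
{
	unsigned int bit = ether_crc_le(ETH_ALEN, addr) >> 26;

	mc_filter[bit >> 3] |= 1 << (bit & 7);
}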
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 2e42d80..67ee985 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -394,8 +394,6 @@ static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase)
/* 0x40 will release the card for use */
outb(0x40, dev->base_addr);
-
- return;
}
static struct pcmcia_device_id ibmtr_ids[] = {
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index d8a3b3c..9b63dec 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -895,7 +895,7 @@ static void mace_tx_timeout(struct net_device *dev)
#else /* #if RESET_ON_TIMEOUT */
printk("NOT resetting card\n");
#endif /* #if RESET_ON_TIMEOUT */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -937,8 +937,6 @@ static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
outb(skb->data[skb->len-1], ioaddr + AM2150_XMT);
}
- dev->trans_start = jiffies;
-
#if MULTI_TX
if (lp->tx_free_frames > 0)
netif_start_queue(dev);
@@ -1307,8 +1305,6 @@ static void update_stats(unsigned int ioaddr, struct net_device *dev)
lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo;
lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr;
/* lp->linux_stats.tx_window_errors; */
-
- return;
} /* update_stats */
/* ----------------------------------------------------------------------------
@@ -1467,7 +1463,7 @@ static void set_multicast_list(struct net_device *dev)
{
mace_private *lp = netdev_priv(dev);
int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
#ifdef PCMCIA_DEBUG
{
@@ -1487,8 +1483,8 @@ static void set_multicast_list(struct net_device *dev)
if (num_addrs > 0) {
/* Calculate multicast logical address filter */
memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
- netdev_for_each_mc_addr(dmi, dev) {
- memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(adr, ha->addr, ETHER_ADDR_LEN);
BuildLAF(lp->multicast_ladrf, adr);
}
}
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 59796e7..7b6fe89 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1228,7 +1228,6 @@ static void smc_hardware_send_packet(struct net_device * dev)
dev_kfree_skb_irq(skb);
dev->trans_start = jiffies;
netif_start_queue(dev);
- return;
}
/*====================================================================*/
@@ -1243,7 +1242,7 @@ static void smc_tx_timeout(struct net_device *dev)
dev->name, inw(ioaddr)&0xff, inw(ioaddr + 2));
dev->stats.tx_errors++;
smc_reset(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
smc->saved_skb = NULL;
netif_wake_queue(dev);
}
@@ -1358,7 +1357,6 @@ static void smc_tx_err(struct net_device * dev)
smc->packets_waiting--;
outw(saved_packet, ioaddr + PNR_ARR);
- return;
}
/*====================================================================*/
@@ -1578,8 +1576,6 @@ static void smc_rx(struct net_device *dev)
}
/* Let the MMU free the memory of this packet. */
outw(MC_RELEASE, ioaddr + MMU_CMD);
-
- return;
}
/*======================================================================
@@ -1610,10 +1606,10 @@ static void set_rx_mode(struct net_device *dev)
rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
else {
if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mc_addr;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(mc_addr, dev) {
- u_int position = ether_crc(6, mc_addr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u_int position = ether_crc(6, ha->addr);
multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
}
}
@@ -1629,8 +1625,6 @@ static void set_rx_mode(struct net_device *dev)
outw(rx_cfg_setting, ioaddr + RCR);
SMC_SELECT_BANK(2);
spin_unlock_irqrestore(&smc->lock, flags);
-
- return;
}
/*======================================================================
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 5e6b62b..b6c36448 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -1265,7 +1265,7 @@ xirc2ps_tx_timeout_task(struct work_struct *work)
struct net_device *dev = local->dev;
/* reset the card */
do_reset(dev,1);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1328,7 +1328,6 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
PutByte(XIRCREG_CR, TransmitPacket|EnableIntr);
dev_kfree_skb (skb);
- dev->trans_start = jiffies;
dev->stats.tx_bytes += pktlen;
netif_start_queue(dev);
return NETDEV_TX_OK;
@@ -1368,7 +1367,7 @@ static void set_addresses(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
local_info_t *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct set_address_info sa_info;
int i;
@@ -1383,10 +1382,10 @@ static void set_addresses(struct net_device *dev)
set_address(&sa_info, dev->dev_addr);
i = 0;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (i++ == 9)
break;
- set_address(&sa_info, dmi->dmi_addr);
+ set_address(&sa_info, ha->addr);
}
while (i++ < 9)
set_address(&sa_info, dev->dev_addr);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 084d78d..c200c282 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -448,7 +448,7 @@ static void pcnet32_netif_stop(struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
napi_disable(&lp->napi);
netif_tx_disable(dev);
}
@@ -647,7 +647,6 @@ free_new_rx_ring:
(1 << size),
new_rx_ring,
new_ring_dma_addr);
- return;
}
static void pcnet32_purge_rx_ring(struct net_device *dev)
@@ -1215,7 +1214,6 @@ static void pcnet32_rx_entry(struct net_device *dev,
skb->protocol = eth_type_trans(skb, dev);
netif_receive_skb(skb);
dev->stats.rx_packets++;
- return;
}
static int pcnet32_rx(struct net_device *dev, int budget)
@@ -2398,7 +2396,7 @@ static void pcnet32_tx_timeout(struct net_device *dev)
}
pcnet32_restart(dev, CSR0_NORMAL);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
spin_unlock_irqrestore(&lp->lock, flags);
@@ -2449,8 +2447,6 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
/* Trigger an immediate send poll. */
lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
- dev->trans_start = jiffies;
-
if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
lp->tx_full = 1;
netif_stop_queue(dev);
@@ -2590,7 +2586,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
struct pcnet32_private *lp = netdev_priv(dev);
volatile struct pcnet32_init_block *ib = lp->init_block;
volatile __le16 *mcast_table = (__le16 *)ib->filter;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned long ioaddr = dev->base_addr;
char *addrs;
int i;
@@ -2611,8 +2607,8 @@ static void pcnet32_load_multicast(struct net_device *dev)
ib->filter[1] = 0;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
@@ -2625,7 +2621,6 @@ static void pcnet32_load_multicast(struct net_device *dev)
for (i = 0; i < 4; i++)
lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
le16_to_cpu(mcast_table[i]));
- return;
}
/*
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 4fed95e..c128156 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -130,3 +130,11 @@ static void __exit bcm63xx_phy_exit(void)
module_init(bcm63xx_phy_init);
module_exit(bcm63xx_phy_exit);
+
+static struct mdio_device_id bcm63xx_tbl[] = {
+ { 0x00406000, 0xfffffc00 },
+ { 0x002bdc00, 0xfffffc00 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, bcm63xx_tbl);
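(Each entry in these new tables pairs a 32-bit PHY ID with a mask, and a device matches when the masked IDs are equal. A minimal sketch of that comparison; the struct and field names mirror struct mdio_device_id from <linux/mod_devicetable.h>, but the helper itself is only an illustration:)

#include <linux/types.h>
#include <linux/mod_devicetable.h>	/* struct mdio_device_id */

/* Illustration only: walk a { phy_id, phy_id_mask } table, terminated
 * by an all-zero entry, and report whether a probed PHY ID is covered. */
static bool phy_id_in_table(u32 phy_id, const struct mdio_device_id *tbl)
{
	for (; tbl->phy_id || tbl->phy_id_mask; tbl++)
		if ((phy_id & tbl->phy_id_mask) == (tbl->phy_id & tbl->phy_id_mask))
			return true;
	return false;
}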
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f482fc4..cecdbbd 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -908,3 +908,19 @@ static void __exit broadcom_exit(void)
module_init(broadcom_init);
module_exit(broadcom_exit);
+
+static struct mdio_device_id broadcom_tbl[] = {
+ { 0x00206070, 0xfffffff0 },
+ { 0x002060e0, 0xfffffff0 },
+ { 0x002060c0, 0xfffffff0 },
+ { 0x002060b0, 0xfffffff0 },
+ { 0x0143bca0, 0xfffffff0 },
+ { 0x0143bcb0, 0xfffffff0 },
+ { PHY_ID_BCM50610, 0xfffffff0 },
+ { PHY_ID_BCM50610M, 0xfffffff0 },
+ { PHY_ID_BCM57780, 0xfffffff0 },
+ { PHY_ID_BCMAC131, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, broadcom_tbl);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 92282b3..1a325d6 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -158,3 +158,11 @@ static void __exit cicada_exit(void)
module_init(cicada_init);
module_exit(cicada_exit);
+
+static struct mdio_device_id cicada_tbl[] = {
+ { 0x000fc410, 0x000ffff0 },
+ { 0x000fc440, 0x000fffc0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, cicada_tbl);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index c722e95..29c1761 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -218,3 +218,12 @@ static void __exit davicom_exit(void)
module_init(davicom_init);
module_exit(davicom_exit);
+
+static struct mdio_device_id davicom_tbl[] = {
+ { 0x0181b880, 0x0ffffff0 },
+ { 0x0181b8a0, 0x0ffffff0 },
+ { 0x00181b80, 0x0ffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, davicom_tbl);
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index 7712ebe..13995f5 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -110,3 +110,10 @@ static void __exit et1011c_exit(void)
module_init(et1011c_init);
module_exit(et1011c_exit);
+
+static struct mdio_device_id et1011c_tbl[] = {
+ { 0x0282f014, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, et1011c_tbl);
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 904208b..439adaf 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -131,3 +131,10 @@ static void __exit ip175c_exit(void)
module_init(ip175c_init);
module_exit(ip175c_exit);
+
+static struct mdio_device_id icplus_tbl[] = {
+ { 0x02430d80, 0x0ffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, icplus_tbl);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 057ecaa..8ee929b 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -173,3 +173,11 @@ static void __exit lxt_exit(void)
module_init(lxt_init);
module_exit(lxt_exit);
+
+static struct mdio_device_id lxt_tbl[] = {
+ { 0x78100000, 0xfffffff0 },
+ { 0x001378e0, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, lxt_tbl);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 64c7fbe..78b74e8 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -648,3 +648,16 @@ static void __exit marvell_exit(void)
module_init(marvell_init);
module_exit(marvell_exit);
+
+static struct mdio_device_id marvell_tbl[] = {
+ { 0x01410c60, 0xfffffff0 },
+ { 0x01410c90, 0xfffffff0 },
+ { 0x01410cc0, 0xfffffff0 },
+ { 0x01410e10, 0xfffffff0 },
+ { 0x01410cb0, 0xfffffff0 },
+ { 0x01410cd0, 0xfffffff0 },
+ { 0x01410e30, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, marvell_tbl);
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 19e70d7..6539189 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -22,8 +22,13 @@
#include <linux/types.h>
#include <linux/delay.h>
-#define MDIO_READ 1
-#define MDIO_WRITE 0
+#define MDIO_READ 2
+#define MDIO_WRITE 1
+
+#define MDIO_C45 (1<<15)
+#define MDIO_C45_ADDR (MDIO_C45 | 0)
+#define MDIO_C45_READ (MDIO_C45 | 3)
+#define MDIO_C45_WRITE (MDIO_C45 | 1)
#define MDIO_SETUP_TIME 10
#define MDIO_HOLD_TIME 10
@@ -89,7 +94,7 @@ static u16 mdiobb_get_num(struct mdiobb_ctrl *ctrl, int bits)
/* Utility to send the preamble, address, and
* register (common to read and write).
*/
-static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg)
+static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int op, u8 phy, u8 reg)
{
const struct mdiobb_ops *ops = ctrl->ops;
int i;
@@ -108,23 +113,56 @@ static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg)
for (i = 0; i < 32; i++)
mdiobb_send_bit(ctrl, 1);
- /* send the start bit (01) and the read opcode (10) or write (10) */
+ /* send the start bit (01) and the read opcode (10) or write (01).
+ Clause 45 operation uses 00 for the start and 11, 01 for
+ read/write */
mdiobb_send_bit(ctrl, 0);
- mdiobb_send_bit(ctrl, 1);
- mdiobb_send_bit(ctrl, read);
- mdiobb_send_bit(ctrl, !read);
+ if (op & MDIO_C45)
+ mdiobb_send_bit(ctrl, 0);
+ else
+ mdiobb_send_bit(ctrl, 1);
+ mdiobb_send_bit(ctrl, (op >> 1) & 1);
+ mdiobb_send_bit(ctrl, (op >> 0) & 1);
mdiobb_send_num(ctrl, phy, 5);
mdiobb_send_num(ctrl, reg, 5);
}
+/* In clause 45 mode all commands are prefixed by MDIO_C45_ADDR to specify the
+ lower 16 bits of the 21 bit address. This transfer is done identically to an
+ MDIO_WRITE except for a different code. To enable clause 45 mode, OR
+ MII_ADDR_C45 into the address. Theoretically clause 45 and normal devices
+ can exist on the same bus. Normal devices should ignore the MDIO_C45_ADDR
+ phase. */
+static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
+{
+ unsigned int dev_addr = (addr >> 16) & 0x1F;
+ unsigned int reg = addr & 0xFFFF;
+ mdiobb_cmd(ctrl, MDIO_C45_ADDR, phy, dev_addr);
+
+ /* send the turnaround (10) */
+ mdiobb_send_bit(ctrl, 1);
+ mdiobb_send_bit(ctrl, 0);
+
+ mdiobb_send_num(ctrl, reg, 16);
+
+ ctrl->ops->set_mdio_dir(ctrl, 0);
+ mdiobb_get_bit(ctrl);
+
+ return dev_addr;
+}
static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
{
struct mdiobb_ctrl *ctrl = bus->priv;
int ret, i;
- mdiobb_cmd(ctrl, MDIO_READ, phy, reg);
+ if (reg & MII_ADDR_C45) {
+ reg = mdiobb_cmd_addr(ctrl, phy, reg);
+ mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg);
+ } else
+ mdiobb_cmd(ctrl, MDIO_READ, phy, reg);
+
ctrl->ops->set_mdio_dir(ctrl, 0);
/* check the turnaround bit: the PHY should be driving it to zero */
@@ -147,7 +185,11 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
struct mdiobb_ctrl *ctrl = bus->priv;
- mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg);
+ if (reg & MII_ADDR_C45) {
+ reg = mdiobb_cmd_addr(ctrl, phy, reg);
+ mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg);
+ } else
+ mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg);
/* send the turnaround (10) */
mdiobb_send_bit(ctrl, 1);
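(With this change a caller signals a clause 45 access by folding the MMD/device address into the register argument, which is why mdiobus_read()/mdiobus_write() below grow a u32 regnum; mdiobb_cmd_addr() then peels the pieces back apart. A minimal sketch of that encoding, assuming the MII_ADDR_C45 flag from the MDIO header; the helper is illustrative, not part of the patch:)

#include <linux/types.h>
#include <linux/mdio.h>		/* MII_ADDR_C45 */

/* Illustration only: build the 32-bit "register number" a clause 45
 * caller passes down.  Bits 20:16 carry the MMD/device address and
 * bits 15:0 the register within that device; MII_ADDR_C45 marks the
 * access so the bit-banging code emits the 00 start pattern and the
 * extra MDIO_C45_ADDR cycle. */
static u32 c45_regad(unsigned int devad, unsigned int regnum)
{
	return MII_ADDR_C45 | ((devad & 0x1f) << 16) | (regnum & 0xffff);
}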
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index e17b702..6a6b819 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -208,7 +208,7 @@ EXPORT_SYMBOL(mdiobus_scan);
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
*/
-int mdiobus_read(struct mii_bus *bus, int addr, u16 regnum)
+int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
{
int retval;
@@ -233,7 +233,7 @@ EXPORT_SYMBOL(mdiobus_read);
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
*/
-int mdiobus_write(struct mii_bus *bus, int addr, u16 regnum, u16 val)
+int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
{
int err;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index e67691d..0692f75 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -103,3 +103,12 @@ module_exit(ksphy_exit);
MODULE_DESCRIPTION("Micrel PHY driver");
MODULE_AUTHOR("David J. Choi");
MODULE_LICENSE("GPL");
+
+static struct mdio_device_id micrel_tbl[] = {
+ { PHY_ID_KSZ9021, 0x000fff10 },
+ { PHY_ID_VSC8201, 0x00fffff0 },
+ { PHY_ID_KS8001, 0x00fffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, micrel_tbl);
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 6c636eb..a73ba0b 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -97,7 +97,6 @@ static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
phy_write(phydev, NS_EXP_MEM_DATA, 0x0008);
phy_write(phydev, MII_BMCR, (bmcr & ~BMCR_PDOWN));
phy_write(phydev, LED_CTRL_REG, mode);
- return;
}
static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
@@ -110,8 +109,6 @@ static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
printk(KERN_DEBUG "DP83865 PHY: 10BASE-T HDX loopback %s\n",
(ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
-
- return;
}
static int ns_config_init(struct phy_device *phydev)
@@ -153,3 +150,10 @@ MODULE_LICENSE("GPL");
module_init(ns_init);
module_exit(ns_exit);
+
+static struct mdio_device_id ns_tbl[] = {
+ { DP83865_PHY_ID, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, ns_tbl);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index db17945..1a99bb2 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -149,6 +149,7 @@ EXPORT_SYMBOL(phy_scan_fixups);
struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
{
struct phy_device *dev;
+
/* We allocate the device, and initialize the
* default values */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -179,6 +180,17 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
mutex_init(&dev->lock);
INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
+ /* Request the appropriate module unconditionally; don't
+ bother trying to do so only if it isn't already loaded,
+ because that gets complicated. A hotplug event would have
+ done an unconditional modprobe anyway.
+ We don't do normal hotplug because it won't work for MDIO
+ -- because it relies on the device staying around for long
+ enough for the driver to get loaded. With MDIO, the NIC
+ driver will get bored and give up as soon as it finds that
+ there's no driver _already_ loaded. */
+ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
+
return dev;
}
EXPORT_SYMBOL(phy_device_create);
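(Tying the two halves of this series together: the MODULE_DEVICE_TABLE(mdio, ...) entries above give each PHY driver module aliases, and the request_module() call here asks for a module whose alias matches the ID just read off the bus. The exact MDIO_ID_FMT/MDIO_ID_ARGS expansion lives in the mdio header; the loop below is only a rough illustration of the convention of rendering the ID bit-by-bit, which is what lets a driver's phy_id_mask become wildcard characters in its alias:)

#include <linux/kernel.h>
#include <linux/types.h>

/* Illustration only: render a 32-bit PHY ID as "mdio:" plus its 32
 * bits, most significant first (38 bytes including the NUL). */
static void phy_id_to_alias(u32 phy_id, char buf[38])
{
	int i, n;

	n = sprintf(buf, "mdio:");
	for (i = 31; i >= 0; i--)
		buf[n++] = (phy_id >> i) & 1 ? '1' : '0';
	buf[n] = '\0';
}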
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index f6e190f..6736b23 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -137,3 +137,10 @@ static void __exit qs6612_exit(void)
module_init(qs6612_init);
module_exit(qs6612_exit);
+
+static struct mdio_device_id qs6612_tbl[] = {
+ { 0x00181440, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, qs6612_tbl);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index a052a67..f567c0e 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -78,3 +78,10 @@ static void __exit realtek_exit(void)
module_init(realtek_init);
module_exit(realtek_exit);
+
+static struct mdio_device_id realtek_tbl[] = {
+ { 0x001cc912, 0x001fffff },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, realtek_tbl);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index ed2644a..78fa988 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -253,3 +253,14 @@ MODULE_LICENSE("GPL");
module_init(smsc_init);
module_exit(smsc_exit);
+
+static struct mdio_device_id smsc_tbl[] = {
+ { 0x0007c0a0, 0xfffffff0 },
+ { 0x0007c0b0, 0xfffffff0 },
+ { 0x0007c0c0, 0xfffffff0 },
+ { 0x0007c0d0, 0xfffffff0 },
+ { 0x0007c0f0, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, smsc_tbl);
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 6bdb0d5..7229009 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -132,6 +132,14 @@ static void __exit ste10Xp_exit(void)
module_init(ste10Xp_init);
module_exit(ste10Xp_exit);
+static struct mdio_device_id ste10Xp_tbl[] = {
+ { STE101P_PHY_ID, 0xfffffff0 },
+ { STE100P_PHY_ID, 0xffffffff },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, ste10Xp_tbl);
+
MODULE_DESCRIPTION("STMicroelectronics STe10Xp PHY driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index dd3b244..45cce50 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -191,3 +191,11 @@ static void __exit vsc82xx_exit(void)
module_init(vsc82xx_init);
module_exit(vsc82xx_exit);
+
+static struct mdio_device_id vitesse_tbl[] = {
+ { PHY_ID_VSC8244, 0x000fffc0 },
+ { PHY_ID_VSC8221, 0x000ffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, vitesse_tbl);
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 9a2103a..ec0349e 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -979,7 +979,6 @@ plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
printk(KERN_DEBUG "%s: send request\n", dev->name);
spin_lock_irq(&nl->lock);
- dev->trans_start = jiffies;
snd->skb = skb;
snd->length.h = skb->len;
snd->state = PLIP_PK_TRIGGER;
@@ -1192,8 +1191,6 @@ plip_wakeup(void *handle)
/* Clear the data port. */
write_data (dev, 0x00);
}
-
- return;
}
static int
@@ -1309,7 +1306,6 @@ err_parport_unregister:
parport_unregister_device(nl->pardev);
err_free_dev:
free_netdev(dev);
- return;
}
/* plip_detach() is called (by the parport code) when a port is
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 8518a2e..5441688 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2174,6 +2174,24 @@ int ppp_unit_number(struct ppp_channel *chan)
}
/*
+ * Return the PPP device interface name of a channel.
+ */
+char *ppp_dev_name(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+ char *name = NULL;
+
+ if (pch) {
+ read_lock_bh(&pch->upl);
+ if (pch->ppp && pch->ppp->dev)
+ name = pch->ppp->dev->name;
+ read_unlock_bh(&pch->upl);
+ }
+ return name;
+}
+
+
+/*
* Disconnect a channel from the generic layer.
* This must be called in process context.
*/
@@ -2901,6 +2919,7 @@ EXPORT_SYMBOL(ppp_register_channel);
EXPORT_SYMBOL(ppp_unregister_channel);
EXPORT_SYMBOL(ppp_channel_index);
EXPORT_SYMBOL(ppp_unit_number);
+EXPORT_SYMBOL(ppp_dev_name);
EXPORT_SYMBOL(ppp_input);
EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
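(ppp_dev_name() gives channel drivers a way to name the PPP interface a channel is bound to. A minimal usage sketch; the caller below is hypothetical and not part of this patch:)

#include <linux/kernel.h>
#include <linux/ppp_channel.h>

/* Illustration only: log which PPP interface a channel is attached to.
 * ppp_dev_name() returns NULL while the channel is not connected to a
 * unit, so the caller must handle that case. */
static void report_ppp_unit(struct ppp_channel *chan)
{
	char *name = ppp_dev_name(chan);

	pr_info("channel is attached to %s\n", name ? name : "(no unit)");
}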
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index cdd11ba..b1b93ff 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -89,7 +89,6 @@
#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1)
-static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
@@ -258,7 +257,7 @@ static inline struct pppox_sock *get_item_by_addr(struct net *net,
dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
if (dev) {
ifindex = dev->ifindex;
- pn = net_generic(net, pppoe_net_id);
+ pn = pppoe_pernet(net);
pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
sp->sa_addr.pppoe.remote, ifindex);
}
@@ -290,12 +289,6 @@ static void pppoe_flush_dev(struct net_device *dev)
struct pppoe_net *pn;
int i;
- BUG_ON(dev == NULL);
-
- pn = pppoe_pernet(dev_net(dev));
- if (!pn) /* already freed */
- return;
-
write_lock_bh(&pn->hash_lock);
for (i = 0; i < PPPOE_HASH_SIZE; i++) {
struct pppox_sock *po = pn->hash_table[i];
@@ -368,7 +361,7 @@ static int pppoe_device_event(struct notifier_block *this,
default:
break;
- };
+ }
return NOTIFY_DONE;
}
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
deleted file mode 100644
index 449a982..0000000
--- a/drivers/net/pppol2tp.c
+++ /dev/null
@@ -1,2680 +0,0 @@
-/*****************************************************************************
- * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
- *
- * PPPoX --- Generic PPP encapsulation socket family
- * PPPoL2TP --- PPP over L2TP (RFC 2661)
- *
- * Version: 1.0.0
- *
- * Authors: Martijn van Oosterhout <kleptog@svana.org>
- * James Chapman (jchapman@katalix.com)
- * Contributors:
- * Michal Ostrowski <mostrows@speakeasy.net>
- * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
- * David S. Miller (davem@redhat.com)
- *
- * License:
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-/* This driver handles only L2TP data frames; control frames are handled by a
- * userspace application.
- *
- * To send data in an L2TP session, userspace opens a PPPoL2TP socket and
- * attaches it to a bound UDP socket with local tunnel_id / session_id and
- * peer tunnel_id / session_id set. Data can then be sent or received using
- * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
- * can be read or modified using ioctl() or [gs]etsockopt() calls.
- *
- * When a PPPoL2TP socket is connected with local and peer session_id values
- * zero, the socket is treated as a special tunnel management socket.
- *
- * Here's example userspace code to create a socket for sending/receiving data
- * over an L2TP session:-
- *
- * struct sockaddr_pppol2tp sax;
- * int fd;
- * int session_fd;
- *
- * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
- *
- * sax.sa_family = AF_PPPOX;
- * sax.sa_protocol = PX_PROTO_OL2TP;
- * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket
- * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
- * sax.pppol2tp.addr.sin_port = addr->sin_port;
- * sax.pppol2tp.addr.sin_family = AF_INET;
- * sax.pppol2tp.s_tunnel = tunnel_id;
- * sax.pppol2tp.s_session = session_id;
- * sax.pppol2tp.d_tunnel = peer_tunnel_id;
- * sax.pppol2tp.d_session = peer_session_id;
- *
- * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
- *
- * A pppd plugin that allows PPP traffic to be carried over L2TP using
- * this driver is available from the OpenL2TP project at
- * http://openl2tp.sourceforge.net.
- */
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/list.h>
-#include <asm/uaccess.h>
-
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/jiffies.h>
-
-#include <linux/netdevice.h>
-#include <linux/net.h>
-#include <linux/inetdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/if_pppox.h>
-#include <linux/if_pppol2tp.h>
-#include <net/sock.h>
-#include <linux/ppp_channel.h>
-#include <linux/ppp_defs.h>
-#include <linux/if_ppp.h>
-#include <linux/file.h>
-#include <linux/hash.h>
-#include <linux/sort.h>
-#include <linux/proc_fs.h>
-#include <linux/nsproxy.h>
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
-#include <net/dst.h>
-#include <net/ip.h>
-#include <net/udp.h>
-#include <net/xfrm.h>
-
-#include <asm/byteorder.h>
-#include <asm/atomic.h>
-
-
-#define PPPOL2TP_DRV_VERSION "V1.0"
-
-/* L2TP header constants */
-#define L2TP_HDRFLAG_T 0x8000
-#define L2TP_HDRFLAG_L 0x4000
-#define L2TP_HDRFLAG_S 0x0800
-#define L2TP_HDRFLAG_O 0x0200
-#define L2TP_HDRFLAG_P 0x0100
-
-#define L2TP_HDR_VER_MASK 0x000F
-#define L2TP_HDR_VER 0x0002
-
-/* Space for UDP, L2TP and PPP headers */
-#define PPPOL2TP_HEADER_OVERHEAD 40
-
-/* Just some random numbers */
-#define L2TP_TUNNEL_MAGIC 0x42114DDA
-#define L2TP_SESSION_MAGIC 0x0C04EB7D
-
-#define PPPOL2TP_HASH_BITS 4
-#define PPPOL2TP_HASH_SIZE (1 << PPPOL2TP_HASH_BITS)
-
-/* Default trace flags */
-#define PPPOL2TP_DEFAULT_DEBUG_FLAGS 0
-
-#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
- do { \
- if ((_mask) & (_type)) \
- printk(_lvl "PPPOL2TP: " _fmt, ##args); \
- } while(0)
-
-/* Number of bytes to build transmit L2TP headers.
- * Unfortunately the size is different depending on whether sequence numbers
- * are enabled.
- */
-#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10
-#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6
-
-struct pppol2tp_tunnel;
-
-/* Describes a session. It is the sk_user_data field in the PPPoL2TP
- * socket. Contains information to determine incoming packets and transmit
- * outgoing ones.
- */
-struct pppol2tp_session
-{
- int magic; /* should be
- * L2TP_SESSION_MAGIC */
- int owner; /* pid that opened the socket */
-
- struct sock *sock; /* Pointer to the session
- * PPPoX socket */
- struct sock *tunnel_sock; /* Pointer to the tunnel UDP
- * socket */
-
- struct pppol2tp_addr tunnel_addr; /* Description of tunnel */
-
- struct pppol2tp_tunnel *tunnel; /* back pointer to tunnel
- * context */
-
- char name[20]; /* "sess xxxxx/yyyyy", where
- * x=tunnel_id, y=session_id */
- int mtu;
- int mru;
- int flags; /* accessed by PPPIOCGFLAGS.
- * Unused. */
- unsigned recv_seq:1; /* expect receive packets with
- * sequence numbers? */
- unsigned send_seq:1; /* send packets with sequence
- * numbers? */
- unsigned lns_mode:1; /* behave as LNS? LAC enables
- * sequence numbers under
- * control of LNS. */
- int debug; /* bitmask of debug message
- * categories */
- int reorder_timeout; /* configured reorder timeout
- * (in jiffies) */
- u16 nr; /* session NR state (receive) */
- u16 ns; /* session NR state (send) */
- struct sk_buff_head reorder_q; /* receive reorder queue */
- struct pppol2tp_ioc_stats stats;
- struct hlist_node hlist; /* Hash list node */
-};
-
-/* The sk_user_data field of the tunnel's UDP socket. It contains info to track
- * all the associated sessions so incoming packets can be sorted out
- */
-struct pppol2tp_tunnel
-{
- int magic; /* Should be L2TP_TUNNEL_MAGIC */
- rwlock_t hlist_lock; /* protect session_hlist */
- struct hlist_head session_hlist[PPPOL2TP_HASH_SIZE];
- /* hashed list of sessions,
- * hashed by id */
- int debug; /* bitmask of debug message
- * categories */
- char name[12]; /* "tunl xxxxx" */
- struct pppol2tp_ioc_stats stats;
-
- void (*old_sk_destruct)(struct sock *);
-
- struct sock *sock; /* Parent socket */
- struct list_head list; /* Keep a list of all open
- * prepared sockets */
- struct net *pppol2tp_net; /* the net we belong to */
-
- atomic_t ref_count;
-};
-
-/* Private data stored for received packets in the skb.
- */
-struct pppol2tp_skb_cb {
- u16 ns;
- u16 nr;
- u16 has_seq;
- u16 length;
- unsigned long expires;
-};
-
-#define PPPOL2TP_SKB_CB(skb) ((struct pppol2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
-
-static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
-static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel);
-
-static atomic_t pppol2tp_tunnel_count;
-static atomic_t pppol2tp_session_count;
-static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL };
-static const struct proto_ops pppol2tp_ops;
-
-/* per-net private data for this module */
-static int pppol2tp_net_id __read_mostly;
-struct pppol2tp_net {
- struct list_head pppol2tp_tunnel_list;
- rwlock_t pppol2tp_tunnel_list_lock;
-};
-
-static inline struct pppol2tp_net *pppol2tp_pernet(struct net *net)
-{
- BUG_ON(!net);
-
- return net_generic(net, pppol2tp_net_id);
-}
-
-/* Helpers to obtain tunnel/session contexts from sockets.
- */
-static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk)
-{
- struct pppol2tp_session *session;
-
- if (sk == NULL)
- return NULL;
-
- sock_hold(sk);
- session = (struct pppol2tp_session *)(sk->sk_user_data);
- if (session == NULL) {
- sock_put(sk);
- goto out;
- }
-
- BUG_ON(session->magic != L2TP_SESSION_MAGIC);
-out:
- return session;
-}
-
-static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk)
-{
- struct pppol2tp_tunnel *tunnel;
-
- if (sk == NULL)
- return NULL;
-
- sock_hold(sk);
- tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data);
- if (tunnel == NULL) {
- sock_put(sk);
- goto out;
- }
-
- BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-out:
- return tunnel;
-}
-
-/* Tunnel reference counts. Incremented per session that is added to
- * the tunnel.
- */
-static inline void pppol2tp_tunnel_inc_refcount(struct pppol2tp_tunnel *tunnel)
-{
- atomic_inc(&tunnel->ref_count);
-}
-
-static inline void pppol2tp_tunnel_dec_refcount(struct pppol2tp_tunnel *tunnel)
-{
- if (atomic_dec_and_test(&tunnel->ref_count))
- pppol2tp_tunnel_free(tunnel);
-}
-
-/* Session hash list.
- * The session_id SHOULD be random according to RFC2661, but several
- * L2TP implementations (Cisco and Microsoft) use incrementing
- * session_ids. So we do a real hash on the session_id, rather than a
- * simple bitmask.
- */
-static inline struct hlist_head *
-pppol2tp_session_id_hash(struct pppol2tp_tunnel *tunnel, u16 session_id)
-{
- unsigned long hash_val = (unsigned long) session_id;
- return &tunnel->session_hlist[hash_long(hash_val, PPPOL2TP_HASH_BITS)];
-}
-
-/* Lookup a session by id
- */
-static struct pppol2tp_session *
-pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id)
-{
- struct hlist_head *session_list =
- pppol2tp_session_id_hash(tunnel, session_id);
- struct pppol2tp_session *session;
- struct hlist_node *walk;
-
- read_lock_bh(&tunnel->hlist_lock);
- hlist_for_each_entry(session, walk, session_list, hlist) {
- if (session->tunnel_addr.s_session == session_id) {
- read_unlock_bh(&tunnel->hlist_lock);
- return session;
- }
- }
- read_unlock_bh(&tunnel->hlist_lock);
-
- return NULL;
-}
-
-/* Lookup a tunnel by id
- */
-static struct pppol2tp_tunnel *pppol2tp_tunnel_find(struct net *net, u16 tunnel_id)
-{
- struct pppol2tp_tunnel *tunnel;
- struct pppol2tp_net *pn = pppol2tp_pernet(net);
-
- read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
- list_for_each_entry(tunnel, &pn->pppol2tp_tunnel_list, list) {
- if (tunnel->stats.tunnel_id == tunnel_id) {
- read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
- return tunnel;
- }
- }
- read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
-
- return NULL;
-}
-
-/*****************************************************************************
- * Receive data handling
- *****************************************************************************/
-
-/* Queue a skb in order. We come here only if the skb has an L2TP sequence
- * number.
- */
-static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
-{
- struct sk_buff *skbp;
- struct sk_buff *tmp;
- u16 ns = PPPOL2TP_SKB_CB(skb)->ns;
-
- spin_lock_bh(&session->reorder_q.lock);
- skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
- if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
- __skb_queue_before(&session->reorder_q, skbp, skb);
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
- session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns,
- skb_queue_len(&session->reorder_q));
- session->stats.rx_oos_packets++;
- goto out;
- }
- }
-
- __skb_queue_tail(&session->reorder_q, skb);
-
-out:
- spin_unlock_bh(&session->reorder_q.lock);
-}
-
-/* Dequeue a single skb.
- */
-static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
-{
- struct pppol2tp_tunnel *tunnel = session->tunnel;
- int length = PPPOL2TP_SKB_CB(skb)->length;
- struct sock *session_sock = NULL;
-
- /* We're about to requeue the skb, so return resources
- * to its current owner (a socket receive buffer).
- */
- skb_orphan(skb);
-
- tunnel->stats.rx_packets++;
- tunnel->stats.rx_bytes += length;
- session->stats.rx_packets++;
- session->stats.rx_bytes += length;
-
- if (PPPOL2TP_SKB_CB(skb)->has_seq) {
- /* Bump our Nr */
- session->nr++;
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: updated nr to %hu\n", session->name, session->nr);
- }
-
- /* If the socket is bound, send it in to PPP's input queue. Otherwise
- * queue it on the session socket.
- */
- session_sock = session->sock;
- if (session_sock->sk_state & PPPOX_BOUND) {
- struct pppox_sock *po;
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: recv %d byte data frame, passing to ppp\n",
- session->name, length);
-
- /* We need to forget all info related to the L2TP packet
- * gathered in the skb as we are going to reuse the same
- * skb for the inner packet.
- * Namely we need to:
- * - reset xfrm (IPSec) information as it applies to
- * the outer L2TP packet and not to the inner one
- * - release the dst to force a route lookup on the inner
- * IP packet since skb->dst currently points to the dst
- * of the UDP tunnel
- * - reset netfilter information as it doesn't apply
- * to the inner packet either
- */
- secpath_reset(skb);
- skb_dst_drop(skb);
- nf_reset(skb);
-
- po = pppox_sk(session_sock);
- ppp_input(&po->chan, skb);
- } else {
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
- "%s: socket not bound\n", session->name);
-
- /* Not bound. Nothing we can do, so discard. */
- session->stats.rx_errors++;
- kfree_skb(skb);
- }
-
- sock_put(session->sock);
-}
-
-/* Dequeue skbs from the session's reorder_q, subject to packet order.
- * Skbs that have been in the queue for too long are simply discarded.
- */
-static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
-{
- struct sk_buff *skb;
- struct sk_buff *tmp;
-
- /* If the pkt at the head of the queue has the nr that we
- * expect to send up next, dequeue it and any other
- * in-sequence packets behind it.
- */
- spin_lock_bh(&session->reorder_q.lock);
- skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
- if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) {
- session->stats.rx_seq_discards++;
- session->stats.rx_errors++;
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: oos pkt %hu len %d discarded (too old), "
- "waiting for %hu, reorder_q_len=%d\n",
- session->name, PPPOL2TP_SKB_CB(skb)->ns,
- PPPOL2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
- __skb_unlink(skb, &session->reorder_q);
- kfree_skb(skb);
- sock_put(session->sock);
- continue;
- }
-
- if (PPPOL2TP_SKB_CB(skb)->has_seq) {
- if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: holding oos pkt %hu len %d, "
- "waiting for %hu, reorder_q_len=%d\n",
- session->name, PPPOL2TP_SKB_CB(skb)->ns,
- PPPOL2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
- goto out;
- }
- }
- __skb_unlink(skb, &session->reorder_q);
-
- /* Process the skb. We release the queue lock while we
- * do so to let other contexts process the queue.
- */
- spin_unlock_bh(&session->reorder_q.lock);
- pppol2tp_recv_dequeue_skb(session, skb);
- spin_lock_bh(&session->reorder_q.lock);
- }
-
-out:
- spin_unlock_bh(&session->reorder_q.lock);
-}
-
-static inline int pppol2tp_verify_udp_checksum(struct sock *sk,
- struct sk_buff *skb)
-{
- struct udphdr *uh = udp_hdr(skb);
- u16 ulen = ntohs(uh->len);
- struct inet_sock *inet;
- __wsum psum;
-
- if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check)
- return 0;
-
- inet = inet_sk(sk);
- psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
- IPPROTO_UDP, 0);
-
- if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
- !csum_fold(csum_add(psum, skb->csum)))
- return 0;
-
- skb->csum = psum;
-
- return __skb_checksum_complete(skb);
-}
-
-/* Internal receive frame. Do the real work of receiving an L2TP data frame
- * here. The skb is not on a list when we get here.
- * Returns 0 if the packet was a data packet and was successfully passed on.
- * Returns 1 if the packet was not a good data packet and could not be
- * forwarded. All such packets are passed up to userspace to deal with.
- */
-static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
-{
- struct pppol2tp_session *session = NULL;
- struct pppol2tp_tunnel *tunnel;
- unsigned char *ptr, *optr;
- u16 hdrflags;
- u16 tunnel_id, session_id;
- int length;
- int offset;
-
- tunnel = pppol2tp_sock_to_tunnel(sock);
- if (tunnel == NULL)
- goto no_tunnel;
-
- if (tunnel->sock && pppol2tp_verify_udp_checksum(tunnel->sock, skb))
- goto discard_bad_csum;
-
- /* UDP always verifies the packet length. */
- __skb_pull(skb, sizeof(struct udphdr));
-
- /* Short packet? */
- if (!pskb_may_pull(skb, 12)) {
- PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
- "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
- goto error;
- }
-
- /* Point to L2TP header */
- optr = ptr = skb->data;
-
- /* Get L2TP header flags */
- hdrflags = ntohs(*(__be16*)ptr);
-
- /* Trace packet contents, if enabled */
- if (tunnel->debug & PPPOL2TP_MSG_DATA) {
- length = min(16u, skb->len);
- if (!pskb_may_pull(skb, length))
- goto error;
-
- printk(KERN_DEBUG "%s: recv: ", tunnel->name);
-
- offset = 0;
- do {
- printk(" %02X", ptr[offset]);
- } while (++offset < length);
-
- printk("\n");
- }
-
- /* Get length of L2TP packet */
- length = skb->len;
-
- /* If type is control packet, it is handled by userspace. */
- if (hdrflags & L2TP_HDRFLAG_T) {
- PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: recv control packet, len=%d\n", tunnel->name, length);
- goto error;
- }
-
- /* Skip flags */
- ptr += 2;
-
- /* If length is present, skip it */
- if (hdrflags & L2TP_HDRFLAG_L)
- ptr += 2;
-
- /* Extract tunnel and session ID */
- tunnel_id = ntohs(*(__be16 *) ptr);
- ptr += 2;
- session_id = ntohs(*(__be16 *) ptr);
- ptr += 2;
-
- /* Find the session context */
- session = pppol2tp_session_find(tunnel, session_id);
- if (!session) {
- /* Not found? Pass to userspace to deal with */
- PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
- "%s: no socket found (%hu/%hu). Passing up.\n",
- tunnel->name, tunnel_id, session_id);
- goto error;
- }
- sock_hold(session->sock);
-
- /* The ref count on the socket was increased by the above call since
- * we now hold a pointer to the session. Take care to do sock_put()
- * when exiting this function from now on...
- */
-
- /* Handle the optional sequence numbers. If we are the LAC,
- * enable/disable sequence numbers under the control of the LNS. If
- * no sequence numbers present but we were expecting them, discard
- * frame.
- */
- if (hdrflags & L2TP_HDRFLAG_S) {
- u16 ns, nr;
- ns = ntohs(*(__be16 *) ptr);
- ptr += 2;
- nr = ntohs(*(__be16 *) ptr);
- ptr += 2;
-
- /* Received a packet with sequence numbers. If we're the LNS,
- * check if we sre sending sequence numbers and if not,
- * configure it so.
- */
- if ((!session->lns_mode) && (!session->send_seq)) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
- "%s: requested to enable seq numbers by LNS\n",
- session->name);
- session->send_seq = -1;
- }
-
- /* Store L2TP info in the skb */
- PPPOL2TP_SKB_CB(skb)->ns = ns;
- PPPOL2TP_SKB_CB(skb)->nr = nr;
- PPPOL2TP_SKB_CB(skb)->has_seq = 1;
-
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: recv data ns=%hu, nr=%hu, session nr=%hu\n",
- session->name, ns, nr, session->nr);
- } else {
- /* No sequence numbers.
- * If user has configured mandatory sequence numbers, discard.
- */
- if (session->recv_seq) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
- "%s: recv data has no seq numbers when required. "
- "Discarding\n", session->name);
- session->stats.rx_seq_discards++;
- goto discard;
- }
-
- /* If we're the LAC and we're sending sequence numbers, the
- * LNS has requested that we no longer send sequence numbers.
- * If we're the LNS and we're sending sequence numbers, the
- * LAC is broken. Discard the frame.
- */
- if ((!session->lns_mode) && (session->send_seq)) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
- "%s: requested to disable seq numbers by LNS\n",
- session->name);
- session->send_seq = 0;
- } else if (session->send_seq) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
- "%s: recv data has no seq numbers when required. "
- "Discarding\n", session->name);
- session->stats.rx_seq_discards++;
- goto discard;
- }
-
- /* Store L2TP info in the skb */
- PPPOL2TP_SKB_CB(skb)->has_seq = 0;
- }
-
- /* If offset bit set, skip it. */
- if (hdrflags & L2TP_HDRFLAG_O) {
- offset = ntohs(*(__be16 *)ptr);
- ptr += 2 + offset;
- }
-
- offset = ptr - optr;
- if (!pskb_may_pull(skb, offset))
- goto discard;
-
- __skb_pull(skb, offset);
-
- /* Skip PPP header, if present. In testing, Microsoft L2TP clients
- * don't send the PPP header (PPP header compression enabled), but
- * other clients can include the header. So we cope with both cases
- * here. The PPP header is always FF03 when using L2TP.
- *
- * Note that skb->data[] isn't dereferenced from a u16 ptr here since
- * the field may be unaligned.
- */
- if (!pskb_may_pull(skb, 2))
- goto discard;
-
- if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
- skb_pull(skb, 2);
-
- /* Prepare skb for adding to the session's reorder_q. Hold
- * packets for max reorder_timeout or 1 second if not
- * reordering.
- */
- PPPOL2TP_SKB_CB(skb)->length = length;
- PPPOL2TP_SKB_CB(skb)->expires = jiffies +
- (session->reorder_timeout ? session->reorder_timeout : HZ);
-
- /* Add packet to the session's receive queue. Reordering is done here, if
- * enabled. Saved L2TP protocol info is stored in skb->sb[].
- */
- if (PPPOL2TP_SKB_CB(skb)->has_seq) {
- if (session->reorder_timeout != 0) {
- /* Packet reordering enabled. Add skb to session's
- * reorder queue, in order of ns.
- */
- pppol2tp_recv_queue_skb(session, skb);
- } else {
- /* Packet reordering disabled. Discard out-of-sequence
- * packets
- */
- if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
- session->stats.rx_seq_discards++;
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: oos pkt %hu len %d discarded, "
- "waiting for %hu, reorder_q_len=%d\n",
- session->name, PPPOL2TP_SKB_CB(skb)->ns,
- PPPOL2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
- goto discard;
- }
- skb_queue_tail(&session->reorder_q, skb);
- }
- } else {
- /* No sequence numbers. Add the skb to the tail of the
- * reorder queue. This ensures that it will be
- * delivered after all previous sequenced skbs.
- */
- skb_queue_tail(&session->reorder_q, skb);
- }
-
- /* Try to dequeue as many skbs from reorder_q as we can. */
- pppol2tp_recv_dequeue(session);
- sock_put(sock);
-
- return 0;
-
-discard:
- session->stats.rx_errors++;
- kfree_skb(skb);
- sock_put(session->sock);
- sock_put(sock);
-
- return 0;
-
-discard_bad_csum:
- LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
- UDP_INC_STATS_USER(&init_net, UDP_MIB_INERRORS, 0);
- tunnel->stats.rx_errors++;
- kfree_skb(skb);
- sock_put(sock);
-
- return 0;
-
-error:
- /* Put UDP header back */
- __skb_push(skb, sizeof(struct udphdr));
- sock_put(sock);
-
-no_tunnel:
- return 1;
-}
-
-/* UDP encapsulation receive handler. See net/ipv4/udp.c.
- * Return codes:
- * 0 : success.
- * <0: error
- * >0: skb should be passed up to userspace as UDP.
- */
-static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
-{
- struct pppol2tp_tunnel *tunnel;
-
- tunnel = pppol2tp_sock_to_tunnel(sk);
- if (tunnel == NULL)
- goto pass_up;
-
- PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: received %d bytes\n", tunnel->name, skb->len);
-
- if (pppol2tp_recv_core(sk, skb))
- goto pass_up_put;
-
- sock_put(sk);
- return 0;
-
-pass_up_put:
- sock_put(sk);
-pass_up:
- return 1;
-}
-
-/* Receive message. This is the recvmsg for the PPPoL2TP socket.
- */
-static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len,
- int flags)
-{
- int err;
- struct sk_buff *skb;
- struct sock *sk = sock->sk;
-
- err = -EIO;
- if (sk->sk_state & PPPOX_BOUND)
- goto end;
-
- msg->msg_namelen = 0;
-
- err = 0;
- skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
- flags & MSG_DONTWAIT, &err);
- if (!skb)
- goto end;
-
- if (len > skb->len)
- len = skb->len;
- else if (len < skb->len)
- msg->msg_flags |= MSG_TRUNC;
-
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
- if (likely(err == 0))
- err = len;
-
- kfree_skb(skb);
-end:
- return err;
-}
-
-/************************************************************************
- * Transmit handling
- ***********************************************************************/
-
-/* Tell how big L2TP headers are for a particular session. This
- * depends on whether sequence numbers are being used.
- */
-static inline int pppol2tp_l2tp_header_len(struct pppol2tp_session *session)
-{
- if (session->send_seq)
- return PPPOL2TP_L2TP_HDR_SIZE_SEQ;
-
- return PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
-}
-
-/* Build an L2TP header for the session into the buffer provided.
- */
-static void pppol2tp_build_l2tp_header(struct pppol2tp_session *session,
- void *buf)
-{
- __be16 *bufp = buf;
- u16 flags = L2TP_HDR_VER;
-
- if (session->send_seq)
- flags |= L2TP_HDRFLAG_S;
-
- /* Setup L2TP header.
- * FIXME: Can this ever be unaligned? Is direct dereferencing of
- * 16-bit header fields safe here for all architectures?
- */
- *bufp++ = htons(flags);
- *bufp++ = htons(session->tunnel_addr.d_tunnel);
- *bufp++ = htons(session->tunnel_addr.d_session);
- if (session->send_seq) {
- *bufp++ = htons(session->ns);
- *bufp++ = 0;
- session->ns++;
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: updated ns to %hu\n", session->name, session->ns);
- }
-}
-
-/* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here
- * when a user application does a sendmsg() on the session socket. L2TP and
- * PPP headers must be inserted into the user's data.
- */
-static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
- size_t total_len)
-{
- static const unsigned char ppph[2] = { 0xff, 0x03 };
- struct sock *sk = sock->sk;
- struct inet_sock *inet;
- __wsum csum;
- struct sk_buff *skb;
- int error;
- int hdr_len;
- struct pppol2tp_session *session;
- struct pppol2tp_tunnel *tunnel;
- struct udphdr *uh;
- unsigned int len;
- struct sock *sk_tun;
- u16 udp_len;
-
- error = -ENOTCONN;
- if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
- goto error;
-
- /* Get session and tunnel contexts */
- error = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto error;
-
- sk_tun = session->tunnel_sock;
- tunnel = pppol2tp_sock_to_tunnel(sk_tun);
- if (tunnel == NULL)
- goto error_put_sess;
-
- /* What header length is configured for this session? */
- hdr_len = pppol2tp_l2tp_header_len(session);
-
- /* Allocate a socket buffer */
- error = -ENOMEM;
- skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
- sizeof(struct udphdr) + hdr_len +
- sizeof(ppph) + total_len,
- 0, GFP_KERNEL);
- if (!skb)
- goto error_put_sess_tun;
-
- /* Reserve space for headers. */
- skb_reserve(skb, NET_SKB_PAD);
- skb_reset_network_header(skb);
- skb_reserve(skb, sizeof(struct iphdr));
- skb_reset_transport_header(skb);
-
- /* Build UDP header */
- inet = inet_sk(sk_tun);
- udp_len = hdr_len + sizeof(ppph) + total_len;
- uh = (struct udphdr *) skb->data;
- uh->source = inet->inet_sport;
- uh->dest = inet->inet_dport;
- uh->len = htons(udp_len);
- uh->check = 0;
- skb_put(skb, sizeof(struct udphdr));
-
- /* Build L2TP header */
- pppol2tp_build_l2tp_header(session, skb->data);
- skb_put(skb, hdr_len);
-
- /* Add PPP header */
- skb->data[0] = ppph[0];
- skb->data[1] = ppph[1];
- skb_put(skb, 2);
-
- /* Copy user data into skb */
- error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
- if (error < 0) {
- kfree_skb(skb);
- goto error_put_sess_tun;
- }
- skb_put(skb, total_len);
-
- /* Calculate UDP checksum if configured to do so */
- if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
- skb->ip_summed = CHECKSUM_NONE;
- else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
- skb->ip_summed = CHECKSUM_COMPLETE;
- csum = skb_checksum(skb, 0, udp_len, 0);
- uh->check = csum_tcpudp_magic(inet->inet_saddr,
- inet->inet_daddr,
- udp_len, IPPROTO_UDP, csum);
- if (uh->check == 0)
- uh->check = CSUM_MANGLED_0;
- } else {
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct udphdr, check);
- uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
- inet->inet_daddr,
- udp_len, IPPROTO_UDP, 0);
- }
-
- /* Debug */
- if (session->send_seq)
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %Zd bytes, ns=%hu\n", session->name,
- total_len, session->ns - 1);
- else
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %Zd bytes\n", session->name, total_len);
-
- if (session->debug & PPPOL2TP_MSG_DATA) {
- int i;
- unsigned char *datap = skb->data;
-
- printk(KERN_DEBUG "%s: xmit:", session->name);
- for (i = 0; i < total_len; i++) {
- printk(" %02X", *datap++);
- if (i == 15) {
- printk(" ...");
- break;
- }
- }
- printk("\n");
- }
-
- /* Queue the packet to IP for output */
- len = skb->len;
- error = ip_queue_xmit(skb, 1);
-
- /* Update stats */
- if (error >= 0) {
- tunnel->stats.tx_packets++;
- tunnel->stats.tx_bytes += len;
- session->stats.tx_packets++;
- session->stats.tx_bytes += len;
- } else {
- tunnel->stats.tx_errors++;
- session->stats.tx_errors++;
- }
-
- return error;
-
-error_put_sess_tun:
- sock_put(session->tunnel_sock);
-error_put_sess:
- sock_put(sk);
-error:
- return error;
-}
-
-/* Automatically called when the skb is freed.
- */
-static void pppol2tp_sock_wfree(struct sk_buff *skb)
-{
- sock_put(skb->sk);
-}
-
-/* For data skbs that we transmit, we associate with the tunnel socket
- * but don't do accounting.
- */
-static inline void pppol2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
-{
- sock_hold(sk);
- skb->sk = sk;
- skb->destructor = pppol2tp_sock_wfree;
-}
-
-/* Transmit function called by generic PPP driver. Sends PPP frame
- * over PPPoL2TP socket.
- *
- * This is almost the same as pppol2tp_sendmsg(), but rather than
- * being called with a msghdr from userspace, it is called with a skb
- * from the kernel.
- *
- * The supplied skb from ppp doesn't have enough headroom for the
- * insertion of L2TP, UDP and IP headers so we need to allocate more
- * headroom in the skb. This will create a cloned skb. But we must be
- * careful in the error case because the caller will expect to free
- * the skb it supplied, not our cloned skb. So we take care to always
- * leave the original skb unfreed if we return an error.
- */
-static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
-{
- static const u8 ppph[2] = { 0xff, 0x03 };
- struct sock *sk = (struct sock *) chan->private;
- struct sock *sk_tun;
- int hdr_len;
- u16 udp_len;
- struct pppol2tp_session *session;
- struct pppol2tp_tunnel *tunnel;
- int rc;
- int headroom;
- int data_len = skb->len;
- struct inet_sock *inet;
- __wsum csum;
- struct udphdr *uh;
- unsigned int len;
- int old_headroom;
- int new_headroom;
-
- if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
- goto abort;
-
- /* Get session and tunnel contexts from the socket */
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto abort;
-
- sk_tun = session->tunnel_sock;
- if (sk_tun == NULL)
- goto abort_put_sess;
- tunnel = pppol2tp_sock_to_tunnel(sk_tun);
- if (tunnel == NULL)
- goto abort_put_sess;
-
- /* What header length is configured for this session? */
- hdr_len = pppol2tp_l2tp_header_len(session);
-
- /* Check that there's enough headroom in the skb to insert IP,
- * UDP, L2TP and PPP headers. If not enough, expand it to
- * make room. Adjust truesize.
- */
- headroom = NET_SKB_PAD + sizeof(struct iphdr) +
- sizeof(struct udphdr) + hdr_len + sizeof(ppph);
- old_headroom = skb_headroom(skb);
- if (skb_cow_head(skb, headroom))
- goto abort_put_sess_tun;
-
- new_headroom = skb_headroom(skb);
- skb_orphan(skb);
- skb->truesize += new_headroom - old_headroom;
-
- /* Setup PPP header */
- __skb_push(skb, sizeof(ppph));
- skb->data[0] = ppph[0];
- skb->data[1] = ppph[1];
-
- /* Setup L2TP header */
- pppol2tp_build_l2tp_header(session, __skb_push(skb, hdr_len));
-
- udp_len = sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len;
-
- /* Setup UDP header */
- inet = inet_sk(sk_tun);
- __skb_push(skb, sizeof(*uh));
- skb_reset_transport_header(skb);
- uh = udp_hdr(skb);
- uh->source = inet->inet_sport;
- uh->dest = inet->inet_dport;
- uh->len = htons(udp_len);
- uh->check = 0;
-
- /* Debug */
- if (session->send_seq)
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %d bytes, ns=%hu\n", session->name,
- data_len, session->ns - 1);
- else
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %d bytes\n", session->name, data_len);
-
- if (session->debug & PPPOL2TP_MSG_DATA) {
- int i;
- unsigned char *datap = skb->data;
-
- printk(KERN_DEBUG "%s: xmit:", session->name);
- for (i = 0; i < data_len; i++) {
- printk(" %02X", *datap++);
- if (i == 31) {
- printk(" ...");
- break;
- }
- }
- printk("\n");
- }
-
- memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
- IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
- IPSKB_REROUTED);
- nf_reset(skb);
-
- /* Get routing info from the tunnel socket */
- skb_dst_drop(skb);
- skb_dst_set(skb, dst_clone(__sk_dst_get(sk_tun)));
- pppol2tp_skb_set_owner_w(skb, sk_tun);
-
- /* Calculate UDP checksum if configured to do so */
- if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
- skb->ip_summed = CHECKSUM_NONE;
- else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
- (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
- skb->ip_summed = CHECKSUM_COMPLETE;
- csum = skb_checksum(skb, 0, udp_len, 0);
- uh->check = csum_tcpudp_magic(inet->inet_saddr,
- inet->inet_daddr,
- udp_len, IPPROTO_UDP, csum);
- if (uh->check == 0)
- uh->check = CSUM_MANGLED_0;
- } else {
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct udphdr, check);
- uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
- inet->inet_daddr,
- udp_len, IPPROTO_UDP, 0);
- }
-
- /* Queue the packet to IP for output */
- len = skb->len;
- rc = ip_queue_xmit(skb, 1);
-
- /* Update stats */
- if (rc >= 0) {
- tunnel->stats.tx_packets++;
- tunnel->stats.tx_bytes += len;
- session->stats.tx_packets++;
- session->stats.tx_bytes += len;
- } else {
- tunnel->stats.tx_errors++;
- session->stats.tx_errors++;
- }
-
- sock_put(sk_tun);
- sock_put(sk);
- return 1;
-
-abort_put_sess_tun:
- sock_put(sk_tun);
-abort_put_sess:
- sock_put(sk);
-abort:
- /* Free the original skb */
- kfree_skb(skb);
- return 1;
-}
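/* Illustrative sketch (not part of this patch): pppol2tp_xmit() is never
 * called directly -- it is plugged into the generic PPP core through the
 * channel ops table that pppol2tp_connect() installs below (po->chan.ops).
 * A minimal sketch of such a table, assuming struct ppp_channel_ops from
 * <linux/ppp_channel.h>:
 */
static struct ppp_channel_ops example_chan_ops = {
	.start_xmit = pppol2tp_xmit,	/* invoked by ppp_generic for each frame */
};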
-
-/*****************************************************************************
- * Session (and tunnel control) socket create/destroy.
- *****************************************************************************/
-
-/* When the tunnel UDP socket is closed, all the attached sockets need to go
- * too.
- */
-static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
-{
- int hash;
- struct hlist_node *walk;
- struct hlist_node *tmp;
- struct pppol2tp_session *session;
- struct sock *sk;
-
- BUG_ON(tunnel == NULL);
-
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing all sessions...\n", tunnel->name);
-
- write_lock_bh(&tunnel->hlist_lock);
- for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) {
-again:
- hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
- struct sk_buff *skb;
-
- session = hlist_entry(walk, struct pppol2tp_session, hlist);
-
- sk = session->sock;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing session\n", session->name);
-
- hlist_del_init(&session->hlist);
-
- /* Since we should hold the sock lock while
- * doing any unbinding, we need to release the
- * lock we're holding before taking that lock.
- * Hold a reference to the sock so it doesn't
- * disappear as we're jumping between locks.
- */
- sock_hold(sk);
- write_unlock_bh(&tunnel->hlist_lock);
- lock_sock(sk);
-
- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
- pppox_unbind_sock(sk);
- sk->sk_state = PPPOX_DEAD;
- sk->sk_state_change(sk);
- }
-
- /* Purge any queued data */
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
- while ((skb = skb_dequeue(&session->reorder_q))) {
- kfree_skb(skb);
- sock_put(sk);
- }
-
- release_sock(sk);
- sock_put(sk);
-
- /* Now restart from the beginning of this hash
- * chain. We always remove a session from the
- * list so we are guaranteed to make forward
- * progress.
- */
- write_lock_bh(&tunnel->hlist_lock);
- goto again;
- }
- }
- write_unlock_bh(&tunnel->hlist_lock);
-}
-
-/* Really kill the tunnel.
- * Come here only when all sessions have been cleared from the tunnel.
- */
-static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
-{
- struct pppol2tp_net *pn = pppol2tp_pernet(tunnel->pppol2tp_net);
-
- /* Remove from socket list */
- write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
- list_del_init(&tunnel->list);
- write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
-
- atomic_dec(&pppol2tp_tunnel_count);
- kfree(tunnel);
-}
-
-/* Tunnel UDP socket destruct hook.
- * The tunnel context is deleted only when all session sockets have been
- * closed.
- */
-static void pppol2tp_tunnel_destruct(struct sock *sk)
-{
- struct pppol2tp_tunnel *tunnel;
-
- tunnel = sk->sk_user_data;
- if (tunnel == NULL)
- goto end;
-
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing...\n", tunnel->name);
-
- /* Close all sessions */
- pppol2tp_tunnel_closeall(tunnel);
-
- /* No longer an encapsulation socket. See net/ipv4/udp.c */
- (udp_sk(sk))->encap_type = 0;
- (udp_sk(sk))->encap_rcv = NULL;
-
- /* Remove hooks into tunnel socket */
- tunnel->sock = NULL;
- sk->sk_destruct = tunnel->old_sk_destruct;
- sk->sk_user_data = NULL;
-
- /* Call original (UDP) socket destructor */
- if (sk->sk_destruct != NULL)
- (*sk->sk_destruct)(sk);
-
- pppol2tp_tunnel_dec_refcount(tunnel);
-
-end:
- return;
-}
-
-/* Really kill the session socket. (Called from sock_put() if
- * refcnt == 0.)
- */
-static void pppol2tp_session_destruct(struct sock *sk)
-{
- struct pppol2tp_session *session = NULL;
-
- if (sk->sk_user_data != NULL) {
- struct pppol2tp_tunnel *tunnel;
-
- session = sk->sk_user_data;
- if (session == NULL)
- goto out;
-
- BUG_ON(session->magic != L2TP_SESSION_MAGIC);
-
- /* Don't use pppol2tp_sock_to_tunnel() here to
- * get the tunnel context because the tunnel
- * socket might have already been closed (its
- * sk->sk_user_data will be NULL) so use the
- * session's private tunnel ptr instead.
- */
- tunnel = session->tunnel;
- if (tunnel != NULL) {
- BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-
- /* If session_id is zero, this is a null
- * session context, which was created for a
- * socket that is being used only to manage
- * tunnels.
- */
- if (session->tunnel_addr.s_session != 0) {
- /* Delete the session socket from the
- * hash
- */
- write_lock_bh(&tunnel->hlist_lock);
- hlist_del_init(&session->hlist);
- write_unlock_bh(&tunnel->hlist_lock);
-
- atomic_dec(&pppol2tp_session_count);
- }
-
- /* This will delete the tunnel context if this
- * is the last session on the tunnel.
- */
- session->tunnel = NULL;
- session->tunnel_sock = NULL;
- pppol2tp_tunnel_dec_refcount(tunnel);
- }
- }
-
- kfree(session);
-out:
- return;
-}
-
-/* Called when the PPPoX socket (session) is closed.
- */
-static int pppol2tp_release(struct socket *sock)
-{
- struct sock *sk = sock->sk;
- struct pppol2tp_session *session;
- int error;
-
- if (!sk)
- return 0;
-
- error = -EBADF;
- lock_sock(sk);
- if (sock_flag(sk, SOCK_DEAD) != 0)
- goto error;
-
- pppox_unbind_sock(sk);
-
- /* Signal the death of the socket. */
- sk->sk_state = PPPOX_DEAD;
- sock_orphan(sk);
- sock->sk = NULL;
-
- session = pppol2tp_sock_to_session(sk);
-
- /* Purge any queued data */
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
- if (session != NULL) {
- struct sk_buff *skb;
- while ((skb = skb_dequeue(&session->reorder_q))) {
- kfree_skb(skb);
- sock_put(sk);
- }
- sock_put(sk);
- }
-
- release_sock(sk);
-
- /* This will delete the session context via
- * pppol2tp_session_destruct() if the socket's refcnt drops to
- * zero.
- */
- sock_put(sk);
-
- return 0;
-
-error:
- release_sock(sk);
- return error;
-}
-
-/* Internal function to prepare a tunnel (UDP) socket to have PPPoX
- * sockets attached to it.
- */
-static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net,
- int fd, u16 tunnel_id, int *error)
-{
- int err;
- struct socket *sock = NULL;
- struct sock *sk;
- struct pppol2tp_tunnel *tunnel;
- struct pppol2tp_net *pn;
- struct sock *ret = NULL;
-
- /* Get the tunnel UDP socket from the fd, which was opened by
- * the userspace L2TP daemon.
- */
- err = -EBADF;
- sock = sockfd_lookup(fd, &err);
- if (!sock) {
- PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
- "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
- tunnel_id, fd, err);
- goto err;
- }
-
- sk = sock->sk;
-
- /* Quick sanity checks */
- err = -EPROTONOSUPPORT;
- if (sk->sk_protocol != IPPROTO_UDP) {
- PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
- "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
- tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
- goto err;
- }
- err = -EAFNOSUPPORT;
- if (sock->ops->family != AF_INET) {
- PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
- "tunl %hu: fd %d wrong family, got %d, expected %d\n",
- tunnel_id, fd, sock->ops->family, AF_INET);
- goto err;
- }
-
- err = -ENOTCONN;
-
- /* Check if this socket has already been prepped */
- tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data;
- if (tunnel != NULL) {
- /* User-data field already set */
- err = -EBUSY;
- BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-
- /* This socket has already been prepped */
- ret = tunnel->sock;
- goto out;
- }
-
- /* This socket is available and needs prepping. Create a new tunnel
- * context and init it.
- */
- sk->sk_user_data = tunnel = kzalloc(sizeof(struct pppol2tp_tunnel), GFP_KERNEL);
- if (sk->sk_user_data == NULL) {
- err = -ENOMEM;
- goto err;
- }
-
- tunnel->magic = L2TP_TUNNEL_MAGIC;
- sprintf(&tunnel->name[0], "tunl %hu", tunnel_id);
-
- tunnel->stats.tunnel_id = tunnel_id;
- tunnel->debug = PPPOL2TP_DEFAULT_DEBUG_FLAGS;
-
- /* Hook on the tunnel socket destructor so that we can cleanup
- * if the tunnel socket goes away.
- */
- tunnel->old_sk_destruct = sk->sk_destruct;
- sk->sk_destruct = pppol2tp_tunnel_destruct;
-
- tunnel->sock = sk;
- sk->sk_allocation = GFP_ATOMIC;
-
- /* Misc init */
- rwlock_init(&tunnel->hlist_lock);
-
- /* The net we belong to */
- tunnel->pppol2tp_net = net;
- pn = pppol2tp_pernet(net);
-
- /* Add tunnel to our list */
- INIT_LIST_HEAD(&tunnel->list);
- write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
- list_add(&tunnel->list, &pn->pppol2tp_tunnel_list);
- write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
- atomic_inc(&pppol2tp_tunnel_count);
-
- /* Bump the reference count. The tunnel context is deleted
- * only when this drops to zero.
- */
- pppol2tp_tunnel_inc_refcount(tunnel);
-
- /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
- (udp_sk(sk))->encap_type = UDP_ENCAP_L2TPINUDP;
- (udp_sk(sk))->encap_rcv = pppol2tp_udp_encap_recv;
-
- ret = tunnel->sock;
-
- *error = 0;
-out:
- if (sock)
- sockfd_put(sock);
-
- return ret;
-
-err:
- *error = err;
- goto out;
-}
-
-static struct proto pppol2tp_sk_proto = {
- .name = "PPPOL2TP",
- .owner = THIS_MODULE,
- .obj_size = sizeof(struct pppox_sock),
-};
-
-/* socket() handler. Initialize a new struct sock.
- */
-static int pppol2tp_create(struct net *net, struct socket *sock)
-{
- int error = -ENOMEM;
- struct sock *sk;
-
- sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
- if (!sk)
- goto out;
-
- sock_init_data(sock, sk);
-
- sock->state = SS_UNCONNECTED;
- sock->ops = &pppol2tp_ops;
-
- sk->sk_backlog_rcv = pppol2tp_recv_core;
- sk->sk_protocol = PX_PROTO_OL2TP;
- sk->sk_family = PF_PPPOX;
- sk->sk_state = PPPOX_NONE;
- sk->sk_type = SOCK_STREAM;
- sk->sk_destruct = pppol2tp_session_destruct;
-
- error = 0;
-
-out:
- return error;
-}
-
-/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
- */
-static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
- int sockaddr_len, int flags)
-{
- struct sock *sk = sock->sk;
- struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
- struct pppox_sock *po = pppox_sk(sk);
- struct sock *tunnel_sock = NULL;
- struct pppol2tp_session *session = NULL;
- struct pppol2tp_tunnel *tunnel;
- struct dst_entry *dst;
- int error = 0;
-
- lock_sock(sk);
-
- error = -EINVAL;
- if (sp->sa_protocol != PX_PROTO_OL2TP)
- goto end;
-
- /* Check for already bound sockets */
- error = -EBUSY;
- if (sk->sk_state & PPPOX_CONNECTED)
- goto end;
-
- /* We don't support rebinding anyway */
- error = -EALREADY;
- if (sk->sk_user_data)
- goto end; /* socket is already attached */
-
- /* Don't bind if s_tunnel is 0 */
- error = -EINVAL;
- if (sp->pppol2tp.s_tunnel == 0)
- goto end;
-
- /* Special case: prepare tunnel socket if s_session and
- * d_session are 0. Otherwise look up tunnel using supplied
- * tunnel id.
- */
- if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) {
- tunnel_sock = pppol2tp_prepare_tunnel_socket(sock_net(sk),
- sp->pppol2tp.fd,
- sp->pppol2tp.s_tunnel,
- &error);
- if (tunnel_sock == NULL)
- goto end;
-
- sock_hold(tunnel_sock);
- tunnel = tunnel_sock->sk_user_data;
- } else {
- tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel);
-
- /* Error if we can't find the tunnel */
- error = -ENOENT;
- if (tunnel == NULL)
- goto end;
-
- tunnel_sock = tunnel->sock;
- }
-
- /* Check that this session doesn't already exist */
- error = -EEXIST;
- session = pppol2tp_session_find(tunnel, sp->pppol2tp.s_session);
- if (session != NULL)
- goto end;
-
- /* Allocate and initialize a new session context. */
- session = kzalloc(sizeof(struct pppol2tp_session), GFP_KERNEL);
- if (session == NULL) {
- error = -ENOMEM;
- goto end;
- }
-
- skb_queue_head_init(&session->reorder_q);
-
- session->magic = L2TP_SESSION_MAGIC;
- session->owner = current->pid;
- session->sock = sk;
- session->tunnel = tunnel;
- session->tunnel_sock = tunnel_sock;
- session->tunnel_addr = sp->pppol2tp;
- sprintf(&session->name[0], "sess %hu/%hu",
- session->tunnel_addr.s_tunnel,
- session->tunnel_addr.s_session);
-
- session->stats.tunnel_id = session->tunnel_addr.s_tunnel;
- session->stats.session_id = session->tunnel_addr.s_session;
-
- INIT_HLIST_NODE(&session->hlist);
-
- /* Inherit debug options from tunnel */
- session->debug = tunnel->debug;
-
- /* Default MTU must allow space for UDP/L2TP/PPP
- * headers.
- */
- session->mtu = session->mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
-
- /* If PMTU discovery was enabled, use the MTU that was discovered */
- dst = sk_dst_get(sk);
- if (dst != NULL) {
- u32 pmtu = dst_mtu(__sk_dst_get(sk));
- if (pmtu != 0)
- session->mtu = session->mru = pmtu -
- PPPOL2TP_HEADER_OVERHEAD;
- dst_release(dst);
- }
-
- /* Special case: if source & dest session_id == 0x0000, this socket is
- * being created to manage the tunnel. Don't add the session to the
- * session hash list, just set up the internal context for use by
- * ioctl() and sockopt() handlers.
- */
- if ((session->tunnel_addr.s_session == 0) &&
- (session->tunnel_addr.d_session == 0)) {
- error = 0;
- sk->sk_user_data = session;
- goto out_no_ppp;
- }
-
- /* Get tunnel context from the tunnel socket */
- tunnel = pppol2tp_sock_to_tunnel(tunnel_sock);
- if (tunnel == NULL) {
- error = -EBADF;
- goto end;
- }
-
- /* Right now, because we don't have a way to push the incoming skb's
- * straight through the UDP layer, the only header we need to worry
- * about is the L2TP header. This size is different depending on
- * whether sequence numbers are enabled for the data channel.
- */
- po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
-
- po->chan.private = sk;
- po->chan.ops = &pppol2tp_chan_ops;
- po->chan.mtu = session->mtu;
-
- error = ppp_register_net_channel(sock_net(sk), &po->chan);
- if (error)
- goto end_put_tun;
-
- /* This is how we get the session context from the socket. */
- sk->sk_user_data = session;
-
- /* Add session to the tunnel's hash list */
- write_lock_bh(&tunnel->hlist_lock);
- hlist_add_head(&session->hlist,
- pppol2tp_session_id_hash(tunnel,
- session->tunnel_addr.s_session));
- write_unlock_bh(&tunnel->hlist_lock);
-
- atomic_inc(&pppol2tp_session_count);
-
-out_no_ppp:
- pppol2tp_tunnel_inc_refcount(tunnel);
- sk->sk_state = PPPOX_CONNECTED;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: created\n", session->name);
-
-end_put_tun:
- sock_put(tunnel_sock);
-end:
- release_sock(sk);
-
- if (error != 0) {
- if (session)
- PRINTK(session->debug,
- PPPOL2TP_MSG_CONTROL, KERN_WARNING,
- "%s: connect failed: %d\n",
- session->name, error);
- else
- PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_WARNING,
- "connect failed: %d\n", error);
- }
-
- return error;
-}
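/* Illustrative userspace sketch (not part of this patch): how the L2TP daemon
 * drives the connect() path above.  It passes its tunnel UDP socket in
 * pppol2tp.fd; s_session == d_session == 0 selects the special tunnel
 * management socket, non-zero ids create a real session (assuming a prior
 * 0/0 connect already prepared the tunnel socket).  Header names and the
 * example tunnel/session ids are assumptions, not taken from this file.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_pppox.h>
#include <linux/if_pppol2tp.h>

static int example_connect_session(int tunnel_udp_fd)
{
	struct sockaddr_pppol2tp sax;
	int fd;

	fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
	if (fd < 0)
		return -1;

	memset(&sax, 0, sizeof(sax));
	sax.sa_family          = AF_PPPOX;
	sax.sa_protocol        = PX_PROTO_OL2TP;
	sax.pppol2tp.fd        = tunnel_udp_fd;	/* UDP socket owned by the daemon */
	sax.pppol2tp.s_tunnel  = 1;		/* local tunnel id (example) */
	sax.pppol2tp.s_session = 1;		/* local session id (example) */
	sax.pppol2tp.d_tunnel  = 2;		/* peer tunnel id (example) */
	sax.pppol2tp.d_session = 1;		/* peer session id (example) */

	if (connect(fd, (struct sockaddr *)&sax, sizeof(sax)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}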
-
-/* getname() support.
- */
-static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
- int *usockaddr_len, int peer)
-{
- int len = sizeof(struct sockaddr_pppol2tp);
- struct sockaddr_pppol2tp sp;
- int error = 0;
- struct pppol2tp_session *session;
-
- error = -ENOTCONN;
- if (sock->sk->sk_state != PPPOX_CONNECTED)
- goto end;
-
- session = pppol2tp_sock_to_session(sock->sk);
- if (session == NULL) {
- error = -EBADF;
- goto end;
- }
-
- sp.sa_family = AF_PPPOX;
- sp.sa_protocol = PX_PROTO_OL2TP;
- memcpy(&sp.pppol2tp, &session->tunnel_addr,
- sizeof(struct pppol2tp_addr));
-
- memcpy(uaddr, &sp, len);
-
- *usockaddr_len = len;
-
- error = 0;
- sock_put(sock->sk);
-
-end:
- return error;
-}
-
-/****************************************************************************
- * ioctl() handlers.
- *
- * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
- * sockets. However, in order to control kernel tunnel features, we allow
- * userspace to create a special "tunnel" PPPoX socket which is used for
- * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
- * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
- * calls.
- ****************************************************************************/
-
-/* Session ioctl helper.
- */
-static int pppol2tp_session_ioctl(struct pppol2tp_session *session,
- unsigned int cmd, unsigned long arg)
-{
- struct ifreq ifr;
- int err = 0;
- struct sock *sk = session->sock;
- int val = (int) arg;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
- "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
- session->name, cmd, arg);
-
- sock_hold(sk);
-
- switch (cmd) {
- case SIOCGIFMTU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
- break;
- ifr.ifr_mtu = session->mtu;
- if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
- break;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get mtu=%d\n", session->name, session->mtu);
- err = 0;
- break;
-
- case SIOCSIFMTU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
- break;
-
- session->mtu = ifr.ifr_mtu;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set mtu=%d\n", session->name, session->mtu);
- err = 0;
- break;
-
- case PPPIOCGMRU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (put_user(session->mru, (int __user *) arg))
- break;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get mru=%d\n", session->name, session->mru);
- err = 0;
- break;
-
- case PPPIOCSMRU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (get_user(val,(int __user *) arg))
- break;
-
- session->mru = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set mru=%d\n", session->name, session->mru);
- err = 0;
- break;
-
- case PPPIOCGFLAGS:
- err = -EFAULT;
- if (put_user(session->flags, (int __user *) arg))
- break;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get flags=%d\n", session->name, session->flags);
- err = 0;
- break;
-
- case PPPIOCSFLAGS:
- err = -EFAULT;
- if (get_user(val, (int __user *) arg))
- break;
- session->flags = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set flags=%d\n", session->name, session->flags);
- err = 0;
- break;
-
- case PPPIOCGL2TPSTATS:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- if (copy_to_user((void __user *) arg, &session->stats,
- sizeof(session->stats)))
- break;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get L2TP stats\n", session->name);
- err = 0;
- break;
-
- default:
- err = -ENOSYS;
- break;
- }
-
- sock_put(sk);
-
- return err;
-}
-
-/* Tunnel ioctl helper.
- *
- * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
- * specifies a session_id, the session ioctl handler is called. This allows an
- * application to retrieve session stats via a tunnel socket.
- */
-static int pppol2tp_tunnel_ioctl(struct pppol2tp_tunnel *tunnel,
- unsigned int cmd, unsigned long arg)
-{
- int err = 0;
- struct sock *sk = tunnel->sock;
- struct pppol2tp_ioc_stats stats_req;
-
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
- "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", tunnel->name,
- cmd, arg);
-
- sock_hold(sk);
-
- switch (cmd) {
- case PPPIOCGL2TPSTATS:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- if (copy_from_user(&stats_req, (void __user *) arg,
- sizeof(stats_req))) {
- err = -EFAULT;
- break;
- }
- if (stats_req.session_id != 0) {
- /* resend to session ioctl handler */
- struct pppol2tp_session *session =
- pppol2tp_session_find(tunnel, stats_req.session_id);
- if (session != NULL)
- err = pppol2tp_session_ioctl(session, cmd, arg);
- else
- err = -EBADR;
- break;
- }
-#ifdef CONFIG_XFRM
- tunnel->stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
-#endif
- if (copy_to_user((void __user *) arg, &tunnel->stats,
- sizeof(tunnel->stats))) {
- err = -EFAULT;
- break;
- }
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get L2TP stats\n", tunnel->name);
- err = 0;
- break;
-
- default:
- err = -ENOSYS;
- break;
- }
-
- sock_put(sk);
-
- return err;
-}
-
-/* Main ioctl() handler.
- * Dispatch to tunnel or session helpers depending on the socket.
- */
-static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg)
-{
- struct sock *sk = sock->sk;
- struct pppol2tp_session *session;
- struct pppol2tp_tunnel *tunnel;
- int err;
-
- if (!sk)
- return 0;
-
- err = -EBADF;
- if (sock_flag(sk, SOCK_DEAD) != 0)
- goto end;
-
- err = -ENOTCONN;
- if ((sk->sk_user_data == NULL) ||
- (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
- goto end;
-
- /* Get session context from the socket */
- err = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto end;
-
- /* Special case: if session's session_id is zero, treat ioctl as a
- * tunnel ioctl
- */
- if ((session->tunnel_addr.s_session == 0) &&
- (session->tunnel_addr.d_session == 0)) {
- err = -EBADF;
- tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
- if (tunnel == NULL)
- goto end_put_sess;
-
- err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
- sock_put(session->tunnel_sock);
- goto end_put_sess;
- }
-
- err = pppol2tp_session_ioctl(session, cmd, arg);
-
-end_put_sess:
- sock_put(sk);
-end:
- return err;
-}
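/* Illustrative userspace sketch (not part of this patch): using the
 * PPPIOCGL2TPSTATS path described above.  Issued on a tunnel management
 * socket with a non-zero session_id, the request is redirected to that
 * session's stats; with session_id == 0 it returns the tunnel's own
 * counters.  The ioctl header name is an assumption for this era.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if_ppp.h>
#include <linux/if_pppol2tp.h>

static int example_get_l2tp_stats(int tunnel_mgmt_fd, unsigned short session_id,
				  struct pppol2tp_ioc_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->session_id = session_id;	/* 0 selects the tunnel's counters */

	return ioctl(tunnel_mgmt_fd, PPPIOCGL2TPSTATS, stats);
}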
-
-/*****************************************************************************
- * setsockopt() / getsockopt() support.
- *
- * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
- * sockets. In order to control kernel tunnel features, we allow userspace to
- * create a special "tunnel" PPPoX socket which is used for control only.
- * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
- * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
- *****************************************************************************/
-
-/* Tunnel setsockopt() helper.
- */
-static int pppol2tp_tunnel_setsockopt(struct sock *sk,
- struct pppol2tp_tunnel *tunnel,
- int optname, int val)
-{
- int err = 0;
-
- switch (optname) {
- case PPPOL2TP_SO_DEBUG:
- tunnel->debug = val;
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set debug=%x\n", tunnel->name, tunnel->debug);
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- return err;
-}
-
-/* Session setsockopt helper.
- */
-static int pppol2tp_session_setsockopt(struct sock *sk,
- struct pppol2tp_session *session,
- int optname, int val)
-{
- int err = 0;
-
- switch (optname) {
- case PPPOL2TP_SO_RECVSEQ:
- if ((val != 0) && (val != 1)) {
- err = -EINVAL;
- break;
- }
- session->recv_seq = val ? -1 : 0;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set recv_seq=%d\n", session->name,
- session->recv_seq);
- break;
-
- case PPPOL2TP_SO_SENDSEQ:
- if ((val != 0) && (val != 1)) {
- err = -EINVAL;
- break;
- }
- session->send_seq = val ? -1 : 0;
- {
- struct sock *ssk = session->sock;
- struct pppox_sock *po = pppox_sk(ssk);
- po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
- PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
- }
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set send_seq=%d\n", session->name, session->send_seq);
- break;
-
- case PPPOL2TP_SO_LNSMODE:
- if ((val != 0) && (val != 1)) {
- err = -EINVAL;
- break;
- }
- session->lns_mode = val ? -1 : 0;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set lns_mode=%d\n", session->name,
- session->lns_mode);
- break;
-
- case PPPOL2TP_SO_DEBUG:
- session->debug = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set debug=%x\n", session->name, session->debug);
- break;
-
- case PPPOL2TP_SO_REORDERTO:
- session->reorder_timeout = msecs_to_jiffies(val);
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set reorder_timeout=%d\n", session->name,
- session->reorder_timeout);
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- return err;
-}
-
-/* Main setsockopt() entry point.
- * Does API checks, then calls either the tunnel or session setsockopt
- * handler, according to whether the PPPoL2TP socket is for a regular
- * session or the special tunnel type.
- */
-static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, unsigned int optlen)
-{
- struct sock *sk = sock->sk;
- struct pppol2tp_session *session = sk->sk_user_data;
- struct pppol2tp_tunnel *tunnel;
- int val;
- int err;
-
- if (level != SOL_PPPOL2TP)
- return udp_prot.setsockopt(sk, level, optname, optval, optlen);
-
- if (optlen < sizeof(int))
- return -EINVAL;
-
- if (get_user(val, (int __user *)optval))
- return -EFAULT;
-
- err = -ENOTCONN;
- if (sk->sk_user_data == NULL)
- goto end;
-
- /* Get session context from the socket */
- err = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto end;
-
- /* Special case: if session_id == 0x0000, treat as operation on tunnel
- */
- if ((session->tunnel_addr.s_session == 0) &&
- (session->tunnel_addr.d_session == 0)) {
- err = -EBADF;
- tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
- if (tunnel == NULL)
- goto end_put_sess;
-
- err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
- sock_put(session->tunnel_sock);
- } else
- err = pppol2tp_session_setsockopt(sk, session, optname, val);
-
- err = 0;
-
-end_put_sess:
- sock_put(sk);
-end:
- return err;
-}
-
-/* Tunnel getsockopt helper. Called with sock locked.
- */
-static int pppol2tp_tunnel_getsockopt(struct sock *sk,
- struct pppol2tp_tunnel *tunnel,
- int optname, int *val)
-{
- int err = 0;
-
- switch (optname) {
- case PPPOL2TP_SO_DEBUG:
- *val = tunnel->debug;
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get debug=%x\n", tunnel->name, tunnel->debug);
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- return err;
-}
-
-/* Session getsockopt helper. Called with sock locked.
- */
-static int pppol2tp_session_getsockopt(struct sock *sk,
- struct pppol2tp_session *session,
- int optname, int *val)
-{
- int err = 0;
-
- switch (optname) {
- case PPPOL2TP_SO_RECVSEQ:
- *val = session->recv_seq;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get recv_seq=%d\n", session->name, *val);
- break;
-
- case PPPOL2TP_SO_SENDSEQ:
- *val = session->send_seq;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get send_seq=%d\n", session->name, *val);
- break;
-
- case PPPOL2TP_SO_LNSMODE:
- *val = session->lns_mode;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get lns_mode=%d\n", session->name, *val);
- break;
-
- case PPPOL2TP_SO_DEBUG:
- *val = session->debug;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get debug=%d\n", session->name, *val);
- break;
-
- case PPPOL2TP_SO_REORDERTO:
- *val = (int) jiffies_to_msecs(session->reorder_timeout);
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get reorder_timeout=%d\n", session->name, *val);
- break;
-
- default:
- err = -ENOPROTOOPT;
- }
-
- return err;
-}
-
-/* Main getsockopt() entry point.
- * Does API checks, then calls either the tunnel or session getsockopt
- * handler, according to whether the PPPoX socket is for a regular session
- * or the special tunnel type.
- */
-static int pppol2tp_getsockopt(struct socket *sock, int level,
- int optname, char __user *optval, int __user *optlen)
-{
- struct sock *sk = sock->sk;
- struct pppol2tp_session *session = sk->sk_user_data;
- struct pppol2tp_tunnel *tunnel;
- int val, len;
- int err;
-
- if (level != SOL_PPPOL2TP)
- return udp_prot.getsockopt(sk, level, optname, optval, optlen);
-
- if (get_user(len, (int __user *) optlen))
- return -EFAULT;
-
- len = min_t(unsigned int, len, sizeof(int));
-
- if (len < 0)
- return -EINVAL;
-
- err = -ENOTCONN;
- if (sk->sk_user_data == NULL)
- goto end;
-
- /* Get the session context */
- err = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto end;
-
- /* Special case: if session_id == 0x0000, treat as operation on tunnel */
- if ((session->tunnel_addr.s_session == 0) &&
- (session->tunnel_addr.d_session == 0)) {
- err = -EBADF;
- tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
- if (tunnel == NULL)
- goto end_put_sess;
-
- err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
- sock_put(session->tunnel_sock);
- } else
- err = pppol2tp_session_getsockopt(sk, session, optname, &val);
-
- err = -EFAULT;
- if (put_user(len, (int __user *) optlen))
- goto end_put_sess;
-
- if (copy_to_user((void __user *) optval, &val, len))
- goto end_put_sess;
-
- err = 0;
-
-end_put_sess:
- sock_put(sk);
-end:
- return err;
-}
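/* Illustrative userspace sketch (not part of this patch): the SOL_PPPOL2TP
 * options handled above are ordinary integer socket options on the session
 * (or tunnel management) socket; any other level falls through to the
 * underlying UDP implementation.  Level and option names are taken from
 * <linux/if_pppol2tp.h>.
 */
#include <sys/socket.h>
#include <linux/if_pppol2tp.h>

static int example_enable_send_seq(int session_fd, int debug_mask)
{
	int on = 1;

	if (setsockopt(session_fd, SOL_PPPOL2TP, PPPOL2TP_SO_SENDSEQ,
		       &on, sizeof(on)) < 0)
		return -1;

	/* Debug flags use the same PPPOL2TP_MSG_* bits as the PRINTK()s above */
	return setsockopt(session_fd, SOL_PPPOL2TP, PPPOL2TP_SO_DEBUG,
			  &debug_mask, sizeof(debug_mask));
}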
-
-/*****************************************************************************
- * /proc filesystem for debug
- *****************************************************************************/
-
-#ifdef CONFIG_PROC_FS
-
-#include <linux/seq_file.h>
-
-struct pppol2tp_seq_data {
- struct seq_net_private p;
- struct pppol2tp_tunnel *tunnel; /* current tunnel */
- struct pppol2tp_session *session; /* NULL means get first session in tunnel */
-};
-
-static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr)
-{
- struct pppol2tp_session *session = NULL;
- struct hlist_node *walk;
- int found = 0;
- int next = 0;
- int i;
-
- read_lock_bh(&tunnel->hlist_lock);
- for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) {
- hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) {
- if (curr == NULL) {
- found = 1;
- goto out;
- }
- if (session == curr) {
- next = 1;
- continue;
- }
- if (next) {
- found = 1;
- goto out;
- }
- }
- }
-out:
- read_unlock_bh(&tunnel->hlist_lock);
- if (!found)
- session = NULL;
-
- return session;
-}
-
-static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_net *pn,
- struct pppol2tp_tunnel *curr)
-{
- struct pppol2tp_tunnel *tunnel = NULL;
-
- read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
- if (list_is_last(&curr->list, &pn->pppol2tp_tunnel_list)) {
- goto out;
- }
- tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
-out:
- read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
-
- return tunnel;
-}
-
-static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
-{
- struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
- struct pppol2tp_net *pn;
- loff_t pos = *offs;
-
- if (!pos)
- goto out;
-
- BUG_ON(m->private == NULL);
- pd = m->private;
- pn = pppol2tp_pernet(seq_file_net(m));
-
- if (pd->tunnel == NULL) {
- if (!list_empty(&pn->pppol2tp_tunnel_list))
- pd->tunnel = list_entry(pn->pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list);
- } else {
- pd->session = next_session(pd->tunnel, pd->session);
- if (pd->session == NULL) {
- pd->tunnel = next_tunnel(pn, pd->tunnel);
- }
- }
-
- /* NULL tunnel and session indicates end of list */
- if ((pd->tunnel == NULL) && (pd->session == NULL))
- pd = NULL;
-
-out:
- return pd;
-}
-
-static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
-{
- (*pos)++;
- return NULL;
-}
-
-static void pppol2tp_seq_stop(struct seq_file *p, void *v)
-{
- /* nothing to do */
-}
-
-static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
-{
- struct pppol2tp_tunnel *tunnel = v;
-
- seq_printf(m, "\nTUNNEL '%s', %c %d\n",
- tunnel->name,
- (tunnel == tunnel->sock->sk_user_data) ? 'Y':'N',
- atomic_read(&tunnel->ref_count) - 1);
- seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
- tunnel->debug,
- (unsigned long long)tunnel->stats.tx_packets,
- (unsigned long long)tunnel->stats.tx_bytes,
- (unsigned long long)tunnel->stats.tx_errors,
- (unsigned long long)tunnel->stats.rx_packets,
- (unsigned long long)tunnel->stats.rx_bytes,
- (unsigned long long)tunnel->stats.rx_errors);
-}
-
-static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
-{
- struct pppol2tp_session *session = v;
-
- seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
- "%04X/%04X %d %c\n",
- session->name,
- ntohl(session->tunnel_addr.addr.sin_addr.s_addr),
- ntohs(session->tunnel_addr.addr.sin_port),
- session->tunnel_addr.s_tunnel,
- session->tunnel_addr.s_session,
- session->tunnel_addr.d_tunnel,
- session->tunnel_addr.d_session,
- session->sock->sk_state,
- (session == session->sock->sk_user_data) ?
- 'Y' : 'N');
- seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
- session->mtu, session->mru,
- session->recv_seq ? 'R' : '-',
- session->send_seq ? 'S' : '-',
- session->lns_mode ? "LNS" : "LAC",
- session->debug,
- jiffies_to_msecs(session->reorder_timeout));
- seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
- session->nr, session->ns,
- (unsigned long long)session->stats.tx_packets,
- (unsigned long long)session->stats.tx_bytes,
- (unsigned long long)session->stats.tx_errors,
- (unsigned long long)session->stats.rx_packets,
- (unsigned long long)session->stats.rx_bytes,
- (unsigned long long)session->stats.rx_errors);
-}
-
-static int pppol2tp_seq_show(struct seq_file *m, void *v)
-{
- struct pppol2tp_seq_data *pd = v;
-
- /* display header on line 1 */
- if (v == SEQ_START_TOKEN) {
- seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
- seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
- seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
- seq_puts(m, " SESSION name, addr/port src-tid/sid "
- "dest-tid/sid state user-data-ok\n");
- seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
- seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
- goto out;
- }
-
- /* Show the tunnel or session context.
- */
- if (pd->session == NULL)
- pppol2tp_seq_tunnel_show(m, pd->tunnel);
- else
- pppol2tp_seq_session_show(m, pd->session);
-
-out:
- return 0;
-}
-
-static const struct seq_operations pppol2tp_seq_ops = {
- .start = pppol2tp_seq_start,
- .next = pppol2tp_seq_next,
- .stop = pppol2tp_seq_stop,
- .show = pppol2tp_seq_show,
-};
-
-/* Called when our /proc file is opened. We allocate data for use when
- * iterating our tunnel / session contexts and store it in the private
- * data of the seq_file.
- */
-static int pppol2tp_proc_open(struct inode *inode, struct file *file)
-{
- return seq_open_net(inode, file, &pppol2tp_seq_ops,
- sizeof(struct pppol2tp_seq_data));
-}
-
-static const struct file_operations pppol2tp_proc_fops = {
- .owner = THIS_MODULE,
- .open = pppol2tp_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_net,
-};
-
-#endif /* CONFIG_PROC_FS */
-
-/*****************************************************************************
- * Init and cleanup
- *****************************************************************************/
-
-static const struct proto_ops pppol2tp_ops = {
- .family = AF_PPPOX,
- .owner = THIS_MODULE,
- .release = pppol2tp_release,
- .bind = sock_no_bind,
- .connect = pppol2tp_connect,
- .socketpair = sock_no_socketpair,
- .accept = sock_no_accept,
- .getname = pppol2tp_getname,
- .poll = datagram_poll,
- .listen = sock_no_listen,
- .shutdown = sock_no_shutdown,
- .setsockopt = pppol2tp_setsockopt,
- .getsockopt = pppol2tp_getsockopt,
- .sendmsg = pppol2tp_sendmsg,
- .recvmsg = pppol2tp_recvmsg,
- .mmap = sock_no_mmap,
- .ioctl = pppox_ioctl,
-};
-
-static struct pppox_proto pppol2tp_proto = {
- .create = pppol2tp_create,
- .ioctl = pppol2tp_ioctl
-};
-
-static __net_init int pppol2tp_init_net(struct net *net)
-{
- struct pppol2tp_net *pn = pppol2tp_pernet(net);
- struct proc_dir_entry *pde;
-
- INIT_LIST_HEAD(&pn->pppol2tp_tunnel_list);
- rwlock_init(&pn->pppol2tp_tunnel_list_lock);
-
- pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
-#ifdef CONFIG_PROC_FS
- if (!pde)
- return -ENOMEM;
-#endif
-
- return 0;
-}
-
-static __net_exit void pppol2tp_exit_net(struct net *net)
-{
- proc_net_remove(net, "pppol2tp");
-}
-
-static struct pernet_operations pppol2tp_net_ops = {
- .init = pppol2tp_init_net,
- .exit = pppol2tp_exit_net,
- .id = &pppol2tp_net_id,
- .size = sizeof(struct pppol2tp_net),
-};
-
-static int __init pppol2tp_init(void)
-{
- int err;
-
- err = proto_register(&pppol2tp_sk_proto, 0);
- if (err)
- goto out;
- err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
- if (err)
- goto out_unregister_pppol2tp_proto;
-
- err = register_pernet_device(&pppol2tp_net_ops);
- if (err)
- goto out_unregister_pppox_proto;
-
- printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
- PPPOL2TP_DRV_VERSION);
-
-out:
- return err;
-out_unregister_pppox_proto:
- unregister_pppox_proto(PX_PROTO_OL2TP);
-out_unregister_pppol2tp_proto:
- proto_unregister(&pppol2tp_sk_proto);
- goto out;
-}
-
-static void __exit pppol2tp_exit(void)
-{
- unregister_pppox_proto(PX_PROTO_OL2TP);
- unregister_pernet_device(&pppol2tp_net_ops);
- proto_unregister(&pppol2tp_sk_proto);
-}
-
-module_init(pppol2tp_init);
-module_exit(pppol2tp_exit);
-
-MODULE_AUTHOR("Martijn van Oosterhout <kleptog@svana.org>, "
- "James Chapman <jchapman@katalix.com>");
-MODULE_DESCRIPTION("PPP over L2TP over UDP");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(PPPOL2TP_DRV_VERSION);
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 5bf229b..87d6b8f 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -327,7 +327,7 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
unsigned int bufsize;
if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
- dev_info(ctodev(card), "%s: ERROR status \n", __func__);
+ dev_info(ctodev(card), "%s: ERROR status\n", __func__);
/* we need to round up the buffer size to a multiple of 128 */
bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
@@ -547,7 +547,7 @@ out:
void gelic_net_set_multi(struct net_device *netdev)
{
struct gelic_card *card = netdev_card(netdev);
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
unsigned int i;
uint8_t *p;
u64 addr;
@@ -581,9 +581,9 @@ void gelic_net_set_multi(struct net_device *netdev)
}
/* set multicast addresses */
- netdev_for_each_mc_addr(mc, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
addr = 0;
- p = mc->dmi_addr;
+ p = ha->addr;
for (i = 0; i < ETH_ALEN; i++) {
addr <<= 8;
addr |= *p++;
@@ -903,9 +903,6 @@ int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
gelic_descr_release_tx(card, descr->next);
card->tx_chain.tail = descr->next->next;
dev_info(ctodev(card), "%s: kick failure\n", __func__);
- } else {
- /* OK, DMA started/reserved */
- netdev->trans_start = jiffies;
}
spin_unlock_irqrestore(&card->tx_lock, flags);
@@ -1435,7 +1432,7 @@ static void gelic_net_tx_timeout_task(struct work_struct *work)
container_of(work, struct gelic_card, tx_timeout_task);
struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET_0];
- dev_info(ctodev(card), "%s:Timed out. Restarting... \n", __func__);
+ dev_info(ctodev(card), "%s:Timed out. Restarting...\n", __func__);
if (!(netdev->flags & IFF_UP))
goto out;
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 369a801..43b8d77 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -301,7 +301,6 @@ static void gelic_wl_get_ch_info(struct gelic_wl_info *wl)
/* 16 bits of MSB has available channels */
wl->ch_info = ch_info_raw >> 48;
}
- return;
}
/* SIOGIWRANGE */
@@ -528,7 +527,7 @@ static void gelic_wl_parse_ie(u8 *data, size_t len,
u8 item_len;
u8 item_id;
- pr_debug("%s: data=%p len=%ld \n", __func__,
+ pr_debug("%s: data=%p len=%ld\n", __func__,
data, len);
memset(ie_info, 0, sizeof(struct ie_info));
@@ -897,7 +896,7 @@ static int gelic_wl_set_auth(struct net_device *netdev,
default:
ret = -EOPNOTSUPP;
break;
- };
+ }
if (!ret)
set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
@@ -979,7 +978,7 @@ static int gelic_wl_set_essid(struct net_device *netdev,
pr_debug("%s: essid = '%s'\n", __func__, extra);
set_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
} else {
- pr_debug("%s: ESSID any \n", __func__);
+ pr_debug("%s: ESSID any\n", __func__);
clear_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
}
set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
@@ -987,7 +986,7 @@ static int gelic_wl_set_essid(struct net_device *netdev,
gelic_wl_try_associate(netdev); /* FIXME */
- pr_debug("%s: -> \n", __func__);
+ pr_debug("%s: ->\n", __func__);
return 0;
}
@@ -998,7 +997,7 @@ static int gelic_wl_get_essid(struct net_device *netdev,
struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
unsigned long irqflag;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
mutex_lock(&wl->assoc_stat_lock);
spin_lock_irqsave(&wl->lock, irqflag);
if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat) ||
@@ -1011,7 +1010,7 @@ static int gelic_wl_get_essid(struct net_device *netdev,
mutex_unlock(&wl->assoc_stat_lock);
spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s: -> len=%d \n", __func__, data->essid.length);
+ pr_debug("%s: -> len=%d\n", __func__, data->essid.length);
return 0;
}
@@ -1028,7 +1027,7 @@ static int gelic_wl_set_encode(struct net_device *netdev,
int key_index, index_specified;
int ret = 0;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
flags = enc->flags & IW_ENCODE_FLAGS;
key_index = enc->flags & IW_ENCODE_INDEX;
@@ -1087,7 +1086,7 @@ static int gelic_wl_set_encode(struct net_device *netdev,
set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
done:
spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s: -> \n", __func__);
+ pr_debug("%s: ->\n", __func__);
return ret;
}
@@ -1101,7 +1100,7 @@ static int gelic_wl_get_encode(struct net_device *netdev,
unsigned int key_index, index_specified;
int ret = 0;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
key_index = enc->flags & IW_ENCODE_INDEX;
pr_debug("%s: flag=%#x point=%p len=%d extra=%p\n", __func__,
enc->flags, enc->pointer, enc->length, extra);
@@ -1215,7 +1214,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
int key_index;
int ret = 0;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
flags = enc->flags & IW_ENCODE_FLAGS;
alg = ext->alg;
key_index = enc->flags & IW_ENCODE_INDEX;
@@ -1288,7 +1287,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
}
done:
spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s: -> \n", __func__);
+ pr_debug("%s: ->\n", __func__);
return ret;
}
@@ -1304,7 +1303,7 @@ static int gelic_wl_get_encodeext(struct net_device *netdev,
int ret = 0;
int max_key_len;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
max_key_len = enc->length - sizeof(struct iw_encode_ext);
if (max_key_len < 0)
@@ -1359,7 +1358,7 @@ static int gelic_wl_get_encodeext(struct net_device *netdev,
}
out:
spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s: -> \n", __func__);
+ pr_debug("%s: ->\n", __func__);
return ret;
}
/* SIOC{S,G}IWMODE */
@@ -1370,7 +1369,7 @@ static int gelic_wl_set_mode(struct net_device *netdev,
__u32 mode = data->mode;
int ret;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
if (mode == IW_MODE_INFRA)
ret = 0;
else
@@ -1384,7 +1383,7 @@ static int gelic_wl_get_mode(struct net_device *netdev,
union iwreq_data *data, char *extra)
{
__u32 *mode = &data->mode;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
*mode = IW_MODE_INFRA;
pr_debug("%s: ->\n", __func__);
return 0;
@@ -1992,7 +1991,7 @@ static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
case GELIC_WL_WPA_LEVEL_WPA2:
ret = gelic_wl_do_wpa_setup(wl);
break;
- };
+ }
if (ret) {
pr_debug("%s: WEP/WPA setup failed %d\n", __func__,
@@ -2022,7 +2021,7 @@ static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
if (!rc) {
 /* timed out. Maybe key or crypt mode is wrong */
- pr_info("%s: connect timeout \n", __func__);
+ pr_info("%s: connect timeout\n", __func__);
cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC,
NULL, 0);
kfree(cmd);
@@ -2063,7 +2062,7 @@ static void gelic_wl_connected_event(struct gelic_wl_info *wl,
}
if (desired_event == event) {
- pr_debug("%s: completed \n", __func__);
+ pr_debug("%s: completed\n", __func__);
complete(&wl->assoc_done);
netif_carrier_on(port_to_netdev(wl_port(wl)));
} else
@@ -2280,26 +2279,25 @@ void gelic_wl_interrupt(struct net_device *netdev, u64 status)
/*
* driver helpers
*/
-#define IW_IOCTL(n) [(n) - SIOCSIWCOMMIT]
static const iw_handler gelic_wl_wext_handler[] =
{
- IW_IOCTL(SIOCGIWNAME) = gelic_wl_get_name,
- IW_IOCTL(SIOCGIWRANGE) = gelic_wl_get_range,
- IW_IOCTL(SIOCSIWSCAN) = gelic_wl_set_scan,
- IW_IOCTL(SIOCGIWSCAN) = gelic_wl_get_scan,
- IW_IOCTL(SIOCSIWAUTH) = gelic_wl_set_auth,
- IW_IOCTL(SIOCGIWAUTH) = gelic_wl_get_auth,
- IW_IOCTL(SIOCSIWESSID) = gelic_wl_set_essid,
- IW_IOCTL(SIOCGIWESSID) = gelic_wl_get_essid,
- IW_IOCTL(SIOCSIWENCODE) = gelic_wl_set_encode,
- IW_IOCTL(SIOCGIWENCODE) = gelic_wl_get_encode,
- IW_IOCTL(SIOCSIWAP) = gelic_wl_set_ap,
- IW_IOCTL(SIOCGIWAP) = gelic_wl_get_ap,
- IW_IOCTL(SIOCSIWENCODEEXT) = gelic_wl_set_encodeext,
- IW_IOCTL(SIOCGIWENCODEEXT) = gelic_wl_get_encodeext,
- IW_IOCTL(SIOCSIWMODE) = gelic_wl_set_mode,
- IW_IOCTL(SIOCGIWMODE) = gelic_wl_get_mode,
- IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick,
+ IW_HANDLER(SIOCGIWNAME, gelic_wl_get_name),
+ IW_HANDLER(SIOCGIWRANGE, gelic_wl_get_range),
+ IW_HANDLER(SIOCSIWSCAN, gelic_wl_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, gelic_wl_get_scan),
+ IW_HANDLER(SIOCSIWAUTH, gelic_wl_set_auth),
+ IW_HANDLER(SIOCGIWAUTH, gelic_wl_get_auth),
+ IW_HANDLER(SIOCSIWESSID, gelic_wl_set_essid),
+ IW_HANDLER(SIOCGIWESSID, gelic_wl_get_essid),
+ IW_HANDLER(SIOCSIWENCODE, gelic_wl_set_encode),
+ IW_HANDLER(SIOCGIWENCODE, gelic_wl_get_encode),
+ IW_HANDLER(SIOCSIWAP, gelic_wl_set_ap),
+ IW_HANDLER(SIOCGIWAP, gelic_wl_get_ap),
+ IW_HANDLER(SIOCSIWENCODEEXT, gelic_wl_set_encodeext),
+ IW_HANDLER(SIOCGIWENCODEEXT, gelic_wl_get_encodeext),
+ IW_HANDLER(SIOCSIWMODE, gelic_wl_set_mode),
+ IW_HANDLER(SIOCGIWMODE, gelic_wl_get_mode),
+ IW_HANDLER(SIOCGIWNICKN, gelic_wl_get_nick),
};
static const struct iw_handler_def gelic_wl_wext_handler_def = {
@@ -2318,7 +2316,7 @@ static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card)
pr_debug("%s:start\n", __func__);
netdev = alloc_etherdev(sizeof(struct gelic_port) +
sizeof(struct gelic_wl_info));
- pr_debug("%s: netdev =%p card=%p \np", __func__, netdev, card);
+ pr_debug("%s: netdev =%p card=%p\n", __func__, netdev, card);
if (!netdev)
return NULL;
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 4ef0afb..54ebb65 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -222,7 +222,6 @@ static void ql_write_common_reg_l(struct ql3_adapter *qdev,
writel(value, reg);
readl(reg);
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
- return;
}
static void ql_write_common_reg(struct ql3_adapter *qdev,
@@ -230,7 +229,6 @@ static void ql_write_common_reg(struct ql3_adapter *qdev,
{
writel(value, reg);
readl(reg);
- return;
}
static void ql_write_nvram_reg(struct ql3_adapter *qdev,
@@ -239,7 +237,6 @@ static void ql_write_nvram_reg(struct ql3_adapter *qdev,
writel(value, reg);
readl(reg);
udelay(1);
- return;
}
static void ql_write_page0_reg(struct ql3_adapter *qdev,
@@ -249,7 +246,6 @@ static void ql_write_page0_reg(struct ql3_adapter *qdev,
ql_set_register_page(qdev,0);
writel(value, reg);
readl(reg);
- return;
}
/*
@@ -262,7 +258,6 @@ static void ql_write_page1_reg(struct ql3_adapter *qdev,
ql_set_register_page(qdev,1);
writel(value, reg);
readl(reg);
- return;
}
/*
@@ -275,7 +270,6 @@ static void ql_write_page2_reg(struct ql3_adapter *qdev,
ql_set_register_page(qdev,2);
writel(value, reg);
readl(reg);
- return;
}
static void ql_disable_interrupts(struct ql3_adapter *qdev)
@@ -343,8 +337,8 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
cpu_to_le32(LS_64BITS(map));
lrg_buf_cb->buf_phy_addr_high =
cpu_to_le32(MS_64BITS(map));
- pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
- pci_unmap_len_set(lrg_buf_cb, maplen,
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -
QL_HEADER_SPACE);
}
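/* Illustrative sketch (not part of this patch): the qla3xxx hunks above and
 * below swap the legacy pci_unmap_* helpers for the generic dma_unmap_*
 * accessors.  These pair with struct-side declaration macros from
 * <linux/dma-mapping.h>; the struct and field names here are examples only.
 */
struct example_buf_cb {
	struct sk_buff *skb;			/* needs <linux/skbuff.h> */
	DEFINE_DMA_UNMAP_ADDR(mapaddr);		/* compile away on archs that */
	DEFINE_DMA_UNMAP_LEN(maplen);		/* never need unmap bookkeeping */
};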
@@ -1924,8 +1918,8 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
cpu_to_le32(LS_64BITS(map));
lrg_buf_cb->buf_phy_addr_high =
cpu_to_le32(MS_64BITS(map));
- pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
- pci_unmap_len_set(lrg_buf_cb, maplen,
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -
QL_HEADER_SPACE);
--qdev->lrg_buf_skb_check;
@@ -2041,16 +2035,16 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
}
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[0], mapaddr),
- pci_unmap_len(&tx_cb->map[0], maplen),
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
PCI_DMA_TODEVICE);
tx_cb->seg_count--;
if (tx_cb->seg_count) {
for (i = 1; i < tx_cb->seg_count; i++) {
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[i],
+ dma_unmap_addr(&tx_cb->map[i],
mapaddr),
- pci_unmap_len(&tx_cb->map[i], maplen),
+ dma_unmap_len(&tx_cb->map[i], maplen),
PCI_DMA_TODEVICE);
}
}
@@ -2119,8 +2113,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
skb_put(skb, length);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(lrg_buf_cb2, mapaddr),
- pci_unmap_len(lrg_buf_cb2, maplen),
+ dma_unmap_addr(lrg_buf_cb2, mapaddr),
+ dma_unmap_len(lrg_buf_cb2, maplen),
PCI_DMA_FROMDEVICE);
prefetch(skb->data);
skb->ip_summed = CHECKSUM_NONE;
@@ -2165,8 +2159,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
skb_put(skb2, length); /* Just the second buffer length here. */
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(lrg_buf_cb2, mapaddr),
- pci_unmap_len(lrg_buf_cb2, maplen),
+ dma_unmap_addr(lrg_buf_cb2, mapaddr),
+ dma_unmap_len(lrg_buf_cb2, maplen),
PCI_DMA_FROMDEVICE);
prefetch(skb2->data);
@@ -2258,7 +2252,7 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
"%x.\n",
ndev->name, net_rsp->opcode);
printk(KERN_ERR PFX
- "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n",
+ "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
(unsigned long int)tmp[0],
(unsigned long int)tmp[1],
(unsigned long int)tmp[2],
@@ -2454,8 +2448,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
oal_entry->len = cpu_to_le32(len);
- pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
- pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
seg++;
if (seg_cnt == 1) {
@@ -2488,9 +2482,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
oal_entry->len =
cpu_to_le32(sizeof(struct oal) |
OAL_CONT_ENTRY);
- pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr,
map);
- pci_unmap_len_set(&tx_cb->map[seg], maplen,
+ dma_unmap_len_set(&tx_cb->map[seg], maplen,
sizeof(struct oal));
oal_entry = (struct oal_entry *)oal;
oal++;
@@ -2512,8 +2506,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
oal_entry->len = cpu_to_le32(frag->size);
- pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
- pci_unmap_len_set(&tx_cb->map[seg], maplen,
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen,
frag->size);
}
/* Terminate the last segment. */
@@ -2539,22 +2533,22 @@ map_error:
(seg == 12 && seg_cnt > 13) || /* but necessary. */
(seg == 17 && seg_cnt > 18)) {
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[seg], mapaddr),
- pci_unmap_len(&tx_cb->map[seg], maplen),
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
PCI_DMA_TODEVICE);
oal++;
seg++;
}
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[seg], mapaddr),
- pci_unmap_len(&tx_cb->map[seg], maplen),
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
PCI_DMA_TODEVICE);
}
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[0], mapaddr),
- pci_unmap_addr(&tx_cb->map[0], maplen),
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
PCI_DMA_TODEVICE);
return NETDEV_TX_BUSY;
@@ -2841,8 +2835,8 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
if (lrg_buf_cb->skb) {
dev_kfree_skb(lrg_buf_cb->skb);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(lrg_buf_cb, mapaddr),
- pci_unmap_len(lrg_buf_cb, maplen),
+ dma_unmap_addr(lrg_buf_cb, mapaddr),
+ dma_unmap_len(lrg_buf_cb, maplen),
PCI_DMA_FROMDEVICE);
memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
} else {
@@ -2912,8 +2906,8 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
return -ENOMEM;
}
- pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
- pci_unmap_len_set(lrg_buf_cb, maplen,
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -
QL_HEADER_SPACE);
lrg_buf_cb->buf_phy_addr_low =
@@ -3793,13 +3787,13 @@ static void ql_reset_work(struct work_struct *work)
"%s: Freeing lost SKB.\n",
qdev->ndev->name);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[0], mapaddr),
- pci_unmap_len(&tx_cb->map[0], maplen),
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
PCI_DMA_TODEVICE);
for(j=1;j<tx_cb->seg_count;j++) {
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[j],mapaddr),
- pci_unmap_len(&tx_cb->map[j],maplen),
+ dma_unmap_addr(&tx_cb->map[j],mapaddr),
+ dma_unmap_len(&tx_cb->map[j],maplen),
PCI_DMA_TODEVICE);
}
dev_kfree_skb(tx_cb->skb);
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 7113e71..3362a66 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -998,8 +998,8 @@ enum link_state_t {
struct ql_rcv_buf_cb {
struct ql_rcv_buf_cb *next;
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
__le32 buf_phy_addr_low;
__le32 buf_phy_addr_high;
int index;
@@ -1029,8 +1029,8 @@ struct oal {
};
struct map_list {
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
struct ql_tx_buf_cb {
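The qla3xxx hunks above (and the matching qlge ones later in this patch) convert the driver from the PCI-specific unmap-state macros to the generic DMA helpers, which compile away when CONFIG_NEED_DMA_MAP_STATE is not set. A minimal sketch of the generic helpers follows, assuming <linux/dma-mapping.h>; the struct and function names are illustrative and not part of the patch:

/* Sketch only: generic unmap-state bookkeeping with invented names. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

struct example_rx_buf {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapaddr);	/* present only when unmap state is needed */
	DEFINE_DMA_UNMAP_LEN(maplen);
};

static int example_map(struct device *dev, struct example_rx_buf *buf, size_t len)
{
	dma_addr_t map = dma_map_single(dev, buf->skb->data, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, map))
		return -ENOMEM;

	dma_unmap_addr_set(buf, mapaddr, map);
	dma_unmap_len_set(buf, maplen, len);
	return 0;
}

static void example_unmap(struct device *dev, struct example_rx_buf *buf)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, mapaddr),
			 dma_unmap_len(buf, maplen), DMA_FROM_DEVICE);
}
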
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 0da94b2..896d40d 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,8 +51,9 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 0
-#define QLCNIC_LINUX_VERSIONID "5.0.0"
+#define _QLCNIC_LINUX_SUBVERSION 2
+#define QLCNIC_LINUX_VERSIONID "5.0.2"
+#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -98,8 +99,6 @@
#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
#define QLCNIC_LRO_BUFFER_EXTRA 2048
-#define QLCNIC_RX_LRO_BUFFER_LENGTH (8060)
-
/* Opcodes to be used with the commands */
#define TX_ETHER_PKT 0x01
#define TX_TCP_PKT 0x02
@@ -133,7 +132,6 @@
#define RCV_RING_NORMAL 0
#define RCV_RING_JUMBO 1
-#define RCV_RING_LRO 2
#define MIN_CMD_DESCRIPTORS 64
#define MIN_RCV_DESCRIPTORS 64
@@ -144,7 +142,6 @@
#define MAX_RCV_DESCRIPTORS_10G 8192
#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
-#define MAX_LRO_RCV_DESCRIPTORS 8
#define DEFAULT_RCV_DESCRIPTORS_1G 2048
#define DEFAULT_RCV_DESCRIPTORS_10G 4096
@@ -152,8 +149,6 @@
#define get_next_index(index, length) \
(((index) + 1) & ((length) - 1))
-#define MPORT_MULTI_FUNCTION_MODE 0x2222
-
/*
* Following data structures describe the descriptors that will be used.
* Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when
@@ -399,13 +394,9 @@ struct qlcnic_hardware_context {
unsigned long pci_len0;
- u32 ocm_win;
- u32 crb_win;
-
rwlock_t crb_lock;
struct mutex mem_lock;
- u8 cut_through;
u8 revision_id;
u8 pci_func;
u8 linkup;
@@ -428,6 +419,10 @@ struct qlcnic_adapter_stats {
u64 xmit_on;
u64 xmit_off;
u64 skb_alloc_failure;
+ u64 null_skb;
+ u64 null_rxbuf;
+ u64 rx_dma_map_error;
+ u64 tx_dma_map_error;
};
/*
@@ -916,14 +911,12 @@ struct qlcnic_adapter {
u16 num_txd;
u16 num_rxd;
u16 num_jumbo_rxd;
- u16 num_lro_rxd;
u8 max_rds_rings;
u8 max_sds_rings;
u8 driver_mismatch;
u8 msix_supported;
u8 rx_csum;
- u8 pci_using_dac;
u8 portnum;
u8 physical_port;
@@ -958,11 +951,15 @@ struct qlcnic_adapter {
u8 dev_state;
u8 diag_test;
u8 diag_cnt;
+ u8 reset_ack_timeo;
+ u8 dev_init_timeo;
u8 rsrd1;
- u16 rsrd2;
+ u16 msg_enable;
u8 mac_addr[ETH_ALEN];
+ u64 dev_rst_time;
+
struct qlcnic_adapter_stats stats;
struct qlcnic_recv_context recv_ctx;
@@ -994,6 +991,11 @@ u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
+void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
+void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
+
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
#define QLCRD32(adapter, off) \
(qlcnic_hw_read_wx_2M(adapter, off))
@@ -1035,6 +1037,7 @@ int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
+int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
@@ -1128,4 +1131,11 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
extern const struct ethtool_ops qlcnic_ethtool_ops;
+#define QLCDB(adapter, lvl, _fmt, _args...) do { \
+ if (NETIF_MSG_##lvl & adapter->msg_enable) \
+ printk(KERN_INFO "%s: %s: " _fmt, \
+ dev_name(&adapter->pdev->dev), \
+ __func__, ##_args); \
+ } while (0)
+
#endif /* __QLCNIC_H_ */
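The QLCDB() macro added just above gates per-driver debug messages on the NETIF_MSG_* bits held in adapter->msg_enable; the level token is pasted onto NETIF_MSG_, so QLCDB(adapter, DRV, ...) only prints when NETIF_MSG_DRV is enabled. A minimal usage sketch (the caller is hypothetical, the identifiers it uses come from the driver headers):

static void example_log_recovery(struct qlcnic_adapter *adapter)
{
	QLCDB(adapter, DRV, "scheduling fw recovery, port %u\n", adapter->portnum);
	QLCDB(adapter, HW, "device state = %u\n",
	      QLCRD32(adapter, QLCNIC_CRB_DEV_STATE));
}

With the get/set_msglevel ethtool hooks added later in this patch, the mask can be changed at runtime, e.g. ethtool -s ethN msglvl <bitmask>.
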
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 0a6a399..c2c1f5c 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -421,7 +421,8 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
if (addr == NULL) {
dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_out_free;
}
tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index f83e15f..3bd514e 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -69,6 +69,14 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
QLC_OFF(stats.skb_alloc_failure)},
+ {"null skb",
+ QLC_SIZEOF(stats.null_skb), QLC_OFF(stats.null_skb)},
+ {"null rxbuf",
+ QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+ {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
+ QLC_OFF(stats.rx_dma_map_error)},
+ {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
+ QLC_OFF(stats.tx_dma_map_error)},
};
@@ -404,7 +412,6 @@ qlcnic_get_ringparam(struct net_device *dev,
ring->rx_pending = adapter->num_rxd;
ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
- ring->rx_jumbo_pending += adapter->num_lro_rxd;
ring->tx_pending = adapter->num_txd;
if (adapter->ahw.port_type == QLCNIC_GBE) {
@@ -598,19 +605,12 @@ qlcnic_set_pauseparam(struct net_device *netdev,
static int qlcnic_reg_test(struct net_device *dev)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- u32 data_read, data_written;
+ u32 data_read;
data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
if ((data_read & 0xffff) != adapter->pdev->vendor)
return 1;
- data_written = (u32)0xa5a5a5a5;
-
- QLCWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
- data_read = QLCRD32(adapter, CRB_SCRATCHPAD_TEST);
- if (data_written != data_read)
- return 1;
-
return 0;
}
@@ -998,6 +998,20 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data)
return 0;
}
+static u32 qlcnic_get_msglevel(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = msglvl;
+}
+
const struct ethtool_ops qlcnic_ethtool_ops = {
.get_settings = qlcnic_get_settings,
.set_settings = qlcnic_set_settings,
@@ -1029,4 +1043,6 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
.get_flags = ethtool_op_get_flags,
.set_flags = qlcnic_set_flags,
.phys_id = qlcnic_blink_led,
+ .set_msglevel = qlcnic_set_msglevel,
+ .get_msglevel = qlcnic_get_msglevel,
};
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 0469f84..ad9d167 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -435,9 +435,10 @@ enum {
#define QLCNIC_PCI_MS_2M (0x80000)
#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
+#define QLCNIC_PCI_CAMQM (0x04800000UL)
+#define QLCNIC_PCI_CAMQM_END (0x04800800UL)
#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
-#define QLCNIC_PCI_CAMQM_2M_END (0x04800800UL)
#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
@@ -448,7 +449,7 @@ enum {
#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
-#define QLCNIC_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
+#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
/*
* Register offsets for MN
@@ -562,39 +563,16 @@ enum {
#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
-#define CRB_MPORT_MODE (QLCNIC_REG(0xc4))
-#define CRB_DMA_SHIFT (QLCNIC_REG(0xcc))
-
#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
#define CRB_V2P_0 (QLCNIC_REG(0x290))
#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
-#define CRB_SW_INT_MASK_0 (QLCNIC_REG(0x1d8))
-#define CRB_SW_INT_MASK_1 (QLCNIC_REG(0x1e0))
-#define CRB_SW_INT_MASK_2 (QLCNIC_REG(0x1e4))
-#define CRB_SW_INT_MASK_3 (QLCNIC_REG(0x1e8))
-
#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
/*
- * capabilities register, can be used to selectively enable/disable features
- * for backward compability
- */
-#define CRB_NIC_CAPABILITIES_HOST QLCNIC_REG(0x1a8)
-#define CRB_NIC_CAPABILITIES_FW QLCNIC_REG(0x1dc)
-#define CRB_NIC_MSI_MODE_HOST QLCNIC_REG(0x270)
-#define CRB_NIC_MSI_MODE_FW QLCNIC_REG(0x274)
-
-#define INTR_SCHEME_PERPORT 0x1
-#define MSI_MODE_MULTIFUNC 0x1
-
-/* used for ethtool tests */
-#define CRB_SCRATCHPAD_TEST QLCNIC_REG(0x280)
-
-/*
* CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
* which can be read by the Phantom host to get producer/consumer indexes from
* Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
@@ -693,15 +671,24 @@ enum {
#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
-#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x14c))
-
- /* Device State */
-#define QLCNIC_DEV_COLD 1
-#define QLCNIC_DEV_INITALIZING 2
-#define QLCNIC_DEV_READY 3
-#define QLCNIC_DEV_NEED_RESET 4
-#define QLCNIC_DEV_NEED_QUISCENT 5
-#define QLCNIC_DEV_FAILED 6
+#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174))
+#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
+#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
+
+/* Device State */
+#define QLCNIC_DEV_COLD 0x1
+#define QLCNIC_DEV_INITIALIZING 0x2
+#define QLCNIC_DEV_READY 0x3
+#define QLCNIC_DEV_NEED_RESET 0x4
+#define QLCNIC_DEV_NEED_QUISCENT 0x5
+#define QLCNIC_DEV_FAILED 0x6
+#define QLCNIC_DEV_QUISCENT 0x7
+
+#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
+#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
+#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
+#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4)))
+#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4)))
#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000
@@ -709,9 +696,8 @@ enum {
#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
-#define FW_POLL_DELAY (2 * HZ)
-#define FW_FAIL_THRESH 3
-#define FW_POLL_THRESH 10
+#define FW_POLL_DELAY (1 * HZ)
+#define FW_FAIL_THRESH 2
#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
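The device/driver IDC handshake introduced above packs one 4-bit field per PCI function into the DRV_STATE and DEV_REF_COUNT registers: bit 0 of a function's nibble acknowledges a reset, bit 1 acknowledges a quiescent request. A small stand-alone illustration of the bit layout (plain user-space C, values hypothetical):

#include <stdio.h>

#define QLC_DEV_SET_RST_RDY(VAL, FN)	((VAL) |= (1 << (FN * 4)))
#define QLC_DEV_SET_QSCNT_RDY(VAL, FN)	((VAL) |= (2 << (FN * 4)))
#define QLC_DEV_CLR_RST_QSCNT(VAL, FN)	((VAL) &= ~(3 << (FN * 4)))

int main(void)
{
	unsigned int drv_state = 0;

	QLC_DEV_SET_RST_RDY(drv_state, 2);	/* function 2 acks a reset      */
	QLC_DEV_SET_QSCNT_RDY(drv_state, 5);	/* function 5 acks quiescence   */
	printf("drv_state = 0x%08x\n", drv_state);	/* 0x00200100 */

	QLC_DEV_CLR_RST_QSCNT(drv_state, 2);	/* function 2 withdraws its ack */
	printf("drv_state = 0x%08x\n", drv_state);	/* 0x00200000 */
	return 0;
}
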
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index e73ba45..0c2e1f0 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -54,21 +54,6 @@ static inline void writeq(u64 val, void __iomem *addr)
}
#endif
-#define ADDR_IN_RANGE(addr, low, high) \
- (((addr) < (high)) && ((addr) >= (low)))
-
-#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
- ((adapter)->ahw.pci_base0 + (off))
-
-static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter,
- unsigned long off)
-{
- if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
- return PCI_OFFSET_FIRST_RANGE(adapter, off);
-
- return NULL;
-}
-
static const struct crb_128M_2M_block_map
crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
{{{0, 0, 0, 0} } }, /* 0: PCI */
@@ -310,8 +295,12 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
if (done == 1)
break;
- if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT)
+ if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock;reg_id=%d\n",
+ sem, id_reg);
return -EIO;
+ }
msleep(1);
}
@@ -427,7 +416,7 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
void qlcnic_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
u32 mode = VPORT_MISS_MODE_DROP;
@@ -449,8 +438,8 @@ void qlcnic_set_multi(struct net_device *netdev)
}
if (!netdev_mc_empty(netdev)) {
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ qlcnic_nic_add_mac(adapter, ha->addr);
}
}
@@ -787,9 +776,6 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
window = CRB_HI(off);
- if (adapter->ahw.crb_win == window)
- return;
-
writel(window, addr);
if (readl(addr) != window) {
if (printk_ratelimit())
@@ -797,7 +783,6 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
"failed to set CRB window to %d off 0x%lx\n",
window, off);
}
- adapter->ahw.crb_win = window;
}
int
@@ -878,13 +863,6 @@ qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
u64 addr, u32 *start)
{
u32 window;
- struct pci_dev *pdev = adapter->pdev;
-
- if ((addr & 0x00ff800) == 0xff800) {
- if (printk_ratelimit())
- dev_warn(&pdev->dev, "QM access not handled\n");
- return -EIO;
- }
window = OCM_WIN_P3P(addr);
@@ -892,7 +870,6 @@ qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
/* read back to flush */
readl(adapter->ahw.ocm_win_crb);
- adapter->ahw.ocm_win = window;
*start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
return 0;
}
@@ -901,8 +878,7 @@ static int
qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
u64 *data, int op)
{
- void __iomem *addr, *mem_ptr = NULL;
- resource_size_t mem_base;
+ void __iomem *addr;
int ret;
u32 start;
@@ -912,21 +888,8 @@ qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
if (ret != 0)
goto unlock;
- addr = pci_base_offset(adapter, start);
- if (addr)
- goto noremap;
-
- mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
-
- mem_ptr = ioremap(mem_base, PAGE_SIZE);
- if (mem_ptr == NULL) {
- ret = -EIO;
- goto unlock;
- }
+ addr = adapter->ahw.pci_base0 + start;
- addr = mem_ptr + (start & (PAGE_SIZE - 1));
-
-noremap:
if (op == 0) /* read */
*data = readq(addr);
else /* write */
@@ -935,11 +898,31 @@ noremap:
unlock:
mutex_unlock(&adapter->ahw.mem_lock);
- if (mem_ptr)
- iounmap(mem_ptr);
return ret;
}
+void
+qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
+
+ mutex_lock(&adapter->ahw.mem_lock);
+ *data = readq(addr);
+ mutex_unlock(&adapter->ahw.mem_lock);
+}
+
+void
+qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
+
+ mutex_lock(&adapter->ahw.mem_lock);
+ writeq(data, addr);
+ mutex_unlock(&adapter->ahw.mem_lock);
+}
+
#define MAX_CTL_CHECK 1000
int
@@ -948,7 +931,6 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
{
int i, j, ret;
u32 temp, off8;
- u64 stride;
void __iomem *mem_crb;
/* Only 64-bit aligned access */
@@ -957,7 +939,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
/* P3 onward, test agent base for MIU and SIU is same */
if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
- QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+ QLCNIC_ADDR_QDR_NET_MAX)) {
mem_crb = qlcnic_get_ioaddr(adapter,
QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
goto correct;
@@ -975,9 +957,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
return -EIO;
correct:
- stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
-
- off8 = off & ~(stride-1);
+ off8 = off & ~0xf;
mutex_lock(&adapter->ahw.mem_lock);
@@ -985,30 +965,28 @@ correct:
writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
i = 0;
- if (stride == 16) {
- writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
- writel((TA_CTL_START | TA_CTL_ENABLE),
- (mem_crb + TEST_AGT_CTRL));
-
- for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = readl(mem_crb + TEST_AGT_CTRL);
- if ((temp & TA_CTL_BUSY) == 0)
- break;
- }
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE),
+ (mem_crb + TEST_AGT_CTRL));
- if (j >= MAX_CTL_CHECK) {
- ret = -EIO;
- goto done;
- }
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
- i = (off & 0xf) ? 0 : 2;
- writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
- mem_crb + MIU_TEST_AGT_WRDATA(i));
- writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
- mem_crb + MIU_TEST_AGT_WRDATA(i+1));
- i = (off & 0xf) ? 2 : 0;
+ if (j >= MAX_CTL_CHECK) {
+ ret = -EIO;
+ goto done;
}
+ i = (off & 0xf) ? 0 : 2;
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i));
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+ i = (off & 0xf) ? 2 : 0;
+
writel(data & 0xffffffff,
mem_crb + MIU_TEST_AGT_WRDATA(i));
writel((data >> 32) & 0xffffffff,
@@ -1044,7 +1022,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
{
int j, ret;
u32 temp, off8;
- u64 val, stride;
+ u64 val;
void __iomem *mem_crb;
/* Only 64-bit aligned access */
@@ -1053,7 +1031,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
/* P3 onward, test agent base for MIU and SIU is same */
if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
- QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+ QLCNIC_ADDR_QDR_NET_MAX)) {
mem_crb = qlcnic_get_ioaddr(adapter,
QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
goto correct;
@@ -1073,9 +1051,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
return -EIO;
correct:
- stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
-
- off8 = off & ~(stride-1);
+ off8 = off & ~0xf;
mutex_lock(&adapter->ahw.mem_lock);
@@ -1097,7 +1073,7 @@ correct:
ret = -EIO;
} else {
off8 = MIU_TEST_AGT_RDDATA_LO;
- if ((stride == 16) && (off & 0xf))
+ if (off & 0xf)
off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
temp = readl(mem_crb + off8 + 4);
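The new qlcnic_pci_camqm_read_2M()/write_2M() helpers above translate a CAM/QM address from the adapter's 128M address map (QLCNIC_PCI_CAMQM .. QLCNIC_PCI_CAMQM_END) into the fixed 2M BAR0 window at QLCNIC_PCI_CAMQM_2M_BASE before doing a 64-bit access. A worked example of just the address arithmetic (user-space C, offset value hypothetical):

#include <stdio.h>

#define QLCNIC_PCI_CAMQM		(0x04800000UL)
#define QLCNIC_PCI_CAMQM_2M_BASE	(0x000ff800UL)

int main(void)
{
	unsigned long off = 0x04800010UL;	/* some CAMQM register */
	unsigned long bar0_off = QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);

	printf("camqm 0x%08lx -> BAR0 offset 0x%08lx\n", off, bar0_off);	/* 0x000ff810 */
	return 0;
}
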
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 9d2c124..71a4e66 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -210,7 +210,7 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
if (cmd_buf_arr == NULL) {
dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
- return -ENOMEM;
+ goto err_out;
}
memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
tx_ring->cmd_buf_arr = cmd_buf_arr;
@@ -221,7 +221,7 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
rds_ring = kzalloc(size, GFP_KERNEL);
if (rds_ring == NULL) {
dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
- return -ENOMEM;
+ goto err_out;
}
recv_ctx->rds_rings = rds_ring;
@@ -230,17 +230,8 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
switch (ring) {
case RCV_RING_NORMAL:
rds_ring->num_desc = adapter->num_rxd;
- if (adapter->ahw.cut_through) {
- rds_ring->dma_size =
- QLCNIC_CT_DEFAULT_RX_BUF_LEN;
- rds_ring->skb_size =
- QLCNIC_CT_DEFAULT_RX_BUF_LEN;
- } else {
- rds_ring->dma_size =
- QLCNIC_P3_RX_BUF_MAX_LEN;
- rds_ring->skb_size =
- rds_ring->dma_size + NET_IP_ALIGN;
- }
+ rds_ring->dma_size = QLCNIC_P3_RX_BUF_MAX_LEN;
+ rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
break;
case RCV_RING_JUMBO:
@@ -254,13 +245,6 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
rds_ring->skb_size =
rds_ring->dma_size + NET_IP_ALIGN;
break;
-
- case RCV_RING_LRO:
- rds_ring->num_desc = adapter->num_lro_rxd;
- rds_ring->dma_size = QLCNIC_RX_LRO_BUFFER_LENGTH;
- rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
- break;
-
}
rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
@@ -530,6 +514,36 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
return 0;
}
+int
+qlcnic_setup_idc_param(struct qlcnic_adapter *adapter)
+{
+ int timeo;
+ u32 val;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
+ val = (val >> (adapter->portnum * 4)) & 0xf;
+
+ if ((val & 0x3) != 1) {
+ dev_err(&adapter->pdev->dev, "Not an Ethernet NIC func=%u\n",
+ val);
+ return -EIO;
+ }
+
+ adapter->physical_port = (val >> 2);
+
+ if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
+ timeo = 30;
+
+ adapter->dev_init_timeo = timeo;
+
+ if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
+ timeo = 10;
+
+ adapter->reset_ack_timeo = timeo;
+
+ return 0;
+}
+
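qlcnic_setup_idc_param() above reads QLCNIC_CRB_DEV_PARTITION_INFO, which carries one 4-bit descriptor per function: the low two bits give the function type (1 = Ethernet NIC) and the upper two bits the physical port. A worked example of the decode (user-space C, register value hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int partition_info = 0x00005951;	/* hypothetical register value */
	unsigned int portnum = 3;			/* PCI function being probed   */
	unsigned int val = (partition_info >> (portnum * 4)) & 0xf;

	if ((val & 0x3) != 1) {
		printf("func %u is not an Ethernet NIC (type %u)\n", portnum, val & 0x3);
		return 1;
	}
	printf("func %u -> physical port %u\n", portnum, val >> 2);	/* port 1 */
	return 0;
}
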
static int
qlcnic_has_mn(struct qlcnic_adapter *adapter)
{
@@ -540,12 +554,10 @@ qlcnic_has_mn(struct qlcnic_adapter *adapter)
QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
- if (flashed_ver >= QLCNIC_VERSION_CODE(4, 0, 220)) {
+ capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
+ if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
+ return 1;
- capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
- if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
- return 1;
- }
return 0;
}
@@ -612,7 +624,7 @@ qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
return -EINVAL;
tab_size = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size * (idx + 1)));
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
if (adapter->fw->size < tab_size)
return -EINVAL;
@@ -621,7 +633,7 @@ qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
(cpu_to_le32(tab_desc->entry_size) * (idx));
descr = (struct uni_data_desc *)&unirom[offs];
- data_size = descr->findex + cpu_to_le32(descr->size);
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
if (adapter->fw->size < data_size)
return -EINVAL;
@@ -647,7 +659,7 @@ qlcnic_validate_fw(struct qlcnic_adapter *adapter)
return -EINVAL;
tab_size = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size * (idx + 1)));
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
if (adapter->fw->size < tab_size)
return -EINVAL;
@@ -655,7 +667,7 @@ qlcnic_validate_fw(struct qlcnic_adapter *adapter)
offs = cpu_to_le32(tab_desc->findex) +
(cpu_to_le32(tab_desc->entry_size) * (idx));
descr = (struct uni_data_desc *)&unirom[offs];
- data_size = descr->findex + cpu_to_le32(descr->size);
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
if (adapter->fw->size < data_size)
return -EINVAL;
@@ -950,6 +962,16 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
flashaddr += 8;
}
+
+ size = (__force u32)qlcnic_get_fw_size(adapter) % 8;
+ if (size) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+ }
+
} else {
u64 data;
u32 hi, lo;
@@ -1162,9 +1184,6 @@ int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
if (err)
return err;
- QLCWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
- QLCWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
- QLCWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
return err;
@@ -1254,13 +1273,13 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
skb = buffer->skb;
- if (!adapter->ahw.cut_through)
- skb_reserve(skb, 2);
+ skb_reserve(skb, 2);
dma = pci_map_single(pdev, skb->data,
rds_ring->dma_size, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pdev, dma)) {
+ adapter->stats.rx_dma_map_error++;
dev_kfree_skb_any(skb);
buffer->skb = NULL;
return -ENOMEM;
@@ -1285,8 +1304,10 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
PCI_DMA_FROMDEVICE);
skb = buffer->skb;
- if (!skb)
+ if (!skb) {
+ adapter->stats.null_skb++;
goto no_skb;
+ }
if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
adapter->stats.csummed++;
@@ -1476,6 +1497,8 @@ qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
if (rxbuf)
list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+ else
+ adapter->stats.null_rxbuf++;
skip:
for (; desc_cnt > 0; desc_cnt--) {
@@ -1523,9 +1546,10 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
int producer, count = 0;
struct list_head *head;
+ spin_lock(&rds_ring->lock);
+
producer = rds_ring->producer;
- spin_lock(&rds_ring->lock);
head = &rds_ring->free_list;
while (!list_empty(head)) {
@@ -1547,13 +1571,13 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
producer = get_next_index(producer, rds_ring->num_desc);
}
- spin_unlock(&rds_ring->lock);
if (count) {
rds_ring->producer = producer;
writel((producer-1) & (rds_ring->num_desc-1),
rds_ring->crb_rcv_producer);
}
+ spin_unlock(&rds_ring->lock);
}
static void
@@ -1565,10 +1589,11 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
int producer, count = 0;
struct list_head *head;
- producer = rds_ring->producer;
if (!spin_trylock(&rds_ring->lock))
return;
+ producer = rds_ring->producer;
+
head = &rds_ring->free_list;
while (!list_empty(head)) {
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 234dab1..23ea9ca 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -61,6 +61,10 @@ static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
+static int load_fw_file;
+module_param(load_fw_file, int, 0644);
+MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
+
static int __devinit qlcnic_probe(struct pci_dev *pdev,
const struct pci_device_id *ent);
static void __devexit qlcnic_remove(struct pci_dev *pdev);
@@ -84,6 +88,7 @@ static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -208,6 +213,9 @@ qlcnic_napi_enable(struct qlcnic_adapter *adapter)
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
@@ -222,6 +230,9 @@ qlcnic_napi_disable(struct qlcnic_adapter *adapter)
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
qlcnic_disable_int(sds_ring);
@@ -233,67 +244,6 @@ qlcnic_napi_disable(struct qlcnic_adapter *adapter)
static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
{
memset(&adapter->stats, 0, sizeof(adapter->stats));
- return;
-}
-
-static int qlcnic_set_dma_mask(struct qlcnic_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- u64 mask, cmask;
-
- adapter->pci_using_dac = 0;
-
- mask = DMA_BIT_MASK(39);
- cmask = mask;
-
- if (pci_set_dma_mask(pdev, mask) == 0 &&
- pci_set_consistent_dma_mask(pdev, cmask) == 0) {
- adapter->pci_using_dac = 1;
- return 0;
- }
-
- return -EIO;
-}
-
-/* Update addressable range if firmware supports it */
-static int
-qlcnic_update_dma_mask(struct qlcnic_adapter *adapter)
-{
- int change, shift, err;
- u64 mask, old_mask, old_cmask;
- struct pci_dev *pdev = adapter->pdev;
-
- change = 0;
-
- shift = QLCRD32(adapter, CRB_DMA_SHIFT);
- if (shift > 32)
- return 0;
-
- if (shift > 9)
- change = 1;
-
- if (change) {
- old_mask = pdev->dma_mask;
- old_cmask = pdev->dev.coherent_dma_mask;
-
- mask = DMA_BIT_MASK(32+shift);
-
- err = pci_set_dma_mask(pdev, mask);
- if (err)
- goto err_out;
-
- err = pci_set_consistent_dma_mask(pdev, mask);
- if (err)
- goto err_out;
- dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
- }
-
- return 0;
-
-err_out:
- pci_set_dma_mask(pdev, old_mask);
- pci_set_consistent_dma_mask(pdev, old_cmask);
- return err;
}
static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
@@ -512,13 +462,6 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
int pci_func = adapter->ahw.pci_func;
- /*
- * Set the CRB window to invalid. If any register in window 0 is
- * accessed it should set the window to 0 and then reset it to 1.
- */
- adapter->ahw.crb_win = -1;
- adapter->ahw.ocm_win = -1;
-
/* remap phys address */
mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
mem_len = pci_resource_len(pdev, 0);
@@ -556,7 +499,9 @@ static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
qlcnic_boards[i].device == pdev->device &&
qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
qlcnic_boards[i].sub_device == pdev->subsystem_device) {
- strcpy(name, qlcnic_boards[i].short_name);
+ sprintf(name, "%pM: %s",
+ adapter->mac_addr,
+ qlcnic_boards[i].short_name);
found = 1;
break;
}
@@ -605,22 +550,10 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
brd_name, adapter->ahw.revision_id);
}
- if (adapter->fw_version < QLCNIC_VERSION_CODE(3, 4, 216)) {
- adapter->driver_mismatch = 1;
- dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
- fw_major, fw_minor, fw_build);
- return;
- }
-
- i = QLCRD32(adapter, QLCNIC_SRE_MISC);
- adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
-
- dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
- fw_major, fw_minor, fw_build,
- adapter->ahw.cut_through ? "cut-through" : "legacy");
+ dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
+ fw_major, fw_minor, fw_build);
- if (adapter->fw_version >= QLCNIC_VERSION_CODE(4, 0, 222))
- adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
+ adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
adapter->flags &= ~QLCNIC_LRO_ENABLED;
@@ -637,7 +570,6 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
adapter->num_txd = MAX_CMD_DESCRIPTORS;
- adapter->num_lro_rxd = 0;
adapter->max_rds_rings = 2;
}
@@ -646,11 +578,10 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
{
int val, err, first_boot;
- err = qlcnic_set_dma_mask(adapter);
- if (err)
+ err = qlcnic_can_start_firmware(adapter);
+ if (err < 0)
return err;
-
- if (!qlcnic_can_start_firmware(adapter))
+ else if (!err)
goto wait_init;
first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
@@ -658,7 +589,10 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
/* This is the first boot after power up */
QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
- qlcnic_request_firmware(adapter);
+ if (load_fw_file)
+ qlcnic_request_firmware(adapter);
+ else
+ adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
err = qlcnic_need_fw_reset(adapter);
if (err < 0)
@@ -672,7 +606,6 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
msleep(1);
}
- QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
@@ -696,16 +629,18 @@ wait_init:
goto err_out;
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
-
- qlcnic_update_dma_mask(adapter);
+ qlcnic_idc_debug_info(adapter, 1);
qlcnic_check_options(adapter);
adapter->need_fw_reset = 0;
- /* fall through and release firmware */
+ qlcnic_release_firmware(adapter);
+ return 0;
err_out:
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ dev_err(&adapter->pdev->dev, "Device state set to failed\n");
qlcnic_release_firmware(adapter);
return err;
}
@@ -937,6 +872,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
struct qlcnic_host_sds_ring *sds_ring;
int ring;
+ clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx.sds_rings[ring];
@@ -950,11 +886,11 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
adapter->max_sds_rings = max_sds_rings;
if (qlcnic_attach(adapter))
- return;
+ goto out;
if (netif_running(netdev))
__qlcnic_up(adapter, netdev);
-
+out:
netif_device_attach(netdev);
}
@@ -976,8 +912,10 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
adapter->diag_test = test;
ret = qlcnic_attach(adapter);
- if (ret)
+ if (ret) {
+ netif_device_attach(netdev);
return ret;
+ }
if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
@@ -985,6 +923,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
qlcnic_enable_int(sds_ring);
}
}
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
return 0;
}
@@ -1010,23 +949,19 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
if (!err)
- err = __qlcnic_up(adapter, netdev);
-
- if (err)
- goto done;
+ __qlcnic_up(adapter, netdev);
}
netif_device_attach(netdev);
}
-done:
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return err;
}
static int
qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
- struct net_device *netdev)
+ struct net_device *netdev, u8 pci_using_dac)
{
int err;
struct pci_dev *pdev = adapter->pdev;
@@ -1049,7 +984,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
- if (adapter->pci_using_dac) {
+ if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
@@ -1079,6 +1014,22 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
return 0;
}
+static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
+{
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+ *pci_using_dac = 1;
+ else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ *pci_using_dac = 0;
+ else {
+ dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int __devinit
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -1087,6 +1038,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
int pci_func_id = PCI_FUNC(pdev->devfn);
uint8_t revision_id;
+ uint8_t pci_using_dac;
err = pci_enable_device(pdev);
if (err)
@@ -1097,6 +1049,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_disable_pdev;
}
+ err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
+ if (err)
+ goto err_out_disable_pdev;
+
err = pci_request_regions(pdev, qlcnic_driver_name);
if (err)
goto err_out_disable_pdev;
@@ -1115,6 +1071,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
+ adapter->dev_rst_time = jiffies;
adapter->ahw.pci_func = pci_func_id;
revision_id = pdev->revision;
@@ -1139,21 +1096,23 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_iounmap;
}
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+ if (qlcnic_setup_idc_param(adapter))
+ goto err_out_iounmap;
err = qlcnic_start_firmware(adapter);
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
goto err_out_decr_ref;
-
- /*
- * See if the firmware gave us a virtual-physical port mapping.
- */
- adapter->physical_port = adapter->portnum;
+ }
qlcnic_clear_stats(adapter);
qlcnic_setup_intr(adapter);
- err = qlcnic_setup_netdev(adapter, netdev);
+ err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
if (err)
goto err_out_disable_msi;
@@ -1304,9 +1263,6 @@ qlcnic_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- adapter->ahw.crb_win = -1;
- adapter->ahw.ocm_win = -1;
-
err = qlcnic_start_firmware(adapter);
if (err) {
dev_err(&pdev->dev, "failed to start firmware\n");
@@ -1334,6 +1290,7 @@ err_out_detach:
qlcnic_detach(adapter);
err_out:
qlcnic_clr_all_drv_state(adapter);
+ netif_device_attach(netdev);
return err;
}
#endif
@@ -1570,6 +1527,11 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
int frag_count, no_of_desc;
u32 num_txd = tx_ring->num_desc;
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
frag_count = skb_shinfo(skb)->nr_frags + 1;
/* 4 fragments per cmd des */
@@ -1586,8 +1548,10 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
pdev = adapter->pdev;
- if (qlcnic_map_tx_skb(pdev, skb, pbuf))
+ if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
+ adapter->stats.tx_dma_map_error++;
goto drop_packet;
+ }
pbuf->skb = skb;
pbuf->frag_count = frag_count;
@@ -1739,6 +1703,7 @@ static void qlcnic_tx_timeout_task(struct work_struct *work)
request_reset:
adapter->need_fw_reset = 1;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ QLCDB(adapter, DRV, "Resetting adapter\n");
}
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
@@ -1750,7 +1715,7 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
stats->tx_packets = adapter->stats.xmitfinished;
- stats->rx_bytes = adapter->stats.rxbytes;
+ stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
stats->tx_bytes = adapter->stats.txbytes;
stats->rx_dropped = adapter->stats.rxdropped;
stats->tx_dropped = adapter->stats.txdropped;
@@ -1944,7 +1909,20 @@ static void qlcnic_poll_controller(struct net_device *netdev)
#endif
static void
-qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
+qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
+{
+ u32 val;
+
+ val = adapter->portnum & 0xf;
+ val |= encoding << 7;
+ val |= (jiffies - adapter->dev_rst_time) << 8;
+
+ QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
+ adapter->dev_rst_time = jiffies;
+}
+
+static int
+qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
{
u32 val;
@@ -1952,18 +1930,20 @@ qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
state != QLCNIC_DEV_NEED_QUISCENT);
if (qlcnic_api_lock(adapter))
- return ;
+ return -EIO;
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
if (state == QLCNIC_DEV_NEED_RESET)
- val |= ((u32)0x1 << (adapter->portnum * 4));
+ QLC_DEV_SET_RST_RDY(val, adapter->portnum);
else if (state == QLCNIC_DEV_NEED_QUISCENT)
- val |= ((u32)0x1 << ((adapter->portnum * 4) + 1));
+ QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
+
+ return 0;
}
static int
@@ -1975,7 +1955,7 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
return -EBUSY;
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val &= ~((u32)0x3 << (adapter->portnum * 4));
+ QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
@@ -1992,14 +1972,14 @@ qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
goto err;
val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
- val &= ~((u32)0x1 << (adapter->portnum * 4));
+ QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
if (!(val & 0x11111111))
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val &= ~((u32)0x3 << (adapter->portnum * 4));
+ QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
@@ -2009,6 +1989,7 @@ err:
clear_bit(__QLCNIC_RESETTING, &adapter->state);
}
+/* Grab api lock, before checking state */
static int
qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
{
@@ -2024,73 +2005,103 @@ qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
return 1;
}
+static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
+{
+ u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
+
+ if (val != QLCNIC_DRV_IDC_VER) {
+ dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
+ " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
+ }
+
+ return 0;
+}
+
static int
qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
{
u32 val, prev_state;
- int cnt = 0;
- int portnum = adapter->portnum;
+ u8 dev_init_timeo = adapter->dev_init_timeo;
+ u8 portnum = adapter->portnum;
+ u8 ret;
+
+ if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
+ return 1;
if (qlcnic_api_lock(adapter))
return -1;
val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
- if (!(val & ((int)0x1 << (portnum * 4)))) {
- val |= ((u32)0x1 << (portnum * 4));
+ if (!(val & (1 << (portnum * 4)))) {
+ QLC_DEV_SET_REF_CNT(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
- } else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) {
- goto start_fw;
}
prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ QLCDB(adapter, HW, "Device state = %u\n", prev_state);
switch (prev_state) {
case QLCNIC_DEV_COLD:
-start_fw:
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING);
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
+ qlcnic_idc_debug_info(adapter, 0);
qlcnic_api_unlock(adapter);
return 1;
case QLCNIC_DEV_READY:
+ ret = qlcnic_check_idc_ver(adapter);
qlcnic_api_unlock(adapter);
- return 0;
+ return ret;
case QLCNIC_DEV_NEED_RESET:
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val |= ((u32)0x1 << (portnum * 4));
+ QLC_DEV_SET_RST_RDY(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
break;
case QLCNIC_DEV_NEED_QUISCENT:
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val |= ((u32)0x1 << ((portnum * 4) + 1));
+ QLC_DEV_SET_QSCNT_RDY(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
break;
case QLCNIC_DEV_FAILED:
+ dev_err(&adapter->pdev->dev, "Device in failed state.\n");
qlcnic_api_unlock(adapter);
return -1;
+
+ case QLCNIC_DEV_INITIALIZING:
+ case QLCNIC_DEV_QUISCENT:
+ break;
}
qlcnic_api_unlock(adapter);
- msleep(1000);
- while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) &&
- ++cnt < 20)
+
+ do {
msleep(1000);
+ prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (prev_state == QLCNIC_DEV_QUISCENT)
+ continue;
+ } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
- if (cnt >= 20)
+ if (!dev_init_timeo) {
+ dev_err(&adapter->pdev->dev,
+ "Waiting for device to initialize timeout\n");
return -1;
+ }
if (qlcnic_api_lock(adapter))
return -1;
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val &= ~((u32)0x3 << (portnum * 4));
+ QLC_DEV_CLR_RST_QSCNT(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ ret = qlcnic_check_idc_ver(adapter);
qlcnic_api_unlock(adapter);
- return 0;
+ return ret;
}
static void
@@ -2098,44 +2109,84 @@ qlcnic_fwinit_work(struct work_struct *work)
{
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
- int dev_state;
+ u32 dev_state = 0xf;
- if (++adapter->fw_wait_cnt > FW_POLL_THRESH)
+ if (qlcnic_api_lock(adapter))
goto err_ret;
- if (test_bit(__QLCNIC_START_FW, &adapter->state)) {
+ dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (dev_state == QLCNIC_DEV_QUISCENT) {
+ qlcnic_api_unlock(adapter);
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
+ FW_POLL_DELAY * 2);
+ return;
+ }
+
+ if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
+ dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
+ adapter->reset_ack_timeo);
+ goto skip_ack_check;
+ }
+
+ if (!qlcnic_check_drv_state(adapter)) {
+skip_ack_check:
+ dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (qlcnic_check_drv_state(adapter)) {
- qlcnic_schedule_work(adapter,
- qlcnic_fwinit_work, FW_POLL_DELAY);
+ if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_QUISCENT);
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
+ FW_POLL_DELAY * 2);
+ QLCDB(adapter, DRV, "Quiscing the driver\n");
+ qlcnic_idc_debug_info(adapter, 0);
+
+ qlcnic_api_unlock(adapter);
return;
}
+ if (dev_state == QLCNIC_DEV_NEED_RESET) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
+ set_bit(__QLCNIC_START_FW, &adapter->state);
+ QLCDB(adapter, DRV, "Restarting fw\n");
+ qlcnic_idc_debug_info(adapter, 0);
+ }
+
+ qlcnic_api_unlock(adapter);
+
if (!qlcnic_start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
return;
}
-
goto err_ret;
}
+ qlcnic_api_unlock(adapter);
+
dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
+
switch (dev_state) {
- case QLCNIC_DEV_READY:
- if (!qlcnic_start_firmware(adapter)) {
- qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
- return;
- }
+ case QLCNIC_DEV_QUISCENT:
+ case QLCNIC_DEV_NEED_QUISCENT:
+ case QLCNIC_DEV_NEED_RESET:
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, FW_POLL_DELAY);
+ return;
case QLCNIC_DEV_FAILED:
break;
default:
- qlcnic_schedule_work(adapter,
- qlcnic_fwinit_work, 2 * FW_POLL_DELAY);
- return;
+ if (!qlcnic_start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ return;
+ }
}
err_ret:
+ dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
+ "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
+ netif_device_attach(adapter->netdev);
qlcnic_clr_all_drv_state(adapter);
}
@@ -2163,7 +2214,8 @@ qlcnic_detach_work(struct work_struct *work)
if (adapter->temp == QLCNIC_TEMP_PANIC)
goto err_ret;
- qlcnic_set_drv_state(adapter, adapter->dev_state);
+ if (qlcnic_set_drv_state(adapter, adapter->dev_state))
+ goto err_ret;
adapter->fw_wait_cnt = 0;
@@ -2172,10 +2224,14 @@ qlcnic_detach_work(struct work_struct *work)
return;
err_ret:
+ dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
+ status, adapter->temp);
+ netif_device_attach(netdev);
qlcnic_clr_all_drv_state(adapter);
}
+/*Transit to RESET state from READY state only */
static void
qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
{
@@ -2186,9 +2242,10 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) {
+ if (state == QLCNIC_DEV_READY) {
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
- set_bit(__QLCNIC_START_FW, &adapter->state);
+ QLCDB(adapter, DRV, "NEED_RESET state set\n");
+ qlcnic_idc_debug_info(adapter, 0);
}
qlcnic_api_unlock(adapter);
@@ -2233,9 +2290,8 @@ qlcnic_attach_work(struct work_struct *work)
qlcnic_config_indev_addr(netdev, NETDEV_UP);
}
- netif_device_attach(netdev);
-
done:
+ netif_device_attach(netdev);
adapter->fw_fail_cnt = 0;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -2253,10 +2309,8 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
if (qlcnic_check_temp(adapter))
goto detach;
- if (adapter->need_fw_reset) {
+ if (adapter->need_fw_reset)
qlcnic_dev_request_reset(adapter);
- goto detach;
- }
state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
@@ -2285,8 +2339,11 @@ detach:
QLCNIC_DEV_NEED_RESET;
if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
- !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
+
qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
+ QLCDB(adapter, DRV, "fw recovery scheduled.\n");
+ }
return 1;
}
@@ -2387,51 +2444,72 @@ static int
qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
loff_t offset, size_t size)
{
+ size_t crb_size = 4;
+
if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
return -EIO;
- if ((size != 4) || (offset & 0x3))
- return -EINVAL;
+ if (offset < QLCNIC_PCI_CRBSPACE) {
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
+ QLCNIC_PCI_CAMQM_END))
+ crb_size = 8;
+ else
+ return -EINVAL;
+ }
- if (offset < QLCNIC_PCI_CRBSPACE)
- return -EINVAL;
+ if ((size != crb_size) || (offset & (crb_size-1)))
+ return -EINVAL;
return 0;
}
static ssize_t
-qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
+qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
u32 data;
+ u64 qmdata;
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- data = QLCRD32(adapter, offset);
- memcpy(buf, &data, size);
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = QLCRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ }
return size;
}
static ssize_t
-qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
+qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
u32 data;
+ u64 qmdata;
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- memcpy(&data, buf, size);
- QLCWR32(adapter, offset, data);
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ memcpy(&qmdata, buf, size);
+ qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ QLCWR32(adapter, offset, data);
+ }
return size;
}
@@ -2449,7 +2527,8 @@ qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
}
static ssize_t
-qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
+qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -2470,7 +2549,8 @@ qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
}
static ssize_t
-qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
+qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -2553,24 +2633,12 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
-static int
-qlcnic_destip_supported(struct qlcnic_adapter *adapter)
-{
- if (adapter->ahw.cut_through)
- return 0;
-
- return 1;
-}
-
static void
qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
{
struct in_device *indev;
struct qlcnic_adapter *adapter = netdev_priv(dev);
- if (!qlcnic_destip_supported(adapter))
- return;
-
indev = in_dev_get(dev);
if (!indev)
return;
@@ -2591,7 +2659,6 @@ qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
} endfor_ifa(indev);
in_dev_put(indev);
- return;
}
static int qlcnic_netdev_event(struct notifier_block *this,
@@ -2650,7 +2717,7 @@ recheck:
adapter = netdev_priv(dev);
- if (!adapter || !qlcnic_destip_supported(adapter))
+ if (!adapter)
goto done;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 8b742b6..20624ba 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1344,8 +1344,8 @@ struct oal {
};
struct map_list {
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
struct tx_ring_desc {
@@ -1373,8 +1373,8 @@ struct bq_desc {
} p;
__le64 *addr;
u32 index;
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
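The two struct changes above swap the PCI-specific unmap-state macros for the generic DMA API equivalents from <linux/dma-mapping.h>; the accessors keep working even when the kernel is configured not to store unmap state. A minimal sketch of the pattern, using a hypothetical rx_buf structure:

#include <linux/dma-mapping.h>

struct rx_buf {					/* hypothetical descriptor */
	void *data;
	DEFINE_DMA_UNMAP_ADDR(mapaddr);		/* emits a dma_addr_t field only when unmap state is needed */
	DEFINE_DMA_UNMAP_LEN(maplen);
};

static void rx_buf_save_mapping(struct rx_buf *buf, dma_addr_t map, size_t len)
{
	dma_unmap_addr_set(buf, mapaddr, map);	/* replaces pci_unmap_addr_set() */
	dma_unmap_len_set(buf, maplen, len);	/* replaces pci_unmap_len_set() */
}

Teardown then reads the values back with dma_unmap_addr(buf, mapaddr) and dma_unmap_len(buf, maplen), exactly as in the qlge hunks that follow.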
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 3626646..68a1c9b 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1340,7 +1340,7 @@ void ql_mpi_core_to_log(struct work_struct *work)
for (i = 0; i < count; i += 8) {
printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
- "%.08x %.08x %.08x \n", i,
+ "%.08x %.08x %.08x\n", i,
tmp[i + 0],
tmp[i + 1],
tmp[i + 2],
@@ -2058,7 +2058,7 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
- printk(KERN_ERR PFX "flags3 = %s %s %s \n",
+ printk(KERN_ERR PFX "flags3 = %s %s %s\n",
ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 7e09ff4..4892d64 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -181,8 +181,6 @@ quit:
spin_unlock(&qdev->stats_lock);
QL_DUMP_STAT(qdev);
-
- return;
}
static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index fd34f26..fa4b24c 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1057,7 +1057,7 @@ static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
pci_dma_sync_single_for_cpu(qdev->pdev,
- pci_unmap_addr(lbq_desc, mapaddr),
+ dma_unmap_addr(lbq_desc, mapaddr),
rx_ring->lbq_buf_size,
PCI_DMA_FROMDEVICE);
@@ -1170,8 +1170,8 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
map = lbq_desc->p.pg_chunk.map +
lbq_desc->p.pg_chunk.offset;
- pci_unmap_addr_set(lbq_desc, mapaddr, map);
- pci_unmap_len_set(lbq_desc, maplen,
+ dma_unmap_addr_set(lbq_desc, mapaddr, map);
+ dma_unmap_len_set(lbq_desc, maplen,
rx_ring->lbq_buf_size);
*lbq_desc->addr = cpu_to_le64(map);
@@ -1241,8 +1241,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
sbq_desc->p.skb = NULL;
return;
}
- pci_unmap_addr_set(sbq_desc, mapaddr, map);
- pci_unmap_len_set(sbq_desc, maplen,
+ dma_unmap_addr_set(sbq_desc, mapaddr, map);
+ dma_unmap_len_set(sbq_desc, maplen,
rx_ring->sbq_buf_size);
*sbq_desc->addr = cpu_to_le64(map);
}
@@ -1298,18 +1298,18 @@ static void ql_unmap_send(struct ql_adapter *qdev,
"unmapping OAL area.\n");
}
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_ring_desc->map[i],
+ dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
- pci_unmap_len(&tx_ring_desc->map[i],
+ dma_unmap_len(&tx_ring_desc->map[i],
maplen),
PCI_DMA_TODEVICE);
} else {
netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
"unmapping frag %d.\n", i);
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(&tx_ring_desc->map[i],
+ dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
- pci_unmap_len(&tx_ring_desc->map[i],
+ dma_unmap_len(&tx_ring_desc->map[i],
maplen), PCI_DMA_TODEVICE);
}
}
@@ -1348,8 +1348,8 @@ static int ql_map_send(struct ql_adapter *qdev,
tbd->len = cpu_to_le32(len);
tbd->addr = cpu_to_le64(map);
- pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
- pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
map_idx++;
/*
@@ -1402,9 +1402,9 @@ static int ql_map_send(struct ql_adapter *qdev,
tbd->len =
cpu_to_le32((sizeof(struct tx_buf_desc) *
(frag_cnt - frag_idx)) | TX_DESC_C);
- pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
map);
- pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
sizeof(struct oal));
tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
map_idx++;
@@ -1425,8 +1425,8 @@ static int ql_map_send(struct ql_adapter *qdev,
tbd->addr = cpu_to_le64(map);
tbd->len = cpu_to_le32(frag->size);
- pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
- pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
frag->size);
}
@@ -1742,8 +1742,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
*/
sbq_desc = ql_get_curr_sbuf(rx_ring);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(sbq_desc, mapaddr),
- pci_unmap_len(sbq_desc, maplen),
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, hdr_len);
@@ -1774,18 +1774,18 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
*/
sbq_desc = ql_get_curr_sbuf(rx_ring);
pci_dma_sync_single_for_cpu(qdev->pdev,
- pci_unmap_addr
+ dma_unmap_addr
(sbq_desc, mapaddr),
- pci_unmap_len
+ dma_unmap_len
(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
memcpy(skb_put(skb, length),
sbq_desc->p.skb->data, length);
pci_dma_sync_single_for_device(qdev->pdev,
- pci_unmap_addr
+ dma_unmap_addr
(sbq_desc,
mapaddr),
- pci_unmap_len
+ dma_unmap_len
(sbq_desc,
maplen),
PCI_DMA_FROMDEVICE);
@@ -1798,9 +1798,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
ql_realign_skb(skb, length);
skb_put(skb, length);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(sbq_desc,
+ dma_unmap_addr(sbq_desc,
mapaddr),
- pci_unmap_len(sbq_desc,
+ dma_unmap_len(sbq_desc,
maplen),
PCI_DMA_FROMDEVICE);
sbq_desc->p.skb = NULL;
@@ -1839,9 +1839,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
return NULL;
}
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(lbq_desc,
+ dma_unmap_addr(lbq_desc,
mapaddr),
- pci_unmap_len(lbq_desc, maplen),
+ dma_unmap_len(lbq_desc, maplen),
PCI_DMA_FROMDEVICE);
skb_reserve(skb, NET_IP_ALIGN);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1874,8 +1874,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
int size, i = 0;
sbq_desc = ql_get_curr_sbuf(rx_ring);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(sbq_desc, mapaddr),
- pci_unmap_len(sbq_desc, maplen),
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
/*
@@ -2737,8 +2737,8 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
}
if (sbq_desc->p.skb) {
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(sbq_desc, mapaddr),
- pci_unmap_len(sbq_desc, maplen),
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
dev_kfree_skb(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
@@ -4207,7 +4207,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
static void qlge_set_multicast_list(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
int i, status;
status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
@@ -4271,8 +4271,8 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (status)
goto exit;
i = 0;
- netdev_for_each_mc_addr(mc_ptr, ndev) {
- if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
MAC_ADDR_TYPE_MULTI_MAC, i)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to loadmulticast address.\n");
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 0298d8c..9a251ac 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -330,7 +330,7 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
do {
skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!skb) {
- printk(KERN_ERR DRV_NAME "%s: failed to alloc skb for rx\n", dev->name);
+ netdev_err(dev, "failed to alloc skb for rx\n");
rc = -ENOMEM;
goto err_exit;
}
@@ -400,9 +400,6 @@ static void r6040_init_mac_regs(struct net_device *dev)
* we may get called by r6040_tx_timeout which has left
* some unsent tx buffers */
iowrite16(0x01, ioaddr + MTPR);
-
- /* Check media */
- mii_check_media(&lp->mii_if, 1, 1);
}
static void r6040_tx_timeout(struct net_device *dev)
@@ -410,9 +407,9 @@ static void r6040_tx_timeout(struct net_device *dev)
struct r6040_private *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
- printk(KERN_WARNING "%s: transmit timed out, int enable %4.4x "
+ netdev_warn(dev, "transmit timed out, int enable %4.4x "
"status %4.4x, PHY status %4.4x\n",
- dev->name, ioread16(ioaddr + MIER),
+ ioread16(ioaddr + MIER),
ioread16(ioaddr + MISR),
r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
@@ -530,8 +527,6 @@ static int r6040_phy_mode_chk(struct net_device *dev)
phy_dat = 0x0000;
}
- mii_check_media(&lp->mii_if, 0, 1);
-
return phy_dat;
};
@@ -813,6 +808,9 @@ static void r6040_timer(unsigned long data)
/* Timer active again */
mod_timer(&lp->timer, round_jiffies(jiffies + HZ));
+
+ /* Check media */
+ mii_check_media(&lp->mii_if, 1, 1);
}
/* Read/set MAC address routines */
@@ -897,7 +895,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
if (!lp->tx_free_desc) {
spin_unlock_irqrestore(&lp->lock, flags);
netif_stop_queue(dev);
- printk(KERN_ERR DRV_NAME ": no tx descriptor\n");
+ netdev_err(dev, ": no tx descriptor\n");
return NETDEV_TX_BUSY;
}
@@ -924,7 +922,6 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
if (!lp->tx_free_desc)
netif_stop_queue(dev);
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
@@ -937,7 +934,7 @@ static void r6040_multicast_list(struct net_device *dev)
u16 *adrp;
u16 reg;
unsigned long flags;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
/* MAC Address */
@@ -972,8 +969,8 @@ static void r6040_multicast_list(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- char *addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ char *addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -990,9 +987,9 @@ static void r6040_multicast_list(struct net_device *dev)
}
/* Multicast Address 1~4 case */
i = 0;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (i < MCAST_MAX) {
- adrp = (u16 *) dmi->dmi_addr;
+ adrp = (u16 *) ha->addr;
iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
@@ -1090,20 +1087,20 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* this should always be supported */
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses"
+ dev_err(&pdev->dev, "32-bit PCI DMA addresses"
"not supported by the card\n");
goto err_out;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses"
+ dev_err(&pdev->dev, "32-bit PCI DMA addresses"
"not supported by the card\n");
goto err_out;
}
/* IO Size check */
if (pci_resource_len(pdev, bar) < io_size) {
- printk(KERN_ERR DRV_NAME ": Insufficient PCI resources, aborting\n");
+ dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
err = -EIO;
goto err_out;
}
@@ -1112,7 +1109,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(struct r6040_private));
if (!dev) {
- printk(KERN_ERR DRV_NAME ": Failed to allocate etherdev\n");
+ dev_err(&pdev->dev, "Failed to allocate etherdev\n");
err = -ENOMEM;
goto err_out;
}
@@ -1122,14 +1119,13 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
+ dev_err(&pdev->dev, "Failed to request PCI regions\n");
goto err_out_free_dev;
}
ioaddr = pci_iomap(pdev, bar, io_size);
if (!ioaddr) {
- printk(KERN_ERR DRV_NAME ": ioremap failed for device %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "ioremap failed for device\n");
err = -EIO;
goto err_out_free_res;
}
@@ -1156,7 +1152,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Some bootloader/BIOSes do not initialize
* MAC address, warn about that */
if (!(adrp[0] || adrp[1] || adrp[2])) {
- printk(KERN_WARNING DRV_NAME ": MAC address not initialized, generating random\n");
+ netdev_warn(dev, "MAC address not initialized, generating random\n");
random_ether_addr(dev->dev_addr);
}
@@ -1184,7 +1180,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Check the vendor ID on the PHY, if 0xffff assume none attached */
if (r6040_phy_read(ioaddr, lp->phy_addr, 2) == 0xffff) {
- printk(KERN_ERR DRV_NAME ": Failed to detect an attached PHY\n");
+ dev_err(&pdev->dev, "Failed to detect an attached PHY\n");
err = -ENODEV;
goto err_out_unmap;
}
@@ -1192,7 +1188,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Register net device. After this dev->name assign */
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR DRV_NAME ": Failed to register net device\n");
+ dev_err(&pdev->dev, "Failed to register net device\n");
goto err_out_unmap;
}
return 0;
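The r6040 logging conversion follows the usual split: before register_netdev() the interface has no name yet, so messages are tagged with the PCI device via dev_err(); afterwards netdev_err()/netdev_warn() prefix the interface name automatically. A minimal sketch (illustrative helper, not part of the driver):

#include <linux/pci.h>
#include <linux/netdevice.h>

static void example_logging(struct pci_dev *pdev, struct net_device *dev)
{
	/* Tagged with driver and PCI address, e.g. "r6040 0000:00:0a.0: ..." */
	dev_err(&pdev->dev, "Failed to request PCI regions\n");

	/* Tagged with the interface name, e.g. "eth0: ..." */
	netdev_warn(dev, "MAC address not initialized, generating random\n");
}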
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index dd8106f..217e709 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -23,6 +23,7 @@
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -509,6 +510,7 @@ struct rtl8169_private {
struct mii_if_info mii;
struct rtl8169_counters counters;
+ u32 saved_wolopts;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -748,53 +750,61 @@ static void rtl8169_check_link_status(struct net_device *dev,
spin_lock_irqsave(&tp->lock, flags);
if (tp->link_ok(ioaddr)) {
+ /* This is to cancel a scheduled suspend if there's one. */
+ pm_request_resume(&tp->pci_dev->dev);
netif_carrier_on(dev);
netif_info(tp, ifup, dev, "link up\n");
} else {
netif_carrier_off(dev);
netif_info(tp, ifdown, dev, "link down\n");
+ pm_schedule_suspend(&tp->pci_dev->dev, 100);
}
spin_unlock_irqrestore(&tp->lock, flags);
}
-static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
+
+static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
u8 options;
-
- wol->wolopts = 0;
-
-#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
- wol->supported = WAKE_ANY;
-
- spin_lock_irq(&tp->lock);
+ u32 wolopts = 0;
options = RTL_R8(Config1);
if (!(options & PMEnable))
- goto out_unlock;
+ return 0;
options = RTL_R8(Config3);
if (options & LinkUp)
- wol->wolopts |= WAKE_PHY;
+ wolopts |= WAKE_PHY;
if (options & MagicPacket)
- wol->wolopts |= WAKE_MAGIC;
+ wolopts |= WAKE_MAGIC;
options = RTL_R8(Config5);
if (options & UWF)
- wol->wolopts |= WAKE_UCAST;
+ wolopts |= WAKE_UCAST;
if (options & BWF)
- wol->wolopts |= WAKE_BCAST;
+ wolopts |= WAKE_BCAST;
if (options & MWF)
- wol->wolopts |= WAKE_MCAST;
+ wolopts |= WAKE_MCAST;
-out_unlock:
- spin_unlock_irq(&tp->lock);
+ return wolopts;
}
-static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
+
+ spin_lock_irq(&tp->lock);
+
+ wol->supported = WAKE_ANY;
+ wol->wolopts = __rtl8169_get_wol(tp);
+
+ spin_unlock_irq(&tp->lock);
+}
+
+static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+{
void __iomem *ioaddr = tp->mmio_addr;
unsigned int i;
static const struct {
@@ -811,23 +821,29 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{ WAKE_ANY, Config5, LanWake }
};
- spin_lock_irq(&tp->lock);
-
RTL_W8(Cfg9346, Cfg9346_Unlock);
for (i = 0; i < ARRAY_SIZE(cfg); i++) {
u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
- if (wol->wolopts & cfg[i].opt)
+ if (wolopts & cfg[i].opt)
options |= cfg[i].mask;
RTL_W8(cfg[i].reg, options);
}
RTL_W8(Cfg9346, Cfg9346_Lock);
+}
+
+static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ spin_lock_irq(&tp->lock);
if (wol->wolopts)
tp->features |= RTL_FEATURE_WOL;
else
tp->features &= ~RTL_FEATURE_WOL;
+ __rtl8169_set_wol(tp, wol->wolopts);
device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
spin_unlock_irq(&tp->lock);
@@ -3192,6 +3208,12 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ }
+ pm_runtime_idle(&pdev->dev);
+
out:
return rc;
@@ -3213,10 +3235,18 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
+ pm_runtime_get_sync(&pdev->dev);
+
flush_scheduled_work();
unregister_netdev(dev);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ }
+ pm_runtime_put_noidle(&pdev->dev);
+
/* restore original MAC address */
rtl_rar_set(tp, dev->perm_addr);
@@ -3243,6 +3273,7 @@ static int rtl8169_open(struct net_device *dev)
struct pci_dev *pdev = tp->pci_dev;
int retval = -ENOMEM;
+ pm_runtime_get_sync(&pdev->dev);
/*
* Note that we use a magic value here, it's weird I know
@@ -3263,7 +3294,7 @@ static int rtl8169_open(struct net_device *dev)
tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
&tp->TxPhyAddr);
if (!tp->TxDescArray)
- goto out;
+ goto err_pm_runtime_put;
tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
&tp->RxPhyAddr);
@@ -3290,6 +3321,9 @@ static int rtl8169_open(struct net_device *dev)
rtl8169_request_timer(dev);
+ tp->saved_wolopts = 0;
+ pm_runtime_put_noidle(&pdev->dev);
+
rtl8169_check_link_status(dev, tp, tp->mmio_addr);
out:
return retval;
@@ -3299,9 +3333,13 @@ err_release_ring_2:
err_free_rx_1:
pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
+ tp->RxDescArray = NULL;
err_free_tx_0:
pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
tp->TxPhyAddr);
+ tp->TxDescArray = NULL;
+err_pm_runtime_put:
+ pm_runtime_put_noidle(&pdev->dev);
goto out;
}
@@ -4720,6 +4758,8 @@ static int rtl8169_close(struct net_device *dev)
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
+ pm_runtime_get_sync(&pdev->dev);
+
/* update counters before going down */
rtl8169_update_counters(dev);
@@ -4734,6 +4774,8 @@ static int rtl8169_close(struct net_device *dev)
tp->TxDescArray = NULL;
tp->RxDescArray = NULL;
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
@@ -4759,12 +4801,12 @@ static void rtl_set_rx_mode(struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
}
@@ -4832,21 +4874,74 @@ static int rtl8169_suspend(struct device *device)
return 0;
}
+static void __rtl8169_resume(struct net_device *dev)
+{
+ netif_device_attach(dev);
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+}
+
static int rtl8169_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
- if (!netif_running(dev))
- goto out;
+ if (netif_running(dev))
+ __rtl8169_resume(dev);
- netif_device_attach(dev);
+ return 0;
+}
+
+static int rtl8169_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!tp->TxDescArray)
+ return 0;
+
+ spin_lock_irq(&tp->lock);
+ tp->saved_wolopts = __rtl8169_get_wol(tp);
+ __rtl8169_set_wol(tp, WAKE_ANY);
+ spin_unlock_irq(&tp->lock);
+
+ rtl8169_net_suspend(dev);
+
+ return 0;
+}
+
+static int rtl8169_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!tp->TxDescArray)
+ return 0;
+
+ spin_lock_irq(&tp->lock);
+ __rtl8169_set_wol(tp, tp->saved_wolopts);
+ tp->saved_wolopts = 0;
+ spin_unlock_irq(&tp->lock);
+
+ __rtl8169_resume(dev);
- rtl8169_schedule_work(dev, rtl8169_reset_task);
-out:
return 0;
}
+static int rtl8169_runtime_idle(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!tp->TxDescArray)
+ return 0;
+
+ rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+ return -EBUSY;
+}
+
static const struct dev_pm_ops rtl8169_pm_ops = {
.suspend = rtl8169_suspend,
.resume = rtl8169_resume,
@@ -4854,6 +4949,9 @@ static const struct dev_pm_ops rtl8169_pm_ops = {
.thaw = rtl8169_resume,
.poweroff = rtl8169_suspend,
.restore = rtl8169_resume,
+ .runtime_suspend = rtl8169_runtime_suspend,
+ .runtime_resume = rtl8169_runtime_resume,
+ .runtime_idle = rtl8169_runtime_idle,
};
#define RTL8169_PM_OPS (&rtl8169_pm_ops)
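Taken together, the r8169 runtime-PM hooks form a small choreography: probe enables runtime PM only if the device can generate wake events, link-down schedules a delayed suspend, link-up cancels it, and open/close/remove hold a runtime reference across their critical sections. A condensed sketch of those calls in isolation (function names here are illustrative):

#include <linux/pci.h>
#include <linux/pm_runtime.h>

static void example_enable_runtime_pm(struct pci_dev *pdev)
{
	if (pci_dev_run_wake(pdev)) {		/* only if the device can wake itself */
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}
	pm_runtime_idle(&pdev->dev);		/* ask the core whether it may suspend right away */
}

static void example_link_changed(struct pci_dev *pdev, bool link_up)
{
	if (link_up)
		pm_request_resume(&pdev->dev);		/* also cancels a pending scheduled suspend */
	else
		pm_schedule_suspend(&pdev->dev, 100);	/* suspend ~100 ms after the link drops */
}

The runtime_idle callback returning -EBUSY while the descriptor rings are allocated is what keeps the core from suspending an open, link-up interface; suspends only arrive through the scheduled path above.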
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index f2e335f..e26e107 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1467,7 +1467,6 @@ static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
spin_unlock_irqrestore(&rrpriv->lock, flags);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 92ae8d3..668327c 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2400,7 +2400,7 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
return NULL;
}
pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
- skb->len - skb->data_len, PCI_DMA_TODEVICE);
+ skb_headlen(skb), PCI_DMA_TODEVICE);
frg_cnt = skb_shinfo(skb)->nr_frags;
if (frg_cnt) {
txds++;
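skb_headlen() is simply the canonical helper for the linear-area length, so the substitution above is a readability change rather than a behavioural one:

/* As defined in include/linux/skbuff.h: */
static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}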
@@ -2943,7 +2943,6 @@ static void s2io_netpoll(struct net_device *dev)
}
}
enable_irq(dev->irq);
- return;
}
#endif
@@ -4202,7 +4201,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
}
- frg_len = skb->len - skb->data_len;
+ frg_len = skb_headlen(skb);
if (offload_type == SKB_GSO_UDP) {
int ufo_size;
@@ -4756,7 +4755,6 @@ reset:
s2io_stop_all_tx_queue(sp);
schedule_work(&sp->rst_timer_task);
sw_stat->soft_reset_cnt++;
- return;
}
/**
@@ -4965,7 +4963,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
static void s2io_set_multicast(struct net_device *dev)
{
int i, j, prev_cnt;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
@@ -5094,12 +5092,12 @@ static void s2io_set_multicast(struct net_device *dev)
/* Create the new Rx filter list and update the same in H/W. */
i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(sp->usr_addrs[i].addr, ha->addr,
ETH_ALEN);
mac_addr = 0;
for (j = 0; j < ETH_ALEN; j++) {
- mac_addr |= mclist->dmi_addr[j];
+ mac_addr |= ha->addr[j];
mac_addr <<= 8;
}
mac_addr >>= 8;
@@ -8645,7 +8643,6 @@ static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
first->truesize += skb->truesize;
lro->last_frag = skb;
swstats->clubbed_frms_cnt++;
- return;
}
/**
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c
index 45f2634..a7ff8ea 100644
--- a/drivers/net/s6gmac.c
+++ b/drivers/net/s6gmac.c
@@ -396,7 +396,6 @@ static void s6gmac_rx_interrupt(struct net_device *dev)
} else {
skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
& S6_GMAC_BURST_POSTRD_LEN_MASK);
- skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_rx(skb);
@@ -853,8 +852,8 @@ static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
{
struct s6gmac *pd = netdev_priv(dev);
unsigned long flags;
+
spin_lock_irqsave(&pd->lock, flags);
- dev->trans_start = jiffies;
writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
0 << S6_GMAC_BURST_PREWR_CFE |
1 << S6_GMAC_BURST_PREWR_PPE |
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index abc8eef..a9ae505 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -426,7 +426,6 @@ sb1000_send_command(const int ioaddr[], const char* name,
if (sb1000_debug > 3)
printk(KERN_DEBUG "%s: sb1000_send_command out: %02x%02x%02x%02x"
"%02x%02x\n", name, out[0], out[1], out[2], out[3], out[4], out[5]);
- return;
}
/* Card Read Status (to be used during frame rx) */
@@ -438,7 +437,6 @@ sb1000_read_status(const int ioaddr[], unsigned char in[])
in[3] = inb(ioaddr[0] + 3);
in[4] = inb(ioaddr[0] + 4);
in[0] = inb(ioaddr[0] + 5);
- return;
}
/* Issue Read Command (to be used during frame rx) */
@@ -450,7 +448,6 @@ sb1000_issue_read_command(const int ioaddr[], const char* name)
sb1000_wait_for_ready_clear(ioaddr, name);
outb(0xa0, ioaddr[0] + 6);
sb1000_send_command(ioaddr, name, Command0);
- return;
}
@@ -733,7 +730,6 @@ sb1000_print_status_buffer(const char* name, unsigned char st[],
printk("\n");
}
}
- return;
}
/*
@@ -926,7 +922,6 @@ sb1000_error_dpc(struct net_device *dev)
sb1000_read_status(ioaddr, st);
if (st[1] & 0x10)
lp->rx_error_dpc_count = ErrorDpcCounterInitialize;
- return;
}
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 04efc0c..1f3acc3 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -48,23 +48,6 @@
#include <asm/io.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
-/* This is only here until the firmware is ready. In that case,
- the firmware leaves the ethernet address in the register for us. */
-#ifdef CONFIG_SIBYTE_STANDALONE
-#define SBMAC_ETH0_HWADDR "40:00:00:00:01:00"
-#define SBMAC_ETH1_HWADDR "40:00:00:00:01:01"
-#define SBMAC_ETH2_HWADDR "40:00:00:00:01:02"
-#define SBMAC_ETH3_HWADDR "40:00:00:00:01:03"
-#endif
-
-
-/* These identify the driver base version and may not be removed. */
-#if 0
-static char version1[] __initdata =
-"sb1250-mac.c:1.00 1/11/2001 Written by Mitch Lichtenberg\n";
-#endif
-
-
/* Operational parameters that usually are not changed. */
#define CONFIG_SBMAC_COALESCE
@@ -349,7 +332,6 @@ static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
********************************************************************* */
static char sbmac_string[] = "sb1250-mac";
-static char sbmac_pretty[] = "SB1250 MAC";
static char sbmac_mdio_string[] = "sb1250-mac-mdio";
@@ -2086,8 +2068,6 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- dev->trans_start = jiffies;
-
spin_unlock_irqrestore(&sc->sbm_lock, flags);
return NETDEV_TX_OK;
@@ -2112,7 +2092,7 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
uint64_t reg;
void __iomem *port;
int idx;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct net_device *dev = sc->sbm_dev;
/*
@@ -2161,10 +2141,10 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
* XXX if the table overflows */
idx = 1; /* skip station address */
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (idx == MAC_ADDR_COUNT)
break;
- reg = sbmac_addr2reg(mclist->dmi_addr);
+ reg = sbmac_addr2reg(ha->addr);
port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
__raw_writeq(reg, port);
idx++;
@@ -2182,85 +2162,6 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
}
}
-#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
-/**********************************************************************
- * SBMAC_PARSE_XDIGIT(str)
- *
- * Parse a hex digit, returning its value
- *
- * Input parameters:
- * str - character
- *
- * Return value:
- * hex value, or -1 if invalid
- ********************************************************************* */
-
-static int sbmac_parse_xdigit(char str)
-{
- int digit;
-
- if ((str >= '0') && (str <= '9'))
- digit = str - '0';
- else if ((str >= 'a') && (str <= 'f'))
- digit = str - 'a' + 10;
- else if ((str >= 'A') && (str <= 'F'))
- digit = str - 'A' + 10;
- else
- return -1;
-
- return digit;
-}
-
-/**********************************************************************
- * SBMAC_PARSE_HWADDR(str,hwaddr)
- *
- * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
- * Ethernet address.
- *
- * Input parameters:
- * str - string
- * hwaddr - pointer to hardware address
- *
- * Return value:
- * 0 if ok, else -1
- ********************************************************************* */
-
-static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
-{
- int digit1,digit2;
- int idx = 6;
-
- while (*str && (idx > 0)) {
- digit1 = sbmac_parse_xdigit(*str);
- if (digit1 < 0)
- return -1;
- str++;
- if (!*str)
- return -1;
-
- if ((*str == ':') || (*str == '-')) {
- digit2 = digit1;
- digit1 = 0;
- }
- else {
- digit2 = sbmac_parse_xdigit(*str);
- if (digit2 < 0)
- return -1;
- str++;
- }
-
- *hwaddr++ = (digit1 << 4) | digit2;
- idx--;
-
- if (*str == '-')
- str++;
- if (*str == ':')
- str++;
- }
- return 0;
-}
-#endif
-
static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
{
if (new_mtu > ENET_PACKET_SIZE)
@@ -2585,7 +2486,7 @@ static void sbmac_tx_timeout (struct net_device *dev)
spin_lock_irqsave(&sc->sbm_lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
spin_unlock_irqrestore(&sc->sbm_lock, flags);
@@ -2662,7 +2563,6 @@ static int sbmac_close(struct net_device *dev)
static int sbmac_poll(struct napi_struct *napi, int budget)
{
struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi);
- struct net_device *dev = sc->sbm_dev;
int work_done;
work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1);
@@ -2766,162 +2666,6 @@ static int __exit sbmac_remove(struct platform_device *pldev)
return 0;
}
-
-static struct platform_device **sbmac_pldev;
-static int sbmac_max_units;
-
-#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
-static void __init sbmac_setup_hwaddr(int idx, char *addr)
-{
- void __iomem *sbm_base;
- unsigned long start, end;
- uint8_t eaddr[6];
- uint64_t val;
-
- if (idx >= sbmac_max_units)
- return;
-
- start = A_MAC_CHANNEL_BASE(idx);
- end = A_MAC_CHANNEL_BASE(idx + 1) - 1;
-
- sbm_base = ioremap_nocache(start, end - start + 1);
- if (!sbm_base) {
- printk(KERN_ERR "%s: unable to map device registers\n",
- sbmac_string);
- return;
- }
-
- sbmac_parse_hwaddr(addr, eaddr);
- val = sbmac_addr2reg(eaddr);
- __raw_writeq(val, sbm_base + R_MAC_ETHERNET_ADDR);
- val = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR);
-
- iounmap(sbm_base);
-}
-#endif
-
-static int __init sbmac_platform_probe_one(int idx)
-{
- struct platform_device *pldev;
- struct {
- struct resource r;
- char name[strlen(sbmac_pretty) + 4];
- } *res;
- int err;
-
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (!res) {
- printk(KERN_ERR "%s.%d: unable to allocate memory\n",
- sbmac_string, idx);
- err = -ENOMEM;
- goto out_err;
- }
-
- /*
- * This is the base address of the MAC.
- */
- snprintf(res->name, sizeof(res->name), "%s %d", sbmac_pretty, idx);
- res->r.name = res->name;
- res->r.flags = IORESOURCE_MEM;
- res->r.start = A_MAC_CHANNEL_BASE(idx);
- res->r.end = A_MAC_CHANNEL_BASE(idx + 1) - 1;
-
- pldev = platform_device_register_simple(sbmac_string, idx, &res->r, 1);
- if (IS_ERR(pldev)) {
- printk(KERN_ERR "%s.%d: unable to register platform device\n",
- sbmac_string, idx);
- err = PTR_ERR(pldev);
- goto out_kfree;
- }
-
- if (!pldev->dev.driver) {
- err = 0; /* No hardware at this address. */
- goto out_unregister;
- }
-
- sbmac_pldev[idx] = pldev;
- return 0;
-
-out_unregister:
- platform_device_unregister(pldev);
-
-out_kfree:
- kfree(res);
-
-out_err:
- return err;
-}
-
-static void __init sbmac_platform_probe(void)
-{
- int i;
-
- /* Set the number of available units based on the SOC type. */
- switch (soc_type) {
- case K_SYS_SOC_TYPE_BCM1250:
- case K_SYS_SOC_TYPE_BCM1250_ALT:
- sbmac_max_units = 3;
- break;
- case K_SYS_SOC_TYPE_BCM1120:
- case K_SYS_SOC_TYPE_BCM1125:
- case K_SYS_SOC_TYPE_BCM1125H:
- case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
- sbmac_max_units = 2;
- break;
- case K_SYS_SOC_TYPE_BCM1x55:
- case K_SYS_SOC_TYPE_BCM1x80:
- sbmac_max_units = 4;
- break;
- default:
- return; /* none */
- }
-
- /*
- * For bringup when not using the firmware, we can pre-fill
- * the MAC addresses using the environment variables
- * specified in this file (or maybe from the config file?)
- */
-#ifdef SBMAC_ETH0_HWADDR
- sbmac_setup_hwaddr(0, SBMAC_ETH0_HWADDR);
-#endif
-#ifdef SBMAC_ETH1_HWADDR
- sbmac_setup_hwaddr(1, SBMAC_ETH1_HWADDR);
-#endif
-#ifdef SBMAC_ETH2_HWADDR
- sbmac_setup_hwaddr(2, SBMAC_ETH2_HWADDR);
-#endif
-#ifdef SBMAC_ETH3_HWADDR
- sbmac_setup_hwaddr(3, SBMAC_ETH3_HWADDR);
-#endif
-
- sbmac_pldev = kcalloc(sbmac_max_units, sizeof(*sbmac_pldev),
- GFP_KERNEL);
- if (!sbmac_pldev) {
- printk(KERN_ERR "%s: unable to allocate memory\n",
- sbmac_string);
- return;
- }
-
- /*
- * Walk through the Ethernet controllers and find
- * those who have their MAC addresses set.
- */
- for (i = 0; i < sbmac_max_units; i++)
- if (sbmac_platform_probe_one(i))
- break;
-}
-
-
-static void __exit sbmac_platform_cleanup(void)
-{
- int i;
-
- for (i = 0; i < sbmac_max_units; i++)
- platform_device_unregister(sbmac_pldev[i]);
- kfree(sbmac_pldev);
-}
-
-
static struct platform_driver sbmac_driver = {
.probe = sbmac_probe,
.remove = __exit_p(sbmac_remove),
@@ -2932,20 +2676,11 @@ static struct platform_driver sbmac_driver = {
static int __init sbmac_init_module(void)
{
- int err;
-
- err = platform_driver_register(&sbmac_driver);
- if (err)
- return err;
-
- sbmac_platform_probe();
-
- return err;
+ return platform_driver_register(&sbmac_driver);
}
static void __exit sbmac_cleanup_module(void)
{
- sbmac_platform_cleanup();
platform_driver_unregister(&sbmac_driver);
}
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index d87c478..8c4067a 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -433,13 +433,13 @@ static void _sc92031_set_mar(struct net_device *dev)
(dev->flags & IFF_ALLMULTI))
mar0 = mar1 = 0xffffffff;
else if (dev->flags & IFF_MULTICAST) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(mc_list, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
u32 crc;
unsigned bit = 0;
- crc = ~ether_crc(ETH_ALEN, mc_list->dmi_addr);
+ crc = ~ether_crc(ETH_ALEN, ha->addr);
crc >>= 24;
if (crc & 0x01) bit |= 0x02;
@@ -987,8 +987,6 @@ static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
mmiowb();
- dev->trans_start = jiffies;
-
if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
netif_stop_queue(dev);
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index 374832c..d2fce98 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -390,7 +390,7 @@ static void seeq8005_timeout(struct net_device *dev)
tx_done(dev) ? "IRQ conflict" : "network cable problem");
/* Try to restart the adaptor. */
seeq8005_init(dev, 1);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -411,7 +411,6 @@ static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb,
netif_stop_queue(dev);
hardware_send_packet(dev, buf, length);
- dev->trans_start = jiffies;
dev->stats.tx_bytes += length;
dev_kfree_skb (skb);
/* You might need to clean up and record Tx statistics here. */
@@ -579,7 +578,6 @@ static void seeq8005_rx(struct net_device *dev)
/* If any worth-while packets have been received, netif_rx()
has done a mark_bh(NET_BH) for us and will work on them
when we get to the bottom-half routine. */
- return;
}
/* The inverse routine to net_open(). */
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 649a264..1564605 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -225,17 +225,17 @@ static void efx_fini_channels(struct efx_nic *efx);
* never be concurrently called more than once on the same channel,
* though different channels may be being processed concurrently.
*/
-static int efx_process_channel(struct efx_channel *channel, int rx_quota)
+static int efx_process_channel(struct efx_channel *channel, int budget)
{
struct efx_nic *efx = channel->efx;
- int rx_packets;
+ int spent;
if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
!channel->enabled))
return 0;
- rx_packets = efx_nic_process_eventq(channel, rx_quota);
- if (rx_packets == 0)
+ spent = efx_nic_process_eventq(channel, budget);
+ if (spent == 0)
return 0;
/* Deliver last RX packet. */
@@ -249,7 +249,7 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
- return rx_packets;
+ return spent;
}
/* Mark channel as finished processing
@@ -278,17 +278,17 @@ static int efx_poll(struct napi_struct *napi, int budget)
{
struct efx_channel *channel =
container_of(napi, struct efx_channel, napi_str);
- int rx_packets;
+ int spent;
EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
channel->channel, raw_smp_processor_id());
- rx_packets = efx_process_channel(channel, budget);
+ spent = efx_process_channel(channel, budget);
- if (rx_packets < budget) {
+ if (spent < budget) {
struct efx_nic *efx = channel->efx;
- if (channel->used_flags & EFX_USED_BY_RX &&
+ if (channel->channel < efx->n_rx_channels &&
efx->irq_rx_adaptive &&
unlikely(++channel->irq_count == 1000)) {
if (unlikely(channel->irq_mod_score <
@@ -318,7 +318,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
efx_channel_processed(channel);
}
- return rx_packets;
+ return spent;
}
/* Process the eventq of the specified channel immediately on this CPU
@@ -333,7 +333,6 @@ void efx_process_channel_now(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
- BUG_ON(!channel->used_flags);
BUG_ON(!channel->enabled);
/* Disable interrupts and wait for ISRs to complete */
@@ -446,12 +445,12 @@ static void efx_set_channel_names(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
number = channel->channel;
- if (efx->n_channels > efx->n_rx_queues) {
- if (channel->channel < efx->n_rx_queues) {
+ if (efx->n_channels > efx->n_rx_channels) {
+ if (channel->channel < efx->n_rx_channels) {
type = "-rx";
} else {
type = "-tx";
- number -= efx->n_rx_queues;
+ number -= efx->n_rx_channels;
}
}
snprintf(channel->name, sizeof(channel->name),
@@ -585,8 +584,6 @@ static void efx_remove_channel(struct efx_channel *channel)
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_remove_tx_queue(tx_queue);
efx_remove_eventq(channel);
-
- channel->used_flags = 0;
}
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
@@ -956,10 +953,9 @@ static void efx_fini_io(struct efx_nic *efx)
pci_disable_device(efx->pci_dev);
}
-/* Get number of RX queues wanted. Return number of online CPU
- * packages in the expectation that an IRQ balancer will spread
- * interrupts across them. */
-static int efx_wanted_rx_queues(void)
+/* Get number of channels wanted. Each channel will have its own IRQ,
+ * 1 RX queue and/or 2 TX queues. */
+static int efx_wanted_channels(void)
{
cpumask_var_t core_mask;
int count;
@@ -995,34 +991,39 @@ static void efx_probe_interrupts(struct efx_nic *efx)
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
struct msix_entry xentries[EFX_MAX_CHANNELS];
- int wanted_ints;
- int rx_queues;
+ int n_channels;
- /* We want one RX queue and interrupt per CPU package
- * (or as specified by the rss_cpus module parameter).
- * We will need one channel per interrupt.
- */
- rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
- wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
- wanted_ints = min(wanted_ints, max_channels);
+ n_channels = efx_wanted_channels();
+ if (separate_tx_channels)
+ n_channels *= 2;
+ n_channels = min(n_channels, max_channels);
- for (i = 0; i < wanted_ints; i++)
+ for (i = 0; i < n_channels; i++)
xentries[i].entry = i;
- rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
+ rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
if (rc > 0) {
EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
- " available (%d < %d).\n", rc, wanted_ints);
+ " available (%d < %d).\n", rc, n_channels);
EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
- EFX_BUG_ON_PARANOID(rc >= wanted_ints);
- wanted_ints = rc;
+ EFX_BUG_ON_PARANOID(rc >= n_channels);
+ n_channels = rc;
rc = pci_enable_msix(efx->pci_dev, xentries,
- wanted_ints);
+ n_channels);
}
if (rc == 0) {
- efx->n_rx_queues = min(rx_queues, wanted_ints);
- efx->n_channels = wanted_ints;
- for (i = 0; i < wanted_ints; i++)
+ efx->n_channels = n_channels;
+ if (separate_tx_channels) {
+ efx->n_tx_channels =
+ max(efx->n_channels / 2, 1U);
+ efx->n_rx_channels =
+ max(efx->n_channels -
+ efx->n_tx_channels, 1U);
+ } else {
+ efx->n_tx_channels = efx->n_channels;
+ efx->n_rx_channels = efx->n_channels;
+ }
+ for (i = 0; i < n_channels; i++)
efx->channel[i].irq = xentries[i].vector;
} else {
/* Fall back to single channel MSI */
@@ -1033,8 +1034,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
/* Try single interrupt MSI */
if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
- efx->n_rx_queues = 1;
efx->n_channels = 1;
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx->channel[0].irq = efx->pci_dev->irq;
@@ -1046,8 +1048,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
/* Assume legacy interrupts */
if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
- efx->n_rx_queues = 1;
efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
efx->legacy_irq = efx->pci_dev->irq;
}
}
@@ -1068,21 +1071,24 @@ static void efx_remove_interrupts(struct efx_nic *efx)
static void efx_set_channels(struct efx_nic *efx)
{
+ struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
+ unsigned tx_channel_offset =
+ separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
- efx_for_each_tx_queue(tx_queue, efx) {
- if (separate_tx_channels)
- tx_queue->channel = &efx->channel[efx->n_channels-1];
- else
- tx_queue->channel = &efx->channel[0];
- tx_queue->channel->used_flags |= EFX_USED_BY_TX;
+ efx_for_each_channel(channel, efx) {
+ if (channel->channel - tx_channel_offset < efx->n_tx_channels) {
+ channel->tx_queue = &efx->tx_queue[
+ (channel->channel - tx_channel_offset) *
+ EFX_TXQ_TYPES];
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ tx_queue->channel = channel;
+ }
}
- efx_for_each_rx_queue(rx_queue, efx) {
+ efx_for_each_rx_queue(rx_queue, efx)
rx_queue->channel = &efx->channel[rx_queue->queue];
- rx_queue->channel->used_flags |= EFX_USED_BY_RX;
- }
}
static int efx_probe_nic(struct efx_nic *efx)
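A worked example of the mapping efx_set_channels() now produces (numbers purely illustrative): suppose n_channels = 4 with separate_tx_channels set, so n_tx_channels = 2, n_rx_channels = 2 and tx_channel_offset = 4 - 2 = 2.

/*
 * channels 0, 1: (channel - tx_channel_offset) wraps as unsigned, the test
 *                fails, and they carry RX queues only.
 * channel 2:     tx_queue[(2 - 2) * EFX_TXQ_TYPES]  -> first TX queue block
 * channel 3:     tx_queue[(3 - 2) * EFX_TXQ_TYPES]  -> second TX queue block
 *
 * Without separate_tx_channels the offset is 0 and every channel owns both
 * its RX queue and one block of EFX_TXQ_TYPES TX queues.
 */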
@@ -1096,11 +1102,12 @@ static int efx_probe_nic(struct efx_nic *efx)
if (rc)
return rc;
- /* Determine the number of channels and RX queues by trying to hook
+ /* Determine the number of channels and queues by trying to hook
* in MSI-X interrupts. */
efx_probe_interrupts(efx);
efx_set_channels(efx);
+ efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
/* Initialise the interrupt moderation settings */
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
@@ -1187,11 +1194,12 @@ static void efx_start_all(struct efx_nic *efx)
/* Mark the port as enabled so port reconfigurations can start, then
* restart the transmit interface early so the watchdog timer stops */
efx_start_port(efx);
- if (efx_dev_registered(efx))
- efx_wake_queue(efx);
- efx_for_each_channel(channel, efx)
+ efx_for_each_channel(channel, efx) {
+ if (efx_dev_registered(efx))
+ efx_wake_queue(channel);
efx_start_channel(channel);
+ }
efx_nic_enable_interrupts(efx);
@@ -1282,7 +1290,9 @@ static void efx_stop_all(struct efx_nic *efx)
/* Stop the kernel transmit interface late, so the watchdog
* timer isn't ticking over the flush */
if (efx_dev_registered(efx)) {
- efx_stop_queue(efx);
+ struct efx_channel *channel;
+ efx_for_each_channel(channel, efx)
+ efx_stop_queue(channel);
netif_tx_lock_bh(efx->net_dev);
netif_tx_unlock_bh(efx->net_dev);
}
@@ -1537,9 +1547,8 @@ static void efx_watchdog(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
- " resetting channels\n",
- atomic_read(&efx->netif_stop_count), efx->port_enabled);
+ EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n",
+ efx->port_enabled);
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
@@ -1603,7 +1612,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
static void efx_set_multicast_list(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
union efx_multicast_hash *mc_hash = &efx->multicast_hash;
u32 crc;
int bit;
@@ -1615,8 +1624,8 @@ static void efx_set_multicast_list(struct net_device *net_dev)
memset(mc_hash, 0xff, sizeof(*mc_hash));
} else {
memset(mc_hash, 0x00, sizeof(*mc_hash));
- netdev_for_each_mc_addr(mc_list, net_dev) {
- crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, net_dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
set_bit_le(bit, mc_hash->byte);
}
@@ -2014,22 +2023,22 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
efx->net_dev = net_dev;
efx->rx_checksum_enabled = true;
- spin_lock_init(&efx->netif_stop_lock);
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->mac_op = type->default_mac_ops;
efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;
INIT_WORK(&efx->mac_work, efx_mac_work);
- atomic_set(&efx->netif_stop_count, 1);
for (i = 0; i < EFX_MAX_CHANNELS; i++) {
channel = &efx->channel[i];
channel->efx = efx;
channel->channel = i;
channel->work_pending = false;
+ spin_lock_init(&channel->tx_stop_lock);
+ atomic_set(&channel->tx_stop_count, 1);
}
- for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
+ for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
tx_queue = &efx->tx_queue[i];
tx_queue->efx = efx;
tx_queue->queue = i;
@@ -2201,7 +2210,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
int i, rc;
/* Allocate and initialise a struct net_device and struct efx_nic */
- net_dev = alloc_etherdev(sizeof(*efx));
+ net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
if (!net_dev)
return -ENOMEM;
net_dev->features |= (type->offload_features | NETIF_F_SG |
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 7eff0a6..ffd708c 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -35,8 +35,8 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
extern netdev_tx_t
efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern void efx_stop_queue(struct efx_nic *efx);
-extern void efx_wake_queue(struct efx_nic *efx);
+extern void efx_stop_queue(struct efx_channel *channel);
+extern void efx_wake_queue(struct efx_channel *channel);
#define EFX_TXQ_SIZE 1024
#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index d9f9c02..22026bf 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -304,7 +304,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
{
struct efx_tx_queue *tx_queue;
- efx_for_each_tx_queue(tx_queue, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
efx_fill_test(test_index++, strings, data,
&lb_tests->tx_sent[tx_queue->queue],
EFX_TX_QUEUE_NAME(tx_queue),
@@ -647,7 +647,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
efx_for_each_tx_queue(tx_queue, efx) {
channel = tx_queue->channel;
if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
- if (channel->used_flags != EFX_USED_BY_RX_TX)
+ if (channel->channel < efx->n_rx_channels)
coalesce->tx_coalesce_usecs_irq =
channel->irq_moderation;
else
@@ -690,7 +690,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
/* If the channel is shared only allow RX parameters to be set */
efx_for_each_tx_queue(tx_queue, efx) {
- if ((tx_queue->channel->used_flags == EFX_USED_BY_RX_TX) &&
+ if ((tx_queue->channel->channel < efx->n_rx_channels) &&
tx_usecs) {
EFX_ERR(efx, "Channel is shared. "
"Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 08278e7..655b697 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -175,16 +175,19 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
- /* Check to see if we have a serious error condition */
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
-
/* Determine interrupting queues, clear interrupt status
* register and acknowledge the device interrupt.
*/
BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
+
+ /* Check to see if we have a serious error condition */
+ if (queues & (1U << efx->fatal_irq_level)) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
+
EFX_ZERO_OWORD(*int_ker);
wmb(); /* Ensure the vector is cleared before interrupt ack */
falcon_irq_ack_a1(efx);
@@ -504,6 +507,9 @@ static void falcon_reset_macs(struct efx_nic *efx)
/* Ensure the correct MAC is selected before statistics
* are re-enabled by the caller */
efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
+
+ /* This can run even when the GMAC is selected */
+ falcon_setup_xaui(efx);
}
void falcon_drain_tx_fifo(struct efx_nic *efx)
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 8ccab2c..c84a2ce 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -26,7 +26,7 @@
*************************************************************************/
/* Configure the XAUI driver that is an output from Falcon */
-static void falcon_setup_xaui(struct efx_nic *efx)
+void falcon_setup_xaui(struct efx_nic *efx)
{
efx_oword_t sdctl, txdrv;
@@ -85,14 +85,14 @@ int falcon_reset_xaui(struct efx_nic *efx)
return -ETIMEDOUT;
}
-static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
+static void falcon_ack_status_intr(struct efx_nic *efx)
{
efx_oword_t reg;
if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
return;
- /* We expect xgmii faults if the wireside link is up */
+ /* We expect xgmii faults if the wireside link is down */
if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
return;
@@ -101,14 +101,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
if (efx->xmac_poll_required)
return;
- /* Flush the ISR */
- if (enable)
- efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
-
- EFX_POPULATE_OWORD_2(reg,
- FRF_AB_XM_MSK_RMTFLT, !enable,
- FRF_AB_XM_MSK_LCLFLT, !enable);
- efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
+ efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}
static bool falcon_xgxs_link_ok(struct efx_nic *efx)
@@ -283,15 +276,13 @@ static bool falcon_xmac_check_fault(struct efx_nic *efx)
static int falcon_reconfigure_xmac(struct efx_nic *efx)
{
- falcon_mask_status_intr(efx, false);
-
falcon_reconfigure_xgxs_core(efx);
falcon_reconfigure_xmac_core(efx);
falcon_reconfigure_mac_wrapper(efx);
efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
- falcon_mask_status_intr(efx, true);
+ falcon_ack_status_intr(efx);
return 0;
}
@@ -362,9 +353,8 @@ void falcon_poll_xmac(struct efx_nic *efx)
!efx->xmac_poll_required)
return;
- falcon_mask_status_intr(efx, false);
efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
- falcon_mask_status_intr(efx, true);
+ falcon_ack_status_intr(efx);
}
struct efx_mac_operations falcon_xmac_operations = {
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index c48669c..93cc3c1 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -613,7 +613,7 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
}
if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -647,8 +647,10 @@ int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
- if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN)
+ if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
+ rc = -EIO;
goto fail;
+ }
if (was_attached != NULL)
*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
@@ -676,7 +678,7 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
goto fail;
if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -738,8 +740,10 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
- if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN)
+ if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
+ rc = -EIO;
goto fail;
+ }
*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
return 0;
@@ -765,8 +769,10 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
- if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN)
+ if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
+ rc = -EIO;
goto fail;
+ }
*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
@@ -926,20 +932,26 @@ int efx_mcdi_nvram_test_all(struct efx_nic *efx)
rc = efx_mcdi_nvram_types(efx, &nvram_types);
if (rc)
- return rc;
+ goto fail1;
type = 0;
while (nvram_types != 0) {
if (nvram_types & 1) {
rc = efx_mcdi_nvram_test(efx, type);
if (rc)
- return rc;
+ goto fail2;
}
type++;
nvram_types >>= 1;
}
return 0;
+
+fail2:
+ EFX_ERR(efx, "%s: failed type=%u\n", __func__, type);
+fail1:
+ EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
}
static int efx_mcdi_read_assertion(struct efx_nic *efx)
@@ -968,7 +980,7 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
if (rc)
return rc;
if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
- return -EINVAL;
+ return -EIO;
/* Print out any recorded assertion state */
flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
@@ -1086,7 +1098,7 @@ int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
goto fail;
if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -1121,7 +1133,7 @@ int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
goto fail;
if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index 06d24a1..3918263 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -80,7 +80,7 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
int rc;
efx_dword_t *cmd_ptr;
- int period = 1000;
+ int period = enable ? 1000 : 0;
u32 addr_hi;
u32 addr_lo;
@@ -92,21 +92,14 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
- if (enable)
- EFX_POPULATE_DWORD_6(*cmd_ptr,
- MC_CMD_MAC_STATS_CMD_DMA, 1,
- MC_CMD_MAC_STATS_CMD_CLEAR, clear,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
- MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 1,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
- MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
- else
- EFX_POPULATE_DWORD_5(*cmd_ptr,
- MC_CMD_MAC_STATS_CMD_DMA, 0,
- MC_CMD_MAC_STATS_CMD_CLEAR, clear,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
- MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 0,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0);
+ EFX_POPULATE_DWORD_7(*cmd_ptr,
+ MC_CMD_MAC_STATS_CMD_DMA, !!enable,
+ MC_CMD_MAC_STATS_CMD_CLEAR, clear,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, !!enable,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT, 1,
+ MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index bd59302..90359e6 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -863,7 +863,7 @@
* bist output. The driver should only consume the BIST output
* after validating OUTLEN and PHY_CFG.PHY_TYPE.
*
- * If a driver can't succesfully parse the BIST output, it should
+ * If a driver can't successfully parse the BIST output, it should
* still respect the pass/Fail in OUT.RESULT
*
* Locks required: PHY_LOCK if doing a PHY BIST
@@ -872,7 +872,7 @@
#define MC_CMD_POLL_BIST 0x26
#define MC_CMD_POLL_BIST_IN_LEN 0
#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
-#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 40
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
#define MC_CMD_POLL_BIST_RUNNING 1
@@ -882,15 +882,14 @@
/* Generic: */
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
/* SFT9001-specific: */
-/* (offset 4 unused?) */
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 8
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 12
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 16
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 20
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 24
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 28
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 32
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 36
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1
#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2
#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3
@@ -1054,9 +1053,13 @@
/* MC_CMD_PHY_STATS:
* Get generic PHY statistics
*
- * This call returns the statistics for a generic PHY, by direct DMA
- * into host memory, in a sparse array (indexed by the enumerate).
- * Each value is represented by a 32bit number.
+ * This call returns the statistics for a generic PHY in a sparse
+ * array (indexed by the enumerate). Each value is represented by
+ * a 32bit number.
+ *
+ * If the DMA_ADDR is 0, then no DMA is performed, and the statistics
+ * may be read directly out of shared memory. If DMA_ADDR != 0, then
+ * the statistics are DMAed to that (page-aligned) location.
*
* Locks required: None
* Returns: 0, ETIME
@@ -1066,7 +1069,8 @@
#define MC_CMD_PHY_STATS_IN_LEN 8
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
-#define MC_CMD_PHY_STATS_OUT_LEN 0
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (MC_CMD_PHY_NSTATS * 4)
/* Unified MAC statistics enumeration */
#define MC_CMD_MAC_GENERATION_START 0
@@ -1158,11 +1162,13 @@
#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1
-/* Fields only relevent when PERIODIC_CHANGE is set */
+/* Remaining PERIOD* fields are only relevant when PERIODIC_CHANGE is set */
#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3
#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_WIDTH 1
#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16
#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16
#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
@@ -1729,6 +1735,39 @@
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */
+/* MC_CMD_TEST_HACK: (debug (unsurprisingly))
+ * Change bits of network port state for test purposes in ways that would never be
+ * useful in normal operation and so need a special command to change. */
+#define MC_CMD_TEST_HACK 0x2f
+#define MC_CMD_TEST_HACK_IN_LEN 8
+#define MC_CMD_TEST_HACK_IN_TXPAD_OFST 0
+#define MC_CMD_TEST_HACK_IN_TXPAD_AUTO 0 /* Let the MC manage things */
+#define MC_CMD_TEST_HACK_IN_TXPAD_ON 1 /* Force on */
+#define MC_CMD_TEST_HACK_IN_TXPAD_OFF 2 /* Force off */
+#define MC_CMD_TEST_HACK_IN_IPG_OFST 4 /* Takes a value in bits */
+#define MC_CMD_TEST_HACK_IN_IPG_AUTO 0 /* The MC picks the value */
+#define MC_CMD_TEST_HACK_OUT_LEN 0
+
+/* MC_CMD_SENSOR_SET_LIMS: (debug) (mostly) adjust the sensor limits. This
+ * is a warranty-voiding operation.
+ *
+ * IN: sensor identifier (one of the enumeration starting with MC_CMD_SENSOR_CONTROLLER_TEMP),
+ * followed by 4 32-bit values: min(warning), max(warning), min(fatal), max(fatal). Which
+ * of these limits are meaningful, and how they are interpreted, is sensor-specific.
+ *
+ * OUT: nothing
+ *
+ * Returns: ENOENT if the sensor specified does not exist, EINVAL if the limits are
+ * out of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+
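(Not part of the patch: a minimal sketch of how a driver-side debug helper might issue
MC_CMD_SENSOR_SET_LIMS using the offsets defined above and the efx_mcdi_rpc()/MCDI_SET_DWORD()
helpers already used elsewhere in this series. The mapping of LOW0/HI0 to the warning pair and
LOW1/HI1 to the fatal pair is an assumption based on the comment above, and the function name
is hypothetical.)

	static int efx_mcdi_sensor_set_lims(struct efx_nic *efx, u32 sensor,
					    u32 warn_min, u32 warn_max,
					    u32 fatal_min, u32 fatal_max)
	{
		u8 inbuf[MC_CMD_SENSOR_SET_LIMS_IN_LEN];

		MCDI_SET_DWORD(inbuf, SENSOR_SET_LIMS_IN_SENSOR, sensor);
		/* Assumption: LOW0/HI0 are the warning limits, LOW1/HI1 the fatal limits */
		MCDI_SET_DWORD(inbuf, SENSOR_SET_LIMS_IN_LOW0, warn_min);
		MCDI_SET_DWORD(inbuf, SENSOR_SET_LIMS_IN_HI0, warn_max);
		MCDI_SET_DWORD(inbuf, SENSOR_SET_LIMS_IN_LOW1, fatal_min);
		MCDI_SET_DWORD(inbuf, SENSOR_SET_LIMS_IN_HI1, fatal_max);

		/* No response payload is defined (MC_CMD_SENSOR_SET_LIMS_OUT_LEN == 0) */
		return efx_mcdi_rpc(efx, MC_CMD_SENSOR_SET_LIMS,
				    inbuf, sizeof(inbuf), NULL, 0, NULL);
	}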
/* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be
* used for post-3.0 extensions. If you run out of space, look for gaps or
* commands that are unused in the existing range. */
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 2f23546..6032c0e 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -17,6 +17,8 @@
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "mdio_10g.h"
+#include "nic.h"
+#include "selftest.h"
struct efx_mcdi_phy_cfg {
u32 flags;
@@ -48,7 +50,7 @@ efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg)
goto fail;
if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -111,7 +113,7 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
goto fail;
if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -587,13 +589,153 @@ static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
return rc;
if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
- return -EMSGSIZE;
+ return -EIO;
if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK)
return -EINVAL;
return 0;
}
+static const char *const mcdi_sft9001_cable_diag_names[] = {
+ "cable.pairA.length",
+ "cable.pairB.length",
+ "cable.pairC.length",
+ "cable.pairD.length",
+ "cable.pairA.status",
+ "cable.pairB.status",
+ "cable.pairC.status",
+ "cable.pairD.status",
+};
+
+static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
+ int *results)
+{
+ unsigned int retry, i, count = 0;
+ size_t outlen;
+ u32 status;
+ u8 *buf, *ptr;
+ int rc;
+
+ buf = kzalloc(0x100, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
+ MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode);
+ rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN,
+ NULL, 0, NULL);
+ if (rc)
+ goto out;
+
+ /* Wait up to 10s for BIST to finish */
+ for (retry = 0; retry < 100; ++retry) {
+ BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ buf, 0x100, &outlen);
+ if (rc)
+ goto out;
+
+ status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT);
+ if (status != MC_CMD_POLL_BIST_RUNNING)
+ goto finished;
+
+ msleep(100);
+ }
+
+ rc = -ETIMEDOUT;
+ goto out;
+
+finished:
+ results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;
+
+ /* SFT9001 specific cable diagnostics output */
+ if (efx->phy_type == PHY_TYPE_SFT9001B &&
+ (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
+ bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
+ ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ if (status == MC_CMD_POLL_BIST_PASSED &&
+ outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
+ for (i = 0; i < 8; i++) {
+ results[count + i] =
+ EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
+ EFX_DWORD_0);
+ }
+ }
+ count += 8;
+ }
+ rc = count;
+
+out:
+ kfree(buf);
+
+ return rc;
+}
+
+static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
+ unsigned flags)
+{
+ struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ u32 mode;
+ int rc;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
+ rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
+ if (rc < 0)
+ return rc;
+
+ results += rc;
+ }
+
+ /* If we support both LONG and SHORT, run LONG for offline tests and
+ * SHORT otherwise. If only one is supported, run that one. */
+ mode = 0;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN)) {
+ if ((flags & ETH_TEST_FL_OFFLINE) &&
+ (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN)))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+ else
+ mode = MC_CMD_PHY_BIST_CABLE_SHORT;
+ } else if (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+
+ if (mode != 0) {
+ rc = efx_mcdi_bist(efx, mode, results);
+ if (rc < 0)
+ return rc;
+ results += rc;
+ }
+
+ return 0;
+}
+
+const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
+{
+ struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
+ if (index == 0)
+ return "bist";
+ --index;
+ }
+
+ if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN) |
+ (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) {
+ if (index == 0)
+ return "cable";
+ --index;
+
+ if (efx->phy_type == PHY_TYPE_SFT9001B) {
+ if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names))
+ return mcdi_sft9001_cable_diag_names[index];
+ index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names);
+ }
+ }
+
+ return NULL;
+}
+
struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
.init = efx_port_dummy_op_int,
@@ -604,6 +746,6 @@ struct efx_phy_operations efx_mcdi_phy_ops = {
.get_settings = efx_mcdi_phy_get_settings,
.set_settings = efx_mcdi_phy_set_settings,
.test_alive = efx_mcdi_phy_test_alive,
- .run_tests = NULL,
- .test_name = NULL,
+ .run_tests = efx_mcdi_phy_run_tests,
+ .test_name = efx_mcdi_phy_test_name,
};
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index cb018e2..2e6fd89 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -85,9 +85,13 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
#define EFX_MAX_CHANNELS 32
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
-#define EFX_TX_QUEUE_OFFLOAD_CSUM 0
-#define EFX_TX_QUEUE_NO_CSUM 1
-#define EFX_TX_QUEUE_COUNT 2
+/* Checksum generation is a per-queue option in hardware, so each
+ * queue visible to the networking core is backed by two hardware TX
+ * queues. */
+#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS
+#define EFX_TXQ_TYPE_OFFLOAD 1
+#define EFX_TXQ_TYPES 2
+#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
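(Not part of the patch: an illustrative helper, mirroring the arithmetic used by
efx_hard_start_xmit() later in this series, showing how a core TX queue index maps onto its
two hardware queues. The function name is hypothetical.)

	static inline unsigned efx_hw_tx_queue_index(unsigned core_queue, bool csum_offload)
	{
		/* Each core queue is backed by EFX_TXQ_TYPES (= 2) hardware queues;
		 * the checksum-offload queue sits at offset EFX_TXQ_TYPE_OFFLOAD (= 1). */
		return core_queue * EFX_TXQ_TYPES +
		       (csum_offload ? EFX_TXQ_TYPE_OFFLOAD : 0);
	}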
/**
* struct efx_special_buffer - An Efx special buffer
@@ -187,7 +191,7 @@ struct efx_tx_buffer {
struct efx_tx_queue {
/* Members which don't change on the fast path */
struct efx_nic *efx ____cacheline_aligned_in_smp;
- int queue;
+ unsigned queue;
struct efx_channel *channel;
struct efx_nic *nic;
struct efx_tx_buffer *buffer;
@@ -306,11 +310,6 @@ struct efx_buffer {
};
-/* Flags for channel->used_flags */
-#define EFX_USED_BY_RX 1
-#define EFX_USED_BY_TX 2
-#define EFX_USED_BY_RX_TX (EFX_USED_BY_RX | EFX_USED_BY_TX)
-
enum efx_rx_alloc_method {
RX_ALLOC_METHOD_AUTO = 0,
RX_ALLOC_METHOD_SKB = 1,
@@ -327,7 +326,6 @@ enum efx_rx_alloc_method {
* @efx: Associated Efx NIC
* @channel: Channel instance number
* @name: Name for channel and IRQ
- * @used_flags: Channel is used by net driver
* @enabled: Channel enabled indicator
* @irq: IRQ number (MSI and MSI-X only)
* @irq_moderation: IRQ moderation value (in hardware ticks)
@@ -352,12 +350,14 @@ enum efx_rx_alloc_method {
* @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
* @n_rx_overlength: Count of RX_OVERLENGTH errors
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ * @tx_queue: Pointer to first TX queue, or %NULL if not used for TX
+ * @tx_stop_count: Core TX queue stop count
+ * @tx_stop_lock: Core TX queue stop lock
*/
struct efx_channel {
struct efx_nic *efx;
int channel;
char name[IFNAMSIZ + 6];
- int used_flags;
bool enabled;
int irq;
unsigned int irq_moderation;
@@ -389,6 +389,9 @@ struct efx_channel {
struct efx_rx_buffer *rx_pkt;
bool rx_pkt_csummed;
+ struct efx_tx_queue *tx_queue;
+ atomic_t tx_stop_count;
+ spinlock_t tx_stop_lock;
};
enum efx_led_mode {
@@ -661,8 +664,9 @@ union efx_multicast_hash {
* @rx_queue: RX DMA queues
* @channel: Channels
* @next_buffer_table: First available buffer table id
- * @n_rx_queues: Number of RX queues
* @n_channels: Number of channels in use
+ * @n_rx_channels: Number of channels used for RX (= number of RX queues)
+ * @n_tx_channels: Number of channels used for TX
* @rx_buffer_len: RX buffer length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @int_error_count: Number of internal errors seen recently
@@ -672,6 +676,8 @@ union efx_multicast_hash {
* This register is written with the SMP processor ID whenever an
* interrupt is handled. It is used by efx_nic_test_interrupt()
* to verify that an interrupt has occurred.
+ * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
+ * @fatal_irq_level: IRQ level (bit number) used for serious errors
* @spi_flash: SPI flash device
* This field will be %NULL if no flash device is present (or for Siena).
* @spi_eeprom: SPI EEPROM device
@@ -691,8 +697,6 @@ union efx_multicast_hash {
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
* @rx_checksum_enabled: RX checksumming enabled
- * @netif_stop_count: Port stop count
- * @netif_stop_lock: Port stop lock
* @mac_stats: MAC statistics. These include all statistics the MACs
* can provide. Generic code converts these into a standard
* &struct net_device_stats.
@@ -740,13 +744,14 @@ struct efx_nic {
enum nic_state state;
enum reset_type reset_pending;
- struct efx_tx_queue tx_queue[EFX_TX_QUEUE_COUNT];
+ struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
struct efx_channel channel[EFX_MAX_CHANNELS];
unsigned next_buffer_table;
- int n_rx_queues;
- int n_channels;
+ unsigned n_channels;
+ unsigned n_rx_channels;
+ unsigned n_tx_channels;
unsigned int rx_buffer_len;
unsigned int rx_buffer_order;
@@ -755,7 +760,8 @@ struct efx_nic {
struct efx_buffer irq_status;
volatile signed int last_irq_cpu;
- unsigned long irq_zero_count;
+ unsigned irq_zero_count;
+ unsigned fatal_irq_level;
struct efx_spi_device *spi_flash;
struct efx_spi_device *spi_eeprom;
@@ -777,9 +783,6 @@ struct efx_nic {
struct net_device *net_dev;
bool rx_checksum_enabled;
- atomic_t netif_stop_count;
- spinlock_t netif_stop_lock;
-
struct efx_mac_stats mac_stats;
struct efx_buffer stats_buffer;
spinlock_t stats_lock;
@@ -924,40 +927,35 @@ struct efx_nic_type {
/* Iterate over all used channels */
#define efx_for_each_channel(_channel, _efx) \
- for (_channel = &_efx->channel[0]; \
- _channel < &_efx->channel[EFX_MAX_CHANNELS]; \
- _channel++) \
- if (!_channel->used_flags) \
- continue; \
- else
+ for (_channel = &((_efx)->channel[0]); \
+ _channel < &((_efx)->channel[(_efx)->n_channels]); \
+ _channel++)
/* Iterate over all used TX queues */
#define efx_for_each_tx_queue(_tx_queue, _efx) \
- for (_tx_queue = &_efx->tx_queue[0]; \
- _tx_queue < &_efx->tx_queue[EFX_TX_QUEUE_COUNT]; \
+ for (_tx_queue = &((_efx)->tx_queue[0]); \
+ _tx_queue < &((_efx)->tx_queue[EFX_TXQ_TYPES * \
+ (_efx)->n_tx_channels]); \
_tx_queue++)
/* Iterate over all TX queues belonging to a channel */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
- for (_tx_queue = &_channel->efx->tx_queue[0]; \
- _tx_queue < &_channel->efx->tx_queue[EFX_TX_QUEUE_COUNT]; \
- _tx_queue++) \
- if (_tx_queue->channel != _channel) \
- continue; \
- else
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+ _tx_queue++)
/* Iterate over all used RX queues */
#define efx_for_each_rx_queue(_rx_queue, _efx) \
- for (_rx_queue = &_efx->rx_queue[0]; \
- _rx_queue < &_efx->rx_queue[_efx->n_rx_queues]; \
+ for (_rx_queue = &((_efx)->rx_queue[0]); \
+ _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_channels]); \
_rx_queue++)
/* Iterate over all RX queues belonging to a channel */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
- for (_rx_queue = &_channel->efx->rx_queue[_channel->channel]; \
+ for (_rx_queue = &((_channel)->efx->rx_queue[(_channel)->channel]); \
_rx_queue; \
_rx_queue = NULL) \
- if (_rx_queue->channel != _channel) \
+ if (_rx_queue->channel != (_channel)) \
continue; \
else
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index b06f8e3..5d3aaec 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -418,7 +418,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
FRF_BZ_TX_NON_IP_DROP_DIS, 1);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
+ int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
!csum);
@@ -431,10 +431,10 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
efx_oword_t reg;
/* Only 128 bits in this register */
- BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
+ BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
- if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
+ if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
clear_bit_le(tx_queue->queue, (void *)&reg);
else
set_bit_le(tx_queue->queue, (void *)&reg);
@@ -654,22 +654,23 @@ void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
* The NIC batches TX completion events; the message we receive is of
* the form "complete all TX events up to this index".
*/
-static void
+static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
unsigned int tx_ev_desc_ptr;
unsigned int tx_ev_q_label;
struct efx_tx_queue *tx_queue;
struct efx_nic *efx = channel->efx;
+ int tx_packets = 0;
if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
/* Transmit completion */
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = &efx->tx_queue[tx_ev_q_label];
- channel->irq_mod_score +=
- (tx_ev_desc_ptr - tx_queue->read_count) &
- EFX_TXQ_MASK;
+ tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
+ EFX_TXQ_MASK);
+ channel->irq_mod_score += tx_packets;
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
@@ -689,6 +690,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
EFX_QWORD_FMT"\n", channel->channel,
EFX_QWORD_VAL(*event));
}
+
+ return tx_packets;
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
@@ -947,16 +950,17 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
}
}
-int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
+int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
unsigned int read_ptr;
efx_qword_t event, *p_event;
int ev_code;
- int rx_packets = 0;
+ int tx_packets = 0;
+ int spent = 0;
read_ptr = channel->eventq_read_ptr;
- do {
+ for (;;) {
p_event = efx_event(channel, read_ptr);
event = *p_event;
@@ -970,15 +974,23 @@ int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
/* Clear this event by marking it all ones */
EFX_SET_QWORD(*p_event);
+ /* Increment read pointer */
+ read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
+
ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
switch (ev_code) {
case FSE_AZ_EV_CODE_RX_EV:
efx_handle_rx_event(channel, &event);
- ++rx_packets;
+ if (++spent == budget)
+ goto out;
break;
case FSE_AZ_EV_CODE_TX_EV:
- efx_handle_tx_event(channel, &event);
+ tx_packets += efx_handle_tx_event(channel, &event);
+ if (tx_packets >= EFX_TXQ_SIZE) {
+ spent = budget;
+ goto out;
+ }
break;
case FSE_AZ_EV_CODE_DRV_GEN_EV:
channel->eventq_magic = EFX_QWORD_FIELD(
@@ -1001,14 +1013,11 @@ int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
" (data " EFX_QWORD_FMT ")\n", channel->channel,
ev_code, EFX_QWORD_VAL(event));
}
+ }
- /* Increment read pointer */
- read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
-
- } while (rx_packets < rx_quota);
-
+out:
channel->eventq_read_ptr = read_ptr;
- return rx_packets;
+ return spent;
}
@@ -1123,7 +1132,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
ev_queue = EFX_QWORD_FIELD(*event,
FSF_AZ_DRIVER_EV_SUBDATA);
- if (ev_queue < EFX_TX_QUEUE_COUNT) {
+ if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
tx_queue = efx->tx_queue + ev_queue;
tx_queue->flushed = FLUSH_DONE;
}
@@ -1133,7 +1142,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
ev_failed = EFX_QWORD_FIELD(
*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
- if (ev_queue < efx->n_rx_queues) {
+ if (ev_queue < efx->n_rx_channels) {
rx_queue = efx->rx_queue + ev_queue;
rx_queue->flushed =
ev_failed ? FLUSH_FAILED : FLUSH_DONE;
@@ -1229,15 +1238,9 @@ static inline void efx_nic_interrupts(struct efx_nic *efx,
bool enabled, bool force)
{
efx_oword_t int_en_reg_ker;
- unsigned int level = 0;
-
- if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
- /* Set the level always even if we're generating a test
- * interrupt, because our legacy interrupt handler is safe */
- level = 0x1f;
EFX_POPULATE_OWORD_3(int_en_reg_ker,
- FRF_AZ_KER_INT_LEVE_SEL, level,
+ FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
FRF_AZ_KER_INT_KER, force,
FRF_AZ_DRV_INT_EN_KER, enabled);
efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
@@ -1291,11 +1294,10 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
EFX_OWORD_VAL(fatal_intr),
error ? "disabling bus mastering" : "no recognised error");
- if (error == 0)
- goto out;
/* If this is a memory parity error dump which blocks are offending */
- mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
+ mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
+ EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
if (mem_perr) {
efx_oword_t reg;
efx_reado(efx, &reg, FR_AZ_MEM_STAT);
@@ -1324,7 +1326,7 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
"NIC will be disabled\n");
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}
-out:
+
return IRQ_HANDLED;
}
@@ -1346,9 +1348,11 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
queues = EFX_EXTRACT_DWORD(reg, 0, 31);
/* Check to see if we have a serious error condition */
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
+ if (queues & (1U << efx->fatal_irq_level)) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
if (queues != 0) {
if (EFX_WORKAROUND_15783(efx))
@@ -1362,33 +1366,28 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
}
result = IRQ_HANDLED;
- } else if (EFX_WORKAROUND_15783(efx) &&
- efx->irq_zero_count++ == 0) {
+ } else if (EFX_WORKAROUND_15783(efx)) {
efx_qword_t *event;
- /* Ensure we rearm all event queues */
+ /* We can't return IRQ_HANDLED more than once on seeing ISR=0
+ * because this might be a shared interrupt. */
+ if (efx->irq_zero_count++ == 0)
+ result = IRQ_HANDLED;
+
+ /* Ensure we schedule or rearm all event queues */
efx_for_each_channel(channel, efx) {
event = efx_event(channel, channel->eventq_read_ptr);
if (efx_event_present(event))
efx_schedule_channel(channel);
+ else
+ efx_nic_eventq_read_ack(channel);
}
-
- result = IRQ_HANDLED;
}
if (result == IRQ_HANDLED) {
efx->last_irq_cpu = raw_smp_processor_id();
EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
- } else if (EFX_WORKAROUND_15783(efx)) {
- /* We can't return IRQ_HANDLED more than once on seeing ISR0=0
- * because this might be a shared interrupt, but we do need to
- * check the channel every time and preemptively rearm it if
- * it's idle. */
- efx_for_each_channel(channel, efx) {
- if (!channel->work_pending)
- efx_nic_eventq_read_ack(channel);
- }
}
return result;
@@ -1413,9 +1412,11 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
/* Check to see if we have a serious error condition */
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
+ if (channel->channel == efx->fatal_irq_level) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
/* Schedule processing of the channel */
efx_schedule_channel(channel);
@@ -1440,7 +1441,7 @@ static void efx_setup_rss_indir_table(struct efx_nic *efx)
offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
offset += 0x10) {
EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
- i % efx->n_rx_queues);
+ i % efx->n_rx_channels);
efx_writed(efx, &dword, offset);
i++;
}
@@ -1553,6 +1554,13 @@ void efx_nic_init_common(struct efx_nic *efx)
FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
+ if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
+ /* Use an interrupt level unused by event queues */
+ efx->fatal_irq_level = 0x1f;
+ else
+ /* Use a valid MSI-X vector */
+ efx->fatal_irq_level = 0;
+
/* Enable all the genuinely fatal interrupts. (They are still
* masked by the overall interrupt mask, controlled by
* falcon_interrupts()).
@@ -1563,6 +1571,8 @@ void efx_nic_init_common(struct efx_nic *efx)
FRF_AZ_ILL_ADR_INT_KER_EN, 1,
FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 3166baf..bbc2c0c 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -135,12 +135,14 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
* @fw_build: Firmware build number
* @mcdi: Management-Controller-to-Driver Interface
* @wol_filter_id: Wake-on-LAN packet filter id
+ * @ipv6_rss_key: Toeplitz hash key for IPv6 RSS
*/
struct siena_nic_data {
u64 fw_version;
u32 fw_build;
struct efx_mcdi_iface mcdi;
int wol_filter_id;
+ u8 ipv6_rss_key[40];
};
extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
@@ -203,6 +205,7 @@ extern void falcon_irq_ack_a1(struct efx_nic *efx);
extern int efx_nic_flush_queues(struct efx_nic *efx);
extern void falcon_start_nic_stats(struct efx_nic *efx);
extern void falcon_stop_nic_stats(struct efx_nic *efx);
+extern void falcon_setup_xaui(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
extern void efx_nic_init_common(struct efx_nic *efx);
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0106b1d..371e86c 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -616,10 +616,10 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
goto out;
}
- /* Test every TX queue */
- efx_for_each_tx_queue(tx_queue, efx) {
- state->offload_csum = (tx_queue->queue ==
- EFX_TX_QUEUE_OFFLOAD_CSUM);
+ /* Test both types of TX queue */
+ efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
+ state->offload_csum = (tx_queue->queue &
+ EFX_TXQ_TYPE_OFFLOAD);
rc = efx_test_loopback(tx_queue,
&tests->loopback[mode]);
if (rc)
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index 643bef7..aed495a 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -18,8 +18,8 @@
*/
struct efx_loopback_self_tests {
- int tx_sent[EFX_TX_QUEUE_COUNT];
- int tx_done[EFX_TX_QUEUE_COUNT];
+ int tx_sent[EFX_TXQ_TYPES];
+ int tx_done[EFX_TXQ_TYPES];
int rx_good;
int rx_bad;
};
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index e0c46f5..727b422 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -13,6 +13,7 @@
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/random.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
@@ -274,6 +275,9 @@ static int siena_probe_nic(struct efx_nic *efx)
goto fail5;
}
+ get_random_bytes(&nic_data->ipv6_rss_key,
+ sizeof(nic_data->ipv6_rss_key));
+
return 0;
fail5:
@@ -293,6 +297,7 @@ fail1:
*/
static int siena_init_nic(struct efx_nic *efx)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
efx_oword_t temp;
int rc;
@@ -319,6 +324,20 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
+ /* Enable IPv6 RSS */
+ BUILD_BUG_ON(sizeof(nic_data->ipv6_rss_key) !=
+ 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
+ memcpy(&temp, nic_data->ipv6_rss_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
+ memcpy(&temp, nic_data->ipv6_rss_key + sizeof(temp), sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
+ EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
+ memcpy(&temp, nic_data->ipv6_rss_key + 2 * sizeof(temp),
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+
if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
/* No MCDI operation has been defined to set thresholds */
EFX_ERR(efx, "ignoring RX flow control thresholds\n");
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index be0e110..6bb12a8 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -30,32 +30,46 @@
*/
#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
-/* We want to be able to nest calls to netif_stop_queue(), since each
- * channel can have an individual stop on the queue.
- */
-void efx_stop_queue(struct efx_nic *efx)
+/* We need to be able to nest calls to netif_tx_stop_queue(), partly
+ * because of the 2 hardware queues associated with each core queue,
+ * but also so that we can inhibit TX for reasons other than a full
+ * hardware queue. */
+void efx_stop_queue(struct efx_channel *channel)
{
- spin_lock_bh(&efx->netif_stop_lock);
+ struct efx_nic *efx = channel->efx;
+
+ if (!channel->tx_queue)
+ return;
+
+ spin_lock_bh(&channel->tx_stop_lock);
EFX_TRACE(efx, "stop TX queue\n");
- atomic_inc(&efx->netif_stop_count);
- netif_stop_queue(efx->net_dev);
+ atomic_inc(&channel->tx_stop_count);
+ netif_tx_stop_queue(
+ netdev_get_tx_queue(
+ efx->net_dev,
+ channel->tx_queue->queue / EFX_TXQ_TYPES));
- spin_unlock_bh(&efx->netif_stop_lock);
+ spin_unlock_bh(&channel->tx_stop_lock);
}
-/* Wake netif's TX queue
- * We want to be able to nest calls to netif_stop_queue(), since each
- * channel can have an individual stop on the queue.
- */
-void efx_wake_queue(struct efx_nic *efx)
+/* Decrement core TX queue stop count and wake it if the count is 0 */
+void efx_wake_queue(struct efx_channel *channel)
{
+ struct efx_nic *efx = channel->efx;
+
+ if (!channel->tx_queue)
+ return;
+
local_bh_disable();
- if (atomic_dec_and_lock(&efx->netif_stop_count,
- &efx->netif_stop_lock)) {
+ if (atomic_dec_and_lock(&channel->tx_stop_count,
+ &channel->tx_stop_lock)) {
EFX_TRACE(efx, "waking TX queue\n");
- netif_wake_queue(efx->net_dev);
- spin_unlock(&efx->netif_stop_lock);
+ netif_tx_wake_queue(
+ netdev_get_tx_queue(
+ efx->net_dev,
+ channel->tx_queue->queue / EFX_TXQ_TYPES));
+ spin_unlock(&channel->tx_stop_lock);
}
local_bh_enable();
}
@@ -298,7 +312,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
rc = NETDEV_TX_BUSY;
if (tx_queue->stopped == 1)
- efx_stop_queue(efx);
+ efx_stop_queue(tx_queue->channel);
unwind:
/* Work backwards until we hit the original insert pointer value */
@@ -374,10 +388,9 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
if (unlikely(efx->port_inhibited))
return NETDEV_TX_BUSY;
+ tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];
if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
- tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
- else
- tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
+ tx_queue += EFX_TXQ_TYPE_OFFLOAD;
return efx_enqueue_skb(tx_queue, skb);
}
@@ -405,7 +418,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
netif_tx_lock(efx->net_dev);
if (tx_queue->stopped) {
tx_queue->stopped = 0;
- efx_wake_queue(efx);
+ efx_wake_queue(tx_queue->channel);
}
netif_tx_unlock(efx->net_dev);
}
@@ -488,7 +501,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
/* Release queue's stop on port, if any */
if (tx_queue->stopped) {
tx_queue->stopped = 0;
- efx_wake_queue(tx_queue->efx);
+ efx_wake_queue(tx_queue->channel);
}
}
@@ -1120,7 +1133,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
/* Stop the queue if it wasn't stopped before. */
if (tx_queue->stopped == 1)
- efx_stop_queue(efx);
+ efx_stop_queue(tx_queue->channel);
unwind:
/* Free the DMA mapping we were in the process of writing out */
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index acd9c73..518f7fc 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -37,7 +37,7 @@
/* Truncated IPv4 packets can confuse the TX packet parser */
#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
/* Legacy ISR read can return zero once */
-#define EFX_WORKAROUND_15783 EFX_WORKAROUND_SIENA
+#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index c8fc896..cc4bd8c 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -574,7 +574,7 @@ static inline int sgiseeq_reset(struct net_device *dev)
if (err)
return err;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
return 0;
@@ -638,8 +638,6 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
kick_tx(dev, sp, hregs);
- dev->trans_start = jiffies;
-
if (!TX_BUFFS_AVAIL(sp))
netif_stop_queue(dev);
spin_unlock_irqrestore(&sp->tx_lock, flags);
@@ -652,7 +650,7 @@ static void timeout(struct net_device *dev)
printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
sgiseeq_reset(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 6242b85..586ed09 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1148,8 +1148,6 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
- ndev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index b30ce75..a5d6a6b 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -849,13 +849,13 @@ static void sis190_set_rx_mode(struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
int bit_nr =
- ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ ether_crc(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
}
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index cc0c731..bbbded7 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -858,7 +858,6 @@ static void mdio_reset(long mdio_addr)
outl(MDDIR | MDIO | MDC, mdio_addr);
mdio_delay();
}
- return;
}
/**
@@ -953,8 +952,6 @@ static void mdio_write(struct net_device *net_dev, int phy_id, int location,
mdio_delay();
}
outl(0x00, mdio_addr);
-
- return;
}
@@ -1264,7 +1261,6 @@ static void sis630_set_eq(struct net_device *net_dev, u8 revision)
mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
(reg14h | 0x2000) & 0xBFFF);
}
- return;
}
/**
@@ -1499,7 +1495,7 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
}
if(netif_msg_link(sis_priv))
- printk(KERN_INFO "%s: Media Link On %s %s-duplex \n",
+ printk(KERN_INFO "%s: Media Link On %s %s-duplex\n",
net_dev->name,
*speed == HW_SPEED_100_MBPS ?
"100mbps" : "10mbps",
@@ -1523,7 +1519,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
int i;
if(netif_msg_tx_err(sis_priv))
- printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x \n",
+ printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr));
/* Disable interrupts by clearing the interrupt mask. */
@@ -1553,14 +1549,13 @@ static void sis900_tx_timeout(struct net_device *net_dev)
spin_unlock_irqrestore(&sis_priv->lock, flags);
- net_dev->trans_start = jiffies;
+ net_dev->trans_start = jiffies; /* prevent tx timeout */
/* load Transmit Descriptor Register */
outl(sis_priv->tx_ring_dma, ioaddr + txdp);
/* Enable all known interrupts by setting the interrupt mask. */
outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
- return;
}
/**
@@ -1623,8 +1618,6 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
spin_unlock_irqrestore(&sis_priv->lock, flags);
- net_dev->trans_start = jiffies;
-
if (netif_msg_tx_queued(sis_priv))
printk(KERN_DEBUG "%s: Queued Tx packet at %p size %d "
"to slot %d.\n",
@@ -2298,12 +2291,14 @@ static void set_rx_mode(struct net_device *net_dev)
/* Accept Broadcast packet, destination address matchs our
* MAC address, use Receive Filter to reject unwanted MCAST
* packets */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = RFAAB;
- netdev_for_each_mc_addr(mclist, net_dev) {
- unsigned int bit_nr =
- sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev);
+ netdev_for_each_mc_addr(ha, net_dev) {
+ unsigned int bit_nr;
+
+ bit_nr = sis900_mcast_bitnr(ha->addr,
+ sis_priv->chipset_rev);
mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf));
}
}
@@ -2330,8 +2325,6 @@ static void set_rx_mode(struct net_device *net_dev)
/* restore cr */
outl(cr_saved, ioaddr + cr);
}
-
- return;
}
/**
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index 6028bbb..9d8d1ac 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -1352,7 +1352,7 @@ void rtm_set_timer(struct s_smc *smc)
/*
* MIB timer and hardware timer have the same resolution of 80nS
*/
- DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns \n",
+ DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns\n",
(int) smc->mib.a[PATH0].fddiPATHT_Rmode,0) ;
outpd(ADDR(B2_RTM_INI),smc->mib.a[PATH0].fddiPATHT_Rmode) ;
}
diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c
index e6b33ee..ba45bc7 100644
--- a/drivers/net/skfp/pcmplc.c
+++ b/drivers/net/skfp/pcmplc.c
@@ -1277,7 +1277,7 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
mib = phy->mib ;
- DB_PCMN(1,"SIG rec %x %x: \n", bit,phy->r_val[bit] ) ;
+ DB_PCMN(1,"SIG rec %x %x:\n", bit,phy->r_val[bit] ) ;
bit++ ;
switch(bit) {
@@ -1580,7 +1580,7 @@ static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy
mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ;
break ;
}
- DB_PCMN(1,"SIG snd %x %x: \n", bit,phy->t_val[bit] ) ;
+ DB_PCMN(1,"SIG snd %x %x:\n", bit,phy->t_val[bit] ) ;
}
/*
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index d9016b7..31b2dab 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -844,7 +844,6 @@ static void skfp_ctl_set_multicast_list(struct net_device *dev)
spin_lock_irqsave(&bp->DriverLock, Flags);
skfp_ctl_set_multicast_list_wo_lock(dev);
spin_unlock_irqrestore(&bp->DriverLock, Flags);
- return;
} // skfp_ctl_set_multicast_list
@@ -852,7 +851,7 @@ static void skfp_ctl_set_multicast_list(struct net_device *dev)
static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
/* Enable promiscuous mode, if necessary */
if (dev->flags & IFF_PROMISC) {
@@ -876,13 +875,13 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
/* use exact filtering */
// point to first multicast addr
- netdev_for_each_mc_addr(dmi, dev) {
- mac_add_multicast(smc,
- (struct fddi_addr *)dmi->dmi_addr,
- 1);
+ netdev_for_each_mc_addr(ha, dev) {
+ mac_add_multicast(smc,
+ (struct fddi_addr *)ha->addr,
+ 1);
pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
- dmi->dmi_addr);
+ ha->addr);
}
} else { // more MC addresses than HW supports
@@ -898,7 +897,6 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
/* Update adapter filters */
mac_update_multicast(smc);
}
- return;
} // skfp_ctl_set_multicast_list_wo_lock
@@ -1076,7 +1074,6 @@ static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
if (bp->QueueSkb == 0) {
netif_stop_queue(dev);
}
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
} // skfp_send_pkt
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index 83d16fe..6f35bb7 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -574,7 +574,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
if (smt_check_para(smc,sm,plist_nif)) {
DB_SMT("SMT: NIF with para problem, ignoring\n",0,0) ;
break ;
- } ;
+ }
switch (sm->smt_type) {
case SMT_ANNOUNCE :
case SMT_REQUEST :
diff --git a/drivers/net/skfp/srf.c b/drivers/net/skfp/srf.c
index 6caf713..40882b3 100644
--- a/drivers/net/skfp/srf.c
+++ b/drivers/net/skfp/srf.c
@@ -414,7 +414,7 @@ static void smt_send_srf(struct s_smc *smc)
smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
- DB_SMT("SRF: sending SRF at %x, len %d \n",smt,mb->sm_len) ;
+ DB_SMT("SRF: sending SRF at %x, len %d\n",smt,mb->sm_len) ;
DB_SMT("SRF: state SR%d Threshold %d\n",
smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ;
#ifdef DEBUG
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 50eb706..40e5c46 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -984,8 +984,8 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
wmb();
rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
- pci_unmap_addr_set(e, mapaddr, map);
- pci_unmap_len_set(e, maplen, bufsize);
+ dma_unmap_addr_set(e, mapaddr, map);
+ dma_unmap_len_set(e, maplen, bufsize);
}
/* Resume receiving using existing skb,
@@ -1018,8 +1018,8 @@ static void skge_rx_clean(struct skge_port *skge)
rd->control = 0;
if (e->skb) {
pci_unmap_single(hw->pdev,
- pci_unmap_addr(e, mapaddr),
- pci_unmap_len(e, maplen),
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
dev_kfree_skb(e->skb);
e->skb = NULL;
@@ -2756,8 +2756,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
e->skb = skb;
len = skb_headlen(skb);
map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
- pci_unmap_addr_set(e, mapaddr, map);
- pci_unmap_len_set(e, maplen, len);
+ dma_unmap_addr_set(e, mapaddr, map);
+ dma_unmap_len_set(e, maplen, len);
td->dma_lo = map;
td->dma_hi = map >> 32;
@@ -2799,8 +2799,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
tf->dma_lo = map;
tf->dma_hi = (u64) map >> 32;
- pci_unmap_addr_set(e, mapaddr, map);
- pci_unmap_len_set(e, maplen, frag->size);
+ dma_unmap_addr_set(e, mapaddr, map);
+ dma_unmap_len_set(e, maplen, frag->size);
tf->control = BMU_OWN | BMU_SW | control | frag->size;
}
@@ -2837,12 +2837,12 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
/* skb header vs. fragment */
if (control & BMU_STF)
- pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
- pci_unmap_len(e, maplen),
+ pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
PCI_DMA_TODEVICE);
else
- pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
- pci_unmap_len(e, maplen),
+ pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
PCI_DMA_TODEVICE);
if (control & BMU_EOF) {
@@ -2918,7 +2918,7 @@ static void genesis_set_multicast(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
u32 mode;
u8 filter[8];
@@ -2938,8 +2938,8 @@ static void genesis_set_multicast(struct net_device *dev)
skge->flow_status == FLOW_STAT_SYMMETRIC)
genesis_add_filter(filter, pause_mc_addr);
- netdev_for_each_mc_addr(list, dev)
- genesis_add_filter(filter, list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ genesis_add_filter(filter, ha->addr);
}
xm_write32(hw, port, XM_MODE, mode);
@@ -2957,7 +2957,7 @@ static void yukon_set_multicast(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
skge->flow_status == FLOW_STAT_SYMMETRIC);
u16 reg;
@@ -2980,8 +2980,8 @@ static void yukon_set_multicast(struct net_device *dev)
if (rx_pause)
yukon_add_filter(filter, pause_mc_addr);
- netdev_for_each_mc_addr(list, dev)
- yukon_add_filter(filter, list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ yukon_add_filter(filter, ha->addr);
}
@@ -3060,11 +3060,11 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
goto resubmit;
pci_dma_sync_single_for_cpu(skge->hw->pdev,
- pci_unmap_addr(e, mapaddr),
+ dma_unmap_addr(e, mapaddr),
len, PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(e->skb, skb->data, len);
pci_dma_sync_single_for_device(skge->hw->pdev,
- pci_unmap_addr(e, mapaddr),
+ dma_unmap_addr(e, mapaddr),
len, PCI_DMA_FROMDEVICE);
skge_rx_reuse(e, skge->rx_buf_size);
} else {
@@ -3075,8 +3075,8 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
goto resubmit;
pci_unmap_single(skge->hw->pdev,
- pci_unmap_addr(e, mapaddr),
- pci_unmap_len(e, maplen),
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
skb = e->skb;
prefetch(skb->data);
@@ -3667,7 +3667,7 @@ static int skge_debug_show(struct seq_file *seq, void *v)
t->csum_offs, t->csum_write, t->csum_start);
}
- seq_printf(seq, "\nRx Ring: \n");
+ seq_printf(seq, "\nRx Ring:\n");
for (e = skge->rx_ring.to_clean; ; e = e->next) {
const struct skge_rx_desc *r = e->desc;
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 831de1b..507addc 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2393,8 +2393,8 @@ struct skge_element {
struct skge_element *next;
void *desc;
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
struct skge_ring {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 088c797..2111c7b 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -53,7 +53,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.27"
+#define DRV_VERSION "1.28"
/*
* The Yukon II chipset takes 64 bit command blocks (called list elements)
@@ -70,18 +70,15 @@
VLAN:GSO + CKSUM + Data + skb_frags * DMA */
#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
-#define TX_MAX_PENDING 4096
+#define TX_MAX_PENDING 1024
#define TX_DEF_PENDING 127
-#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
-#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
#define TX_WATCHDOG (5 * HZ)
#define NAPI_WEIGHT 64
#define PHY_RETRIES 1000
#define SKY2_EEPROM_MAGIC 0x9955aabb
-
#define RING_NEXT(x,s) (((x)+1) & ((s)-1))
static const u32 default_msg =
@@ -227,7 +224,7 @@ static void sky2_power_on(struct sky2_hw *hw)
/* disable Core Clock Division, */
sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
/* enable bits are inverted */
sky2_write8(hw, B2_Y2_CLK_GATE,
Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
@@ -269,7 +266,7 @@ static void sky2_power_on(struct sky2_hw *hw)
static void sky2_power_aux(struct sky2_hw *hw)
{
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
else
/* enable bits are inverted */
@@ -652,7 +649,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
reg1 &= ~phy_power[port];
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
reg1 |= coma_mode[port];
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
@@ -824,7 +821,9 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
+ if (hw->chip_id == CHIP_ID_YUKON_XL &&
+ hw->chip_rev == CHIP_REV_YU_XL_A0 &&
+ port == 1) {
/* WA DEV_472 -- looks like crossed wires on port 2 */
/* clear GMAC 1 Control reset */
sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
@@ -878,6 +877,10 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
if (hw->dev[port]->mtu > ETH_DATA_LEN)
reg |= GM_SMOD_JUMBO_ENA;
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+ hw->chip_rev == CHIP_REV_YU_EC_U_B1)
+ reg |= GM_NEW_FLOW_CTRL;
+
gma_write16(hw, port, GM_SERIAL_MODE, reg);
/* virtual address for data */
@@ -1126,7 +1129,7 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
if (pci_dma_mapping_error(pdev, re->data_addr))
goto mapping_error;
- pci_unmap_len_set(re, data_size, size);
+ dma_unmap_len_set(re, data_size, size);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1148,7 +1151,7 @@ map_page_error:
PCI_DMA_FROMDEVICE);
}
- pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
+ pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
PCI_DMA_FROMDEVICE);
mapping_error:
@@ -1163,7 +1166,7 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
struct sk_buff *skb = re->skb;
int i;
- pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
+ pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
PCI_DMA_FROMDEVICE);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
@@ -1190,6 +1193,39 @@ static void rx_set_checksum(struct sky2_port *sky2)
? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
+/* Enable/disable receive hash calculation (RSS) */
+static void rx_set_rss(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ int i, nkeys = 4;
+
+ /* Supports IPv6 and other modes */
+ if (hw->flags & SKY2_HW_NEW_LE) {
+ nkeys = 10;
+ sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL);
+ }
+
+ /* Program RSS initial values */
+ if (dev->features & NETIF_F_RXHASH) {
+ u32 key[nkeys];
+
+ get_random_bytes(key, nkeys * sizeof(u32));
+ for (i = 0; i < nkeys; i++)
+ sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
+ key[i]);
+
+ /* Need to turn on (undocumented) flag to make hashing work */
+ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
+ RX_STFW_ENA);
+
+ sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_ENA_RX_RSS_HASH);
+ } else
+ sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_DIS_RX_RSS_HASH);
+}
+
/*
* The RX Stop command will not work for Yukon-2 if the BMU does not
* reach the end of packet and since we can't make sure that we have
@@ -1414,8 +1450,7 @@ static void sky2_rx_start(struct sky2_port *sky2)
/* These chips have no ram buffer?
* MAC Rx RAM Read is controlled by hardware */
if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
- (hw->chip_rev == CHIP_REV_YU_EC_U_A1 ||
- hw->chip_rev == CHIP_REV_YU_EC_U_B0))
+ hw->chip_rev > CHIP_REV_YU_EC_U_A0)
sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
@@ -1423,6 +1458,9 @@ static void sky2_rx_start(struct sky2_port *sky2)
if (!(hw->flags & SKY2_HW_NEW_LE))
rx_set_checksum(sky2);
+ if (!(hw->flags & SKY2_HW_RSS_BROKEN))
+ rx_set_rss(sky2->netdev);
+
/* submit Rx ring */
for (i = 0; i < sky2->rx_pending; i++) {
re = sky2->rx_ring + i;
@@ -1657,12 +1695,12 @@ static unsigned tx_le_req(const struct sk_buff *skb)
static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
{
if (re->flags & TX_MAP_SINGLE)
- pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
- pci_unmap_len(re, maplen),
+ pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr),
+ dma_unmap_len(re, maplen),
PCI_DMA_TODEVICE);
else if (re->flags & TX_MAP_PAGE)
- pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
- pci_unmap_len(re, maplen),
+ pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr),
+ dma_unmap_len(re, maplen),
PCI_DMA_TODEVICE);
re->flags = 0;
}
@@ -1773,8 +1811,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
re = sky2->tx_ring + slot;
re->flags = TX_MAP_SINGLE;
- pci_unmap_addr_set(re, mapaddr, mapping);
- pci_unmap_len_set(re, maplen, len);
+ dma_unmap_addr_set(re, mapaddr, mapping);
+ dma_unmap_len_set(re, maplen, len);
le = get_tx_le(sky2, &slot);
le->addr = cpu_to_le32(lower_32_bits(mapping));
@@ -1802,8 +1840,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
re = sky2->tx_ring + slot;
re->flags = TX_MAP_PAGE;
- pci_unmap_addr_set(re, mapaddr, mapping);
- pci_unmap_len_set(re, maplen, frag->size);
+ dma_unmap_addr_set(re, mapaddr, mapping);
+ dma_unmap_len_set(re, maplen, frag->size);
le = get_tx_le(sky2, &slot);
le->addr = cpu_to_le32(lower_32_bits(mapping));
@@ -2142,7 +2180,8 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
istatus, phystat);
if (istatus & PHY_M_IS_AN_COMPL) {
- if (sky2_autoneg_done(sky2, phystat) == 0)
+ if (sky2_autoneg_done(sky2, phystat) == 0 &&
+ !netif_carrier_ok(dev))
sky2_link_up(sky2);
goto out;
}
@@ -2236,8 +2275,8 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
sky2_write32(hw, B0_IMSK, 0);
dev->trans_start = jiffies; /* prevent tx timeout */
- netif_stop_queue(dev);
napi_disable(&hw->napi);
+ netif_tx_disable(dev);
synchronize_irq(hw->pdev->irq);
@@ -2531,6 +2570,14 @@ static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
}
}
+static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
+{
+ struct sk_buff *skb;
+
+ skb = sky2->rx_ring[sky2->rx_next].skb;
+ skb->rxhash = le32_to_cpu(status);
+}
+
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
{
@@ -2552,7 +2599,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
if (!(opcode & HW_OWNER))
break;
- hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
+ hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size);
port = le->css & CSS_LINK_BIT;
dev = hw->dev[port];
@@ -2603,6 +2650,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
sky2_rx_checksum(sky2, status);
break;
+ case OP_RSS_HASH:
+ sky2_rx_hash(sky2, status);
+ break;
+
case OP_TXINDEXLE:
/* TX index reports status for both ports */
sky2_tx_done(hw->dev[0], status & 0xfff);
@@ -2957,6 +3008,8 @@ static int __devinit sky2_init(struct sky2_hw *hw)
switch(hw->chip_id) {
case CHIP_ID_YUKON_XL:
hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
+ if (hw->chip_rev < CHIP_REV_YU_XL_A2)
+ hw->flags |= SKY2_HW_RSS_BROKEN;
break;
case CHIP_ID_YUKON_EC_U:
@@ -2982,10 +3035,11 @@ static int __devinit sky2_init(struct sky2_hw *hw)
dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
return -EOPNOTSUPP;
}
- hw->flags = SKY2_HW_GIGABIT;
+ hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN;
break;
case CHIP_ID_YUKON_FE:
+ hw->flags = SKY2_HW_RSS_BROKEN;
break;
case CHIP_ID_YUKON_FE_P:
@@ -3192,7 +3246,7 @@ static void sky2_reset(struct sky2_hw *hw)
for (i = 0; i < hw->ports; i++)
sky2_gmac_reset(hw, i);
- memset(hw->st_le, 0, STATUS_LE_BYTES);
+ memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le));
hw->st_idx = 0;
sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
@@ -3202,7 +3256,7 @@ static void sky2_reset(struct sky2_hw *hw)
sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
/* Set the list last index */
- sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
+ sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1);
sky2_write16(hw, STAT_TX_IDX_TH, 10);
sky2_write8(hw, STAT_FIFO_WM, 16);
@@ -3258,18 +3312,14 @@ static int sky2_reattach(struct net_device *dev)
return err;
}
-static void sky2_restart(struct work_struct *work)
+static void sky2_all_down(struct sky2_hw *hw)
{
- struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
- u32 imask;
int i;
- rtnl_lock();
-
- napi_disable(&hw->napi);
- synchronize_irq(hw->pdev->irq);
- imask = sky2_read32(hw, B0_IMSK);
+ sky2_read32(hw, B0_IMSK);
sky2_write32(hw, B0_IMSK, 0);
+ synchronize_irq(hw->pdev->irq);
+ napi_disable(&hw->napi);
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
@@ -3282,8 +3332,12 @@ static void sky2_restart(struct work_struct *work)
netif_tx_disable(dev);
sky2_hw_down(sky2);
}
+}
- sky2_reset(hw);
+static void sky2_all_up(struct sky2_hw *hw)
+{
+ u32 imask = Y2_IS_BASE;
+ int i;
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
@@ -3293,6 +3347,8 @@ static void sky2_restart(struct work_struct *work)
continue;
sky2_hw_up(sky2);
+ sky2_set_multicast(dev);
+ imask |= portirq_msk[i];
netif_wake_queue(dev);
}
@@ -3301,6 +3357,17 @@ static void sky2_restart(struct work_struct *work)
sky2_read32(hw, B0_Y2_SP_LISR);
napi_enable(&hw->napi);
+}
+
+static void sky2_restart(struct work_struct *work)
+{
+ struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
+
+ rtnl_lock();
+
+ sky2_all_down(hw);
+ sky2_reset(hw);
+ sky2_all_up(hw);
rtnl_unlock();
}
@@ -3622,7 +3689,7 @@ static void sky2_set_multicast(struct net_device *dev)
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
u16 reg;
u8 filter[8];
int rx_pause;
@@ -3646,8 +3713,8 @@ static void sky2_set_multicast(struct net_device *dev)
if (rx_pause)
sky2_add_filter(filter, pause_mc_addr);
- netdev_for_each_mc_addr(list, dev)
- sky2_add_filter(filter, list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ sky2_add_filter(filter, ha->addr);
}
gma_write16(hw, port, GM_MC_ADDR_H1,
@@ -4109,6 +4176,25 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
}
+static int sky2_set_flags(struct net_device *dev, u32 data)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (data & ~ETH_FLAG_RXHASH)
+ return -EOPNOTSUPP;
+
+ if (data & ETH_FLAG_RXHASH) {
+ if (sky2->hw->flags & SKY2_HW_RSS_BROKEN)
+ return -EINVAL;
+
+ dev->features |= NETIF_F_RXHASH;
+ } else
+ dev->features &= ~NETIF_F_RXHASH;
+
+ rx_set_rss(dev);
+
+ return 0;
+}
static const struct ethtool_ops sky2_ethtool_ops = {
.get_settings = sky2_get_settings,
@@ -4140,6 +4226,7 @@ static const struct ethtool_ops sky2_ethtool_ops = {
.phys_id = sky2_phys_id,
.get_sset_count = sky2_get_sset_count,
.get_ethtool_stats = sky2_get_ethtool_stats,
+ .set_flags = sky2_set_flags,
};
#ifdef CONFIG_SKY2_DEBUG
@@ -4250,12 +4337,13 @@ static int sky2_debug_show(struct seq_file *seq, void *v)
napi_disable(&hw->napi);
last = sky2_read16(hw, STAT_PUT_IDX);
+ seq_printf(seq, "Status ring %u\n", hw->st_size);
if (hw->st_idx == last)
seq_puts(seq, "Status ring (empty)\n");
else {
seq_puts(seq, "Status ring\n");
- for (idx = hw->st_idx; idx != last && idx < STATUS_RING_SIZE;
- idx = RING_NEXT(idx, STATUS_RING_SIZE)) {
+ for (idx = hw->st_idx; idx != last && idx < hw->st_size;
+ idx = RING_NEXT(idx, hw->st_size)) {
const struct sky2_status_le *le = hw->st_le + idx;
seq_printf(seq, "[%d] %#x %d %#x\n",
idx, le->opcode, le->length, le->status);
@@ -4492,6 +4580,10 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
if (highmem)
dev->features |= NETIF_F_HIGHDMA;
+ /* Enable receive hashing unless hardware is known broken */
+ if (!(hw->flags & SKY2_HW_RSS_BROKEN))
+ dev->features |= NETIF_F_RXHASH;
+
#ifdef SKY2_VLAN_TAG_USED
/* The workaround for FE+ status conflicts with VLAN tag detection. */
if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
@@ -4683,15 +4775,17 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
goto err_out_free_hw;
}
- /* ring for status responses */
- hw->st_le = pci_alloc_consistent(pdev, STATUS_LE_BYTES, &hw->st_dma);
- if (!hw->st_le)
- goto err_out_iounmap;
-
err = sky2_init(hw);
if (err)
goto err_out_iounmap;
+ /* ring for status responses */
+ hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
+ hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+ &hw->st_dma);
+ if (!hw->st_le)
+ goto err_out_reset;
+
dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
@@ -4765,8 +4859,10 @@ err_out_unregister:
err_out_free_netdev:
free_netdev(dev);
err_out_free_pci:
+ pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+ hw->st_le, hw->st_dma);
+err_out_reset:
sky2_write8(hw, B0_CTST, CS_RST_SET);
- pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
err_out_iounmap:
iounmap(hw->regs);
err_out_free_hw:
@@ -4804,7 +4900,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
free_irq(pdev->irq, hw);
if (hw->flags & SKY2_HW_USE_MSI)
pci_disable_msi(pdev);
- pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
+ pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+ hw->st_le, hw->st_dma);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -4829,12 +4926,12 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
cancel_work_sync(&hw->restart_work);
rtnl_lock();
+
+ sky2_all_down(hw);
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
struct sky2_port *sky2 = netdev_priv(dev);
- sky2_detach(dev);
-
if (sky2->wol)
sky2_wol_init(sky2);
@@ -4843,8 +4940,6 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
device_set_wakeup_enable(&pdev->dev, wol != 0);
- sky2_write32(hw, B0_IMSK, 0);
- napi_disable(&hw->napi);
sky2_power_aux(hw);
rtnl_unlock();
@@ -4859,12 +4954,11 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
static int sky2_resume(struct pci_dev *pdev)
{
struct sky2_hw *hw = pci_get_drvdata(pdev);
- int i, err;
+ int err;
if (!hw)
return 0;
- rtnl_lock();
err = pci_set_power_state(pdev, PCI_D0);
if (err)
goto out;
@@ -4882,20 +4976,13 @@ static int sky2_resume(struct pci_dev *pdev)
goto out;
}
+ rtnl_lock();
sky2_reset(hw);
- sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
- napi_enable(&hw->napi);
-
- for (i = 0; i < hw->ports; i++) {
- err = sky2_reattach(hw->dev[i]);
- if (err)
- goto out;
- }
+ sky2_all_up(hw);
rtnl_unlock();
return 0;
out:
- rtnl_unlock();
dev_err(&pdev->dev, "resume failed (%d)\n", err);
pci_disable_device(pdev);
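
Illustrative only, not part of the patch above: a minimal sketch (all names invented) of the receive-hash pattern the sky2 hunks use, i.e. copying the hardware-computed RSS hash from the status descriptor into skb->rxhash and advertising NETIF_F_RXHASH only when the silicon supports it. On kernels of this vintage the field is called rxhash (later renamed skb->hash).

/* Hypothetical driver helpers -- a sketch of the RX hash pattern above. */
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_rx_store_hash(struct sk_buff *skb, __le32 hw_hash)
{
	/* Forward the device-computed hash so the stack can steer flows. */
	skb->rxhash = le32_to_cpu(hw_hash);
}

static void example_enable_rxhash(struct net_device *dev, bool hw_rss_ok)
{
	/* Advertise the feature only when the chip revision supports it,
	 * mirroring the SKY2_HW_RSS_BROKEN check in the hunks above. */
	if (hw_rss_ok)
		dev->features |= NETIF_F_RXHASH;
	else
		dev->features &= ~NETIF_F_RXHASH;
}
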
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index a5e182d..084eff2 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -548,6 +548,14 @@ enum {
CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */
};
+
+enum yukon_xl_rev {
+ CHIP_REV_YU_XL_A0 = 0,
+ CHIP_REV_YU_XL_A1 = 1,
+ CHIP_REV_YU_XL_A2 = 2,
+ CHIP_REV_YU_XL_A3 = 3,
+};
+
enum yukon_ec_rev {
CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
@@ -557,6 +565,7 @@ enum yukon_ec_u_rev {
CHIP_REV_YU_EC_U_A0 = 1,
CHIP_REV_YU_EC_U_A1 = 2,
CHIP_REV_YU_EC_U_B0 = 3,
+ CHIP_REV_YU_EC_U_B1 = 5,
};
enum yukon_fe_rev {
CHIP_REV_YU_FE_A1 = 1,
@@ -685,8 +694,21 @@ enum {
TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
+
+ RSS_KEY = 0x0220, /* RSS Key setup */
+ RSS_CFG = 0x0248, /* RSS Configuration */
};
+enum {
+ HASH_TCP_IPV6_EX_CTRL = 1<<5,
+ HASH_IPV6_EX_CTRL = 1<<4,
+ HASH_TCP_IPV6_CTRL = 1<<3,
+ HASH_IPV6_CTRL = 1<<2,
+ HASH_TCP_IPV4_CTRL = 1<<1,
+ HASH_IPV4_CTRL = 1<<0,
+
+ HASH_ALL = 0x3f,
+};
enum {
B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
@@ -1775,10 +1797,13 @@ enum {
/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
enum {
GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
- GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */
- GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */
- GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */
- GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
+ GM_SMOD_LIMIT_4 = 1<<10, /* 4 consecutive Tx trials */
+ GM_SMOD_VLAN_ENA = 1<<9, /* Enable VLAN (Max. Frame Len) */
+ GM_SMOD_JUMBO_ENA = 1<<8, /* Enable Jumbo (Max. Frame Len) */
+
+ GM_NEW_FLOW_CTRL = 1<<6, /* Enable New Flow-Control */
+
+ GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
};
#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
@@ -2157,14 +2182,14 @@ struct tx_ring_info {
unsigned long flags;
#define TX_MAP_SINGLE 0x0001
#define TX_MAP_PAGE 0x0002
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
struct rx_ring_info {
struct sk_buff *skb;
dma_addr_t data_addr;
- DECLARE_PCI_UNMAP_LEN(data_size);
+ DEFINE_DMA_UNMAP_LEN(data_size);
dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
};
@@ -2249,6 +2274,7 @@ struct sky2_hw {
#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
+#define SKY2_HW_RSS_BROKEN 0x00000100
u8 chip_id;
u8 chip_rev;
@@ -2256,6 +2282,7 @@ struct sky2_hw {
u8 ports;
struct sky2_status_le *st_le;
+ u32 st_size;
u32 st_idx;
dma_addr_t st_dma;
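
Illustrative only, not part of the patch above: a hedged sketch (struct and function names invented) of the generic dma_unmap_* bookkeeping macros that replace DECLARE_PCI_UNMAP_ADDR/LEN in the sky2.h hunk, paired with the era's pci_map/unmap calls.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

struct example_tx_info {
	DEFINE_DMA_UNMAP_ADDR(mapaddr);
	DEFINE_DMA_UNMAP_LEN(maplen);
};

static int example_map(struct pci_dev *pdev, struct example_tx_info *re,
		       void *buf, size_t len)
{
	dma_addr_t mapping = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(pdev, mapping))
		return -ENOMEM;

	/* Stash address and length for the later unmap; both macros compile
	 * away when the platform needs no unmap state, which is their point. */
	dma_unmap_addr_set(re, mapaddr, mapping);
	dma_unmap_len_set(re, maplen, len);
	return 0;
}

static void example_unmap(struct pci_dev *pdev, struct example_tx_info *re)
{
	pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr),
			 dma_unmap_len(re, maplen), PCI_DMA_TODEVICE);
}
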
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
index 140d63f..ac279fa 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slhc.c
@@ -731,7 +731,6 @@ void
slhc_free(struct slcompress *comp)
{
printk(KERN_DEBUG "Called IP function on non IP-system: slhc_free");
- return;
}
struct slcompress *
slhc_init(int rslots, int tslots)
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 8969615..fa434fb 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -458,7 +458,7 @@ static void sl_tx_timeout(struct net_device *dev)
* 14 Oct 1994 Dmitry Gorodchanin.
*/
#ifdef SL_CHECK_TRANSMIT
- if (time_before(jiffies, dev->trans_start + 20 * HZ)) {
+ if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) {
/* 20 sec timeout not reached */
goto out;
}
@@ -1269,7 +1269,7 @@ static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCGLEASE:
*p = sl->leased;
- };
+ }
spin_unlock_bh(&sl->lock);
return 0;
}
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index a93f122..d07c39c 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -460,7 +460,6 @@ static void ultramca_reset_8390(struct net_device *dev)
if (ei_debug > 1)
printk("reset done\n");
- return;
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index 0291ea0..d2dd8e6 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -421,7 +421,6 @@ ultra_reset_8390(struct net_device *dev)
outb(0x01, cmd_port + 6); /* Enable interrupts and memory. */
if (ei_debug > 1) printk("reset done\n");
- return;
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c
index 7a554ad..e459c3b 100644
--- a/drivers/net/smc-ultra32.c
+++ b/drivers/net/smc-ultra32.c
@@ -352,7 +352,6 @@ static void ultra32_reset_8390(struct net_device *dev)
outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
outb(0x01, ioaddr + 6); /* Enable Interrupts. */
if (ei_debug > 1) printk("reset done\n");
- return;
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 635820d..66831f3 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -382,7 +382,7 @@ static inline void smc911x_rcv(struct net_device *dev)
DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
dev->name, __func__);
status = SMC_GET_RX_STS_FIFO(lp);
- DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n",
+ DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x\n",
dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
if (status & RX_STS_ES_) {
@@ -1135,7 +1135,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
}
#else
if (status & INT_STS_TSFL_) {
- DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq \n", dev->name, );
+ DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n", dev->name, );
smc911x_tx(dev);
SMC_ACK_INT(lp, INT_STS_TSFL_);
}
@@ -1274,7 +1274,7 @@ static void smc911x_timeout(struct net_device *dev)
status = SMC_GET_INT(lp);
mask = SMC_GET_INT_EN(lp);
spin_unlock_irqrestore(&lp->lock, flags);
- DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x \n",
+ DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x\n",
dev->name, status, mask);
/* Dump the current TX FIFO contents and restart */
@@ -1289,7 +1289,7 @@ static void smc911x_timeout(struct net_device *dev)
schedule_work(&lp->phy_configure);
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1340,7 +1340,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
* within that register.
*/
else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *cur_addr;
+ struct netdev_hw_addr *ha;
/* Set the Hash perfect mode */
mcr |= MAC_CR_HPFILT_;
@@ -1348,19 +1348,16 @@ static void smc911x_set_multicast_list(struct net_device *dev)
/* start with a table of all zeros: reject all */
memset(multicast_table, 0, sizeof(multicast_table));
- netdev_for_each_mc_addr(cur_addr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
u32 position;
- /* do we have a pointer here? */
- if (!cur_addr)
- break;
/* make sure this is a multicast address -
shouldn't this be a given if we have it here ? */
- if (!(*cur_addr->dmi_addr & 1))
- continue;
+ if (!(*ha->addr & 1))
+ continue;
/* upper 6 bits are used as hash index */
- position = ether_crc(ETH_ALEN, cur_addr->dmi_addr)>>26;
+ position = ether_crc(ETH_ALEN, ha->addr)>>26;
multicast_table[position>>5] |= 1 << (position&0x1f);
}
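
Illustrative only, not part of the patch above: a short sketch (function name invented) of the multicast-list iteration this series converges on, walking struct netdev_hw_addr entries with netdev_for_each_mc_addr() and hashing ha->addr, instead of the removed dev_mc_list/dmi_addr pair.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/string.h>

static void example_build_mc_hash(struct net_device *dev, u8 filter[8])
{
	struct netdev_hw_addr *ha;

	/* Start from an empty table: reject all multicast by default. */
	memset(filter, 0, 8);

	netdev_for_each_mc_addr(ha, dev) {
		/* Upper 6 bits of the CRC select one of 64 hash bits. */
		unsigned int bit = ether_crc(ETH_ALEN, ha->addr) >> 26;

		filter[bit >> 3] |= 1 << (bit & 7);
	}
}
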
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 3f2f784..7486d09 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -416,7 +416,7 @@ static void smc_shutdown( int ioaddr )
/*
- . Function: smc_setmulticast( int ioaddr, int count, dev_mc_list * adds )
+ . Function: smc_setmulticast( int ioaddr, struct net_device *dev )
. Purpose:
. This sets the internal hardware table to filter out unwanted multicast
. packets before they take up memory.
@@ -437,26 +437,23 @@ static void smc_setmulticast(int ioaddr, struct net_device *dev)
{
int i;
unsigned char multicast_table[ 8 ];
- struct dev_mc_list *cur_addr;
+ struct netdev_hw_addr *ha;
/* table for flipping the order of 3 bits */
unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
/* start with a table of all zeros: reject all */
memset( multicast_table, 0, sizeof( multicast_table ) );
- netdev_for_each_mc_addr(cur_addr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
int position;
- /* do we have a pointer here? */
- if ( !cur_addr )
- break;
/* make sure this is a multicast address - shouldn't this
be a given if we have it here ? */
- if ( !( *cur_addr->dmi_addr & 1 ) )
+ if (!(*ha->addr & 1))
continue;
/* only use the low order bits */
- position = ether_crc_le(6, cur_addr->dmi_addr) & 0x3f;
+ position = ether_crc_le(6, ha->addr) & 0x3f;
/* do some messy swapping to put the bit in the right spot */
multicast_table[invert3[position&7]] |=
@@ -528,7 +525,7 @@ static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
numPages = ((length & 0xfffe) + 6) / 256;
if (numPages > 7 ) {
- printk(CARDNAME": Far too big packet error. \n");
+ printk(CARDNAME": Far too big packet error.\n");
/* freeing the packet is a good thing here... but should
. any packets of this size get down here? */
dev_kfree_skb (skb);
@@ -570,9 +567,9 @@ static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
if ( !time_out ) {
/* oh well, wait until the chip finds memory later */
SMC_ENABLE_INT( IM_ALLOC_INT );
- PRINTK2((CARDNAME": memory allocation deferred. \n"));
+ PRINTK2((CARDNAME": memory allocation deferred.\n"));
/* it's deferred, but I'll handle it later */
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
/* or YES! I can send the packet now.. */
smc_hardware_send_packet(dev);
@@ -610,7 +607,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
ioaddr = dev->base_addr;
if ( !skb ) {
- PRINTK((CARDNAME": In XMIT with no packet to send \n"));
+ PRINTK((CARDNAME": In XMIT with no packet to send\n"));
return;
}
length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
@@ -620,7 +617,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
packet_no = inb( ioaddr + PNR_ARR + 1 );
if ( packet_no & 0x80 ) {
/* or isn't there? BAD CHIP! */
- printk(KERN_DEBUG CARDNAME": Memory allocation failed. \n");
+ printk(KERN_DEBUG CARDNAME": Memory allocation failed.\n");
dev_kfree_skb_any(skb);
lp->saved_skb = NULL;
netif_wake_queue(dev);
@@ -685,7 +682,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
/* and let the chipset deal with it */
outw( MC_ENQUEUE , ioaddr + MMU_CMD );
- PRINTK2((CARDNAME": Sent packet of length %d \n",length));
+ PRINTK2((CARDNAME": Sent packet of length %d\n", length));
lp->saved_skb = NULL;
dev_kfree_skb_any (skb);
@@ -694,8 +691,6 @@ static void smc_hardware_send_packet( struct net_device * dev )
/* we can send another packet */
netif_wake_queue(dev);
-
- return;
}
/*-------------------------------------------------------------------------
@@ -937,7 +932,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
if ( !chip_ids[ ( revision_register >> 4 ) & 0xF ] ) {
/* I don't recognize this chip, so... */
printk(CARDNAME ": IO %x: Unrecognized revision register:"
- " %x, Contact author. \n", ioaddr, revision_register );
+ " %x, Contact author.\n", ioaddr, revision_register);
retval = -ENODEV;
goto err_out;
@@ -1045,9 +1040,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
*/
printk("ADDR: %pM\n", dev->dev_addr);
- /* set the private data to zero by default */
- memset(netdev_priv(dev), 0, sizeof(struct smc_local));
-
/* Grab the IRQ */
retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
if (retval) {
@@ -1074,7 +1066,7 @@ static void print_packet( byte * buf, int length )
int remainder;
int lines;
- printk("Packet of length %d \n", length );
+ printk("Packet of length %d\n", length);
lines = length / 16;
remainder = length % 16;
@@ -1170,7 +1162,7 @@ static void smc_timeout(struct net_device *dev)
/* "kick" the adaptor */
smc_reset( dev->base_addr );
smc_enable( dev->base_addr );
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* clear anything saved */
((struct smc_local *)netdev_priv(dev))->saved_skb = NULL;
netif_wake_queue(dev);
@@ -1201,7 +1193,7 @@ static void smc_rcv(struct net_device *dev)
if ( packet_number & FP_RXEMPTY ) {
/* we got called , but nothing was on the FIFO */
- PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO. \n"));
+ PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO.\n"));
/* don't need to restore anything */
return;
}
@@ -1257,14 +1249,14 @@ static void smc_rcv(struct net_device *dev)
to send the DWORDs or the bytes first, or some
mixture. A mixture might improve already slow PIO
performance */
- PRINTK3((" Reading %d dwords (and %d bytes) \n",
+ PRINTK3((" Reading %d dwords (and %d bytes)\n",
packet_length >> 2, packet_length & 3 ));
insl(ioaddr + DATA_1 , data, packet_length >> 2 );
/* read the left over bytes */
insb( ioaddr + DATA_1, data + (packet_length & 0xFFFFFC),
packet_length & 0x3 );
#else
- PRINTK3((" Reading %d words and %d byte(s) \n",
+ PRINTK3((" Reading %d words and %d byte(s)\n",
(packet_length >> 1 ), packet_length & 1 ));
insw(ioaddr + DATA_1 , data, packet_length >> 1);
if ( packet_length & 1 ) {
@@ -1333,7 +1325,7 @@ static void smc_tx( struct net_device * dev )
outw( PTR_AUTOINC | PTR_READ, ioaddr + POINTER );
tx_status = inw( ioaddr + DATA_1 );
- PRINTK3((CARDNAME": TX DONE STATUS: %4x \n", tx_status ));
+ PRINTK3((CARDNAME": TX DONE STATUS: %4x\n", tx_status));
dev->stats.tx_errors++;
if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++;
@@ -1347,7 +1339,7 @@ static void smc_tx( struct net_device * dev )
#endif
if ( tx_status & TS_SUCCESS ) {
- printk(CARDNAME": Successful packet caused interrupt \n");
+ printk(CARDNAME": Successful packet caused interrupt\n");
}
/* re-enable transmit */
SMC_SELECT_BANK( 0 );
@@ -1361,7 +1353,6 @@ static void smc_tx( struct net_device * dev )
lp->packets_waiting--;
outb( saved_packet, ioaddr + PNR_ARR );
- return;
}
/*--------------------------------------------------------------------
@@ -1393,7 +1384,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
int handled = 0;
- PRINTK3((CARDNAME": SMC interrupt started \n"));
+ PRINTK3((CARDNAME": SMC interrupt started\n"));
saved_bank = inw( ioaddr + BANK_SELECT );
@@ -1408,7 +1399,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
/* set a timeout value, so I don't stay here forever */
timeout = 4;
- PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x \n", mask ));
+ PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x\n", mask));
do {
/* read the status flag, and mask it */
status = inb( ioaddr + INTERRUPT ) & mask;
@@ -1418,7 +1409,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
handled = 1;
PRINTK3((KERN_WARNING CARDNAME
- ": Handling interrupt status %x \n", status ));
+ ": Handling interrupt status %x\n", status));
if (status & IM_RCV_INT) {
/* Got a packet(s). */
@@ -1452,7 +1443,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
} else if (status & IM_ALLOC_INT ) {
PRINTK2((KERN_DEBUG CARDNAME
- ": Allocation interrupt \n"));
+ ": Allocation interrupt\n"));
/* clear this interrupt so it doesn't happen again */
mask &= ~IM_ALLOC_INT;
@@ -1470,9 +1461,9 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
dev->stats.rx_fifo_errors++;
outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT );
} else if (status & IM_EPH_INT ) {
- PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT \n"));
+ PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT\n"));
} else if (status & IM_ERCV_INT ) {
- PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT \n"));
+ PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT\n"));
outb( IM_ERCV_INT, ioaddr + INTERRUPT );
}
} while ( timeout -- );
@@ -1482,7 +1473,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
SMC_SELECT_BANK( 2 );
outb( mask, ioaddr + INT_MASK );
- PRINTK3(( KERN_WARNING CARDNAME ": MASK is now %x \n", mask ));
+ PRINTK3((KERN_WARNING CARDNAME ": MASK is now %x\n", mask));
outw( saved_pointer, ioaddr + POINTER );
SMC_SELECT_BANK( saved_bank );
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 860339d..10cf0cb 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1285,7 +1285,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
smc_phy_interrupt(dev);
} else if (status & IM_ERCV_INT) {
SMC_ACK_INT(lp, IM_ERCV_INT);
- PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT \n", dev->name);
+ PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name);
}
} while (--timeout);
@@ -1360,7 +1360,7 @@ static void smc_timeout(struct net_device *dev)
schedule_work(&lp->phy_configure);
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1412,7 +1412,7 @@ static void smc_set_multicast_list(struct net_device *dev)
* within that register.
*/
else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *cur_addr;
+ struct netdev_hw_addr *ha;
/* table for flipping the order of 3 bits */
static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};
@@ -1420,16 +1420,16 @@ static void smc_set_multicast_list(struct net_device *dev)
/* start with a table of all zeros: reject all */
memset(multicast_table, 0, sizeof(multicast_table));
- netdev_for_each_mc_addr(cur_addr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
int position;
/* make sure this is a multicast address -
shouldn't this be a given if we have it here ? */
- if (!(*cur_addr->dmi_addr & 1))
+ if (!(*ha->addr & 1))
continue;
/* only use the low order bits */
- position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f;
+ position = crc32_le(~0, ha->addr, 6) & 0x3f;
/* do some messy swapping to put the bit in the right spot */
multicast_table[invert3[position&7]] |=
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index ffbaa60..cc55974 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1335,7 +1335,6 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
smsc911x_tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
freespace -= (skb->len + 32);
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30))
smsc911x_tx_update_txcounters(dev);
@@ -1382,13 +1381,13 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
/* Enabling specific multicast addresses */
unsigned int hash_high = 0;
unsigned int hash_low = 0;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
pdata->set_bits_mask = MAC_CR_HPFILT_;
pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_);
- netdev_for_each_mc_addr(mc_list, dev) {
- unsigned int bitnum = smsc911x_hash(mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int bitnum = smsc911x_hash(ha->addr);
unsigned int mask = 0x01 << (bitnum & 0x1F);
if (bitnum & 0x20)
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index aafaebf..6cdee6a 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -1034,8 +1034,6 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
smsc9420_reg_write(pd, TX_POLL_DEMAND, 1);
smsc9420_pci_flush_write(pd);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -1064,12 +1062,12 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
mac_cr |= MAC_CR_MCPAS_;
mac_cr &= (~MAC_CR_HPFILT_);
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 hash_lo = 0, hash_hi = 0;
smsc_dbg(HW, "Multicast filter enabled");
- netdev_for_each_mc_addr(mc_list, dev) {
- u32 bit_num = smsc9420_hash(mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 bit_num = smsc9420_hash(ha->addr);
u32 mask = 1 << (bit_num & 0x1F);
if (bit_num & 0x20)
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index 287c251..26e25d7 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -174,7 +174,7 @@ static void sonic_tx_timeout(struct net_device *dev)
/* Try to restart the adaptor. */
sonic_init(dev);
lp->stats.tx_errors++;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -263,8 +263,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -531,7 +529,7 @@ static void sonic_multicast_list(struct net_device *dev)
{
struct sonic_local *lp = netdev_priv(dev);
unsigned int rcr;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char *addr;
int i;
@@ -550,8 +548,8 @@ static void sonic_multicast_list(struct net_device *dev)
netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */
i = 1;
- netdev_for_each_mc_addr(dmi, dev) {
- addr = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addr = ha->addr;
sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index dd3cb0f..1636a34 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -625,7 +625,7 @@ spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
static void
spider_net_set_multi(struct net_device *netdev)
{
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
u8 hash;
int i;
u32 reg;
@@ -646,8 +646,8 @@ spider_net_set_multi(struct net_device *netdev)
hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
set_bit(0xfd, bitmask);
- netdev_for_each_mc_addr(mc, netdev) {
- hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash = spider_net_get_multicast_hash(netdev, ha->addr);
set_bit(hash, bitmask);
}
@@ -2095,8 +2095,6 @@ static void spider_net_link_phy(unsigned long data)
card->netdev->name, phy->speed,
phy->duplex == 1 ? "Full" : "Half",
phy->autoneg == 1 ? "" : "no ");
-
- return;
}
/**
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 6dfa698..74b7ae7 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1173,7 +1173,7 @@ static void tx_timeout(struct net_device *dev)
/* Trigger an immediate transmit demand. */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
np->stats.tx_errors++;
netif_wake_queue(dev);
}
@@ -1221,8 +1221,6 @@ static void init_ring(struct net_device *dev)
for (i = 0; i < TX_RING_SIZE; i++)
memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
-
- return;
}
@@ -1312,8 +1310,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
netif_stop_queue(dev);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -1766,7 +1762,7 @@ static void set_rx_mode(struct net_device *dev)
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base;
u32 rx_mode = MinVLANPrio;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
#ifdef VLAN_SUPPORT
@@ -1804,8 +1800,8 @@ static void set_rx_mode(struct net_device *dev)
/* Use the 16 element perfect filter, skip first two entries. */
void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
__be16 *eaddrs;
- netdev_for_each_mc_addr(mclist, dev) {
- eaddrs = (__be16 *)mclist->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (__be16 *) ha->addr;
writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
@@ -1825,10 +1821,10 @@ static void set_rx_mode(struct net_device *dev)
__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* The chip uses the upper 9 CRC bits
as index into the hash table */
- int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
+ int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
__le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
*fptr |= cpu_to_le32(1 << (bit_nr & 31));
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
index c776af1..9691733 100644
--- a/drivers/net/stmmac/Makefile
+++ b/drivers/net/stmmac/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
- dwmac100.o $(stmmac-y)
+ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index 2a58172..144f76f 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -22,8 +22,26 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include "descs.h"
#include <linux/netdevice.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define STMMAC_VLAN_TAG_USED
+#include <linux/if_vlan.h>
+#endif
+
+#include "descs.h"
+
+#undef CHIP_DEBUG_PRINT
+/* Turn-on extra printk debug for MAC core, dma and descriptors */
+/* #define CHIP_DEBUG_PRINT */
+
+#ifdef CHIP_DEBUG_PRINT
+#define CHIP_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define CHIP_DBG(fmt, args...) do { } while (0)
+#endif
+
+#undef FRAME_FILTER_DEBUG
+/* #define FRAME_FILTER_DEBUG */
struct stmmac_extra_stats {
/* Transmit errors */
@@ -231,3 +249,4 @@ extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
+extern void dwmac_dma_flush_tx_fifo(unsigned long ioaddr);
diff --git a/drivers/net/stmmac/dwmac100.c b/drivers/net/stmmac/dwmac100.c
deleted file mode 100644
index 4cacca6..0000000
--- a/drivers/net/stmmac/dwmac100.c
+++ /dev/null
@@ -1,538 +0,0 @@
-/*******************************************************************************
- This is the driver for the MAC 10/100 on-chip Ethernet controller
- currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
-
- DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
- this code.
-
- Copyright (C) 2007-2009 STMicroelectronics Ltd
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
-*******************************************************************************/
-
-#include <linux/crc32.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/slab.h>
-
-#include "common.h"
-#include "dwmac100.h"
-#include "dwmac_dma.h"
-
-#undef DWMAC100_DEBUG
-/*#define DWMAC100_DEBUG*/
-#ifdef DWMAC100_DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
-#else
-#define DBG(fmt, args...) do { } while (0)
-#endif
-
-static void dwmac100_core_init(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CONTROL);
-
- writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
-
-#ifdef STMMAC_VLAN_TAG_USED
- writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
-#endif
- return;
-}
-
-static void dwmac100_dump_mac_regs(unsigned long ioaddr)
-{
- pr_info("\t----------------------------------------------\n"
- "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
- "\t----------------------------------------------\n",
- (unsigned int)ioaddr);
- pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
- readl(ioaddr + MAC_CONTROL));
- pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
- readl(ioaddr + MAC_ADDR_HIGH));
- pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
- readl(ioaddr + MAC_ADDR_LOW));
- pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
- MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
- pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
- MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
- pr_info("\tflow control (offset 0x%x): 0x%08x\n",
- MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
- pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
- readl(ioaddr + MAC_VLAN1));
- pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
- readl(ioaddr + MAC_VLAN2));
- pr_info("\n\tMAC management counter registers\n");
- pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
- MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
- pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
- pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
- pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
- pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
- return;
-}
-
-static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
- u32 dma_rx)
-{
- u32 value = readl(ioaddr + DMA_BUS_MODE);
- /* DMA SW reset */
- value |= DMA_BUS_MODE_SFT_RESET;
- writel(value, ioaddr + DMA_BUS_MODE);
- do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
-
- /* Enable Application Access by writing to DMA CSR0 */
- writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
- ioaddr + DMA_BUS_MODE);
-
- /* Mask interrupts by writing to CSR7 */
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
-
- /* The base address of the RX/TX descriptor lists must be written into
- * DMA CSR3 and CSR4, respectively. */
- writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
- writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
-
- return 0;
-}
-
-/* Store and Forward capability is not used at all..
- * The transmit threshold can be programmed by
- * setting the TTC bits in the DMA control register.*/
-static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
- int rxmode)
-{
- u32 csr6 = readl(ioaddr + DMA_CONTROL);
-
- if (txmode <= 32)
- csr6 |= DMA_CONTROL_TTC_32;
- else if (txmode <= 64)
- csr6 |= DMA_CONTROL_TTC_64;
- else
- csr6 |= DMA_CONTROL_TTC_128;
-
- writel(csr6, ioaddr + DMA_CONTROL);
-
- return;
-}
-
-static void dwmac100_dump_dma_regs(unsigned long ioaddr)
-{
- int i;
-
- DBG(KERN_DEBUG "DWMAC 100 DMA CSR \n");
- for (i = 0; i < 9; i++)
- pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
- (DMA_BUS_MODE + i * 4),
- readl(ioaddr + DMA_BUS_MODE + i * 4));
- DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
- DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
- DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
- DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
- return;
-}
-
-/* DMA controller has two counters to track the number of
- * the receive missed frames. */
-static void dwmac100_dma_diagnostic_fr(void *data,
- struct stmmac_extra_stats *x,
- unsigned long ioaddr)
-{
- struct net_device_stats *stats = (struct net_device_stats *)data;
- u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
-
- if (unlikely(csr8)) {
- if (csr8 & DMA_MISSED_FRAME_OVE) {
- stats->rx_over_errors += 0x800;
- x->rx_overflow_cntr += 0x800;
- } else {
- unsigned int ove_cntr;
- ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
- stats->rx_over_errors += ove_cntr;
- x->rx_overflow_cntr += ove_cntr;
- }
-
- if (csr8 & DMA_MISSED_FRAME_OVE_M) {
- stats->rx_missed_errors += 0xffff;
- x->rx_missed_cntr += 0xffff;
- } else {
- unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
- stats->rx_missed_errors += miss_f;
- x->rx_missed_cntr += miss_f;
- }
- }
- return;
-}
-
-static int dwmac100_get_tx_frame_status(void *data,
- struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
-{
- int ret = 0;
- struct net_device_stats *stats = (struct net_device_stats *)data;
-
- if (unlikely(p->des01.tx.error_summary)) {
- if (unlikely(p->des01.tx.underflow_error)) {
- x->tx_underflow++;
- stats->tx_fifo_errors++;
- }
- if (unlikely(p->des01.tx.no_carrier)) {
- x->tx_carrier++;
- stats->tx_carrier_errors++;
- }
- if (unlikely(p->des01.tx.loss_carrier)) {
- x->tx_losscarrier++;
- stats->tx_carrier_errors++;
- }
- if (unlikely((p->des01.tx.excessive_deferral) ||
- (p->des01.tx.excessive_collisions) ||
- (p->des01.tx.late_collision)))
- stats->collisions += p->des01.tx.collision_count;
- ret = -1;
- }
- if (unlikely(p->des01.tx.heartbeat_fail)) {
- x->tx_heartbeat++;
- stats->tx_heartbeat_errors++;
- ret = -1;
- }
- if (unlikely(p->des01.tx.deferred))
- x->tx_deferred++;
-
- return ret;
-}
-
-static int dwmac100_get_tx_len(struct dma_desc *p)
-{
- return p->des01.tx.buffer1_size;
-}
-
-/* This function verifies if each incoming frame has some errors
- * and, if required, updates the multicast statistics.
- * In case of success, it returns csum_none becasue the device
- * is not able to compute the csum in HW. */
-static int dwmac100_get_rx_frame_status(void *data,
- struct stmmac_extra_stats *x,
- struct dma_desc *p)
-{
- int ret = csum_none;
- struct net_device_stats *stats = (struct net_device_stats *)data;
-
- if (unlikely(p->des01.rx.last_descriptor == 0)) {
- pr_warning("dwmac100 Error: Oversized Ethernet "
- "frame spanned multiple buffers\n");
- stats->rx_length_errors++;
- return discard_frame;
- }
-
- if (unlikely(p->des01.rx.error_summary)) {
- if (unlikely(p->des01.rx.descriptor_error))
- x->rx_desc++;
- if (unlikely(p->des01.rx.partial_frame_error))
- x->rx_partial++;
- if (unlikely(p->des01.rx.run_frame))
- x->rx_runt++;
- if (unlikely(p->des01.rx.frame_too_long))
- x->rx_toolong++;
- if (unlikely(p->des01.rx.collision)) {
- x->rx_collision++;
- stats->collisions++;
- }
- if (unlikely(p->des01.rx.crc_error)) {
- x->rx_crc++;
- stats->rx_crc_errors++;
- }
- ret = discard_frame;
- }
- if (unlikely(p->des01.rx.dribbling))
- ret = discard_frame;
-
- if (unlikely(p->des01.rx.length_error)) {
- x->rx_length++;
- ret = discard_frame;
- }
- if (unlikely(p->des01.rx.mii_error)) {
- x->rx_mii++;
- ret = discard_frame;
- }
- if (p->des01.rx.multicast_frame) {
- x->rx_multicast++;
- stats->multicast++;
- }
- return ret;
-}
-
-static void dwmac100_irq_status(unsigned long ioaddr)
-{
- return;
-}
-
-static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
-}
-
-static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
-}
-
-static void dwmac100_set_filter(struct net_device *dev)
-{
- unsigned long ioaddr = dev->base_addr;
- u32 value = readl(ioaddr + MAC_CONTROL);
-
- if (dev->flags & IFF_PROMISC) {
- value |= MAC_CONTROL_PR;
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
- MAC_CONTROL_HP);
- } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
- || (dev->flags & IFF_ALLMULTI)) {
- value |= MAC_CONTROL_PM;
- value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
- writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
- writel(0xffffffff, ioaddr + MAC_HASH_LOW);
- } else if (netdev_mc_empty(dev)) { /* no multicast */
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
- MAC_CONTROL_HO | MAC_CONTROL_HP);
- } else {
- u32 mc_filter[2];
- struct dev_mc_list *mclist;
-
- /* Perfect filter mode for physical address and Hash
- filter for multicast */
- value |= MAC_CONTROL_HP;
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
- MAC_CONTROL_IF | MAC_CONTROL_HO);
-
- memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- /* The upper 6 bits of the calculated CRC are used to
- * index the contens of the hash table */
- int bit_nr =
- ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
- /* The most significant bit determines the register to
- * use (H/L) while the other 5 bits determine the bit
- * within the register. */
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- }
- writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
- writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
- }
-
- writel(value, ioaddr + MAC_CONTROL);
-
- DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
- "HI 0x%08x, LO 0x%08x\n",
- __func__, readl(ioaddr + MAC_CONTROL),
- readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
- return;
-}
-
-static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
-{
- unsigned int flow = MAC_FLOW_CTRL_ENABLE;
-
- if (duplex)
- flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
- writel(flow, ioaddr + MAC_FLOW_CTRL);
-
- return;
-}
-
-/* No PMT module supported for this Ethernet Controller.
- * Tested on ST platforms only.
- */
-static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
-{
- return;
-}
-
-static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic)
-{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.rx.own = 1;
- p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
- if (i == ring_size - 1)
- p->des01.rx.end_ring = 1;
- if (disable_rx_ic)
- p->des01.rx.disable_ic = 1;
- p++;
- }
- return;
-}
-
-static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
-{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.tx.own = 0;
- if (i == ring_size - 1)
- p->des01.tx.end_ring = 1;
- p++;
- }
- return;
-}
-
-static int dwmac100_get_tx_owner(struct dma_desc *p)
-{
- return p->des01.tx.own;
-}
-
-static int dwmac100_get_rx_owner(struct dma_desc *p)
-{
- return p->des01.rx.own;
-}
-
-static void dwmac100_set_tx_owner(struct dma_desc *p)
-{
- p->des01.tx.own = 1;
-}
-
-static void dwmac100_set_rx_owner(struct dma_desc *p)
-{
- p->des01.rx.own = 1;
-}
-
-static int dwmac100_get_tx_ls(struct dma_desc *p)
-{
- return p->des01.tx.last_segment;
-}
-
-static void dwmac100_release_tx_desc(struct dma_desc *p)
-{
- int ter = p->des01.tx.end_ring;
-
- /* clean field used within the xmit */
- p->des01.tx.first_segment = 0;
- p->des01.tx.last_segment = 0;
- p->des01.tx.buffer1_size = 0;
-
- /* clean status reported */
- p->des01.tx.error_summary = 0;
- p->des01.tx.underflow_error = 0;
- p->des01.tx.no_carrier = 0;
- p->des01.tx.loss_carrier = 0;
- p->des01.tx.excessive_deferral = 0;
- p->des01.tx.excessive_collisions = 0;
- p->des01.tx.late_collision = 0;
- p->des01.tx.heartbeat_fail = 0;
- p->des01.tx.deferred = 0;
-
- /* set termination field */
- p->des01.tx.end_ring = ter;
-
- return;
-}
-
-static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
- int csum_flag)
-{
- p->des01.tx.first_segment = is_fs;
- p->des01.tx.buffer1_size = len;
-}
-
-static void dwmac100_clear_tx_ic(struct dma_desc *p)
-{
- p->des01.tx.interrupt = 0;
-}
-
-static void dwmac100_close_tx_desc(struct dma_desc *p)
-{
- p->des01.tx.last_segment = 1;
- p->des01.tx.interrupt = 1;
-}
-
-static int dwmac100_get_rx_frame_len(struct dma_desc *p)
-{
- return p->des01.rx.frame_length;
-}
-
-struct stmmac_ops dwmac100_ops = {
- .core_init = dwmac100_core_init,
- .dump_regs = dwmac100_dump_mac_regs,
- .host_irq_status = dwmac100_irq_status,
- .set_filter = dwmac100_set_filter,
- .flow_ctrl = dwmac100_flow_ctrl,
- .pmt = dwmac100_pmt,
- .set_umac_addr = dwmac100_set_umac_addr,
- .get_umac_addr = dwmac100_get_umac_addr,
-};
-
-struct stmmac_dma_ops dwmac100_dma_ops = {
- .init = dwmac100_dma_init,
- .dump_regs = dwmac100_dump_dma_regs,
- .dma_mode = dwmac100_dma_operation_mode,
- .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
- .enable_dma_transmission = dwmac_enable_dma_transmission,
- .enable_dma_irq = dwmac_enable_dma_irq,
- .disable_dma_irq = dwmac_disable_dma_irq,
- .start_tx = dwmac_dma_start_tx,
- .stop_tx = dwmac_dma_stop_tx,
- .start_rx = dwmac_dma_start_rx,
- .stop_rx = dwmac_dma_stop_rx,
- .dma_interrupt = dwmac_dma_interrupt,
-};
-
-struct stmmac_desc_ops dwmac100_desc_ops = {
- .tx_status = dwmac100_get_tx_frame_status,
- .rx_status = dwmac100_get_rx_frame_status,
- .get_tx_len = dwmac100_get_tx_len,
- .init_rx_desc = dwmac100_init_rx_desc,
- .init_tx_desc = dwmac100_init_tx_desc,
- .get_tx_owner = dwmac100_get_tx_owner,
- .get_rx_owner = dwmac100_get_rx_owner,
- .release_tx_desc = dwmac100_release_tx_desc,
- .prepare_tx_desc = dwmac100_prepare_tx_desc,
- .clear_tx_ic = dwmac100_clear_tx_ic,
- .close_tx_desc = dwmac100_close_tx_desc,
- .get_tx_ls = dwmac100_get_tx_ls,
- .set_tx_owner = dwmac100_set_tx_owner,
- .set_rx_owner = dwmac100_set_rx_owner,
- .get_rx_frame_len = dwmac100_get_rx_frame_len,
-};
-
-struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
-{
- struct mac_device_info *mac;
-
- mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
-
- pr_info("\tDWMAC100\n");
-
- mac->mac = &dwmac100_ops;
- mac->desc = &dwmac100_desc_ops;
- mac->dma = &dwmac100_dma_ops;
-
- mac->pmt = PMT_NOT_SUPPORTED;
- mac->link.port = MAC_CONTROL_PS;
- mac->link.duplex = MAC_CONTROL_F;
- mac->link.speed = 0;
- mac->mii.addr = MAC_MII_ADDR;
- mac->mii.data = MAC_MII_DATA;
-
- return mac;
-}
diff --git a/drivers/net/stmmac/dwmac100.h b/drivers/net/stmmac/dwmac100.h
index 0f8f110..97956cb 100644
--- a/drivers/net/stmmac/dwmac100.h
+++ b/drivers/net/stmmac/dwmac100.h
@@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/phy.h>
+#include "common.h"
+
/*----------------------------------------------------------------------------
* MAC BLOCK defines
*---------------------------------------------------------------------------*/
@@ -114,3 +117,5 @@ enum ttc_control {
#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */
#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */
#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */
+
+extern struct stmmac_dma_ops dwmac100_dma_ops;
diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/stmmac/dwmac1000.h
index 62dca0e..d8d0f35 100644
--- a/drivers/net/stmmac/dwmac1000.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -172,7 +172,6 @@ enum rfd {
deac_full_minus_4 = 0x00401800,
};
#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
-#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
enum ttc_control {
DMA_CONTROL_TTC_64 = 0x00000000,
@@ -206,15 +205,4 @@ enum rtc_control {
#define GMAC_MMC_TX_INTR 0x108
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
-#undef DWMAC1000_DEBUG
-/* #define DWMAC1000__DEBUG */
-#undef FRAME_FILTER_DEBUG
-/* #define FRAME_FILTER_DEBUG */
-#ifdef DWMAC1000__DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
-#else
-#define DBG(fmt, args...) do { } while (0)
-#endif
-
extern struct stmmac_dma_ops dwmac1000_dma_ops;
-extern struct stmmac_desc_ops dwmac1000_desc_ops;
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index 5bd95eb..917b4e1 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -48,7 +48,6 @@ static void dwmac1000_core_init(unsigned long ioaddr)
/* Tag detection without filtering */
writel(0x0, ioaddr + GMAC_VLAN_TAG);
#endif
- return;
}
static void dwmac1000_dump_regs(unsigned long ioaddr)
@@ -61,7 +60,6 @@ static void dwmac1000_dump_regs(unsigned long ioaddr)
pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
offset, readl(ioaddr + offset));
}
- return;
}
static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
@@ -83,8 +81,8 @@ static void dwmac1000_set_filter(struct net_device *dev)
unsigned long ioaddr = dev->base_addr;
unsigned int value = 0;
- DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
- __func__, netdev_mc_count(dev), netdev_uc_count(dev));
+ CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
+ __func__, netdev_mc_count(dev), netdev_uc_count(dev));
if (dev->flags & IFF_PROMISC)
value = GMAC_FRAME_FILTER_PR;
@@ -95,17 +93,17 @@ static void dwmac1000_set_filter(struct net_device *dev)
writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
} else if (!netdev_mc_empty(dev)) {
u32 mc_filter[2];
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
/* Hash filter for multicast */
value = GMAC_FRAME_FILTER_HMC;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* The upper 6 bits of the calculated CRC are used to
index the contents of the hash table */
int bit_nr =
- bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
+ bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
/* The most significant bit determines the register to
* use (H/L) while the other 5 bits determine the bit
* within the register. */
@@ -136,11 +134,9 @@ static void dwmac1000_set_filter(struct net_device *dev)
#endif
writel(value, ioaddr + GMAC_FRAME_FILTER);
- DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
+ CHIP_DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
"HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
-
- return;
}
static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
@@ -148,23 +144,22 @@ static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
{
unsigned int flow = 0;
- DBG(KERN_DEBUG "GMAC Flow-Control:\n");
+ CHIP_DBG(KERN_DEBUG "GMAC Flow-Control:\n");
if (fc & FLOW_RX) {
- DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
+ CHIP_DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
flow |= GMAC_FLOW_CTRL_RFE;
}
if (fc & FLOW_TX) {
- DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
+ CHIP_DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
flow |= GMAC_FLOW_CTRL_TFE;
}
if (duplex) {
- DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
+ CHIP_DBG(KERN_DEBUG "\tduplex mode: PAUSE %d\n", pause_time);
flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
}
writel(flow, ioaddr + GMAC_FLOW_CTRL);
- return;
}
static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
@@ -172,15 +167,14 @@ static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
unsigned int pmt = 0;
if (mode == WAKE_MAGIC) {
- DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
pmt |= power_down | magic_pkt_en;
} else if (mode == WAKE_UCAST) {
- DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
pmt |= global_unicast;
}
writel(pmt, ioaddr + GMAC_PMT);
- return;
}
@@ -190,22 +184,20 @@ static void dwmac1000_irq_status(unsigned long ioaddr)
/* Not used events (e.g. MMC interrupts) are not handled. */
if ((intr_status & mmc_tx_irq))
- DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+ CHIP_DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_TX_INTR));
if (unlikely(intr_status & mmc_rx_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+ CHIP_DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_INTR));
if (unlikely(intr_status & mmc_rx_csum_offload_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+ CHIP_DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
if (unlikely(intr_status & pmt_irq)) {
- DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: received Magic frame\n");
/* clear the PMT bits 5 and 6 by reading the PMT
* status register. */
readl(ioaddr + GMAC_PMT);
}
-
- return;
}
struct stmmac_ops dwmac1000_ops = {
@@ -230,7 +222,6 @@ struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
mac->mac = &dwmac1000_ops;
- mac->desc = &dwmac1000_desc_ops;
mac->dma = &dwmac1000_dma_ops;
mac->pmt = PMT_SUPPORTED;
diff --git a/drivers/net/stmmac/dwmac1000_dma.c b/drivers/net/stmmac/dwmac1000_dma.c
index 39d436a..4158050 100644
--- a/drivers/net/stmmac/dwmac1000_dma.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -3,7 +3,7 @@
DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
developing this code.
- This contains the functions to handle the dma and descriptors.
+ This contains the functions to handle the dma.
Copyright (C) 2007-2009 STMicroelectronics Ltd
@@ -58,29 +58,20 @@ static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
return 0;
}
-/* Transmit FIFO flush operation */
-static void dwmac1000_flush_tx_fifo(unsigned long ioaddr)
-{
- u32 csr6 = readl(ioaddr + DMA_CONTROL);
- writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
-
- do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
-}
-
static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
if (txmode == SF_DMA_MODE) {
- DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: enable TX store and forward mode\n");
/* Transmit COE type 2 cannot be done in cut-through mode. */
csr6 |= DMA_CONTROL_TSF;
/* Operating on second frame increase the performance
* especially when transmit store-and-forward is used.*/
csr6 |= DMA_CONTROL_OSF;
} else {
- DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
+ CHIP_DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
" (threshold = %d)\n", txmode);
csr6 &= ~DMA_CONTROL_TSF;
csr6 &= DMA_CONTROL_TC_TX_MASK;
@@ -98,10 +89,10 @@ static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
}
if (rxmode == SF_DMA_MODE) {
- DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n");
csr6 |= DMA_CONTROL_RSF;
} else {
- DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
+ CHIP_DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
" (threshold = %d)\n", rxmode);
csr6 &= ~DMA_CONTROL_RSF;
csr6 &= DMA_CONTROL_TC_RX_MASK;
@@ -116,7 +107,6 @@ static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
}
writel(csr6, ioaddr + DMA_CONTROL);
- return;
}
/* Not yet implemented --- no RMON module */
@@ -138,306 +128,6 @@ static void dwmac1000_dump_dma_regs(unsigned long ioaddr)
readl(ioaddr + DMA_BUS_MODE + offset));
}
}
- return;
-}
-
-static int dwmac1000_get_tx_frame_status(void *data,
- struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
-{
- int ret = 0;
- struct net_device_stats *stats = (struct net_device_stats *)data;
-
- if (unlikely(p->des01.etx.error_summary)) {
- DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
- if (unlikely(p->des01.etx.jabber_timeout)) {
- DBG(KERN_ERR "\tjabber_timeout error\n");
- x->tx_jabber++;
- }
-
- if (unlikely(p->des01.etx.frame_flushed)) {
- DBG(KERN_ERR "\tframe_flushed error\n");
- x->tx_frame_flushed++;
- dwmac1000_flush_tx_fifo(ioaddr);
- }
-
- if (unlikely(p->des01.etx.loss_carrier)) {
- DBG(KERN_ERR "\tloss_carrier error\n");
- x->tx_losscarrier++;
- stats->tx_carrier_errors++;
- }
- if (unlikely(p->des01.etx.no_carrier)) {
- DBG(KERN_ERR "\tno_carrier error\n");
- x->tx_carrier++;
- stats->tx_carrier_errors++;
- }
- if (unlikely(p->des01.etx.late_collision)) {
- DBG(KERN_ERR "\tlate_collision error\n");
- stats->collisions += p->des01.etx.collision_count;
- }
- if (unlikely(p->des01.etx.excessive_collisions)) {
- DBG(KERN_ERR "\texcessive_collisions\n");
- stats->collisions += p->des01.etx.collision_count;
- }
- if (unlikely(p->des01.etx.excessive_deferral)) {
- DBG(KERN_INFO "\texcessive tx_deferral\n");
- x->tx_deferred++;
- }
-
- if (unlikely(p->des01.etx.underflow_error)) {
- DBG(KERN_ERR "\tunderflow error\n");
- dwmac1000_flush_tx_fifo(ioaddr);
- x->tx_underflow++;
- }
-
- if (unlikely(p->des01.etx.ip_header_error)) {
- DBG(KERN_ERR "\tTX IP header csum error\n");
- x->tx_ip_header_error++;
- }
-
- if (unlikely(p->des01.etx.payload_error)) {
- DBG(KERN_ERR "\tAddr/Payload csum error\n");
- x->tx_payload_error++;
- dwmac1000_flush_tx_fifo(ioaddr);
- }
-
- ret = -1;
- }
-
- if (unlikely(p->des01.etx.deferred)) {
- DBG(KERN_INFO "GMAC TX status: tx deferred\n");
- x->tx_deferred++;
- }
-#ifdef STMMAC_VLAN_TAG_USED
- if (p->des01.etx.vlan_frame) {
- DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
- x->tx_vlan++;
- }
-#endif
-
- return ret;
-}
-
-static int dwmac1000_get_tx_len(struct dma_desc *p)
-{
- return p->des01.etx.buffer1_size;
-}
-
-static int dwmac1000_coe_rdes0(int ipc_err, int type, int payload_err)
-{
- int ret = good_frame;
- u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
-
- /* bits 5 7 0 | Frame status
- * ----------------------------------------------------------
- * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects)
- * 1 0 0 | IPv4/6 No CSUM errorS.
- * 1 0 1 | IPv4/6 CSUM PAYLOAD error
- * 1 1 0 | IPv4/6 CSUM IP HR error
- * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
- * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
- * 0 1 1 | COE bypassed.. no IPv4/6 frame
- * 0 1 0 | Reserved.
- */
- if (status == 0x0) {
- DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
- ret = good_frame;
- } else if (status == 0x4) {
- DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
- ret = good_frame;
- } else if (status == 0x5) {
- DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
- ret = csum_none;
- } else if (status == 0x6) {
- DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
- ret = csum_none;
- } else if (status == 0x7) {
- DBG(KERN_ERR
- "RX Des0 status: IPv4/6 Header and Payload Error.\n");
- ret = csum_none;
- } else if (status == 0x1) {
- DBG(KERN_ERR
- "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
- ret = discard_frame;
- } else if (status == 0x3) {
- DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
- ret = discard_frame;
- }
- return ret;
-}
-
-static int dwmac1000_get_rx_frame_status(void *data,
- struct stmmac_extra_stats *x, struct dma_desc *p)
-{
- int ret = good_frame;
- struct net_device_stats *stats = (struct net_device_stats *)data;
-
- if (unlikely(p->des01.erx.error_summary)) {
- DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
- if (unlikely(p->des01.erx.descriptor_error)) {
- DBG(KERN_ERR "\tdescriptor error\n");
- x->rx_desc++;
- stats->rx_length_errors++;
- }
- if (unlikely(p->des01.erx.overflow_error)) {
- DBG(KERN_ERR "\toverflow error\n");
- x->rx_gmac_overflow++;
- }
-
- if (unlikely(p->des01.erx.ipc_csum_error))
- DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
-
- if (unlikely(p->des01.erx.late_collision)) {
- DBG(KERN_ERR "\tlate_collision error\n");
- stats->collisions++;
- stats->collisions++;
- }
- if (unlikely(p->des01.erx.receive_watchdog)) {
- DBG(KERN_ERR "\treceive_watchdog error\n");
- x->rx_watchdog++;
- }
- if (unlikely(p->des01.erx.error_gmii)) {
- DBG(KERN_ERR "\tReceive Error\n");
- x->rx_mii++;
- }
- if (unlikely(p->des01.erx.crc_error)) {
- DBG(KERN_ERR "\tCRC error\n");
- x->rx_crc++;
- stats->rx_crc_errors++;
- }
- ret = discard_frame;
- }
-
- /* After a payload csum error, the ES bit is set.
- * It doesn't match with the information reported into the databook.
- * At any rate, we need to understand if the CSUM hw computation is ok
- * and report this info to the upper layers. */
- ret = dwmac1000_coe_rdes0(p->des01.erx.ipc_csum_error,
- p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
-
- if (unlikely(p->des01.erx.dribbling)) {
- DBG(KERN_ERR "GMAC RX: dribbling error\n");
- ret = discard_frame;
- }
- if (unlikely(p->des01.erx.sa_filter_fail)) {
- DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
- x->sa_rx_filter_fail++;
- ret = discard_frame;
- }
- if (unlikely(p->des01.erx.da_filter_fail)) {
- DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
- x->da_rx_filter_fail++;
- ret = discard_frame;
- }
- if (unlikely(p->des01.erx.length_error)) {
- DBG(KERN_ERR "GMAC RX: length_error error\n");
- x->rx_length++;
- ret = discard_frame;
- }
-#ifdef STMMAC_VLAN_TAG_USED
- if (p->des01.erx.vlan_tag) {
- DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
- x->rx_vlan++;
- }
-#endif
- return ret;
-}
-
-static void dwmac1000_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic)
-{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.erx.own = 1;
- p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
- /* To support jumbo frames */
- p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
- if (i == ring_size - 1)
- p->des01.erx.end_ring = 1;
- if (disable_rx_ic)
- p->des01.erx.disable_ic = 1;
- p++;
- }
- return;
-}
-
-static void dwmac1000_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
-{
- int i;
-
- for (i = 0; i < ring_size; i++) {
- p->des01.etx.own = 0;
- if (i == ring_size - 1)
- p->des01.etx.end_ring = 1;
- p++;
- }
-
- return;
-}
-
-static int dwmac1000_get_tx_owner(struct dma_desc *p)
-{
- return p->des01.etx.own;
-}
-
-static int dwmac1000_get_rx_owner(struct dma_desc *p)
-{
- return p->des01.erx.own;
-}
-
-static void dwmac1000_set_tx_owner(struct dma_desc *p)
-{
- p->des01.etx.own = 1;
-}
-
-static void dwmac1000_set_rx_owner(struct dma_desc *p)
-{
- p->des01.erx.own = 1;
-}
-
-static int dwmac1000_get_tx_ls(struct dma_desc *p)
-{
- return p->des01.etx.last_segment;
-}
-
-static void dwmac1000_release_tx_desc(struct dma_desc *p)
-{
- int ter = p->des01.etx.end_ring;
-
- memset(p, 0, sizeof(struct dma_desc));
- p->des01.etx.end_ring = ter;
-
- return;
-}
-
-static void dwmac1000_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
- int csum_flag)
-{
- p->des01.etx.first_segment = is_fs;
- if (unlikely(len > BUF_SIZE_4KiB)) {
- p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
- p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
- } else {
- p->des01.etx.buffer1_size = len;
- }
- if (likely(csum_flag))
- p->des01.etx.checksum_insertion = cic_full;
-}
-
-static void dwmac1000_clear_tx_ic(struct dma_desc *p)
-{
- p->des01.etx.interrupt = 0;
-}
-
-static void dwmac1000_close_tx_desc(struct dma_desc *p)
-{
- p->des01.etx.last_segment = 1;
- p->des01.etx.interrupt = 1;
-}
-
-static int dwmac1000_get_rx_frame_len(struct dma_desc *p)
-{
- return p->des01.erx.frame_length;
}
struct stmmac_dma_ops dwmac1000_dma_ops = {
@@ -454,21 +144,3 @@ struct stmmac_dma_ops dwmac1000_dma_ops = {
.stop_rx = dwmac_dma_stop_rx,
.dma_interrupt = dwmac_dma_interrupt,
};
-
-struct stmmac_desc_ops dwmac1000_desc_ops = {
- .tx_status = dwmac1000_get_tx_frame_status,
- .rx_status = dwmac1000_get_rx_frame_status,
- .get_tx_len = dwmac1000_get_tx_len,
- .init_rx_desc = dwmac1000_init_rx_desc,
- .init_tx_desc = dwmac1000_init_tx_desc,
- .get_tx_owner = dwmac1000_get_tx_owner,
- .get_rx_owner = dwmac1000_get_rx_owner,
- .release_tx_desc = dwmac1000_release_tx_desc,
- .prepare_tx_desc = dwmac1000_prepare_tx_desc,
- .clear_tx_ic = dwmac1000_clear_tx_ic,
- .close_tx_desc = dwmac1000_close_tx_desc,
- .get_tx_ls = dwmac1000_get_tx_ls,
- .set_tx_owner = dwmac1000_set_tx_owner,
- .set_rx_owner = dwmac1000_set_rx_owner,
- .get_rx_frame_len = dwmac1000_get_rx_frame_len,
-};
diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c
new file mode 100644
index 0000000..6f270a0
--- /dev/null
+++ b/drivers/net/stmmac/dwmac100_core.c
@@ -0,0 +1,196 @@
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ This only implements the mac core functions for this chip.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include "dwmac100.h"
+
+static void dwmac100_core_init(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
+#endif
+}
+
+static void dwmac100_dump_mac_regs(unsigned long ioaddr)
+{
+ pr_info("\t----------------------------------------------\n"
+ "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
+ "\t----------------------------------------------\n",
+ (unsigned int)ioaddr);
+ pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
+ readl(ioaddr + MAC_CONTROL));
+ pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
+ readl(ioaddr + MAC_ADDR_HIGH));
+ pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
+ readl(ioaddr + MAC_ADDR_LOW));
+ pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
+ MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
+ pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
+ MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+ pr_info("\tflow control (offset 0x%x): 0x%08x\n",
+ MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
+ pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
+ readl(ioaddr + MAC_VLAN1));
+ pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
+ readl(ioaddr + MAC_VLAN2));
+ pr_info("\n\tMAC management counter registers\n");
+ pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
+ MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
+ pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
+ MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
+ pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
+ MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
+ pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
+ MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
+ pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
+ MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
+}
+
+static void dwmac100_irq_status(unsigned long ioaddr)
+{
+ return;
+}
+
+static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void dwmac100_set_filter(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ if (dev->flags & IFF_PROMISC) {
+ value |= MAC_CONTROL_PR;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
+ MAC_CONTROL_HP);
+ } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value |= MAC_CONTROL_PM;
+ value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
+ writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + MAC_HASH_LOW);
+ } else if (netdev_mc_empty(dev)) { /* no multicast */
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
+ MAC_CONTROL_HO | MAC_CONTROL_HP);
+ } else {
+ u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
+
+ /* Perfect filter mode for physical address and Hash
+ filter for multicast */
+ value |= MAC_CONTROL_HP;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
+ MAC_CONTROL_IF | MAC_CONTROL_HO);
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ * index the contents of the hash table */
+ int bit_nr =
+ ether_crc(ETH_ALEN, ha->addr) >> 26;
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
+ }
+
+ writel(value, ioaddr + MAC_CONTROL);
+
+ CHIP_DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
+ "HI 0x%08x, LO 0x%08x\n",
+ __func__, readl(ioaddr + MAC_CONTROL),
+ readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
+}
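A minimal user-space sketch of the hash computation in dwmac100_set_filter() above: the CRC routine below restates the kernel's big-endian ether_crc() for illustration and the sample multicast address is arbitrary. The upper 6 CRC bits pick one of 64 hash slots; bit 5 of the slot index selects MAC_HASH_HIGH (mc_filter[1]) or MAC_HASH_LOW (mc_filter[0]).

#include <stdint.h>
#include <stdio.h>

/* Plain re-statement of ether_crc() (CRC-32, MSB-first) for illustration. */
static uint32_t ether_crc_sketch(int len, const unsigned char *p)
{
	uint32_t crc = 0xffffffff;

	while (--len >= 0) {
		unsigned char byte = *p++;
		int bit;

		for (bit = 0; bit < 8; bit++, byte >>= 1)
			crc = (crc << 1) ^
			      (((crc >> 31) ^ (byte & 1)) ? 0x04c11db7 : 0);
	}
	return crc;
}

int main(void)
{
	unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }; /* sample */
	uint32_t mc_filter[2] = { 0, 0 };
	int bit_nr = ether_crc_sketch(6, mc) >> 26;	/* upper 6 bits */

	/* bit 5 of the index selects the register, bits 4..0 the bit in it */
	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
	printf("hash bit %d -> %s |= 0x%08x\n", bit_nr,
	       (bit_nr >> 5) ? "MAC_HASH_HIGH" : "MAC_HASH_LOW",
	       mc_filter[bit_nr >> 5]);
	return 0;
}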
+
+static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = MAC_FLOW_CTRL_ENABLE;
+
+ if (duplex)
+ flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
+ writel(flow, ioaddr + MAC_FLOW_CTRL);
+}
+
+/* No PMT module supported for this Ethernet Controller.
+ * Tested on ST platforms only.
+ */
+static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
+{
+ return;
+}
+
+struct stmmac_ops dwmac100_ops = {
+ .core_init = dwmac100_core_init,
+ .dump_regs = dwmac100_dump_mac_regs,
+ .host_irq_status = dwmac100_irq_status,
+ .set_filter = dwmac100_set_filter,
+ .flow_ctrl = dwmac100_flow_ctrl,
+ .pmt = dwmac100_pmt,
+ .set_umac_addr = dwmac100_set_umac_addr,
+ .get_umac_addr = dwmac100_get_umac_addr,
+};
+
+struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
+{
+ struct mac_device_info *mac;
+
+ mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+
+ pr_info("\tDWMAC100\n");
+
+ mac->mac = &dwmac100_ops;
+ mac->dma = &dwmac100_dma_ops;
+
+ mac->pmt = PMT_NOT_SUPPORTED;
+ mac->link.port = MAC_CONTROL_PS;
+ mac->link.duplex = MAC_CONTROL_F;
+ mac->link.speed = 0;
+ mac->mii.addr = MAC_MII_ADDR;
+ mac->mii.data = MAC_MII_DATA;
+
+ return mac;
+}
diff --git a/drivers/net/stmmac/dwmac100_dma.c b/drivers/net/stmmac/dwmac100_dma.c
new file mode 100644
index 0000000..2fece7b
--- /dev/null
+++ b/drivers/net/stmmac/dwmac100_dma.c
@@ -0,0 +1,134 @@
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ This contains the functions to handle the dma.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "dwmac100.h"
+#include "dwmac_dma.h"
+
+static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+ u32 dma_rx)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+ do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
+
+ /* Enable Application Access by writing to DMA CSR0 */
+ writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
+ ioaddr + DMA_BUS_MODE);
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+
+ /* The base address of the RX/TX descriptor lists must be written into
+ * DMA CSR3 and CSR4, respectively. */
+ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
+ writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+
+ return 0;
+}
+
+/* Store and Forward capability is not used at all.
+ * The transmit threshold can be programmed by
+ * setting the TTC bits in the DMA control register.*/
+static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
+ int rxmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ if (txmode <= 32)
+ csr6 |= DMA_CONTROL_TTC_32;
+ else if (txmode <= 64)
+ csr6 |= DMA_CONTROL_TTC_64;
+ else
+ csr6 |= DMA_CONTROL_TTC_128;
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+}
+
+static void dwmac100_dump_dma_regs(unsigned long ioaddr)
+{
+ int i;
+
+ CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n");
+ for (i = 0; i < 9; i++)
+ pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
+ (DMA_BUS_MODE + i * 4),
+ readl(ioaddr + DMA_BUS_MODE + i * 4));
+ CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
+ CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
+}
+
+/* DMA controller has two counters to track the number of
+ * the receive missed frames. */
+static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ unsigned long ioaddr)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
+
+ if (unlikely(csr8)) {
+ if (csr8 & DMA_MISSED_FRAME_OVE) {
+ stats->rx_over_errors += 0x800;
+ x->rx_overflow_cntr += 0x800;
+ } else {
+ unsigned int ove_cntr;
+ ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
+ stats->rx_over_errors += ove_cntr;
+ x->rx_overflow_cntr += ove_cntr;
+ }
+
+ if (csr8 & DMA_MISSED_FRAME_OVE_M) {
+ stats->rx_missed_errors += 0xffff;
+ x->rx_missed_cntr += 0xffff;
+ } else {
+ unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
+ stats->rx_missed_errors += miss_f;
+ x->rx_missed_cntr += miss_f;
+ }
+ }
+}
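An illustrative decode of a single CSR8 snapshot, mirroring dwmac100_dma_diagnostic_fr() above. The mask values here are assumptions made for the sketch (the authoritative definitions live in dwmac100.h): bits 15:0 count missed frames, bits 27:17 count FIFO overflows, and the two overflow flags mean the respective hardware counter has saturated.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout for illustration only; see dwmac100.h for the real masks. */
#define DMA_MISSED_FRAME_M_CNTR   0x0000ffff	/* missed frames, bits 15:0   */
#define DMA_MISSED_FRAME_OVE_M    0x00010000	/* missed counter overflowed  */
#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000	/* FIFO overflows, bits 27:17 */
#define DMA_MISSED_FRAME_OVE      0x10000000	/* overflow counter overflowed */

int main(void)
{
	uint32_t csr8 = 0x00240007;	/* made-up snapshot */
	unsigned int missed, overflows;

	missed = (csr8 & DMA_MISSED_FRAME_OVE_M) ? 0xffff
						 : (csr8 & DMA_MISSED_FRAME_M_CNTR);
	overflows = (csr8 & DMA_MISSED_FRAME_OVE) ? 0x800
						  : (csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17;
	printf("rx_missed_errors += %u, rx_over_errors += %u\n", missed, overflows);
	return 0;
}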
+
+struct stmmac_dma_ops dwmac100_dma_ops = {
+ .init = dwmac100_dma_init,
+ .dump_regs = dwmac100_dump_dma_regs,
+ .dma_mode = dwmac100_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
+};
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
index de848d9..7b815a1 100644
--- a/drivers/net/stmmac/dwmac_dma.h
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -95,6 +95,7 @@
#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
extern void dwmac_enable_dma_irq(unsigned long ioaddr);
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
index d4adb1e..a854152 100644
--- a/drivers/net/stmmac/dwmac_lib.c
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -52,7 +52,6 @@ void dwmac_dma_start_tx(unsigned long ioaddr)
u32 value = readl(ioaddr + DMA_CONTROL);
value |= DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
- return;
}
void dwmac_dma_stop_tx(unsigned long ioaddr)
@@ -60,7 +59,6 @@ void dwmac_dma_stop_tx(unsigned long ioaddr)
u32 value = readl(ioaddr + DMA_CONTROL);
value &= ~DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
- return;
}
void dwmac_dma_start_rx(unsigned long ioaddr)
@@ -68,8 +66,6 @@ void dwmac_dma_start_rx(unsigned long ioaddr)
u32 value = readl(ioaddr + DMA_CONTROL);
value |= DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CONTROL);
-
- return;
}
void dwmac_dma_stop_rx(unsigned long ioaddr)
@@ -77,8 +73,6 @@ void dwmac_dma_stop_rx(unsigned long ioaddr)
u32 value = readl(ioaddr + DMA_CONTROL);
value &= ~DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CONTROL);
-
- return;
}
#ifdef DWMAC_DMA_DEBUG
@@ -111,7 +105,6 @@ static void show_tx_process_state(unsigned int status)
default:
break;
}
- return;
}
static void show_rx_process_state(unsigned int status)
@@ -149,7 +142,6 @@ static void show_rx_process_state(unsigned int status)
default:
break;
}
- return;
}
#endif
@@ -227,6 +219,13 @@ int dwmac_dma_interrupt(unsigned long ioaddr,
return ret;
}
+void dwmac_dma_flush_tx_fifo(unsigned long ioaddr)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
+
+ do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
+}
void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
unsigned int high, unsigned int low)
@@ -237,8 +236,6 @@ void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
writel(data, ioaddr + high);
data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
writel(data, ioaddr + low);
-
- return;
}
void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
@@ -257,7 +254,5 @@ void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
addr[3] = (lo_addr >> 24) & 0xff;
addr[4] = hi_addr & 0xff;
addr[5] = (hi_addr >> 8) & 0xff;
-
- return;
}
diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c
new file mode 100644
index 0000000..3c18ebe
--- /dev/null
+++ b/drivers/net/stmmac/enh_desc.c
@@ -0,0 +1,337 @@
+/*******************************************************************************
+ This contains the functions to handle the enhanced descriptors.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "common.h"
+
+static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.etx.error_summary)) {
+ CHIP_DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
+ if (unlikely(p->des01.etx.jabber_timeout)) {
+ CHIP_DBG(KERN_ERR "\tjabber_timeout error\n");
+ x->tx_jabber++;
+ }
+
+ if (unlikely(p->des01.etx.frame_flushed)) {
+ CHIP_DBG(KERN_ERR "\tframe_flushed error\n");
+ x->tx_frame_flushed++;
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ }
+
+ if (unlikely(p->des01.etx.loss_carrier)) {
+ CHIP_DBG(KERN_ERR "\tloss_carrier error\n");
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.no_carrier)) {
+ CHIP_DBG(KERN_ERR "\tno_carrier error\n");
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.late_collision)) {
+ CHIP_DBG(KERN_ERR "\tlate_collision error\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_collisions)) {
+ CHIP_DBG(KERN_ERR "\texcessive_collisions\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_deferral)) {
+ CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n");
+ x->tx_deferred++;
+ }
+
+ if (unlikely(p->des01.etx.underflow_error)) {
+ CHIP_DBG(KERN_ERR "\tunderflow error\n");
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ x->tx_underflow++;
+ }
+
+ if (unlikely(p->des01.etx.ip_header_error)) {
+ CHIP_DBG(KERN_ERR "\tTX IP header csum error\n");
+ x->tx_ip_header_error++;
+ }
+
+ if (unlikely(p->des01.etx.payload_error)) {
+ CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n");
+ x->tx_payload_error++;
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ }
+
+ ret = -1;
+ }
+
+ if (unlikely(p->des01.etx.deferred)) {
+ CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n");
+ x->tx_deferred++;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.etx.vlan_frame) {
+ CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
+ x->tx_vlan++;
+ }
+#endif
+
+ return ret;
+}
+
+static int enh_desc_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.etx.buffer1_size;
+}
+
+static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
+{
+ int ret = good_frame;
+ u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
+
+ /* bits 5 7 0 | Frame status
+ * ----------------------------------------------------------
+ * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
+ * 1 0 0 | IPv4/6 No CSUM errorS.
+ * 1 0 1 | IPv4/6 CSUM PAYLOAD error
+ * 1 1 0 | IPv4/6 CSUM IP HR error
+ * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
+ * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
+ * 0 1 1 | COE bypassed.. no IPv4/6 frame
+ * 0 1 0 | Reserved.
+ */
+ if (status == 0x0) {
+ CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
+ ret = good_frame;
+ } else if (status == 0x4) {
+ CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
+ ret = good_frame;
+ } else if (status == 0x5) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x6) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
+ ret = csum_none;
+ } else if (status == 0x7) {
+ CHIP_DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 Header and Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x1) {
+ CHIP_DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
+ ret = discard_frame;
+ } else if (status == 0x3) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
+ ret = discard_frame;
+ }
+ return ret;
+}
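A compact worked example of the RDES0 decode above. The status value is built exactly as in enh_desc_coe_rdes0() (type << 2 | ipc_err << 1 | payload_err), and the enum values are stand-ins for the good_frame/csum_none/discard_frame constants defined in common.h.

#include <stdio.h>

enum rx_frame_status { good_frame, discard_frame, csum_none };	/* stand-ins */

static enum rx_frame_status coe_rdes0_sketch(int ipc_err, int type,
					     int payload_err)
{
	switch ((type << 2 | ipc_err << 1 | payload_err) & 0x7) {
	case 0x5: case 0x6: case 0x7:	/* payload and/or header csum error */
		return csum_none;	/* let the stack verify the csum    */
	case 0x1:			/* unsupported IP payload           */
	case 0x3:			/* COE bypassed, not an IP frame    */
		return discard_frame;
	default:			/* 0x0, 0x4 (and reserved 0x2)      */
		return good_frame;
	}
}

int main(void)
{
	/* e.g. an IPv4 frame whose hardware payload checksum failed (0x5): */
	enum rx_frame_status st = coe_rdes0_sketch(0, 1, 1);

	printf("payload csum error -> %s\n",
	       st == csum_none ? "csum_none (checksum left to software)" : "other");
	return 0;
}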
+
+static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = good_frame;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.erx.error_summary)) {
+ CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n",
+ p->des01.erx);
+ if (unlikely(p->des01.erx.descriptor_error)) {
+ CHIP_DBG(KERN_ERR "\tdescriptor error\n");
+ x->rx_desc++;
+ stats->rx_length_errors++;
+ }
+ if (unlikely(p->des01.erx.overflow_error)) {
+ CHIP_DBG(KERN_ERR "\toverflow error\n");
+ x->rx_gmac_overflow++;
+ }
+
+ if (unlikely(p->des01.erx.ipc_csum_error))
+ CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
+
+ if (unlikely(p->des01.erx.late_collision)) {
+ CHIP_DBG(KERN_ERR "\tlate_collision error\n");
+ stats->collisions++;
+ stats->collisions++;
+ }
+ if (unlikely(p->des01.erx.receive_watchdog)) {
+ CHIP_DBG(KERN_ERR "\treceive_watchdog error\n");
+ x->rx_watchdog++;
+ }
+ if (unlikely(p->des01.erx.error_gmii)) {
+ CHIP_DBG(KERN_ERR "\tReceive Error\n");
+ x->rx_mii++;
+ }
+ if (unlikely(p->des01.erx.crc_error)) {
+ CHIP_DBG(KERN_ERR "\tCRC error\n");
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+
+ /* After a payload csum error, the ES bit is set.
+ * This does not match the information reported in the databook.
+ * At any rate, we need to understand if the CSUM hw computation is ok
+ * and report this info to the upper layers. */
+ ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
+ p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
+
+ if (unlikely(p->des01.erx.dribbling)) {
+ CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.sa_filter_fail)) {
+ CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
+ x->sa_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.da_filter_fail)) {
+ CHIP_DBG(KERN_ERR "GMAC RX : Dest Address filter fail\n");
+ x->da_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.length_error)) {
+ CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n");
+ x->rx_length++;
+ ret = discard_frame;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.erx.vlan_tag) {
+ CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
+ x->rx_vlan++;
+ }
+#endif
+ return ret;
+}
+
+static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.erx.own = 1;
+ p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+ /* To support jumbo frames */
+ p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
+ if (i == ring_size - 1)
+ p->des01.erx.end_ring = 1;
+ if (disable_rx_ic)
+ p->des01.erx.disable_ic = 1;
+ p++;
+ }
+}
+
+static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+ int i;
+
+ for (i = 0; i < ring_size; i++) {
+ p->des01.etx.own = 0;
+ if (i == ring_size - 1)
+ p->des01.etx.end_ring = 1;
+ p++;
+ }
+}
+
+static int enh_desc_get_tx_owner(struct dma_desc *p)
+{
+ return p->des01.etx.own;
+}
+
+static int enh_desc_get_rx_owner(struct dma_desc *p)
+{
+ return p->des01.erx.own;
+}
+
+static void enh_desc_set_tx_owner(struct dma_desc *p)
+{
+ p->des01.etx.own = 1;
+}
+
+static void enh_desc_set_rx_owner(struct dma_desc *p)
+{
+ p->des01.erx.own = 1;
+}
+
+static int enh_desc_get_tx_ls(struct dma_desc *p)
+{
+ return p->des01.etx.last_segment;
+}
+
+static void enh_desc_release_tx_desc(struct dma_desc *p)
+{
+ int ter = p->des01.etx.end_ring;
+
+ memset(p, 0, sizeof(struct dma_desc));
+ p->des01.etx.end_ring = ter;
+}
+
+static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ int csum_flag)
+{
+ p->des01.etx.first_segment = is_fs;
+ if (unlikely(len > BUF_SIZE_4KiB)) {
+ p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
+ p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
+ } else {
+ p->des01.etx.buffer1_size = len;
+ }
+ if (likely(csum_flag))
+ p->des01.etx.checksum_insertion = cic_full;
+}
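For a quick sense of the two-buffer split performed by enh_desc_prepare_tx_desc() above, a small sketch; BUF_SIZE_4KiB is assumed to be 4096, matching common.h.

#include <stdio.h>

#define BUF_SIZE_4KiB 4096	/* assumed to match common.h */

int main(void)
{
	int lengths[] = { 1500, 6000 };
	int i;

	for (i = 0; i < 2; i++) {
		int len = lengths[i];
		int b1 = len > BUF_SIZE_4KiB ? BUF_SIZE_4KiB : len;
		int b2 = len > BUF_SIZE_4KiB ? len - BUF_SIZE_4KiB : 0;

		/* 1500 -> 1500/0, 6000 -> 4096/1904 */
		printf("len %d: buffer1_size=%d buffer2_size=%d\n", len, b1, b2);
	}
	return 0;
}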
+
+static void enh_desc_clear_tx_ic(struct dma_desc *p)
+{
+ p->des01.etx.interrupt = 0;
+}
+
+static void enh_desc_close_tx_desc(struct dma_desc *p)
+{
+ p->des01.etx.last_segment = 1;
+ p->des01.etx.interrupt = 1;
+}
+
+static int enh_desc_get_rx_frame_len(struct dma_desc *p)
+{
+ return p->des01.erx.frame_length;
+}
+
+struct stmmac_desc_ops enh_desc_ops = {
+ .tx_status = enh_desc_get_tx_status,
+ .rx_status = enh_desc_get_rx_status,
+ .get_tx_len = enh_desc_get_tx_len,
+ .init_rx_desc = enh_desc_init_rx_desc,
+ .init_tx_desc = enh_desc_init_tx_desc,
+ .get_tx_owner = enh_desc_get_tx_owner,
+ .get_rx_owner = enh_desc_get_rx_owner,
+ .release_tx_desc = enh_desc_release_tx_desc,
+ .prepare_tx_desc = enh_desc_prepare_tx_desc,
+ .clear_tx_ic = enh_desc_clear_tx_ic,
+ .close_tx_desc = enh_desc_close_tx_desc,
+ .get_tx_ls = enh_desc_get_tx_ls,
+ .set_tx_owner = enh_desc_set_tx_owner,
+ .set_rx_owner = enh_desc_set_rx_owner,
+ .get_rx_frame_len = enh_desc_get_rx_frame_len,
+};
diff --git a/drivers/net/stmmac/norm_desc.c b/drivers/net/stmmac/norm_desc.c
new file mode 100644
index 0000000..31ad536
--- /dev/null
+++ b/drivers/net/stmmac/norm_desc.c
@@ -0,0 +1,236 @@
+/*******************************************************************************
+ This contains the functions to handle the normal descriptors.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "common.h"
+
+static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.tx.error_summary)) {
+ if (unlikely(p->des01.tx.underflow_error)) {
+ x->tx_underflow++;
+ stats->tx_fifo_errors++;
+ }
+ if (unlikely(p->des01.tx.no_carrier)) {
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.tx.loss_carrier)) {
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely((p->des01.tx.excessive_deferral) ||
+ (p->des01.tx.excessive_collisions) ||
+ (p->des01.tx.late_collision)))
+ stats->collisions += p->des01.tx.collision_count;
+ ret = -1;
+ }
+ if (unlikely(p->des01.tx.heartbeat_fail)) {
+ x->tx_heartbeat++;
+ stats->tx_heartbeat_errors++;
+ ret = -1;
+ }
+ if (unlikely(p->des01.tx.deferred))
+ x->tx_deferred++;
+
+ return ret;
+}
+
+static int ndesc_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.tx.buffer1_size;
+}
+
+/* This function verifies if each incoming frame has some errors
+ * and, if required, updates the multicast statistics.
+ * In case of success, it returns csum_none because the device
+ * is not able to compute the csum in HW. */
+static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = csum_none;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.rx.last_descriptor == 0)) {
+ pr_warning("ndesc Error: Oversized Ethernet "
+ "frame spanned multiple buffers\n");
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
+ if (unlikely(p->des01.rx.error_summary)) {
+ if (unlikely(p->des01.rx.descriptor_error))
+ x->rx_desc++;
+ if (unlikely(p->des01.rx.partial_frame_error))
+ x->rx_partial++;
+ if (unlikely(p->des01.rx.run_frame))
+ x->rx_runt++;
+ if (unlikely(p->des01.rx.frame_too_long))
+ x->rx_toolong++;
+ if (unlikely(p->des01.rx.collision)) {
+ x->rx_collision++;
+ stats->collisions++;
+ }
+ if (unlikely(p->des01.rx.crc_error)) {
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.rx.dribbling))
+ ret = discard_frame;
+
+ if (unlikely(p->des01.rx.length_error)) {
+ x->rx_length++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.rx.mii_error)) {
+ x->rx_mii++;
+ ret = discard_frame;
+ }
+ if (p->des01.rx.multicast_frame) {
+ x->rx_multicast++;
+ stats->multicast++;
+ }
+ return ret;
+}
+
+static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.rx.own = 1;
+ p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+ if (i == ring_size - 1)
+ p->des01.rx.end_ring = 1;
+ if (disable_rx_ic)
+ p->des01.rx.disable_ic = 1;
+ p++;
+ }
+}
+
+static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.tx.own = 0;
+ if (i == ring_size - 1)
+ p->des01.tx.end_ring = 1;
+ p++;
+ }
+}
+
+static int ndesc_get_tx_owner(struct dma_desc *p)
+{
+ return p->des01.tx.own;
+}
+
+static int ndesc_get_rx_owner(struct dma_desc *p)
+{
+ return p->des01.rx.own;
+}
+
+static void ndesc_set_tx_owner(struct dma_desc *p)
+{
+ p->des01.tx.own = 1;
+}
+
+static void ndesc_set_rx_owner(struct dma_desc *p)
+{
+ p->des01.rx.own = 1;
+}
+
+static int ndesc_get_tx_ls(struct dma_desc *p)
+{
+ return p->des01.tx.last_segment;
+}
+
+static void ndesc_release_tx_desc(struct dma_desc *p)
+{
+ int ter = p->des01.tx.end_ring;
+
+ /* clean field used within the xmit */
+ p->des01.tx.first_segment = 0;
+ p->des01.tx.last_segment = 0;
+ p->des01.tx.buffer1_size = 0;
+
+ /* clean status reported */
+ p->des01.tx.error_summary = 0;
+ p->des01.tx.underflow_error = 0;
+ p->des01.tx.no_carrier = 0;
+ p->des01.tx.loss_carrier = 0;
+ p->des01.tx.excessive_deferral = 0;
+ p->des01.tx.excessive_collisions = 0;
+ p->des01.tx.late_collision = 0;
+ p->des01.tx.heartbeat_fail = 0;
+ p->des01.tx.deferred = 0;
+
+ /* set termination field */
+ p->des01.tx.end_ring = ter;
+}
+
+static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ int csum_flag)
+{
+ p->des01.tx.first_segment = is_fs;
+ p->des01.tx.buffer1_size = len;
+}
+
+static void ndesc_clear_tx_ic(struct dma_desc *p)
+{
+ p->des01.tx.interrupt = 0;
+}
+
+static void ndesc_close_tx_desc(struct dma_desc *p)
+{
+ p->des01.tx.last_segment = 1;
+ p->des01.tx.interrupt = 1;
+}
+
+static int ndesc_get_rx_frame_len(struct dma_desc *p)
+{
+ return p->des01.rx.frame_length;
+}
+
+struct stmmac_desc_ops ndesc_ops = {
+ .tx_status = ndesc_get_tx_status,
+ .rx_status = ndesc_get_rx_status,
+ .get_tx_len = ndesc_get_tx_len,
+ .init_rx_desc = ndesc_init_rx_desc,
+ .init_tx_desc = ndesc_init_tx_desc,
+ .get_tx_owner = ndesc_get_tx_owner,
+ .get_rx_owner = ndesc_get_rx_owner,
+ .release_tx_desc = ndesc_release_tx_desc,
+ .prepare_tx_desc = ndesc_prepare_tx_desc,
+ .clear_tx_ic = ndesc_clear_tx_ic,
+ .close_tx_desc = ndesc_close_tx_desc,
+ .get_tx_ls = ndesc_get_tx_ls,
+ .set_tx_owner = ndesc_set_tx_owner,
+ .set_rx_owner = ndesc_set_rx_owner,
+ .get_rx_frame_len = ndesc_get_rx_frame_len,
+};
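The point of pulling enh_desc_ops and ndesc_ops out of the MAC-specific files is that the core only ever reaches descriptors through the ops pointer (e.g. priv->hw->desc->set_rx_owner() in stmmac_main.c), so either layout can back either MAC. A minimal stand-alone sketch of that indirection, with hypothetical names:

#include <stdio.h>

struct desc_ops {			/* shape of stmmac_desc_ops, reduced */
	const char *name;
	void (*set_rx_owner)(int *own);
};

static void enh_set_rx_owner(int *own)  { *own = 1; }
static void norm_set_rx_owner(int *own) { *own = 1; }

static const struct desc_ops enh_ops  = { "enhanced", enh_set_rx_owner };
static const struct desc_ops norm_ops = { "normal",   norm_set_rx_owner };

int main(void)
{
	int enh_desc = 1;	/* would come from platform data */
	const struct desc_ops *desc = enh_desc ? &enh_ops : &norm_ops;
	int own = 0;

	desc->set_rx_owner(&own);	/* fast path never branches on the layout */
	printf("%s descriptors, own=%d\n", desc->name, own);
	return 0;
}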
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index ba35e69..ebebc64 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,14 +20,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Jan_2010"
+#define DRV_MODULE_VERSION "Apr_2010"
#include <linux/stmmac.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define STMMAC_VLAN_TAG_USED
-#include <linux/if_vlan.h>
-#endif
-
#include "common.h"
#ifdef CONFIG_STMMAC_TIMER
#include "stmmac_timer.h"
@@ -93,6 +88,7 @@ struct stmmac_priv {
#ifdef STMMAC_VLAN_TAG_USED
struct vlan_group *vlgrp;
#endif
+ int enh_desc;
};
#ifdef CONFIG_STM_DRIVERS
@@ -120,3 +116,5 @@ static inline int stmmac_claim_resource(struct platform_device *pdev)
extern int stmmac_mdio_unregister(struct net_device *ndev);
extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
+extern struct stmmac_desc_ops enh_desc_ops;
+extern struct stmmac_desc_ops ndesc_ops;
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index c021eaa..f080509 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -102,7 +102,6 @@ void stmmac_ethtool_getdrvinfo(struct net_device *dev,
strcpy(info->version, DRV_MODULE_VERSION);
info->fw_version[0] = '\0';
info->n_stats = STMMAC_STATS_LEN;
- return;
}
int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -194,8 +193,6 @@ void stmmac_ethtool_gregs(struct net_device *dev,
reg_space[i + 55] =
readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
}
-
- return;
}
int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
@@ -233,7 +230,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
pause->tx_pause = 1;
spin_unlock(&priv->lock);
- return;
}
static int
@@ -292,8 +288,6 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
data[i] = (stmmac_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
}
-
- return;
}
static int stmmac_get_sset_count(struct net_device *netdev, int sset)
@@ -323,7 +317,6 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
WARN_ON(1);
break;
}
- return;
}
/* Currently only support WOL through Magic packet. */
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 4111a85..a31d580 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -169,8 +169,6 @@ static void stmmac_verify_args(void)
flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
-
- return;
}
#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
@@ -184,7 +182,6 @@ static void print_pkt(unsigned char *buf, int len)
pr_info(" %02x", buf[j]);
}
pr_info("\n");
- return;
}
#endif
@@ -514,7 +511,6 @@ static void init_dma_desc_rings(struct net_device *dev)
pr_info("TX descriptor ring:\n");
display_ring(priv->dma_tx, txsize);
}
- return;
}
static void dma_free_rx_skbufs(struct stmmac_priv *priv)
@@ -529,7 +525,6 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv)
}
priv->rx_skbuff[i] = NULL;
}
- return;
}
static void dma_free_tx_skbufs(struct stmmac_priv *priv)
@@ -547,7 +542,6 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
priv->tx_skbuff[i] = NULL;
}
}
- return;
}
static void free_dma_desc_resources(struct stmmac_priv *priv)
@@ -567,8 +561,6 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
kfree(priv->rx_skbuff_dma);
kfree(priv->rx_skbuff);
kfree(priv->tx_skbuff);
-
- return;
}
/**
@@ -598,8 +590,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
}
}
tx_coe = priv->tx_coe;
-
- return;
}
/**
@@ -675,7 +665,6 @@ static void stmmac_tx(struct stmmac_priv *priv)
}
netif_tx_unlock(priv->dev);
}
- return;
}
static inline void stmmac_enable_irq(struct stmmac_priv *priv)
@@ -731,8 +720,6 @@ void stmmac_schedule(struct net_device *dev)
priv->xstats.sched_timer_n++;
_stmmac_schedule(priv);
-
- return;
}
static void stmmac_no_timer_started(unsigned int x)
@@ -763,8 +750,6 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
priv->dev->stats.tx_errors++;
netif_wake_queue(priv->dev);
-
- return;
}
@@ -788,8 +773,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
stmmac_tx_err(priv);
} else if (unlikely(status == tx_hard_error))
stmmac_tx_err(priv);
-
- return;
}
/**
@@ -837,7 +820,7 @@ static int stmmac_open(struct net_device *dev)
#ifdef CONFIG_STMMAC_TIMER
priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
if (unlikely(priv->tm == NULL)) {
- pr_err("%s: ERROR: timer memory alloc failed \n", __func__);
+ pr_err("%s: ERROR: timer memory alloc failed\n", __func__);
return -ENOMEM;
}
priv->tm->freq = tmrate;
@@ -1197,7 +1180,6 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
priv->hw->desc->set_rx_owner(p + entry);
}
- return;
}
static int stmmac_rx(struct stmmac_priv *priv, int limit)
@@ -1280,7 +1262,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
- priv->dev->last_rx = jiffies;
}
entry = next_entry;
p = p_next; /* use prefetched values */
@@ -1332,7 +1313,6 @@ static void stmmac_tx_timeout(struct net_device *dev)
/* Clear Tx resources and restart transmitting again */
stmmac_tx_err(priv);
- return;
}
/* Configuration changes (passed on by ifconfig) */
@@ -1374,7 +1354,6 @@ static void stmmac_multicast_list(struct net_device *dev)
spin_lock(&priv->lock);
priv->hw->mac->set_filter(dev);
spin_unlock(&priv->lock);
- return;
}
/**
@@ -1490,8 +1469,6 @@ static void stmmac_vlan_rx_register(struct net_device *dev,
spin_lock(&priv->lock);
priv->vlgrp = grp;
spin_unlock(&priv->lock);
-
- return;
}
#endif
@@ -1587,6 +1564,12 @@ static int stmmac_mac_device_setup(struct net_device *dev)
else
device = dwmac100_setup(ioaddr);
+ if (priv->enh_desc) {
+ device->desc = &enh_desc_ops;
+ pr_info("\tEnhanced descriptor structure\n");
+ } else
+ device->desc = &ndesc_ops;
+
if (!device)
return -ENOMEM;
@@ -1727,6 +1710,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
priv->bus_id = plat_dat->bus_id;
priv->pbl = plat_dat->pbl; /* TLI */
priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
+ priv->enh_desc = plat_dat->enh_desc;
platform_set_drvdata(pdev, ndev);
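A hypothetical board-file fragment showing how a platform opts into the enhanced descriptors through the new field; stmmac_dvr_probe() copies it into priv->enh_desc as in the hunk above. Field names other than enh_desc follow the existing struct plat_stmmacenet_data; the values are placeholders.

#include <linux/stmmac.h>

static struct plat_stmmacenet_data my_board_stmmac_data = {
	.bus_id   = 0,
	.pbl      = 32,
	.has_gmac = 1,	/* GMAC core ...                 */
	.enh_desc = 1,	/* ... with enhanced descriptors */
};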
diff --git a/drivers/net/stmmac/stmmac_timer.c b/drivers/net/stmmac/stmmac_timer.c
index 679f61f..2a0e1ab 100644
--- a/drivers/net/stmmac/stmmac_timer.c
+++ b/drivers/net/stmmac/stmmac_timer.c
@@ -31,8 +31,6 @@ static void stmmac_timer_handler(void *data)
struct net_device *dev = (struct net_device *)data;
stmmac_schedule(dev);
-
- return;
}
#define STMMAC_TIMER_MSG(timer, freq) \
@@ -47,13 +45,11 @@ static void stmmac_rtc_start(unsigned int new_freq)
{
rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
- return;
}
static void stmmac_rtc_stop(void)
{
rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
- return;
}
int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
@@ -102,13 +98,11 @@ static void stmmac_tmu_start(unsigned int new_freq)
{
clk_set_rate(timer_clock, new_freq);
clk_enable(timer_clock);
- return;
}
static void stmmac_tmu_stop(void)
{
clk_disable(timer_clock);
- return;
}
int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
diff --git a/drivers/net/stnic.c b/drivers/net/stnic.c
index 87a6b8e..d85f0a8 100644
--- a/drivers/net/stnic.c
+++ b/drivers/net/stnic.c
@@ -280,7 +280,6 @@ stnic_init (struct net_device *dev)
{
stnic_reset (dev);
NS8390_init (dev, 0);
- return;
}
static void __exit stnic_cleanup(void)
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index 8b28c89..1513123 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -412,7 +412,7 @@ static int init586(struct net_device *dev)
volatile struct iasetup_cmd_struct *ias_cmd;
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int num_addrs=netdev_mc_count(dev);
ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
@@ -536,9 +536,9 @@ static int init586(struct net_device *dev)
mc_cmd->mc_cnt = swab16(num_addrs * 6);
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
+ netdev_for_each_mc_addr(ha, dev)
memcpy((char *) mc_cmd->mc_list[i++],
- dmi->dmi_addr, ETH_ALEN);
+ ha->addr, ETH_ALEN);
p->scb->cbl_offset = make16(mc_cmd);
p->scb->cmd_cuc = CUC_START;
@@ -985,7 +985,7 @@ static void sun3_82586_timeout(struct net_device *dev)
p->scb->cmd_cuc = CUC_START;
sun3_attn586();
WAIT_4_SCB_CMD();
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
return 0;
}
#endif
@@ -998,7 +998,7 @@ static void sun3_82586_timeout(struct net_device *dev)
sun3_82586_close(dev);
sun3_82586_open(dev);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
/******************************************************
@@ -1062,7 +1062,6 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
}
sun3_attn586();
- dev->trans_start = jiffies;
if(!i)
dev_kfree_skb(skb);
WAIT_4_SCB_CMD();
@@ -1082,7 +1081,6 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
- dev->trans_start = jiffies;
p->nop_point = next_nop;
dev_kfree_skb(skb);
# endif
@@ -1097,7 +1095,6 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
- dev->trans_start = jiffies;
p->xmit_count = next_nop;
{
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 1694ca5..358c22f 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -523,8 +523,8 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
/* Transmitter timeout, serious problems. */
if (netif_queue_stopped(dev)) {
- int tickssofar = jiffies - dev->trans_start;
- if (tickssofar < 20)
+ int tickssofar = jiffies - dev_trans_start(dev);
+ if (tickssofar < HZ/5)
return NETDEV_TX_BUSY;
DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
@@ -559,7 +559,6 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
netif_start_queue(dev);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -637,8 +636,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
AREG = CSR0;
DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n",
dev->name, DREG ));
- dev->trans_start = jiffies;
- dev_kfree_skb( skb );
+ dev_kfree_skb(skb);
lp->lock = 0;
if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index ed7865a0..4591fe9 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -362,7 +362,7 @@ static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs,
default:
printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
return;
- };
+ }
idle_transceiver(tregs);
write_tcvr_bit(bp, tregs, 0);
@@ -401,7 +401,7 @@ static unsigned short bigmac_tcvr_read(struct bigmac *bp,
default:
printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
return 0xffff;
- };
+ }
idle_transceiver(tregs);
write_tcvr_bit(bp, tregs, 0);
@@ -982,8 +982,6 @@ static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -999,7 +997,7 @@ static void bigmac_set_multicast(struct net_device *dev)
{
struct bigmac *bp = netdev_priv(dev);
void __iomem *bregs = bp->bregs;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
int i;
u32 tmp, crc;
@@ -1028,8 +1026,8 @@ static void bigmac_set_multicast(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if (!(*addrs & 1))
continue;
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 8249a39..2678588 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -788,7 +788,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
mdio_delay();
}
- return;
}
static int mdio_wait_link(struct net_device *dev, int wait)
@@ -972,7 +971,7 @@ static void tx_timeout(struct net_device *dev)
dev->if_port = 0;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
netif_wake_queue(dev);
@@ -1022,7 +1021,6 @@ static void init_ring(struct net_device *dev)
np->tx_skbuff[i] = NULL;
np->tx_ring[i].status = 0;
}
- return;
}
static void tx_poll (unsigned long data)
@@ -1049,7 +1047,6 @@ static void tx_poll (unsigned long data)
if (ioread32 (np->base + TxListPtr) == 0)
iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
np->base + TxListPtr);
- return;
}
static netdev_tx_t
@@ -1084,7 +1081,6 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
} else {
netif_stop_queue (dev);
}
- dev->trans_start = jiffies;
if (netif_msg_tx_queued(np)) {
printk (KERN_DEBUG
"%s: Transmit frame #%d queued in slot %d.\n",
@@ -1379,7 +1375,6 @@ not_done:
if (np->budget <= 0)
np->budget = RX_BUDGET;
tasklet_schedule(&np->rx_tasklet);
- return;
}
static void refill_rx (struct net_device *dev)
@@ -1410,7 +1405,6 @@ static void refill_rx (struct net_device *dev)
np->rx_ring[entry].status = 0;
cnt++;
}
- return;
}
static void netdev_error(struct net_device *dev, int intr_status)
{
@@ -1522,13 +1516,13 @@ static void set_rx_mode(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int bit;
int index;
int crc;
memset (mc_filter, 0, sizeof (mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
if (crc & 0x80000000) index |= 1 << bit;
mc_filter[index/16] |= (1 << (index % 16));
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index e6880f1..434f9d7 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1136,7 +1136,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
writel(gp->tx_new, gp->regs + TXDMA_KICK);
spin_unlock_irqrestore(&gp->tx_lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
return NETDEV_TX_OK;
}
@@ -1846,12 +1846,12 @@ static u32 gem_setup_multicast(struct gem *gp)
} else {
u16 hash_table[16];
u32 crc;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, gp->dev) {
- char *addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, gp->dev) {
+ char *addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -2923,7 +2923,6 @@ static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
dev_addr[1] = 0x00;
dev_addr[2] = 0x20;
get_random_bytes(dev_addr + 3, 3);
- return;
}
#endif /* not Sparc and not PPC */
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index b17dbb1..915c590 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -855,7 +855,7 @@ static void happy_meal_timer(unsigned long data)
hp->timer_ticks = 0;
hp->timer_state = asleep; /* foo on you */
break;
- };
+ }
if (restart_timer) {
hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
@@ -1488,7 +1488,7 @@ static int happy_meal_init(struct happy_meal *hp)
HMD(("external, disable MII, "));
hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
break;
- };
+ }
if (happy_meal_tcvr_reset(hp, tregs))
return -EAGAIN;
@@ -1523,13 +1523,13 @@ static int happy_meal_init(struct happy_meal *hp)
hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
} else if ((hp->dev->flags & IFF_PROMISC) == 0) {
u16 hash_table[4];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, hp->dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, hp->dev) {
+ addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -1734,7 +1734,7 @@ static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
case external:
hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
break;
- };
+ }
if (happy_meal_tcvr_reset(hp, tregs))
return;
@@ -2341,8 +2341,6 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
spin_unlock_irq(&hp->happy_lock);
- dev->trans_start = jiffies;
-
tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
return NETDEV_TX_OK;
}
@@ -2362,7 +2360,7 @@ static void happy_meal_set_multicast(struct net_device *dev)
{
struct happy_meal *hp = netdev_priv(dev);
void __iomem *bregs = hp->bigmacregs;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
@@ -2380,8 +2378,8 @@ static void happy_meal_set_multicast(struct net_device *dev)
u16 hash_table[4];
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -2945,7 +2943,6 @@ static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
dev_addr[1] = 0x00;
dev_addr[2] = 0x20;
get_random_bytes(&dev_addr[3], 3);
- return;
}
#endif /* !(CONFIG_SPARC) */
@@ -3004,7 +3001,6 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
dev->base_addr = (long) pdev;
hp = netdev_priv(dev);
- memset(hp, 0, sizeof(*hp));
hp->happy_dev = pdev;
hp->dma_dev = &pdev->dev;
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 0c21653..386af7b 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1003,7 +1003,7 @@ static int lance_reset(struct net_device *dev)
}
lp->init_ring(dev);
load_csrs(lp);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
status = init_restart_lance(lp);
return status;
}
@@ -1054,7 +1054,7 @@ static void lance_piocopy_from_skb(void __iomem *dest, unsigned char *src, int l
}
src = (char *) p16;
break;
- };
+ }
if (len >= 2) {
u16 val = src[0] << 8 | src[1];
sbus_writew(val, piobuf);
@@ -1160,7 +1160,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irq(&lp->lock);
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -1170,7 +1169,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
u32 val;
@@ -1195,8 +1194,8 @@ static void lance_load_multicast(struct net_device *dev)
return;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index be637dc..a7542d2 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -602,7 +602,6 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
qep->tx_new = NEXT_TX(entry);
/* Get it going. */
- dev->trans_start = jiffies;
sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
dev->stats.tx_packets++;
@@ -627,7 +626,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void qe_set_multicast(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u8 new_mconfig = qep->mconfig;
char *addrs;
int i;
@@ -651,8 +650,8 @@ static void qe_set_multicast(struct net_device *dev)
u8 *hbytes = (unsigned char *) &hash_table[0];
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if (!(*addrs & 1))
continue;
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index 6b1b7ce..d281a7b 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -717,7 +717,6 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
out_dropped_unlock:
@@ -763,12 +762,12 @@ static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
- struct dev_addr_list *p;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(p, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
struct vnet_mcast_entry *m;
- m = __vnet_mc_find(vp, p->dmi_addr);
+ m = __vnet_mc_find(vp, ha->addr);
if (m) {
m->hit = 1;
continue;
@@ -778,7 +777,7 @@ static void __update_mc_list(struct vnet *vp, struct net_device *dev)
m = kzalloc(sizeof(*m), GFP_ATOMIC);
if (!m)
continue;
- memcpy(m->addr, p->dmi_addr, ETH_ALEN);
+ memcpy(m->addr, ha->addr, ETH_ALEN);
m->hit = 1;
m->next = vp->mcast_list;
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 49bd84c..be08b75 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1357,8 +1357,6 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
}
lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;
- dev->trans_start = jiffies;
-
/* If we just used up the very last entry in the
* TX ring on this device, tell the queueing
* layer to send no more.
@@ -1954,16 +1952,16 @@ tc35815_set_multicast_list(struct net_device *dev)
/* Disable promiscuous mode, use normal mode. */
tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *cur_addr;
+ struct netdev_hw_addr *ha;
int i;
int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
tc_writel(0, &tr->CAM_Ctl);
/* Walk the address list, and load the filter */
i = 0;
- netdev_for_each_mc_addr(cur_addr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* entry 0,1 is reserved. */
- tc35815_set_cam_entry(dev, i + 2, cur_addr->dmi_addr);
+ tc35815_set_cam_entry(dev, i + 2, ha->addr);
ena_bits |= CAM_Ena_Bit(i + 2);
i++;
}
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index f549309..20ab161 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -808,7 +808,7 @@ static void bdx_setmulti(struct net_device *ndev)
WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
} else if (!netdev_mc_empty(ndev)) {
u8 hash;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u32 reg, val;
/* set IMF to deny all multicast frames */
@@ -825,10 +825,10 @@ static void bdx_setmulti(struct net_device *ndev)
* into RX_MAC_MCST regs. we skip this phase now and accept ALL
* multicast frames throu IMF */
/* accept the rest of addresses throu IMF */
- netdev_for_each_mc_addr(mclist, ndev) {
+ netdev_for_each_mc_addr(ha, ndev) {
hash = 0;
for (i = 0; i < ETH_ALEN; i++)
- hash ^= mclist->dmi_addr[i];
+ hash ^= ha->addr[i];
reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
val = READ_REG(priv, reg);
val |= (1 << (hash % 32));
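A hedged, standalone restatement of the IMF hashing above: the six address bytes are XORed into a single byte, whose upper three bits select one of eight 32-bit hash registers and whose lower five bits select the bit inside it. The helper name is illustrative; regRX_MCST_HASH0 and the register stride come from the driver code above.

#include <stddef.h>

static void imf_hash_position(const unsigned char addr[6],
			      unsigned int *reg_index, unsigned int *bit)
{
	unsigned char hash = 0;
	size_t i;

	for (i = 0; i < 6; i++)		/* ETH_ALEN bytes */
		hash ^= addr[i];
	*reg_index = hash >> 5;		/* 0..7: offset (hash >> 5) << 2 from regRX_MCST_HASH0 */
	*bit = hash & 0x1f;		/* 0..31: same as hash % 32 */
}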
@@ -1303,7 +1303,6 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
priv->net_stats.rx_bytes += len;
skb_put(skb, len);
- skb->dev = priv->ndev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->protocol = eth_type_trans(skb, priv->ndev);
@@ -1509,7 +1508,7 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
int nr_frags = skb_shinfo(skb)->nr_frags;
int i;
- db->wptr->len = skb->len - skb->data_len;
+ db->wptr->len = skb_headlen(skb);
db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
db->wptr->len, PCI_DMA_TODEVICE);
pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
@@ -2034,7 +2033,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/************** priv ****************/
priv = nic->priv[port] = netdev_priv(ndev);
- memset(priv, 0, sizeof(struct bdx_priv));
priv->pBdxRegs = nic->regs + port * 0x8000;
priv->port = port;
priv->pdev = pdev;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ecc41cf..573054a 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
#include "tg3.h"
#define DRV_MODULE_NAME "tg3"
-#define DRV_MODULE_VERSION "3.108"
-#define DRV_MODULE_RELDATE "February 17, 2010"
+#define DRV_MODULE_VERSION "3.110"
+#define DRV_MODULE_RELDATE "April 9, 2010"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -101,7 +101,7 @@
#define TG3_DEF_RX_RING_PENDING 200
#define TG3_RX_JUMBO_RING_SIZE 256
#define TG3_DEF_RX_JUMBO_RING_PENDING 100
-#define TG3_RSS_INDIR_TBL_SIZE 128
+#define TG3_RSS_INDIR_TBL_SIZE 128
/* Do not place this n-ring entries value into the tp struct itself,
* we really want to expose these constants to GCC so that modulo et
@@ -126,6 +126,9 @@
TG3_TX_RING_SIZE)
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
+#define TG3_RX_DMA_ALIGN 16
+#define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
+
#define TG3_DMA_BYTE_ENAB 64
#define TG3_RX_STD_DMA_SZ 1536
@@ -142,6 +145,26 @@
#define TG3_RX_JMB_BUFF_RING_SIZE \
(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
+#define TG3_RSS_MIN_NUM_MSIX_VECS 2
+
+/* Due to a hardware bug, the 5701 can only DMA to memory addresses
+ * that are at least dword aligned when used in PCIX mode. The driver
+ * works around this bug by double copying the packet. This workaround
+ * is built into the normal double copy length check for efficiency.
+ *
+ * However, the double copy is only necessary on those architectures
+ * where unaligned memory accesses are inefficient. For those architectures
+ * where unaligned memory accesses incur little penalty, we can reintegrate
+ * the 5701 in the normal rx path. Doing so saves a device structure
+ * dereference by hardcoding the double copy threshold in place.
+ */
+#define TG3_RX_COPY_THRESHOLD 256
+#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
+#else
+ #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
+#endif
+
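A hedged, standalone restatement of the policy the new macro encodes (function and parameter names are illustrative): when NET_IP_ALIGN is zero or unaligned access is cheap, the copy threshold is the fixed 256-byte constant; otherwise it falls back to the per-device rx_copy_thresh field, preserving the 5701/PCI-X double-copy workaround described in the comment above.

#include <stdbool.h>

static bool rx_should_copy(unsigned int len, bool cheap_unaligned_access,
			   unsigned int dev_rx_copy_thresh)
{
	unsigned int thresh = cheap_unaligned_access ? 256 : dev_rx_copy_thresh;

	/* frames at or below the threshold are copied into a small skb,
	 * longer ones hand the original DMA buffer up the stack */
	return len <= thresh;
}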
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
@@ -152,6 +175,8 @@
#define TG3_NUM_TEST 6
+#define TG3_FW_UPDATE_TIMEOUT_SEC 5
+
#define FIRMWARE_TG3 "tigon/tg3.bin"
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
@@ -167,8 +192,6 @@ MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
-#define TG3_RSS_MIN_NUM_MSIX_VECS 2
-
static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
@@ -360,7 +383,7 @@ static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
- return (readl(tp->regs + off));
+ return readl(tp->regs + off);
}
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
@@ -370,7 +393,7 @@ static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
- return (readl(tp->aperegs + off));
+ return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
@@ -488,7 +511,7 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
- return (readl(tp->regs + off + GRCMBOX_BASE));
+ return readl(tp->regs + off + GRCMBOX_BASE);
}
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
@@ -496,16 +519,16 @@ static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
writel(val, tp->regs + off + GRCMBOX_BASE);
}
-#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
+#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
-#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
-#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
-#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
+#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
+#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
+#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
-#define tw32(reg,val) tp->write32(tp, reg, val)
-#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
-#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
-#define tr32(reg) tp->read32(tp, reg)
+#define tw32(reg, val) tp->write32(tp, reg, val)
+#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
+#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
+#define tr32(reg) tp->read32(tp, reg)
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
@@ -579,11 +602,11 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
return 0;
switch (locknum) {
- case TG3_APE_LOCK_GRC:
- case TG3_APE_LOCK_MEM:
- break;
- default:
- return -EINVAL;
+ case TG3_APE_LOCK_GRC:
+ case TG3_APE_LOCK_MEM:
+ break;
+ default:
+ return -EINVAL;
}
off = 4 * locknum;
@@ -617,11 +640,11 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
return;
switch (locknum) {
- case TG3_APE_LOCK_GRC:
- case TG3_APE_LOCK_MEM:
- break;
- default:
- return;
+ case TG3_APE_LOCK_GRC:
+ case TG3_APE_LOCK_MEM:
+ break;
+ default:
+ return;
}
off = 4 * locknum;
@@ -651,6 +674,7 @@ static void tg3_enable_ints(struct tg3 *tp)
tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
+
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
@@ -1098,7 +1122,7 @@ static int tg3_mdio_init(struct tg3 *tp)
i = mdiobus_register(tp->mdio_bus);
if (i) {
- netdev_warn(tp->dev, "mdiobus_reg failed (0x%x)\n", i);
+ dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
mdiobus_free(tp->mdio_bus);
return i;
}
@@ -1106,7 +1130,7 @@ static int tg3_mdio_init(struct tg3 *tp)
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
if (!phydev || !phydev->drv) {
- netdev_warn(tp->dev, "No PHY devices\n");
+ dev_warn(&tp->pdev->dev, "No PHY devices\n");
mdiobus_unregister(tp->mdio_bus);
mdiobus_free(tp->mdio_bus);
return -ENODEV;
@@ -1437,7 +1461,7 @@ static void tg3_adjust_link(struct net_device *dev)
phydev->speed != tp->link_config.active_speed ||
phydev->duplex != tp->link_config.active_duplex ||
oldflowctrl != tp->link_config.active_flowctrl)
- linkmesg = 1;
+ linkmesg = 1;
tp->link_config.active_speed = phydev->speed;
tp->link_config.active_duplex = phydev->duplex;
@@ -1464,7 +1488,7 @@ static int tg3_phy_init(struct tg3 *tp)
phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
phydev->dev_flags, phydev->interface);
if (IS_ERR(phydev)) {
- netdev_err(tp->dev, "Could not attach to PHY\n");
+ dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
}
@@ -1855,8 +1879,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
/* Set Extended packet length bit for jumbo frames */
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
- }
- else {
+ } else {
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
}
@@ -1974,8 +1997,7 @@ out:
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
- }
- else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
+ } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
@@ -2007,8 +2029,8 @@ out:
u32 phy_reg;
if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
- tg3_writephy(tp, MII_TG3_EXT_CTRL,
- phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
@@ -3425,7 +3447,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
ap->rxconfig = rx_cfg_reg;
ret = ANEG_OK;
- switch(ap->state) {
+ switch (ap->state) {
case ANEG_STATE_UNKNOWN:
if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
ap->state = ANEG_STATE_AN_ENABLE;
@@ -3463,11 +3485,10 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
/* fallthru */
case ANEG_STATE_RESTART:
delta = ap->cur_time - ap->link_time;
- if (delta > ANEG_STATE_SETTLE_TIME) {
+ if (delta > ANEG_STATE_SETTLE_TIME)
ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
- } else {
+ else
ret = ANEG_TIMER_ENAB;
- }
break;
case ANEG_STATE_DISABLE_LINK_OK:
@@ -3491,9 +3512,8 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
break;
case ANEG_STATE_ABILITY_DETECT:
- if (ap->ability_match != 0 && ap->rxconfig != 0) {
+ if (ap->ability_match != 0 && ap->rxconfig != 0)
ap->state = ANEG_STATE_ACK_DETECT_INIT;
- }
break;
case ANEG_STATE_ACK_DETECT_INIT:
@@ -4171,9 +4191,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
current_duplex = DUPLEX_FULL;
else
current_duplex = DUPLEX_HALF;
- }
- else
+ } else {
current_link_up = 0;
+ }
}
}
@@ -4211,6 +4231,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
tp->serdes_counter--;
return;
}
+
if (!netif_carrier_ok(tp->dev) &&
(tp->link_config.autoneg == AUTONEG_ENABLE)) {
u32 bmcr;
@@ -4240,10 +4261,9 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
}
}
- }
- else if (netif_carrier_ok(tp->dev) &&
- (tp->link_config.autoneg == AUTONEG_ENABLE) &&
- (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
+ } else if (netif_carrier_ok(tp->dev) &&
+ (tp->link_config.autoneg == AUTONEG_ENABLE) &&
+ (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
u32 phy2;
/* Select expansion interrupt status register */
@@ -4266,13 +4286,12 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
int err;
- if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
err = tg3_setup_fiber_phy(tp, force_reset);
- } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
+ else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
err = tg3_setup_fiber_mii_phy(tp, force_reset);
- } else {
+ else
err = tg3_setup_copper_phy(tp, force_reset);
- }
if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
u32 val, scale;
@@ -4335,8 +4354,11 @@ static void tg3_tx_recover(struct tg3 *tp)
BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
tp->write32_tx_mbox == tg3_write_indirect_mbox);
- netdev_warn(tp->dev, "The system may be re-ordering memory-mapped I/O cycles to the network device, attempting to recover\n"
- "Please report the problem to the driver maintainer and include system chipset information.\n");
+ netdev_warn(tp->dev,
+ "The system may be re-ordering memory-mapped I/O "
+ "cycles to the network device, attempting to recover. "
+ "Please report the problem to the driver maintainer "
+ "and include system chipset information.\n");
spin_lock(&tp->lock);
tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
@@ -4378,7 +4400,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
}
pci_unmap_single(tp->pdev,
- pci_unmap_addr(ri, mapping),
+ dma_unmap_addr(ri, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
@@ -4392,7 +4414,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
tx_bug = 1;
pci_unmap_page(tp->pdev,
- pci_unmap_addr(ri, mapping),
+ dma_unmap_addr(ri, mapping),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
sw_idx = NEXT_TX(sw_idx);
@@ -4430,7 +4452,7 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
if (!ri->skb)
return;
- pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
+ pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
map_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(ri->skb);
ri->skb = NULL;
@@ -4496,7 +4518,7 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
}
map->skb = skb;
- pci_unmap_addr_set(map, mapping, mapping);
+ dma_unmap_addr_set(map, mapping, mapping);
desc->addr_hi = ((u64)mapping >> 32);
desc->addr_lo = ((u64)mapping & 0xffffffff);
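A hedged note on the pci_unmap_addr -> dma_unmap_addr rename used from this hunk onward: dma_unmap_addr(), dma_unmap_addr_set() and DEFINE_DMA_UNMAP_ADDR() are the generic DMA-API spellings of the same helpers and compile away when the architecture does not need the stored address. A minimal kernel-style sketch, with example_ring_info standing in for the driver's per-buffer bookkeeping struct:

struct example_ring_info {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};

static void example_store_and_unmap(struct pci_dev *pdev,
				    struct example_ring_info *ri,
				    dma_addr_t handle, size_t len)
{
	dma_unmap_addr_set(ri, mapping, handle);	/* remember the DMA address */
	/* ... later, when the buffer is retired ... */
	pci_unmap_single(pdev, dma_unmap_addr(ri, mapping), len,
			 PCI_DMA_FROMDEVICE);
}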
@@ -4516,8 +4538,8 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
struct tg3 *tp = tnapi->tp;
struct tg3_rx_buffer_desc *src_desc, *dest_desc;
struct ring_info *src_map, *dest_map;
- int dest_idx;
struct tg3_rx_prodring_set *spr = &tp->prodring[0];
+ int dest_idx;
switch (opaque_key) {
case RXD_OPAQUE_RING_STD:
@@ -4541,8 +4563,8 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
}
dest_map->skb = src_map->skb;
- pci_unmap_addr_set(dest_map, mapping,
- pci_unmap_addr(src_map, mapping));
+ dma_unmap_addr_set(dest_map, mapping,
+ dma_unmap_addr(src_map, mapping));
dest_desc->addr_hi = src_desc->addr_hi;
dest_desc->addr_lo = src_desc->addr_lo;
@@ -4605,18 +4627,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
struct sk_buff *skb;
dma_addr_t dma_addr;
u32 opaque_key, desc_idx, *post_ptr;
+ bool hw_vlan __maybe_unused = false;
+ u16 vtag __maybe_unused = 0;
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
if (opaque_key == RXD_OPAQUE_RING_STD) {
ri = &tp->prodring[0].rx_std_buffers[desc_idx];
- dma_addr = pci_unmap_addr(ri, mapping);
+ dma_addr = dma_unmap_addr(ri, mapping);
skb = ri->skb;
post_ptr = &std_prod_idx;
rx_std_posted++;
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
- dma_addr = pci_unmap_addr(ri, mapping);
+ dma_addr = dma_unmap_addr(ri, mapping);
skb = ri->skb;
post_ptr = &jmb_prod_idx;
} else
@@ -4638,12 +4662,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
ETH_FCS_LEN;
- if (len > RX_COPY_THRESHOLD &&
- tp->rx_offset == NET_IP_ALIGN) {
- /* rx_offset will likely not equal NET_IP_ALIGN
- * if this is a 5701 card running in PCI-X mode
- * [see tg3_get_invariants()]
- */
+ if (len > TG3_RX_COPY_THRESH(tp)) {
int skb_size;
skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
@@ -4668,12 +4687,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
- copy_skb = netdev_alloc_skb(tp->dev,
- len + TG3_RAW_IP_ALIGN);
+ copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
+ TG3_RAW_IP_ALIGN);
if (copy_skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
+ skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
skb_put(copy_skb, len);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4699,12 +4718,29 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
goto next_pkt;
}
+ if (desc->type_flags & RXD_FLAG_VLAN &&
+ !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
+ vtag = desc->err_vlan & RXD_VLAN_MASK;
#if TG3_VLAN_TAG_USED
- if (tp->vlgrp != NULL &&
- desc->type_flags & RXD_FLAG_VLAN) {
- vlan_gro_receive(&tnapi->napi, tp->vlgrp,
- desc->err_vlan & RXD_VLAN_MASK, skb);
- } else
+ if (tp->vlgrp)
+ hw_vlan = true;
+ else
+#endif
+ {
+ struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
+ __skb_push(skb, VLAN_HLEN);
+
+ memmove(ve, skb->data + VLAN_HLEN,
+ ETH_ALEN * 2);
+ ve->h_vlan_proto = htons(ETH_P_8021Q);
+ ve->h_vlan_TCI = htons(vtag);
+ }
+ }
+
+#if TG3_VLAN_TAG_USED
+ if (hw_vlan)
+ vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
+ else
#endif
napi_gro_receive(&tnapi->napi, skb);
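A hedged, standalone illustration of the software tag reinsertion the hunk above performs when no VLAN group is registered: the frame is grown by VLAN_HLEN (4) bytes at the front, the two MAC addresses slide into the new space, and the 802.1Q header (0x8100 plus the TCI) is written into the gap they leave, so the stack receives the tag inline.

#include <stdint.h>
#include <string.h>

static void reinsert_vlan_tag(uint8_t *frame, uint16_t vtag)
{
	/* 'frame' points 4 bytes before the stripped Ethernet frame,
	 * mirroring __skb_push(skb, VLAN_HLEN) in the driver. */
	memmove(frame, frame + 4, 12);	/* destination + source MAC */
	frame[12] = 0x81;		/* ETH_P_8021Q, network byte order */
	frame[13] = 0x00;
	frame[14] = vtag >> 8;		/* VLAN TCI, network byte order */
	frame[15] = vtag & 0xff;
}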
@@ -4978,7 +5014,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
if (unlikely(work_done >= budget))
break;
- /* tp->last_tag is used in tg3_restart_ints() below
+ /* tp->last_tag is used in tg3_int_reenable() below
* to tell the hw how much work has been processed,
* so we must read it before checking for more work.
*/
@@ -4987,8 +5023,8 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
rmb();
/* check for RX/TX work to do */
- if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
- *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
+ if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
+ *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
napi_complete(napi);
/* Reenable interrupts. */
tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
@@ -5260,7 +5296,8 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
err = tg3_init_hw(tp, reset_phy);
if (err) {
- netdev_err(tp->dev, "Failed to re-initialize device, aborting\n");
+ netdev_err(tp->dev,
+ "Failed to re-initialize device, aborting\n");
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_full_unlock(tp);
del_timer_sync(&tp->timer);
@@ -5437,12 +5474,12 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
len = skb_shinfo(skb)->frags[i-1].size;
pci_unmap_single(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry],
+ dma_unmap_addr(&tnapi->tx_buffers[entry],
mapping),
len, PCI_DMA_TODEVICE);
if (i == 0) {
tnapi->tx_buffers[entry].skb = new_skb;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
new_addr);
} else {
tnapi->tx_buffers[entry].skb = NULL;
@@ -5492,7 +5529,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
struct netdev_queue *txq;
unsigned int i, last;
-
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
tnapi = &tp->napi[skb_get_queue_mapping(skb)];
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
@@ -5508,7 +5544,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
- netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ netdev_err(dev,
+ "BUG! Tx Ring full when queue awake!\n");
}
return NETDEV_TX_BUSY;
}
@@ -5552,9 +5589,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
tcp_hdr(skb)->check = 0;
- }
- else if (skb->ip_summed == CHECKSUM_PARTIAL)
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
base_flags |= TXD_FLAG_TCPUDP_CSUM;
+ }
+
#if TG3_VLAN_TAG_USED
if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
base_flags |= (TXD_FLAG_VLAN |
@@ -5571,7 +5609,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
}
tnapi->tx_buffers[entry].skb = skb;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
!mss && skb->len > ETH_DATA_LEN)
@@ -5597,7 +5635,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
goto dma_error;
tnapi->tx_buffers[entry].skb = NULL;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
mapping);
tg3_set_txd(tnapi, entry, mapping, len,
@@ -5627,7 +5665,7 @@ dma_error:
entry = tnapi->tx_prod;
tnapi->tx_buffers[entry].skb = NULL;
pci_unmap_single(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+ dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
for (i = 0; i <= last; i++) {
@@ -5635,7 +5673,7 @@ dma_error:
entry = NEXT_TX(entry);
pci_unmap_page(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry],
+ dma_unmap_addr(&tnapi->tx_buffers[entry],
mapping),
frag->size, PCI_DMA_TODEVICE);
}
@@ -5695,7 +5733,6 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
struct netdev_queue *txq;
unsigned int i, last;
-
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
tnapi = &tp->napi[skb_get_queue_mapping(skb)];
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
@@ -5711,7 +5748,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
- netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ netdev_err(dev,
+ "BUG! Tx Ring full when queue awake!\n");
}
return NETDEV_TX_BUSY;
}
@@ -5737,7 +5775,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
hdr_len = ip_tcp_len + tcp_opt_len;
if (unlikely((ETH_HLEN + hdr_len) > 80) &&
(tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
- return (tg3_tso_bug(tp, skb));
+ return tg3_tso_bug(tp, skb);
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
TXD_FLAG_CPU_POST_DMA);
@@ -5797,7 +5835,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
}
tnapi->tx_buffers[entry].skb = skb;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
would_hit_hwbug = 0;
@@ -5833,7 +5871,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
len, PCI_DMA_TODEVICE);
tnapi->tx_buffers[entry].skb = NULL;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
mapping);
if (pci_dma_mapping_error(tp->pdev, mapping))
goto dma_error;
@@ -5898,7 +5936,7 @@ dma_error:
entry = tnapi->tx_prod;
tnapi->tx_buffers[entry].skb = NULL;
pci_unmap_single(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+ dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
for (i = 0; i <= last; i++) {
@@ -5906,7 +5944,7 @@ dma_error:
entry = NEXT_TX(entry);
pci_unmap_page(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry],
+ dma_unmap_addr(&tnapi->tx_buffers[entry],
mapping),
frag->size, PCI_DMA_TODEVICE);
}
@@ -5924,9 +5962,9 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
ethtool_op_set_tso(dev, 0);
- }
- else
+ } else {
tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
+ }
} else {
if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
@@ -6007,7 +6045,7 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
}
}
-/* Initialize tx/rx rings for packet processing.
+/* Initialize rx rings for packet processing.
*
* The chip has been shut down and the driver detached from
* the networking, so no interrupts or new tx packets will
@@ -6058,8 +6096,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
- netdev_warn(tp->dev, "Using a smaller RX standard ring, only %d out of %d buffers were allocated successfully\n",
- i, tp->rx_pending);
+ netdev_warn(tp->dev,
+ "Using a smaller RX standard ring. Only "
+ "%d out of %d buffers were allocated "
+ "successfully\n", i, tp->rx_pending);
if (i == 0)
goto initfail;
tp->rx_pending = i;
@@ -6088,8 +6128,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
for (i = 0; i < tp->rx_jumbo_pending; i++) {
if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
- netdev_warn(tp->dev, "Using a smaller RX jumbo ring, only %d out of %d buffers were allocated successfully\n",
- i, tp->rx_jumbo_pending);
+ netdev_warn(tp->dev,
+ "Using a smaller RX jumbo ring. Only %d "
+ "out of %d buffers were allocated "
+ "successfully\n", i, tp->rx_jumbo_pending);
if (i == 0)
goto initfail;
tp->rx_jumbo_pending = i;
@@ -6187,7 +6229,7 @@ static void tg3_free_rings(struct tg3 *tp)
}
pci_unmap_single(tp->pdev,
- pci_unmap_addr(txp, mapping),
+ dma_unmap_addr(txp, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
txp->skb = NULL;
@@ -6197,7 +6239,7 @@ static void tg3_free_rings(struct tg3 *tp)
for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
pci_unmap_page(tp->pdev,
- pci_unmap_addr(txp, mapping),
+ dma_unmap_addr(txp, mapping),
skb_shinfo(skb)->frags[k].size,
PCI_DMA_TODEVICE);
i++;
@@ -6433,8 +6475,9 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
}
if (i == MAX_WAIT_CNT && !silent) {
- pr_err("tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
- ofs, enable_bit);
+ dev_err(&tp->pdev->dev,
+ "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
+ ofs, enable_bit);
return -ENODEV;
}
@@ -6480,8 +6523,9 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
break;
}
if (i >= MAX_WAIT_CNT) {
- netdev_err(tp->dev, "%s timed out, TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
- __func__, tr32(MAC_TX_MODE));
+ dev_err(&tp->pdev->dev,
+ "%s timed out, TX_MODE_ENABLE will not clear "
+ "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
err |= -ENODEV;
}
@@ -6551,35 +6595,35 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
return;
switch (kind) {
- case RESET_KIND_INIT:
- tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
- APE_HOST_SEG_SIG_MAGIC);
- tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
- APE_HOST_SEG_LEN_MAGIC);
- apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
- tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
- tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
- APE_HOST_DRIVER_ID_MAGIC);
- tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
- APE_HOST_BEHAV_NO_PHYLOCK);
-
- event = APE_EVENT_STATUS_STATE_START;
- break;
- case RESET_KIND_SHUTDOWN:
- /* With the interface we are currently using,
- * APE does not track driver state. Wiping
- * out the HOST SEGMENT SIGNATURE forces
- * the APE to assume OS absent status.
- */
- tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
+ case RESET_KIND_INIT:
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
+ APE_HOST_SEG_SIG_MAGIC);
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
+ APE_HOST_SEG_LEN_MAGIC);
+ apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
+ tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
+ tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
+ APE_HOST_DRIVER_ID_MAGIC);
+ tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
+ APE_HOST_BEHAV_NO_PHYLOCK);
+
+ event = APE_EVENT_STATUS_STATE_START;
+ break;
+ case RESET_KIND_SHUTDOWN:
+ /* With the interface we are currently using,
+ * APE does not track driver state. Wiping
+ * out the HOST SEGMENT SIGNATURE forces
+ * the APE to assume OS absent status.
+ */
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
- event = APE_EVENT_STATUS_STATE_UNLOAD;
- break;
- case RESET_KIND_SUSPEND:
- event = APE_EVENT_STATUS_STATE_SUSPEND;
- break;
- default:
- return;
+ event = APE_EVENT_STATUS_STATE_UNLOAD;
+ break;
+ case RESET_KIND_SUSPEND:
+ event = APE_EVENT_STATUS_STATE_SUSPEND;
+ break;
+ default:
+ return;
}
event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
@@ -7156,7 +7200,8 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
if (cpu_base == TX_CPU_BASE &&
(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
- netdev_err(tp->dev, "%s: Trying to load TX cpu firmware which is 5705\n",
+ netdev_err(tp->dev,
+ "%s: Trying to load TX cpu firmware which is 5705\n",
__func__);
return -EINVAL;
}
@@ -7236,7 +7281,8 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
udelay(1000);
}
if (i >= 5) {
- netdev_err(tp->dev, "tg3_load_firmware fails to set RX CPU PC, is %08x should be %08x\n",
+ netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
+ "should be %08x\n", __func__,
tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
return -ENODEV;
}
@@ -7300,7 +7346,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
udelay(1000);
}
if (i >= 5) {
- netdev_err(tp->dev, "%s fails to set CPU PC, is %08x should be %08x\n",
+ netdev_err(tp->dev,
+ "%s fails to set CPU PC, is %08x should be %08x\n",
__func__, tr32(cpu_base + CPU_PC), info.fw_base);
return -ENODEV;
}
@@ -7568,9 +7615,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
- if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
+ if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
tg3_abort_hw(tp, 1);
- }
if (reset_phy)
tg3_phy_reset(tp);
@@ -7631,6 +7677,25 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_MODE, grc_mode);
}
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ u32 grc_mode = tr32(GRC_MODE);
+
+ /* Access the lower 1K of PL PCIE block registers. */
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+ val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
+ val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
+
+ tw32(GRC_MODE, grc_mode);
+
+ val = tr32(TG3_CPMU_LSPD_10MB_CLK);
+ val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
+ val |= CPMU_LSPD_10MB_MACCLK_6_25;
+ tw32(TG3_CPMU_LSPD_10MB_CLK, val);
+ }
+
/* This works around an issue with Athlon chipsets on
* B3 tigon3 silicon. This bit has no effect on any
* other revision. But do not set this on PCI Express
@@ -7679,6 +7744,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
val = tr32(TG3PCI_DMA_RW_CTRL) &
~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
+ val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
@@ -7723,8 +7790,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
- }
- else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
+ } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
int fw_len;
fw_len = tp->fw_len;
@@ -7839,9 +7905,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
- (RX_STD_MAX_SIZE << 2);
+ (TG3_RX_STD_DMA_SZ << 2);
else
- val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
+ val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
} else
val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
@@ -8476,8 +8542,8 @@ static void tg3_timer(unsigned long __opaque)
tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
FWCMD_NICDRV_ALIVE3);
tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
- /* 5 seconds timeout */
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
+ TG3_FW_UPDATE_TIMEOUT_SEC);
tg3_generate_fw_event(tp);
}
@@ -8625,8 +8691,9 @@ static int tg3_test_msi(struct tg3 *tp)
return err;
/* MSI test failed, go back to INTx mode */
- netdev_warn(tp->dev, "No interrupt was generated using MSI, switching to INTx mode\n"
- "Please report this failure to the PCI maintainer and include system chipset information\n");
+ netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
+ "to INTx mode. Please report this failure to the PCI "
+ "maintainer and include system chipset information\n");
free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
@@ -8739,7 +8806,8 @@ static void tg3_ints_init(struct tg3 *tp)
/* All MSI supporting chips should support tagged
* status. Assert that this is the case.
*/
- netdev_warn(tp->dev, "MSI without TAGGED? Not using MSI\n");
+ netdev_warn(tp->dev,
+ "MSI without TAGGED_STATUS? Not using MSI\n");
goto defcfg;
}
@@ -8914,236 +8982,6 @@ err_out1:
return err;
}
-#if 0
-/*static*/ void tg3_dump_state(struct tg3 *tp)
-{
- u32 val32, val32_2, val32_3, val32_4, val32_5;
- u16 val16;
- int i;
- struct tg3_hw_status *sblk = tp->napi[0]->hw_status;
-
- pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
- pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
- printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
- val16, val32);
-
- /* MAC block */
- printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
- tr32(MAC_MODE), tr32(MAC_STATUS));
- printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
- tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
- printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
- tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
- printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
- tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
-
- /* Send data initiator control block */
- printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
- tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
- printk(" SNDDATAI_STATSCTRL[%08x]\n",
- tr32(SNDDATAI_STATSCTRL));
-
- /* Send data completion control block */
- printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
-
- /* Send BD ring selector block */
- printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
- tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
-
- /* Send BD initiator control block */
- printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
- tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
-
- /* Send BD completion control block */
- printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
-
- /* Receive list placement control block */
- printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
- tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
- printk(" RCVLPC_STATSCTRL[%08x]\n",
- tr32(RCVLPC_STATSCTRL));
-
- /* Receive data and receive BD initiator control block */
- printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
- tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
-
- /* Receive data completion control block */
- printk("DEBUG: RCVDCC_MODE[%08x]\n",
- tr32(RCVDCC_MODE));
-
- /* Receive BD initiator control block */
- printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
- tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
-
- /* Receive BD completion control block */
- printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
- tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
-
- /* Receive list selector control block */
- printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
- tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
-
- /* Mbuf cluster free block */
- printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
- tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
-
- /* Host coalescing control block */
- printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
- tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
- printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
- tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
- tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
- printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
- tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
- tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
- printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
- tr32(HOSTCC_STATS_BLK_NIC_ADDR));
- printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
- tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
-
- /* Memory arbiter control block */
- printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
- tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
-
- /* Buffer manager control block */
- printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
- tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
- printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
- tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
- printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
- "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
- tr32(BUFMGR_DMA_DESC_POOL_ADDR),
- tr32(BUFMGR_DMA_DESC_POOL_SIZE));
-
- /* Read DMA control block */
- printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
- tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
-
- /* Write DMA control block */
- printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
- tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
-
- /* DMA completion block */
- printk("DEBUG: DMAC_MODE[%08x]\n",
- tr32(DMAC_MODE));
-
- /* GRC block */
- printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
- tr32(GRC_MODE), tr32(GRC_MISC_CFG));
- printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
- tr32(GRC_LOCAL_CTRL));
-
- /* TG3_BDINFOs */
- printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
- tr32(RCVDBDI_JUMBO_BD + 0x0),
- tr32(RCVDBDI_JUMBO_BD + 0x4),
- tr32(RCVDBDI_JUMBO_BD + 0x8),
- tr32(RCVDBDI_JUMBO_BD + 0xc));
- printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
- tr32(RCVDBDI_STD_BD + 0x0),
- tr32(RCVDBDI_STD_BD + 0x4),
- tr32(RCVDBDI_STD_BD + 0x8),
- tr32(RCVDBDI_STD_BD + 0xc));
- printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
- tr32(RCVDBDI_MINI_BD + 0x0),
- tr32(RCVDBDI_MINI_BD + 0x4),
- tr32(RCVDBDI_MINI_BD + 0x8),
- tr32(RCVDBDI_MINI_BD + 0xc));
-
- tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
- tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
- tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
- tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
- printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
- val32, val32_2, val32_3, val32_4);
-
- tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
- tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
- tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
- tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
- printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
- val32, val32_2, val32_3, val32_4);
-
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
- printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
- val32, val32_2, val32_3, val32_4, val32_5);
-
- /* SW status block */
- printk(KERN_DEBUG
- "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
- sblk->status,
- sblk->status_tag,
- sblk->rx_jumbo_consumer,
- sblk->rx_consumer,
- sblk->rx_mini_consumer,
- sblk->idx[0].rx_producer,
- sblk->idx[0].tx_consumer);
-
- /* SW statistics block */
- printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
- ((u32 *)tp->hw_stats)[0],
- ((u32 *)tp->hw_stats)[1],
- ((u32 *)tp->hw_stats)[2],
- ((u32 *)tp->hw_stats)[3]);
-
- /* Mailboxes */
- printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
- tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
- tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
- tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
- tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
-
- /* NIC side send descriptors. */
- for (i = 0; i < 6; i++) {
- unsigned long txd;
-
- txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
- + (i * sizeof(struct tg3_tx_buffer_desc));
- printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
- i,
- readl(txd + 0x0), readl(txd + 0x4),
- readl(txd + 0x8), readl(txd + 0xc));
- }
-
- /* NIC side RX descriptors. */
- for (i = 0; i < 6; i++) {
- unsigned long rxd;
-
- rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
- + (i * sizeof(struct tg3_rx_buffer_desc));
- printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
- i,
- readl(rxd + 0x0), readl(rxd + 0x4),
- readl(rxd + 0x8), readl(rxd + 0xc));
- rxd += (4 * sizeof(u32));
- printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
- i,
- readl(rxd + 0x0), readl(rxd + 0x4),
- readl(rxd + 0x8), readl(rxd + 0xc));
- }
-
- for (i = 0; i < 6; i++) {
- unsigned long rxd;
-
- rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
- + (i * sizeof(struct tg3_rx_buffer_desc));
- printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
- i,
- readl(rxd + 0x0), readl(rxd + 0x4),
- readl(rxd + 0x8), readl(rxd + 0xc));
- rxd += (4 * sizeof(u32));
- printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
- i,
- readl(rxd + 0x0), readl(rxd + 0x4),
- readl(rxd + 0x8), readl(rxd + 0xc));
- }
-}
-#endif
-
static struct net_device_stats *tg3_get_stats(struct net_device *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
@@ -9162,9 +9000,6 @@ static int tg3_close(struct net_device *dev)
tg3_phy_stop(tp);
tg3_full_lock(tp, 1);
-#if 0
- tg3_dump_state(tp);
-#endif
tg3_disable_ints(tp);
@@ -9406,9 +9241,8 @@ static inline u32 calc_crc(unsigned char *buf, int len)
reg >>= 1;
- if (tmp) {
+ if (tmp)
reg ^= 0xedb88320;
- }
}
}
@@ -9452,20 +9286,20 @@ static void __tg3_set_rx_mode(struct net_device *dev)
rx_mode |= RX_MODE_PROMISC;
} else if (dev->flags & IFF_ALLMULTI) {
/* Accept all multicast. */
- tg3_set_multi (tp, 1);
+ tg3_set_multi(tp, 1);
} else if (netdev_mc_empty(dev)) {
/* Reject all multicast. */
- tg3_set_multi (tp, 0);
+ tg3_set_multi(tp, 0);
} else {
/* Accept one or more multicast(s). */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u32 mc_filter[4] = { 0, };
u32 regidx;
u32 bit;
u32 crc;
- netdev_for_each_mc_addr(mclist, dev) {
- crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = calc_crc(ha->addr, ETH_ALEN);
bit = ~crc & 0x7f;
regidx = (bit & 0x60) >> 5;
bit &= 0x1f;
@@ -9618,7 +9452,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
memcpy(data, ((char*)&val) + b_offset, b_count);
len -= b_count;
offset += b_count;
- eeprom->len += b_count;
+ eeprom->len += b_count;
}
/* read bytes upto the last 4 byte boundary */
@@ -10166,8 +10000,8 @@ static int tg3_set_rx_csum(struct net_device *dev, u32 data)
if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
if (data != 0)
return -EINVAL;
- return 0;
- }
+ return 0;
+ }
spin_lock_bh(&tp->lock);
if (data)
@@ -10186,8 +10020,8 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
if (data != 0)
return -EINVAL;
- return 0;
- }
+ return 0;
+ }
if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
ethtool_op_set_tx_ipv6_csum(dev, data);
@@ -10197,7 +10031,7 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
return 0;
}
-static int tg3_get_sset_count (struct net_device *dev, int sset)
+static int tg3_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_TEST:
@@ -10209,7 +10043,7 @@ static int tg3_get_sset_count (struct net_device *dev, int sset)
}
}
-static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
+static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
case ETH_SS_STATS:
@@ -10256,7 +10090,7 @@ static int tg3_phys_id(struct net_device *dev, u32 data)
return 0;
}
-static void tg3_get_ethtool_stats (struct net_device *dev,
+static void tg3_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *estats, u64 *tmp_stats)
{
struct tg3 *tp = netdev_priv(dev);
@@ -10362,8 +10196,7 @@ static int tg3_test_nvram(struct tg3 *tp)
for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
parity[k++] = buf8[i] & msk;
i++;
- }
- else if (i == 16) {
+ } else if (i == 16) {
int l;
u8 msk;
@@ -10461,7 +10294,7 @@ static int tg3_test_registers(struct tg3 *tp)
{ MAC_ADDR_0_HIGH, 0x0000,
0x00000000, 0x0000ffff },
{ MAC_ADDR_0_LOW, 0x0000,
- 0x00000000, 0xffffffff },
+ 0x00000000, 0xffffffff },
{ MAC_RX_MTU_SIZE, 0x0000,
0x00000000, 0x0000ffff },
{ MAC_TX_MODE, 0x0000,
@@ -10649,7 +10482,8 @@ static int tg3_test_registers(struct tg3 *tp)
out:
if (netif_msg_hw(tp))
- pr_err("Register test failed at offset %x\n", offset);
+ netdev_err(tp->dev,
+ "Register test failed at offset %x\n", offset);
tw32(offset, save_val);
return -EIO;
}
@@ -10825,9 +10659,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
}
tw32(MAC_MODE, mac_mode);
- }
- else
+ } else {
return -EINVAL;
+ }
err = -EIO;
@@ -10909,7 +10743,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
rx_skb = tpr->rx_std_buffers[desc_idx].skb;
- map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
+ map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
for (i = 14; i < tx_len; i++) {
@@ -11083,7 +10917,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phy_mii_ioctl(phydev, data, cmd);
}
- switch(cmd) {
+ switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = tp->phy_addr;
@@ -11776,7 +11610,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
tp->tg3_flags |= TG3_FLAG_NVRAM;
if (tg3_nvram_lock(tp)) {
- netdev_warn(tp->dev, "Cannot get nvram lock, %s failed\n",
+ netdev_warn(tp->dev,
+ "Cannot get nvram lock, %s failed\n",
__func__);
return;
}
@@ -11895,7 +11730,7 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
if (ret)
break;
- page_off = offset & pagemask;
+ page_off = offset & pagemask;
size = pagesize;
if (len < size)
size = len;
@@ -11923,7 +11758,7 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
- if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
break;
/* Issue another write enable to start the write. */
@@ -11977,7 +11812,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
memcpy(&data, buf + i, 4);
tw32(NVRAM_WRDATA, be32_to_cpu(data));
- page_off = offset % tp->nvram_pagesize;
+ page_off = offset % tp->nvram_pagesize;
phy_addr = tg3_nvram_phys_addr(tp, offset);
@@ -11985,7 +11820,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
- if ((page_off == 0) || (i == 0))
+ if (page_off == 0 || i == 0)
nvram_cmd |= NVRAM_CMD_FIRST;
if (page_off == (tp->nvram_pagesize - 4))
nvram_cmd |= NVRAM_CMD_LAST;
@@ -12028,8 +11863,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
- }
- else {
+ } else {
u32 grc_mode;
ret = tg3_nvram_lock(tp);
@@ -12049,8 +11883,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
ret = tg3_nvram_write_block_buffered(tp, offset, len,
buf);
- }
- else {
+ } else {
ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
buf);
}
@@ -12545,11 +12378,11 @@ skip_phy_reset:
return err;
}
-static void __devinit tg3_read_partno(struct tg3 *tp)
+static void __devinit tg3_read_vpd(struct tg3 *tp)
{
- unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */
+ u8 vpd_data[TG3_NVM_VPD_LEN];
unsigned int block_end, rosize, len;
- int i = 0;
+ int j, i = 0;
u32 magic;
if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
@@ -12598,6 +12431,32 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
if (block_end > TG3_NVM_VPD_LEN)
goto out_not_found;
+ j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+ PCI_VPD_RO_KEYWORD_MFR_ID);
+ if (j > 0) {
+ len = pci_vpd_info_field_size(&vpd_data[j]);
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end || len != 4 ||
+ memcmp(&vpd_data[j], "1028", 4))
+ goto partno;
+
+ j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+ PCI_VPD_RO_KEYWORD_VENDOR0);
+ if (j < 0)
+ goto partno;
+
+ len = pci_vpd_info_field_size(&vpd_data[j]);
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end)
+ goto partno;
+
+ memcpy(tp->fw_ver, &vpd_data[j], len);
+ strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
+ }
+
+partno:
i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
PCI_VPD_RO_KEYWORD_PARTNO);
if (i < 0)
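A hedged sketch of the VPD read-only keyword lookup pattern the new code uses: pci_vpd_find_info_keyword() returns the offset of a keyword within the RO section, pci_vpd_info_field_size() gives its payload length, and PCI_VPD_INFO_FLD_HDR_SIZE skips the three-byte field header. The helper below is illustrative and omits the driver's extra check that the field stays inside the RO block.

static int vpd_copy_keyword(const u8 *vpd, unsigned int ro_start,
			    unsigned int ro_len, const char *kw,
			    char *out, size_t out_sz)
{
	int j = pci_vpd_find_info_keyword(vpd, ro_start, ro_len, kw);
	unsigned int len;

	if (j < 0)
		return -1;		/* keyword not present */
	len = pci_vpd_info_field_size(&vpd[j]);
	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len >= out_sz)		/* clamp to the caller's buffer */
		len = out_sz - 1;
	memcpy(out, &vpd[j], len);
	out[len] = '\0';
	return 0;
}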
@@ -12667,7 +12526,7 @@ static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
u32 val, offset, start, ver_offset;
- int i;
+ int i, dst_off;
bool newver = false;
if (tg3_nvram_read(tp, 0xc, &offset) ||
@@ -12687,8 +12546,11 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
newver = true;
}
+ dst_off = strlen(tp->fw_ver);
+
if (newver) {
- if (tg3_nvram_read(tp, offset + 8, &ver_offset))
+ if (TG3_VER_SIZE - dst_off < 16 ||
+ tg3_nvram_read(tp, offset + 8, &ver_offset))
return;
offset = offset + ver_offset - start;
@@ -12697,7 +12559,7 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
if (tg3_nvram_read_be32(tp, offset + i, &v))
return;
- memcpy(tp->fw_ver + i, &v, sizeof(v));
+ memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
}
} else {
u32 major, minor;
@@ -12708,7 +12570,8 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
TG3_NVM_BCVER_MAJSFT;
minor = ver_offset & TG3_NVM_BCVER_MINMSK;
- snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
+ snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
+ "v%d.%02d", major, minor);
}
}
@@ -12732,9 +12595,7 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
u32 offset, major, minor, build;
- tp->fw_ver[0] = 's';
- tp->fw_ver[1] = 'b';
- tp->fw_ver[2] = '\0';
+ strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
return;
@@ -12771,11 +12632,14 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
if (minor > 99 || build > 26)
return;
- snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
+ offset = strlen(tp->fw_ver);
+ snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
+ " v%d.%02d", major, minor);
if (build > 0) {
- tp->fw_ver[8] = 'a' + build - 1;
- tp->fw_ver[9] = '\0';
+ offset = strlen(tp->fw_ver);
+ if (offset < TG3_VER_SIZE - 1)
+ tp->fw_ver[offset] = 'a' + build - 1;
}
}
@@ -12862,12 +12726,13 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
u32 val;
+ bool vpd_vers = false;
- if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
- tp->fw_ver[0] = 's';
- tp->fw_ver[1] = 'b';
- tp->fw_ver[2] = '\0';
+ if (tp->fw_ver[0] != 0)
+ vpd_vers = true;
+ if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
+ strcat(tp->fw_ver, "sb");
return;
}
@@ -12884,11 +12749,12 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
return;
if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
- (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
- return;
+ (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
+ goto done;
tg3_read_mgmtfw_ver(tp);
+done:
tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
@@ -12898,9 +12764,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
{
static struct pci_device_id write_reorder_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+ PCI_DEVICE_ID_AMD_FE_GATE_700C) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_AMD_8131_BRIDGE) },
+ PCI_DEVICE_ID_AMD_8131_BRIDGE) },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_8385_0) },
{ },
@@ -13066,8 +12932,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
- }
- else {
+ } else {
struct pci_dev *bridge = NULL;
do {
@@ -13129,6 +12994,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
tp->dev->features |= NETIF_F_IPV6_CSUM;
+ tp->dev->features |= NETIF_F_GRO;
}
/* Determine TSO capabilities */
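A small sketch (not part of the patch) of what the one-line feature change above amounts to: GRO is advertised alongside checksum offload and scatter-gather for the chips covered by the surrounding block; 'has_ipv6_csum' is only an illustrative stand-in for the chip test the real code makes on tp->tg3_flags3.

#include <linux/netdevice.h>

/* Illustrative only: dev and has_ipv6_csum stand in for probe-time state. */
static void ex_set_features(struct net_device *dev, bool has_ipv6_csum)
{
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
	if (has_ipv6_csum)
		dev->features |= NETIF_F_IPV6_CSUM;
}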
@@ -13189,8 +13055,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
- (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
- (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
+ (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
+ (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
@@ -13224,7 +13090,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
if (!tp->pcix_cap) {
- pr_err("Cannot find PCI-X capability, aborting\n");
+ dev_err(&tp->pdev->dev,
+ "Cannot find PCI-X capability, aborting\n");
return -EIO;
}
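As context for the recurring pr_err/netdev_err to dev_err conversions (this sketch is not from the patch): before register_netdev() the net_device has no meaningful name, so probe-path failures read better when logged against the PCI device itself. A hedged example of the shape these call sites take:

#include <linux/device.h>
#include <linux/pci.h>
#include <linux/errno.h>

/* Illustrative helper only; pdev is whatever pci_dev the probe routine holds. */
static int ex_check_pcix(struct pci_dev *pdev)
{
	if (!pci_find_capability(pdev, PCI_CAP_ID_PCIX)) {
		dev_err(&pdev->dev, "Cannot find PCI-X capability, aborting\n");
		return -EIO;
	}
	return 0;
}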
@@ -13421,7 +13288,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
/* Force the chip into D0. */
err = tg3_set_power_state(tp, PCI_D0);
if (err) {
- pr_err("(%s) transition to D0 failed\n", pci_name(tp->pdev));
+ dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
return err;
}
@@ -13595,13 +13462,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
err = tg3_phy_probe(tp);
if (err) {
- pr_err("(%s) phy probe failed, err %d\n",
- pci_name(tp->pdev), err);
+ dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
/* ... but do not return immediately ... */
tg3_mdio_fini(tp);
}
- tg3_read_partno(tp);
+ tg3_read_vpd(tp);
tg3_read_fw_ver(tp);
if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
@@ -13639,10 +13505,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
else
tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
- tp->rx_offset = NET_IP_ALIGN;
+ tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
+ tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
- (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
- tp->rx_offset = 0;
+ (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
+ tp->rx_offset -= NET_IP_ALIGN;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ tp->rx_copy_thresh = ~(u16)0;
+#endif
+ }
tp->rx_std_max_post = TG3_RX_RING_SIZE;
@@ -13965,11 +13836,10 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
}
pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
- if (to_device) {
+ if (to_device)
tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
- } else {
+ else
tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
- }
ret = -ENODEV;
for (i = 0; i < 40; i++) {
@@ -14105,8 +13975,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
/* Send the buffer to the chip. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
if (ret) {
- pr_err("tg3_test_dma() Write the buffer failed %d\n",
- ret);
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer write failed. err = %d\n",
+ __func__, ret);
break;
}
@@ -14116,8 +13987,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
u32 val;
tg3_read_mem(tp, 0x2100 + (i*4), &val);
if (le32_to_cpu(val) != p[i]) {
- pr_err(" tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n",
- val, i);
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer corrupted on device! "
+ "(%d != %d)\n", __func__, val, i);
/* ret = -ENODEV here? */
}
p[i] = 0;
@@ -14126,9 +13998,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
/* Now read it back. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
if (ret) {
- pr_err("tg3_test_dma() Read the buffer failed %d\n",
- ret);
-
+ dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
+ "err = %d\n", __func__, ret);
break;
}
@@ -14144,8 +14015,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
break;
} else {
- pr_err("tg3_test_dma() buffer corrupted on read back! (%d != %d)\n",
- p[i], i);
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer corrupted on read back! "
+ "(%d != %d)\n", __func__, p[i], i);
ret = -ENODEV;
goto out;
}
@@ -14172,10 +14044,10 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
if (pci_dev_present(dma_wait_state_chipsets)) {
tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
- }
- else
+ } else {
/* Safe to use the calculated DMA boundary. */
tp->dma_rwctrl = saved_dma_rwctrl;
+ }
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
}
@@ -14437,13 +14309,13 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
- pr_err("Cannot enable PCI device, aborting\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
err = pci_request_regions(pdev, DRV_MODULE_NAME);
if (err) {
- pr_err("Cannot obtain PCI resources, aborting\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
goto err_out_disable_pdev;
}
@@ -14452,14 +14324,15 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
/* Find power-management capability. */
pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pm_cap == 0) {
- pr_err("Cannot find PowerManagement capability, aborting\n");
+ dev_err(&pdev->dev,
+ "Cannot find Power Management capability, aborting\n");
err = -EIO;
goto err_out_free_res;
}
dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
if (!dev) {
- pr_err("Etherdev alloc failed, aborting\n");
+ dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto err_out_free_res;
}
@@ -14509,7 +14382,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->regs = pci_ioremap_bar(pdev, BAR_0);
if (!tp->regs) {
- netdev_err(dev, "Cannot map device registers, aborting\n");
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_free_dev;
}
@@ -14525,7 +14398,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_get_invariants(tp);
if (err) {
- netdev_err(dev, "Problem fetching invariants of chip, aborting\n");
+ dev_err(&pdev->dev,
+ "Problem fetching invariants of chip, aborting\n");
goto err_out_iounmap;
}
@@ -14560,7 +14434,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = pci_set_consistent_dma_mask(pdev,
persist_dma_mask);
if (err < 0) {
- netdev_err(dev, "Unable to obtain 64 bit DMA for consistent allocations\n");
+ dev_err(&pdev->dev, "Unable to obtain 64 bit "
+ "DMA for consistent allocations\n");
goto err_out_iounmap;
}
}
@@ -14568,7 +14443,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
if (err || dma_mask == DMA_BIT_MASK(32)) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- netdev_err(dev, "No usable DMA configuration, aborting\n");
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
goto err_out_iounmap;
}
}
@@ -14617,14 +14493,16 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_get_device_address(tp);
if (err) {
- netdev_err(dev, "Could not obtain valid ethernet address, aborting\n");
+ dev_err(&pdev->dev,
+ "Could not obtain valid ethernet address, aborting\n");
goto err_out_iounmap;
}
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
if (!tp->aperegs) {
- netdev_err(dev, "Cannot map APE registers, aborting\n");
+ dev_err(&pdev->dev,
+ "Cannot map APE registers, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -14648,7 +14526,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_test_dma(tp);
if (err) {
- netdev_err(dev, "DMA engine test failed, aborting\n");
+ dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
goto err_out_apeunmap;
}
@@ -14709,7 +14587,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = register_netdev(dev);
if (err) {
- netdev_err(dev, "Cannot register net device, aborting\n");
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
goto err_out_apeunmap;
}
@@ -14722,11 +14600,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
struct phy_device *phydev;
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
- netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+ netdev_info(dev,
+ "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
phydev->drv->name, dev_name(&phydev->dev));
} else
- netdev_info(dev, "attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
- tg3_phy_string(tp),
+ netdev_info(dev, "attached PHY is %s (%s Ethernet) "
+ "(WireSpeed[%d])\n", tg3_phy_string(tp),
((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
"10/100/1000Base-T")),
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 574a1cc..ce9c491 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -23,11 +23,8 @@
#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */
#define TG3_BDINFO_SIZE 0x10UL
-#define RX_COPY_THRESHOLD 256
-
#define TG3_RX_INTERNAL_RING_SZ_5906 32
-#define RX_STD_MAX_SIZE 1536
#define RX_STD_MAX_SIZE_5705 512
#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */
@@ -183,6 +180,7 @@
#define METAL_REV_B2 0x02
#define TG3PCI_DMA_RW_CTRL 0x0000006c
#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001
+#define DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380
#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700
#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000
#define DMA_RWCTRL_READ_BNDRY_16 0x00000100
@@ -252,7 +250,7 @@
/* 0x94 --> 0x98 unused */
#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */
#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */
-/* 0xa0 --> 0xb8 unused */
+/* 0xa8 --> 0xb8 unused */
#define TG3PCI_DUAL_MAC_CTRL 0x000000b8
#define DUAL_MAC_CTRL_CH_MASK 0x00000003
#define DUAL_MAC_CTRL_ID 0x00000004
@@ -1854,6 +1852,8 @@
#define TG3_PCIE_TLDLPL_PORT 0x00007c00
#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004
#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000
+#define TG3_PCIE_PL_LO_PHYCTL5 0x00000014
+#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000
/* OTP bit definitions */
#define TG3_OTP_AGCTGT_MASK 0x000000e0
@@ -2082,7 +2082,7 @@
#define MII_TG3_DSP_AADJ1CH0 0x001f
#define MII_TG3_DSP_AADJ1CH3 0x601f
#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002
-#define MII_TG3_DSP_EXP8 0x0708
+#define MII_TG3_DSP_EXP8 0x0f08
#define MII_TG3_DSP_EXP8_REJ2MHz 0x0001
#define MII_TG3_DSP_EXP8_AEDW 0x0200
#define MII_TG3_DSP_EXP75 0x0f75
@@ -2512,7 +2512,7 @@ struct tg3_hw_stats {
*/
struct ring_info {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
struct tg3_config_info {
@@ -2561,7 +2561,7 @@ struct tg3_bufmgr_config {
struct tg3_ethtool_stats {
/* Statistics maintained by Receive MAC. */
- u64 rx_octets;
+ u64 rx_octets;
u64 rx_fragments;
u64 rx_ucast_packets;
u64 rx_mcast_packets;
@@ -2751,9 +2751,11 @@ struct tg3 {
struct tg3_napi napi[TG3_IRQ_MAX_VECS];
void (*write32_rx_mbox) (struct tg3 *, u32,
u32);
+ u32 rx_copy_thresh;
u32 rx_pending;
u32 rx_jumbo_pending;
u32 rx_std_max_post;
+ u32 rx_offset;
u32 rx_pkt_map_sz;
#if TG3_VLAN_TAG_USED
struct vlan_group *vlgrp;
@@ -2773,7 +2775,6 @@ struct tg3 {
unsigned long last_event_jiffies;
};
- u32 rx_offset;
u32 tg3_flags;
#define TG3_FLAG_TAGGED_STATUS 0x00000001
#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
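Not part of the patch — a short sketch of the generic DMA-unmap bookkeeping the header hunk above moves to (ex_ring_info is an illustrative copy of the structure; the macro and accessors are the generic kernel ones, and they compile away on architectures that do not need the stored address):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct ex_ring_info {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};

/* Store and read back the DMA address through the generic accessors. */
static void ex_store_mapping(struct ex_ring_info *ri, dma_addr_t addr)
{
	dma_unmap_addr_set(ri, mapping, addr);
}

static dma_addr_t ex_load_mapping(struct ex_ring_info *ri)
{
	return dma_unmap_addr(ri, mapping);
}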
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 390540c..ccee3ed 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1034,7 +1034,7 @@ static void TLan_tx_timeout(struct net_device *dev)
TLan_ResetLists( dev );
TLan_ReadAndClearStats( dev, TLAN_IGNORE );
TLan_ResetAdapter( dev );
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue( dev );
}
@@ -1147,7 +1147,6 @@ static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
} /* TLan_StartTx */
@@ -1314,7 +1313,7 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev )
static void TLan_SetMulticastList( struct net_device *dev )
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u32 hash1 = 0;
u32 hash2 = 0;
int i;
@@ -1336,12 +1335,12 @@ static void TLan_SetMulticastList( struct net_device *dev )
TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
} else {
i = 0;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if ( i < 3 ) {
TLan_SetMac( dev, i + 1,
- (char *) &dmi->dmi_addr );
+ (char *) &ha->addr);
} else {
- offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr );
+ offset = TLan_HashFunc((u8 *)&ha->addr);
if ( offset < 32 )
hash1 |= ( 1 << offset );
else
@@ -2464,7 +2463,7 @@ static void TLan_PhyPrint( struct net_device *dev )
printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name );
} else if ( phy <= TLAN_PHY_MAX_ADDR ) {
printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy );
- printk( "TLAN: Off. +0 +1 +2 +3 \n" );
+ printk( "TLAN: Off. +0 +1 +2 +3\n" );
for ( i = 0; i < 0x20; i+= 4 ) {
printk( "TLAN: 0x%02x", i );
TLan_MiiReadReg( dev, phy, i, &data0 );
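The dmi_addr to netdev_hw_addr conversion above repeats in the token-ring drivers further down; as a hedged sketch of the shared pattern (the helper and the 4-byte functional-address layout are illustrative), the new-style walk looks like:

#include <linux/netdevice.h>
#include <linux/string.h>

/* Illustrative helper: fold multicast address bytes 2..5 into a 4-byte
 * functional address, reading each list entry through ha->addr. */
static void ex_build_functional_addr(struct net_device *dev, u8 fa[4])
{
	struct netdev_hw_addr *ha;

	memset(fa, 0, 4);
	netdev_for_each_mc_addr(ha, dev) {
		fa[0] |= ha->addr[2];
		fa[1] |= ha->addr[3];
		fa[2] |= ha->addr[4];
		fa[3] |= ha->addr[5];
	}
}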
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 7d7f3ee..10800f1 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -77,7 +77,7 @@ static char version[] __devinitdata =
#define FW_NAME "3com/3C359.bin"
MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
-MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ;
+MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver\n") ;
MODULE_FIRMWARE(FW_NAME);
/* Module parameters */
@@ -163,19 +163,19 @@ static void print_tx_state(struct net_device *dev)
u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
int i ;
- printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d \n",xl_priv->tx_ring_head,
+ printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d\n",xl_priv->tx_ring_head,
xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ;
- printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len \n");
+ printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len\n");
for (i = 0; i < 16; i++) {
txd = &(xl_priv->xl_tx_ring[i]) ;
- printk("%d, %08lx, %08x, %08x, %08x, %08x \n", i, virt_to_bus(txd),
+ printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(txd),
txd->framestartheader, txd->dnnextptr, txd->buffer, txd->buffer_length ) ;
}
- printk("DNLISTPTR = %04x \n", readl(xl_mmio + MMIO_DNLISTPTR) );
+ printk("DNLISTPTR = %04x\n", readl(xl_mmio + MMIO_DNLISTPTR) );
- printk("DmaCtl = %04x \n", readl(xl_mmio + MMIO_DMA_CTRL) );
- printk("Queue status = %0x \n",netif_running(dev) ) ;
+ printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL) );
+ printk("Queue status = %0x\n",netif_running(dev) ) ;
}
static void print_rx_state(struct net_device *dev)
@@ -186,19 +186,19 @@ static void print_rx_state(struct net_device *dev)
u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
int i ;
- printk("rx_ring_tail: %d \n", xl_priv->rx_ring_tail) ;
- printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len \n");
+ printk("rx_ring_tail: %d\n", xl_priv->rx_ring_tail);
+ printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len\n");
for (i = 0; i < 16; i++) {
/* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */
rxd = &(xl_priv->xl_rx_ring[i]) ;
- printk("%d, %08lx, %08x, %08x, %08x, %08x \n", i, virt_to_bus(rxd),
+ printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(rxd),
rxd->framestatus, rxd->upnextptr, rxd->upfragaddr, rxd->upfraglen ) ;
}
- printk("UPLISTPTR = %04x \n", readl(xl_mmio + MMIO_UPLISTPTR) );
+ printk("UPLISTPTR = %04x\n", readl(xl_mmio + MMIO_UPLISTPTR));
- printk("DmaCtl = %04x \n", readl(xl_mmio + MMIO_DMA_CTRL) );
- printk("Queue status = %0x \n",netif_running(dev) ) ;
+ printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL));
+ printk("Queue status = %0x\n",netif_running(dev));
}
#endif
@@ -391,7 +391,7 @@ static int __devinit xl_init(struct net_device *dev)
struct xl_private *xl_priv = netdev_priv(dev);
int err;
- printk(KERN_INFO "%s \n", version);
+ printk(KERN_INFO "%s\n", version);
printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n",
xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq);
@@ -463,7 +463,7 @@ static int xl_hw_reset(struct net_device *dev)
writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);
#if XL_DEBUG
- printk(KERN_INFO "Read from PMBAR = %04x \n", readw(xl_mmio + MMIO_MACDATA)) ;
+ printk(KERN_INFO "Read from PMBAR = %04x\n", readw(xl_mmio + MMIO_MACDATA));
#endif
if ( readw( (xl_mmio + MMIO_MACDATA)) & PMB_CPHOLD ) {
@@ -591,9 +591,9 @@ static int xl_hw_reset(struct net_device *dev)
#if XL_DEBUG
writel(IO_WORD_READ | SWITCHSETTINGS, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
if ( readw(xl_mmio + MMIO_MACDATA) & 2) {
- printk(KERN_INFO "Default ring speed 4 mbps \n") ;
+ printk(KERN_INFO "Default ring speed 4 mbps\n");
} else {
- printk(KERN_INFO "Default ring speed 16 mbps \n") ;
+ printk(KERN_INFO "Default ring speed 16 mbps\n");
}
printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb);
#endif
@@ -651,7 +651,7 @@ static int xl_open(struct net_device *dev)
if (open_err != 0) { /* Something went wrong with the open command */
if (open_err & 0x07) { /* Wrong speed, retry at different speed */
- printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed \n", dev->name) ;
+ printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed\n", dev->name);
switchsettings = switchsettings ^ 2 ;
xl_ee_write(dev,0x08,switchsettings) ;
xl_hw_reset(dev) ;
@@ -703,7 +703,7 @@ static int xl_open(struct net_device *dev)
}
if (i==0) {
- printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled \n",dev->name) ;
+ printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
free_irq(dev->irq,dev) ;
kfree(xl_priv->xl_tx_ring);
kfree(xl_priv->xl_rx_ring);
@@ -853,7 +853,7 @@ static int xl_open_hw(struct net_device *dev)
writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
- printk(", ARB: %04x \n",xl_priv->arb ) ;
+ printk(", ARB: %04x\n",xl_priv->arb );
writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
@@ -867,7 +867,7 @@ static int xl_open_hw(struct net_device *dev)
ver_str[i] = readb(xl_mmio + MMIO_MACDATA) ;
}
ver_str[i] = '\0' ;
- printk(KERN_INFO "%s: Microcode version String: %s \n",dev->name,ver_str);
+ printk(KERN_INFO "%s: Microcode version String: %s\n",dev->name,ver_str);
}
/*
@@ -991,7 +991,7 @@ static void xl_rx(struct net_device *dev)
skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
if (skb==NULL) { /* Still need to fix the rx ring */
- printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer \n",dev->name) ;
+ printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer\n",dev->name);
adv_rx_ring(dev) ;
dev->stats.rx_dropped++ ;
writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
@@ -1092,7 +1092,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
*/
if (intstatus == 0x0001) {
writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
- printk(KERN_INFO "%s: 00001 int received \n",dev->name) ;
+ printk(KERN_INFO "%s: 00001 int received\n",dev->name);
} else {
if (intstatus & (HOSTERRINT | SRBRINT | ARBCINT | UPCOMPINT | DNCOMPINT | HARDERRINT | (1<<8) | TXUNDERRUN | ASBFINT)) {
@@ -1103,9 +1103,9 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
*/
if (intstatus & HOSTERRINT) {
- printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x \n",dev->name,intstatus) ;
+ printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x\n",dev->name,intstatus);
writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
- printk(KERN_WARNING "%s: Resetting hardware: \n", dev->name);
+ printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
netif_stop_queue(dev) ;
xl_freemem(dev) ;
free_irq(dev->irq,dev);
@@ -1128,7 +1128,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
Must put a timeout check here ! */
/* Empty Loop */
}
- printk(KERN_WARNING "%s: TX Underrun received \n",dev->name) ;
+ printk(KERN_WARNING "%s: TX Underrun received\n",dev->name);
writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
} /* TxUnderRun */
@@ -1157,13 +1157,13 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
macstatus = readw(xl_mmio + MMIO_MACDATA) ;
printk(KERN_WARNING "%s: MacStatusError, details: ", dev->name);
if (macstatus & (1<<14))
- printk(KERN_WARNING "tchk error: Unrecoverable error \n") ;
+ printk(KERN_WARNING "tchk error: Unrecoverable error\n");
if (macstatus & (1<<3))
- printk(KERN_WARNING "eint error: Internal watchdog timer expired \n") ;
+ printk(KERN_WARNING "eint error: Internal watchdog timer expired\n");
if (macstatus & (1<<2))
- printk(KERN_WARNING "aint error: Host tried to perform invalid operation \n") ;
+ printk(KERN_WARNING "aint error: Host tried to perform invalid operation\n");
printk(KERN_WARNING "Instatus = %02x, macstatus = %02x\n",intstatus,macstatus) ;
- printk(KERN_WARNING "%s: Resetting hardware: \n", dev->name);
+ printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
netif_stop_queue(dev) ;
xl_freemem(dev) ;
free_irq(dev->irq,dev);
@@ -1175,7 +1175,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
} else {
- printk(KERN_WARNING "%s: Received Unknown interrupt : %04x \n", dev->name, intstatus) ;
+ printk(KERN_WARNING "%s: Received Unknown interrupt : %04x\n", dev->name, intstatus);
writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
}
}
@@ -1350,11 +1350,11 @@ static int xl_close(struct net_device *dev)
writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD);
if (readb(xl_mmio + MMIO_MACDATA) != CLOSE_NIC) {
- printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response \n",dev->name) ;
+ printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response\n",dev->name);
} else {
writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
if (readb(xl_mmio + MMIO_MACDATA)==0) {
- printk(KERN_INFO "%s: Adapter has been closed \n",dev->name) ;
+ printk(KERN_INFO "%s: Adapter has been closed\n",dev->name);
writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
xl_freemem(dev) ;
@@ -1391,7 +1391,7 @@ static int xl_close(struct net_device *dev)
static void xl_set_rx_mode(struct net_device *dev)
{
struct xl_private *xl_priv = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char dev_mc_address[4] ;
u16 options ;
@@ -1408,11 +1408,11 @@ static void xl_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- netdev_for_each_mc_addr(dmi, dev) {
- dev_mc_address[0] |= dmi->dmi_addr[2] ;
- dev_mc_address[1] |= dmi->dmi_addr[3] ;
- dev_mc_address[2] |= dmi->dmi_addr[4] ;
- dev_mc_address[3] |= dmi->dmi_addr[5] ;
+ netdev_for_each_mc_addr(ha, dev) {
+ dev_mc_address[0] |= ha->addr[2];
+ dev_mc_address[1] |= ha->addr[3];
+ dev_mc_address[2] |= ha->addr[4];
+ dev_mc_address[3] |= ha->addr[5];
}
if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */
@@ -1447,11 +1447,11 @@ static void xl_srb_bh(struct net_device *dev)
printk(KERN_INFO "%s: Command: %d - Invalid Command code\n",dev->name,srb_cmd) ;
break ;
case 4:
- printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command \n",dev->name,srb_cmd) ;
+ printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command\n",dev->name,srb_cmd);
break ;
case 6:
- printk(KERN_INFO "%s: Command: %d - Options Invalid for command \n",dev->name,srb_cmd) ;
+ printk(KERN_INFO "%s: Command: %d - Options Invalid for command\n",dev->name,srb_cmd);
break ;
case 0: /* Successful command execution */
@@ -1472,11 +1472,11 @@ static void xl_srb_bh(struct net_device *dev)
break ;
case SET_FUNC_ADDRESS:
if(xl_priv->xl_message_level)
- printk(KERN_INFO "%s: Functional Address Set \n",dev->name) ;
+ printk(KERN_INFO "%s: Functional Address Set\n",dev->name);
break ;
case CLOSE_NIC:
if(xl_priv->xl_message_level)
- printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler \n",dev->name) ;
+ printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler\n",dev->name);
break ;
case SET_MULTICAST_MODE:
if(xl_priv->xl_message_level)
@@ -1485,9 +1485,9 @@ static void xl_srb_bh(struct net_device *dev)
case SET_RECEIVE_MODE:
if(xl_priv->xl_message_level) {
if (xl_priv->xl_copy_all_options == 0x0004)
- printk(KERN_INFO "%s: Entering promiscuous mode \n", dev->name) ;
+ printk(KERN_INFO "%s: Entering promiscuous mode\n", dev->name);
else
- printk(KERN_INFO "%s: Entering normal receive mode \n",dev->name) ;
+ printk(KERN_INFO "%s: Entering normal receive mode\n",dev->name);
}
break ;
@@ -1557,20 +1557,20 @@ static void xl_arb_cmd(struct net_device *dev)
xl_freemem(dev) ;
free_irq(dev->irq,dev);
- printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
+ printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
} /* If serious error */
if (xl_priv->xl_message_level) {
if (lan_status_diff & LSC_SIG_LOSS)
- printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
+ printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
if (lan_status_diff & LSC_HARD_ERR)
- printk(KERN_INFO "%s: Beaconing \n",dev->name);
+ printk(KERN_INFO "%s: Beaconing\n",dev->name);
if (lan_status_diff & LSC_SOFT_ERR)
- printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
+ printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
- printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
+ printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
if (lan_status_diff & LSC_FDX_MODE)
@@ -1579,7 +1579,7 @@ static void xl_arb_cmd(struct net_device *dev)
if (lan_status_diff & LSC_CO) {
if (xl_priv->xl_message_level)
- printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
+ printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
/* Issue READ.LOG command */
xl_srb_cmd(dev, READ_LOG) ;
}
@@ -1595,7 +1595,7 @@ static void xl_arb_cmd(struct net_device *dev)
} /* Lan.change.status */
else if ( arb_cmd == RECEIVE_DATA) { /* Received.Data */
#if XL_DEBUG
- printk(KERN_INFO "Received.Data \n") ;
+ printk(KERN_INFO "Received.Data\n");
#endif
writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
@@ -1630,7 +1630,7 @@ static void xl_arb_cmd(struct net_device *dev)
xl_asb_cmd(dev) ;
} else {
- printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x \n",dev->name,arb_cmd) ;
+ printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x\n",dev->name,arb_cmd);
}
/* Acknowledge the arb interrupt */
@@ -1687,13 +1687,13 @@ static void xl_asb_bh(struct net_device *dev)
ret_code = readb(xl_mmio + MMIO_MACDATA) ;
switch (ret_code) {
case 0x01:
- printk(KERN_INFO "%s: ASB Command, unrecognized command code \n",dev->name) ;
+ printk(KERN_INFO "%s: ASB Command, unrecognized command code\n",dev->name);
break ;
case 0x26:
- printk(KERN_INFO "%s: ASB Command, unexpected receive buffer \n", dev->name) ;
+ printk(KERN_INFO "%s: ASB Command, unexpected receive buffer\n", dev->name);
break ;
case 0x40:
- printk(KERN_INFO "%s: ASB Command, Invalid Station ID \n", dev->name) ;
+ printk(KERN_INFO "%s: ASB Command, Invalid Station ID\n", dev->name);
break ;
}
xl_priv->asb_queued = 0 ;
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 1a09672..91e6c78 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -986,7 +986,7 @@ static void open_sap(unsigned char type, struct net_device *dev)
static void tok_set_multicast_list(struct net_device *dev)
{
struct tok_info *ti = netdev_priv(dev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
unsigned char address[4];
int i;
@@ -995,11 +995,11 @@ static void tok_set_multicast_list(struct net_device *dev)
/*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/
if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
address[0] = address[1] = address[2] = address[3] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- address[0] |= mclist->dmi_addr[2];
- address[1] |= mclist->dmi_addr[3];
- address[2] |= mclist->dmi_addr[4];
- address[3] |= mclist->dmi_addr[5];
+ netdev_for_each_mc_addr(ha, dev) {
+ address[0] |= ha->addr[2];
+ address[1] |= ha->addr[3];
+ address[2] |= ha->addr[4];
+ address[3] |= ha->addr[5];
}
SET_PAGE(ti->srb_page);
for (i = 0; i < sizeof(struct srb_set_funct_addr); i++)
@@ -1041,7 +1041,6 @@ static netdev_tx_t tok_send_packet(struct sk_buff *skb,
writew(ti->exsap_station_id, ti->srb + STATION_ID_OFST);
writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
spin_unlock_irqrestore(&(ti->lock), flags);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
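A sketch (not from the patch) of the trans_start cleanup seen here and in the drivers below: the stamp is dropped from the transmit path, and only the timeout handler keeps it, to push the watchdog out while the adapter resets. Function names and the placeholder bodies are illustrative.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>

static netdev_tx_t ex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... hand skb to the hardware ring; no trans_start update here ... */
	dev_kfree_skb(skb);	/* placeholder for real DMA queuing */
	return NETDEV_TX_OK;
}

static void ex_tx_timeout(struct net_device *dev)
{
	/* ... reset the adapter ... */
	dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_wake_queue(dev);
}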
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 7a5fbf5..5bd1407 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -358,7 +358,7 @@ static int __devinit streamer_init_one(struct pci_dev *pdev,
pcr |= PCI_COMMAND_SERR;
pci_write_config_word (pdev, PCI_COMMAND, pcr);
- printk("%s \n", version);
+ printk("%s\n", version);
printk("%s: %s. I/O at %hx, MMIO at %p, using irq %d\n",dev->name,
streamer_priv->streamer_card_name,
(unsigned int) dev->base_addr,
@@ -651,7 +651,7 @@ static int streamer_open(struct net_device *dev)
#if STREAMER_DEBUG
writew(readw(streamer_mmio + LAPWWO),
streamer_mmio + LAPA);
- printk("srb open request: \n");
+ printk("srb open request:\n");
for (i = 0; i < 16; i++) {
printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
}
@@ -701,7 +701,7 @@ static int streamer_open(struct net_device *dev)
if (srb_word != 0) {
if (srb_word == 0x07) {
if (!streamer_priv->streamer_ring_speed && open_finished) { /* Autosense , first time around */
- printk(KERN_WARNING "%s: Retrying at different ring speed \n",
+ printk(KERN_WARNING "%s: Retrying at different ring speed\n",
dev->name);
open_finished = 0;
} else {
@@ -717,7 +717,7 @@ static int streamer_open(struct net_device *dev)
((error_code & 0x0f) == 0x0d))
{
printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name);
- printk(KERN_WARNING "%s: Please try again with a specified ring speed \n", dev->name);
+ printk(KERN_WARNING "%s: Please try again with a specified ring speed\n", dev->name);
free_irq(dev->irq, dev);
return -EIO;
}
@@ -923,7 +923,7 @@ static void streamer_rx(struct net_device *dev)
if (rx_desc->status & 0x7E830000) { /* errors */
if (streamer_priv->streamer_message_level) {
- printk(KERN_WARNING "%s: Rx Error %x \n",
+ printk(KERN_WARNING "%s: Rx Error %x\n",
dev->name, rx_desc->status);
}
} else { /* received without errors */
@@ -936,7 +936,7 @@ static void streamer_rx(struct net_device *dev)
if (skb == NULL)
{
- printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name);
+ printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n", dev->name);
dev->stats.rx_dropped++;
} else { /* we allocated an skb OK */
if (buffer_cnt == 1) {
@@ -1267,7 +1267,7 @@ static void streamer_set_rx_mode(struct net_device *dev)
netdev_priv(dev);
__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
__u8 options = 0;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char dev_mc_address[5];
writel(streamer_priv->srb, streamer_mmio + LAPA);
@@ -1303,11 +1303,11 @@ static void streamer_set_rx_mode(struct net_device *dev)
writel(streamer_priv->srb,streamer_mmio+LAPA);
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- netdev_for_each_mc_addr(dmi, dev) {
- dev_mc_address[0] |= dmi->dmi_addr[2] ;
- dev_mc_address[1] |= dmi->dmi_addr[3] ;
- dev_mc_address[2] |= dmi->dmi_addr[4] ;
- dev_mc_address[3] |= dmi->dmi_addr[5] ;
+ netdev_for_each_mc_addr(ha, dev) {
+ dev_mc_address[0] |= ha->addr[2];
+ dev_mc_address[1] |= ha->addr[3];
+ dev_mc_address[2] |= ha->addr[4];
+ dev_mc_address[3] |= ha->addr[5];
}
writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC);
@@ -1364,7 +1364,7 @@ static void streamer_srb_bh(struct net_device *dev)
case 0x00:
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1392,13 +1392,13 @@ static void streamer_srb_bh(struct net_device *dev)
case 0x00:
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
break;
case 0x39: /* Must deal with this if individual multicast addresses used */
- printk(KERN_INFO "%s: Group address not found \n", dev->name);
+ printk(KERN_INFO "%s: Group address not found\n", dev->name);
break;
default:
break;
@@ -1414,10 +1414,10 @@ static void streamer_srb_bh(struct net_device *dev)
switch (srb_word) {
case 0x00:
if (streamer_priv->streamer_message_level)
- printk(KERN_INFO "%s: Functional Address Mask Set \n", dev->name);
+ printk(KERN_INFO "%s: Functional Address Mask Set\n", dev->name);
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1448,7 +1448,7 @@ static void streamer_srb_bh(struct net_device *dev)
}
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1467,7 +1467,7 @@ static void streamer_srb_bh(struct net_device *dev)
printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name);
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1556,7 +1556,7 @@ static void streamer_arb_cmd(struct net_device *dev)
(streamer_mmio + LAPDINC)));
}
- printk("next %04x, fs %02x, len %04x \n", next,
+ printk("next %04x, fs %02x, len %04x\n", next,
status, len);
}
#endif
@@ -1593,7 +1593,7 @@ static void streamer_arb_cmd(struct net_device *dev)
mac_frame->protocol = tr_type_trans(mac_frame, dev);
#if STREAMER_NETWORK_MONITOR
- printk(KERN_WARNING "%s: Received MAC Frame, details: \n",
+ printk(KERN_WARNING "%s: Received MAC Frame, details:\n",
dev->name);
mac_hdr = tr_hdr(mac_frame);
printk(KERN_WARNING
@@ -1669,15 +1669,15 @@ drop_frame:
/* If serious error */
if (streamer_priv->streamer_message_level) {
if (lan_status_diff & LSC_SIG_LOSS)
- printk(KERN_WARNING "%s: No receive signal detected \n", dev->name);
+ printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
if (lan_status_diff & LSC_HARD_ERR)
- printk(KERN_INFO "%s: Beaconing \n", dev->name);
+ printk(KERN_INFO "%s: Beaconing\n", dev->name);
if (lan_status_diff & LSC_SOFT_ERR)
- printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n", dev->name);
+ printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name);
if (lan_status_diff & LSC_SS)
- printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
+ printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name);
if (lan_status_diff & LSC_FDX_MODE)
@@ -1686,7 +1686,7 @@ drop_frame:
if (lan_status_diff & LSC_CO) {
if (streamer_priv->streamer_message_level)
- printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
+ printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
/* Issue READ.LOG command */
@@ -1716,7 +1716,7 @@ drop_frame:
streamer_priv->streamer_lan_status = lan_status;
} /* Lan.change.status */
else
- printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
}
static void streamer_asb_bh(struct net_device *dev)
@@ -1747,10 +1747,10 @@ static void streamer_asb_bh(struct net_device *dev)
rc=ntohs(readw(streamer_mmio+LAPD)) >> 8;
switch (rc) {
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
break;
case 0x26:
- printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
break;
case 0xFF:
/* Valid response, everything should be ok again */
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 53f631e..785ad1a 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -109,7 +109,6 @@ static void madgemc_sifwriteb(struct net_device *dev, unsigned short val, unsign
SIFWRITEB(val, reg);
madgemc_setregpage(dev, 0);
}
- return;
}
/*
@@ -140,7 +139,6 @@ static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsign
SIFWRITEW(val, reg);
madgemc_setregpage(dev, 0);
}
- return;
}
static struct net_device_ops madgemc_netdev_ops __read_mostly;
@@ -505,8 +503,6 @@ static void madgemc_setregpage(struct net_device *dev, int page)
dev->base_addr + MC_CONTROL_REG1);
}
reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
-
- return;
}
/*
@@ -527,8 +523,6 @@ static void madgemc_setsifsel(struct net_device *dev, int val)
dev->base_addr + MC_CONTROL_REG0);
}
reg0 = inb(dev->base_addr + MC_CONTROL_REG0);
-
- return;
}
/*
@@ -550,8 +544,6 @@ static void madgemc_setint(struct net_device *dev, int val)
outb(reg1 | MC_CONTROL_REG1_SINTEN,
dev->base_addr + MC_CONTROL_REG1);
}
-
- return;
}
/*
@@ -594,8 +586,6 @@ static void madgemc_chipset_close(struct net_device *dev)
madgemc_setint(dev, 0);
/* unmap SIF registers */
madgemc_setsifsel(dev, 0);
-
- return;
}
/*
@@ -656,8 +646,6 @@ static void madgemc_read_rom(struct net_device *dev, struct card_info *card)
/* Restore original register values */
outb(reg0, ioaddr + MC_CONTROL_REG0);
outb(reg1, ioaddr + MC_CONTROL_REG1);
-
- return;
}
static int madgemc_open(struct net_device *dev)
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 3a25e04..3d2fbe6 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -302,7 +302,7 @@ static int olympic_init(struct net_device *dev)
olympic_priv=netdev_priv(dev);
olympic_mmio=olympic_priv->olympic_mmio;
- printk("%s \n", version);
+ printk("%s\n", version);
printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
@@ -468,7 +468,7 @@ static int olympic_open(struct net_device *dev)
#if OLYMPIC_DEBUG
printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
- printk("Before the open command \n");
+ printk("Before the open command\n");
#endif
do {
memset_io(init_srb,0,SRB_COMMAND_SIZE);
@@ -520,7 +520,7 @@ static int olympic_open(struct net_device *dev)
break;
}
if (time_after(jiffies, t + 10*HZ)) {
- printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ;
+ printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
olympic_priv->srb_queued=0;
break ;
}
@@ -549,7 +549,7 @@ static int olympic_open(struct net_device *dev)
break;
case 0x07:
if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
- printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name);
+ printk(KERN_WARNING "%s: Retrying at different ring speed\n", dev->name);
open_finished = 0 ;
continue;
}
@@ -558,7 +558,7 @@ static int olympic_open(struct net_device *dev)
if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
- printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name);
+ printk(KERN_WARNING "%s: Please try again with a specified ring speed\n",dev->name);
} else {
printk(KERN_WARNING "%s: %s - %s\n", dev->name,
open_maj_error[(err & 0xf0) >> 4],
@@ -759,7 +759,7 @@ static void olympic_rx(struct net_device *dev)
olympic_priv->rx_status_last_received++ ;
olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
#if OLYMPIC_DEBUG
- printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
+ printk("rx status: %x rx len: %x\n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
#endif
length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
@@ -774,15 +774,15 @@ static void olympic_rx(struct net_device *dev)
if (l_status_buffercnt & 0x3B000000) {
if (olympic_priv->olympic_message_level) {
if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */
- printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
+ printk(KERN_WARNING "%s: Rx Frame Truncated\n",dev->name);
if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
- printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
+ printk(KERN_WARNING "%s: Rx Frame Receive overrun\n",dev->name);
if (l_status_buffercnt & (1<<27)) /* No receive buffers */
- printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
+ printk(KERN_WARNING "%s: No receive buffers\n",dev->name);
if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
- printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
+ printk(KERN_WARNING "%s: Receive frame error detect\n",dev->name);
if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
- printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
+ printk(KERN_WARNING "%s: Received Error Detect\n",dev->name);
}
olympic_priv->rx_ring_last_received += i ;
olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
@@ -796,7 +796,7 @@ static void olympic_rx(struct net_device *dev)
}
if (skb == NULL) {
- printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
+ printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n",dev->name) ;
dev->stats.rx_dropped++;
/* Update counters even though we don't transfer the frame */
olympic_priv->rx_ring_last_received += i ;
@@ -1101,7 +1101,7 @@ static int olympic_close(struct net_device *dev)
}
if (t == 0) {
- printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ;
+ printk(KERN_WARNING "%s: SRB timed out. May not be fatal.\n",dev->name);
}
olympic_priv->srb_queued=0;
}
@@ -1139,7 +1139,7 @@ static void olympic_set_rx_mode(struct net_device *dev)
u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
u8 options = 0;
u8 __iomem *srb;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char dev_mc_address[4] ;
writel(olympic_priv->srb,olympic_mmio+LAPA);
@@ -1177,11 +1177,11 @@ static void olympic_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- netdev_for_each_mc_addr(dmi, dev) {
- dev_mc_address[0] |= dmi->dmi_addr[2] ;
- dev_mc_address[1] |= dmi->dmi_addr[3] ;
- dev_mc_address[2] |= dmi->dmi_addr[4] ;
- dev_mc_address[3] |= dmi->dmi_addr[5] ;
+ netdev_for_each_mc_addr(ha, dev) {
+ dev_mc_address[0] |= ha->addr[2];
+ dev_mc_address[1] |= ha->addr[3];
+ dev_mc_address[2] |= ha->addr[4];
+ dev_mc_address[3] |= ha->addr[5];
}
writeb(SRB_SET_FUNC_ADDRESS,srb+0);
@@ -1239,7 +1239,7 @@ static void olympic_srb_bh(struct net_device *dev)
case 0x00:
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
@@ -1266,13 +1266,13 @@ static void olympic_srb_bh(struct net_device *dev)
case 0x00:
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
break ;
case 0x39: /* Must deal with this if individual multicast addresses used */
- printk(KERN_INFO "%s: Group address not found \n",dev->name);
+ printk(KERN_INFO "%s: Group address not found\n",dev->name);
break ;
default:
break ;
@@ -1287,10 +1287,10 @@ static void olympic_srb_bh(struct net_device *dev)
switch (readb(srb+2)) {
case 0x00:
if (olympic_priv->olympic_message_level)
- printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ;
+ printk(KERN_INFO "%s: Functional Address Mask Set\n",dev->name);
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1310,7 +1310,7 @@ static void olympic_srb_bh(struct net_device *dev)
printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1328,7 +1328,7 @@ static void olympic_srb_bh(struct net_device *dev)
printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1404,7 +1404,7 @@ static void olympic_arb_cmd(struct net_device *dev)
printk("Loc %d = %02x\n",i,readb(frame_data + i));
}
- printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
+ printk("next %04x, fs %02x, len %04x\n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
}
#endif
mac_frame = dev_alloc_skb(frame_len) ;
@@ -1426,7 +1426,7 @@ static void olympic_arb_cmd(struct net_device *dev)
if (olympic_priv->olympic_network_monitor) {
struct trh_hdr *mac_hdr;
- printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name);
+ printk(KERN_WARNING "%s: Received MAC Frame, details:\n",dev->name);
mac_hdr = tr_hdr(mac_frame);
printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n",
dev->name, mac_hdr->daddr);
@@ -1489,20 +1489,20 @@ drop_frame:
writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
netif_stop_queue(dev);
olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
- printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
+ printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
} /* If serious error */
if (olympic_priv->olympic_message_level) {
if (lan_status_diff & LSC_SIG_LOSS)
- printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
+ printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
if (lan_status_diff & LSC_HARD_ERR)
- printk(KERN_INFO "%s: Beaconing \n",dev->name);
+ printk(KERN_INFO "%s: Beaconing\n",dev->name);
if (lan_status_diff & LSC_SOFT_ERR)
- printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
+ printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
- printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
+ printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
if (lan_status_diff & LSC_FDX_MODE)
@@ -1512,7 +1512,7 @@ drop_frame:
if (lan_status_diff & LSC_CO) {
if (olympic_priv->olympic_message_level)
- printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
+ printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
/* Issue READ.LOG command */
@@ -1551,7 +1551,7 @@ drop_frame:
} /* Lan.change.status */
else
- printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
}
static void olympic_asb_bh(struct net_device *dev)
@@ -1578,10 +1578,10 @@ static void olympic_asb_bh(struct net_device *dev)
if (olympic_priv->asb_queued == 2) {
switch (readb(asb_block+2)) {
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
break ;
case 0x26:
- printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
break ;
case 0xFF:
/* Valid response, everything should be ok again */
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index e405601..0929fff 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -4562,7 +4562,7 @@ static void smctr_timeout(struct net_device *dev)
* fake transmission time and go on trying. Our own timeout
* routine is in sktr_timer_chk()
*/
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -5147,8 +5147,6 @@ static void smctr_set_multicast_list(struct net_device *dev)
{
if(smctr_debug > 10)
printk(KERN_DEBUG "%s: smctr_set_multicast_list\n", dev->name);
-
- return;
}
static int smctr_set_page(struct net_device *dev, __u8 *buf)
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 8b508c9..435ef7d 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -325,8 +325,6 @@ static void tms380tr_timer_end_wait(unsigned long data)
tp->Sleeping = 0;
wake_up_interruptible(&tp->wait_for_tok_int);
}
-
- return;
}
/*
@@ -460,8 +458,6 @@ static void tms380tr_init_net_local(struct net_device *dev)
tp->RplHead = &tp->Rpl[0];
tp->RplTail = &tp->Rpl[RPL_NUM-1];
tp->RplTail->Status = (RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ);
-
- return;
}
/*
@@ -481,8 +477,6 @@ static void tms380tr_init_ipb(struct net_local *tp)
tp->ipb.DMA_Abort_Thrhld = DMA_RETRIES;
tp->ipb.SCB_Addr = 0;
tp->ipb.SSB_Addr = 0;
-
- return;
}
/*
@@ -527,8 +521,6 @@ static void tms380tr_init_opb(struct net_device *dev)
tp->ocpl.ProdIDAddr[0] = LOWORD(Addr);
tp->ocpl.ProdIDAddr[1] = HIWORD(Addr);
-
- return;
}
/*
@@ -543,8 +535,6 @@ static void tms380tr_open_adapter(struct net_device *dev)
tp->OpenCommandIssued = 1;
tms380tr_exec_cmd(dev, OC_OPEN);
-
- return;
}
/*
@@ -554,8 +544,6 @@ static void tms380tr_open_adapter(struct net_device *dev)
static void tms380tr_disable_interrupts(struct net_device *dev)
{
SIFWRITEB(0, SIFACL);
-
- return;
}
/*
@@ -565,8 +553,6 @@ static void tms380tr_disable_interrupts(struct net_device *dev)
static void tms380tr_enable_interrupts(struct net_device *dev)
{
SIFWRITEB(ACL_SINTEN, SIFACL);
-
- return;
}
/*
@@ -578,8 +564,6 @@ static void tms380tr_exec_cmd(struct net_device *dev, unsigned short Command)
tp->CMDqueue |= Command;
tms380tr_chk_outstanding_cmds(dev);
-
- return;
}
static void tms380tr_timeout(struct net_device *dev)
@@ -592,7 +576,7 @@ static void tms380tr_timeout(struct net_device *dev)
* fake transmission time and go on trying. Our own timeout
* routine is in tms380tr_timer_chk()
*/
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -712,8 +696,6 @@ static void tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr)
SRBit = frame[8] & 0x80;
memcpy(&frame[8], hw_addr, 6);
frame[8] |= SRBit;
-
- return;
}
/*
@@ -743,8 +725,6 @@ static void tms380tr_timer_chk(unsigned long data)
return;
tp->ReOpenInProgress = 1;
tms380tr_open_adapter(dev);
-
- return;
}
/*
@@ -863,8 +843,6 @@ static void tms380tr_reset_interrupt(struct net_device *dev)
* and clear STS_SYSTEM_IRQ bit: enable adapter for further interrupts.
*/
tms380tr_exec_sifcmd(dev, CMD_SSB_CLEAR | CMD_CLEAR_SYSTEM_IRQ);
-
- return;
}
/*
@@ -1119,8 +1097,6 @@ static void tms380tr_cmd_status_irq(struct net_device *dev)
tp->MacStat.frequency_errors += tp->errorlogtable.Frequency_Error;
tp->MacStat.internal_errors += tp->errorlogtable.Internal_Error;
}
-
- return;
}
/*
@@ -1211,17 +1187,17 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
}
else
{
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
((char *)(&tp->ocpl.FunctAddr))[0] |=
- mclist->dmi_addr[2];
+ ha->addr[2];
((char *)(&tp->ocpl.FunctAddr))[1] |=
- mclist->dmi_addr[3];
+ ha->addr[3];
((char *)(&tp->ocpl.FunctAddr))[2] |=
- mclist->dmi_addr[4];
+ ha->addr[4];
((char *)(&tp->ocpl.FunctAddr))[3] |=
- mclist->dmi_addr[5];
+ ha->addr[5];
}
}
tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR);
@@ -1229,7 +1205,6 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
tp->ocpl.OPENOptions = OpenOptions;
tms380tr_exec_cmd(dev, OC_MODIFY_OPEN_PARMS);
- return;
}
/*
@@ -1247,7 +1222,6 @@ void tms380tr_wait(unsigned long time)
#else
udelay(time);
#endif
- return;
}
/*
@@ -1266,8 +1240,6 @@ static void tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue
SifStsValue = SIFREADW(SIFSTS);
} while((SifStsValue & CMD_INTERRUPT_ADAPTER) && loop_counter--);
SIFWRITEW(cmd, SIFCMD);
-
- return;
}
/*
@@ -1390,7 +1362,7 @@ static int tms380tr_bringup_diags(struct net_device *dev)
Status &= STS_MASK;
if(tms380tr_debug > 3)
- printk(KERN_DEBUG " %04X \n", Status);
+ printk(KERN_DEBUG " %04X\n", Status);
/* BUD successfully completed */
if(Status == STS_INITIALIZE)
return (1);
@@ -1700,8 +1672,6 @@ static void tms380tr_chk_outstanding_cmds(struct net_device *dev)
/* Execute SCB and generate IRQ when done. */
tms380tr_exec_sifcmd(dev, CMD_EXECUTE | CMD_SCB_REQUEST);
-
- return;
}
/*
@@ -1774,8 +1744,6 @@ static void tms380tr_ring_status_irq(struct net_device *dev)
tp->AdapterOpenFlag = 0;
tms380tr_open_adapter(dev);
}
-
- return;
}
/*
@@ -1846,7 +1814,7 @@ static void tms380tr_chk_irq(struct net_device *dev)
break;
case DMA_WRITE_ABORT:
- printk(KERN_INFO "%s: DMA write operation aborted: \n",
+ printk(KERN_INFO "%s: DMA write operation aborted:\n",
dev->name);
switch (AdapterCheckBlock[1])
{
@@ -1932,8 +1900,6 @@ static void tms380tr_chk_irq(struct net_device *dev)
/* Restart of firmware successful */
tp->AdapterOpenFlag = 1;
}
-
- return;
}
/*
@@ -1988,8 +1954,6 @@ static void tms380tr_read_ram(struct net_device *dev, unsigned char *Data,
/* Restore original values */
SIFWRITEW(old_sifadx, SIFADX);
SIFWRITEW(old_sifadr, SIFADR);
-
- return;
}
/*
@@ -2021,8 +1985,6 @@ static void tms380tr_cancel_tx_queue(struct net_local* tp)
dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(tpl->Skb);
}
-
- return;
}
/*
@@ -2094,7 +2056,6 @@ static void tms380tr_tx_status_irq(struct net_device *dev)
if(!tp->TplFree->NextTPLPtr->BusyFlag)
netif_wake_queue(dev);
- return;
}
/*
@@ -2255,8 +2216,6 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
/* Inform adapter about RPL valid. */
tms380tr_exec_sifcmd(dev, CMD_RX_VALID);
}
-
- return;
}
/*
@@ -2269,8 +2228,6 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status)
{
rpl->Status = Status;
-
- return;
}
/*
@@ -2287,8 +2244,6 @@ static void tms380tr_update_rcv_stats(struct net_local *tp, unsigned char DataPt
/* Test functional bit */
if(DataPtr[2] & GROUP_BIT)
tp->MacStat.multicast++;
-
- return;
}
static int tms380tr_set_mac_address(struct net_device *dev, void *addr)
@@ -2318,8 +2273,6 @@ static void tms380tr_dump(unsigned char *Data, int length)
Data[j+0],Data[j+1],Data[j+2],Data[j+3],
Data[j+4],Data[j+5],Data[j+6],Data[j+7]);
}
-
- return;
}
#endif
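Throughout this series the old struct dev_mc_list walk is replaced by netdev_for_each_mc_addr() over struct netdev_hw_addr, with the address bytes read from ha->addr instead of mclist->dmi_addr. A minimal sketch of the new pattern for a CRC-hash filter, assuming a hypothetical set_filter_bit() helper:

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/crc32.h>

    static void example_set_multicast(struct net_device *dev)
    {
            struct netdev_hw_addr *ha;      /* was: struct dev_mc_list *mclist */

            netdev_for_each_mc_addr(ha, dev) {
                    /* the six address bytes are ha->addr, not mclist->dmi_addr */
                    u32 bit = ether_crc(ETH_ALEN, ha->addr) >> 26;

                    set_filter_bit(dev, bit);       /* hypothetical helper */
            }
    }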
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 5b1fbb3..a03730b 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -263,7 +263,7 @@ static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
return;
udelay(10);
}
- printk(KERN_ERR "%s function time out \n", __func__);
+ printk(KERN_ERR "%s function time out\n", __func__);
}
static int mii_speed(struct mii_if_info *mii)
@@ -704,8 +704,8 @@ static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
if (i == 0) {
data->txring[tx].buf0 = dma_map_single(NULL, skb->data,
- skb->len - skb->data_len, DMA_TO_DEVICE);
- data->txring[tx].len = skb->len - skb->data_len;
+ skb_headlen(skb), DMA_TO_DEVICE);
+ data->txring[tx].len = skb_headlen(skb);
misc |= TSI108_TX_SOF;
} else {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1056,7 +1056,7 @@ static void tsi108_stop_ethernet(struct net_device *dev)
return;
udelay(10);
}
- printk(KERN_ERR "%s function time out \n", __func__);
+ printk(KERN_ERR "%s function time out\n", __func__);
}
static void tsi108_reset_ether(struct tsi108_prv_data * data)
@@ -1186,15 +1186,15 @@ static void tsi108_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
int i;
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
memset(data->mc_hash, 0, sizeof(data->mc_hash));
- netdev_for_each_mc_addr(mc, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
u32 hash, crc;
- crc = ether_crc(6, mc->dmi_addr);
+ crc = ether_crc(6, ha->addr);
hash = crc >> 23;
__set_bit(hash, &data->mc_hash[0]);
}
@@ -1233,7 +1233,7 @@ static void tsi108_init_phy(struct net_device *dev)
udelay(10);
}
if (i == 0)
- printk(KERN_ERR "%s function time out \n", __func__);
+ printk(KERN_ERR "%s function time out\n", __func__);
if (data->phy_type == TSI108_PHY_BCM54XX) {
tsi108_write_mii(data, 0x09, 0x0300);
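The tsi108 transmit path above maps only the linear part of the skb for its first descriptor; skb_headlen() is the standard helper for exactly that length (skb->len minus the paged bytes in skb->data_len), so the open-coded subtraction goes away. Roughly, assuming a device pointer d suitable for DMA mapping:

    /* first descriptor covers the linear (header) part only;
     * paged fragments are mapped separately afterwards */
    dma_addr_t buf = dma_map_single(d, skb->data, skb_headlen(skb),
                                    DMA_TO_DEVICE);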
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 19cafc2..c0e7000 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -654,7 +654,6 @@ static netdev_tx_t de_start_xmit (struct sk_buff *skb,
/* Trigger an immediate transmit demand. */
dw32(TxPoll, NormalTxPoll);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -671,15 +670,15 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
u16 hash_table[32];
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- netdev_for_each_mc_addr(mclist, dev) {
- int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
+ netdev_for_each_mc_addr(ha, dev) {
+ int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
set_bit_le(index, hash_table);
}
@@ -700,13 +699,13 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- netdev_for_each_mc_addr(mclist, dev) {
- eaddrs = (u16 *)mclist->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (u16 *) ha->addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 09b5719..75a64c8 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1337,7 +1337,7 @@ de4x5_open(struct net_device *dev)
}
lp->interrupt = UNMASK_INTERRUPTS;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
START_DE4X5;
@@ -1507,7 +1507,6 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
- dev->trans_start = jiffies;
if (TX_BUFFS_AVAIL) {
netif_start_queue(dev); /* Another pkt may be queued */
@@ -1884,8 +1883,6 @@ de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
if (lp->pktStats.bins[0] == 0) { /* Reset counters */
memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
}
-
- return;
}
/*
@@ -1937,7 +1934,7 @@ set_multicast_list(struct net_device *dev)
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
}
}
@@ -1951,7 +1948,7 @@ static void
SetMulticastFilter(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u_long iobase = dev->base_addr;
int i, bit, byte;
u16 hashcode;
@@ -1966,8 +1963,8 @@ SetMulticastFilter(struct net_device *dev)
if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
omr |= OMR_PM; /* Pass all multicasts */
} else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc_le(ETH_ALEN, addrs);
hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
@@ -1983,8 +1980,8 @@ SetMulticastFilter(struct net_device *dev)
}
}
} else { /* Perfect filtering */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
for (i=0; i<ETH_ALEN; i++) {
*(pa + (i&1)) = *addrs++;
if (i & 0x01) pa += 4;
@@ -1992,8 +1989,6 @@ SetMulticastFilter(struct net_device *dev)
}
}
outl(omr, DE4X5_OMR);
-
- return;
}
#ifdef CONFIG_EISA
@@ -2188,8 +2183,6 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
return;
}
}
-
- return;
}
/*
@@ -3292,8 +3285,6 @@ de4x5_init_connection(struct net_device *dev)
outl(POLL_DEMAND, DE4X5_TPD);
netif_wake_queue(dev);
-
- return;
}
/*
@@ -3665,8 +3656,6 @@ de4x5_free_rx_buffs(struct net_device *dev)
lp->rx_ring[i].status = 0;
lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */
}
-
- return;
}
static void
@@ -3709,8 +3698,6 @@ de4x5_save_skbs(struct net_device *dev)
lp->cache.save_cnt++;
START_DE4X5;
}
-
- return;
}
static void
@@ -3742,8 +3729,6 @@ de4x5_rst_desc_ring(struct net_device *dev)
lp->cache.save_cnt--;
START_DE4X5;
}
-
- return;
}
static void
@@ -3772,8 +3757,6 @@ de4x5_cache_state(struct net_device *dev, int flag)
}
break;
}
-
- return;
}
static void
@@ -3846,8 +3829,6 @@ de4x5_setup_intr(struct net_device *dev)
outl(sts, DE4X5_STS);
ENABLE_IRQs;
}
-
- return;
}
/*
@@ -3880,8 +3861,6 @@ reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
outl(csr13, DE4X5_SICR);
mdelay(10);
-
- return;
}
/*
@@ -3902,8 +3881,6 @@ create_packet(struct net_device *dev, char *frame, int len)
*buf++ = 0; /* Packet length (2 bytes) */
*buf++ = 1;
-
- return;
}
/*
@@ -4007,8 +3984,6 @@ DevicePresent(struct net_device *dev, u_long aprom_addr)
}
de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
}
-
- return;
}
/*
@@ -4046,8 +4021,6 @@ enet_addr_rst(u_long aprom_addr)
}
}
}
-
- return;
}
/*
@@ -4187,8 +4160,6 @@ srom_repair(struct net_device *dev, int card)
lp->useSROM = true;
break;
}
-
- return;
}
/*
@@ -4262,8 +4233,6 @@ srom_latch(u_int command, u_long addr)
sendto_srom(command, addr);
sendto_srom(command | DT_CLK, addr);
sendto_srom(command, addr);
-
- return;
}
static void
@@ -4272,8 +4241,6 @@ srom_command(u_int command, u_long addr)
srom_latch(command, addr);
srom_latch(command, addr);
srom_latch((command & 0x0000ff00) | DT_CS, addr);
-
- return;
}
static void
@@ -4288,8 +4255,6 @@ srom_address(u_int command, u_long addr, u_char offset)
udelay(1);
i = (getfrom_srom(addr) >> 3) & 0x01;
-
- return;
}
static short
@@ -4323,8 +4288,6 @@ srom_busy(u_int command, u_long addr)
}
sendto_srom(command & 0x0000ff00, addr);
-
- return;
}
*/
@@ -4333,8 +4296,6 @@ sendto_srom(u_int command, u_long addr)
{
outl(command, addr);
udelay(1);
-
- return;
}
static int
@@ -4433,8 +4394,6 @@ srom_init(struct net_device *dev)
p += ((*p & BLOCK_LEN) + 1);
}
}
-
- return;
}
/*
@@ -4463,8 +4422,6 @@ srom_exec(struct net_device *dev, u_char *p)
outl(lp->cache.csr14, DE4X5_STRR);
outl(lp->cache.csr13, DE4X5_SICR);
}
-
- return;
}
/*
@@ -4889,8 +4846,6 @@ mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
data = mii_swap(data, 16); /* Swap data bit ordering */
mii_wdata(data, 16, ioaddr); /* Write data */
-
- return;
}
static int
@@ -4916,8 +4871,6 @@ mii_wdata(int data, int len, u_long ioaddr)
sendto_mii(MII_MWR | MII_WR, data, ioaddr);
data >>= 1;
}
-
- return;
}
static void
@@ -4930,8 +4883,6 @@ mii_address(u_char addr, u_long ioaddr)
sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
addr >>= 1;
}
-
- return;
}
static void
@@ -4943,8 +4894,6 @@ mii_ta(u_long rw, u_long ioaddr)
} else {
getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
}
-
- return;
}
static int
@@ -4971,8 +4920,6 @@ sendto_mii(u32 command, int data, u_long ioaddr)
udelay(1);
outl(command | MII_MDC | j, ioaddr);
udelay(1);
-
- return;
}
static int
@@ -5077,7 +5024,7 @@ mii_get_phy(struct net_device *dev)
lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
lp->mii_cnt++;
lp->active++;
- printk("%s: Using generic MII device control. If the board doesn't operate, \nplease mail the following dump to the author:\n", dev->name);
+ printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
j = de4x5_debug;
de4x5_debug |= DEBUG_MII;
de4x5_dbg_mii(dev, k);
@@ -5186,8 +5133,6 @@ gep_wr(s32 data, struct net_device *dev)
} else if ((lp->chipset & ~0x00ff) == DC2114x) {
outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
}
-
- return;
}
static int
@@ -5247,8 +5192,6 @@ yawn(struct net_device *dev, int state)
break;
}
}
-
- return;
}
static void
@@ -5290,8 +5233,6 @@ de4x5_parse_params(struct net_device *dev)
}
*q = t;
}
-
- return;
}
static void
@@ -5337,12 +5278,10 @@ de4x5_dbg_open(struct net_device *dev)
}
}
printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
- printk("Ring size: \nRX: %d\nTX: %d\n",
+ printk("Ring size:\nRX: %d\nTX: %d\n",
(short)lp->rxRingSize,
(short)lp->txRingSize);
}
-
- return;
}
static void
@@ -5369,8 +5308,6 @@ de4x5_dbg_mii(struct net_device *dev, int k)
printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
}
}
-
- return;
}
static void
@@ -5395,8 +5332,6 @@ de4x5_dbg_media(struct net_device *dev)
}
lp->c_media = lp->media;
}
-
- return;
}
static void
@@ -5417,8 +5352,6 @@ de4x5_dbg_srom(struct de4x5_srom *p)
printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
}
}
-
- return;
}
static void
@@ -5440,8 +5373,6 @@ de4x5_dbg_rx(struct sk_buff *skb, int len)
printk("\n");
}
}
-
- return;
}
/*
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 9568156..29e6c63 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -1118,7 +1118,6 @@ static void dmfe_ethtool_get_wol(struct net_device *dev,
wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
wolinfo->wolopts = db->wol_mode;
- return;
}
@@ -1180,11 +1179,11 @@ static void dmfe_timer(unsigned long data)
/* TX polling kick monitor */
if ( db->tx_packet_cnt &&
- time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
+ time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
outl(0x1, dev->base_addr + DCR1); /* Tx polling again */
/* TX Timeout */
- if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
+ if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
db->reset_TXtimeout++;
db->wait_reset = 1;
dev_warn(&dev->dev, "Tx timeout - resetting\n");
@@ -1453,7 +1452,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
static void dm9132_id_table(struct DEVICE *dev)
{
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
u16 * addrptr;
unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
u32 hash_val;
@@ -1477,8 +1476,8 @@ static void dm9132_id_table(struct DEVICE *dev)
hash_table[3] = 0x8000;
/* the multicast address in Hash Table : 64 bits */
- netdev_for_each_mc_addr(mcptr, dev) {
- hash_val = cal_CRC((char *) mcptr->dmi_addr, 6, 0) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f;
hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
}
@@ -1496,7 +1495,7 @@ static void dm9132_id_table(struct DEVICE *dev)
static void send_filter_frame(struct DEVICE *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
struct tx_desc *txptr;
u16 * addrptr;
u32 * suptr;
@@ -1519,8 +1518,8 @@ static void send_filter_frame(struct DEVICE *dev)
*suptr++ = 0xffff;
/* fit the multicast address */
- netdev_for_each_mc_addr(mcptr, dev) {
- addrptr = (u16 *) mcptr->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrptr = (u16 *) ha->addr;
*suptr++ = addrptr[0];
*suptr++ = addrptr[1];
*suptr++ = addrptr[2];
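dmfe here, and uli526x further down, also stop reading dev->trans_start in their watchdog timers and call dev_trans_start() instead, which returns the most recent per-queue transmit timestamp now that the core no longer updates dev->trans_start on every packet. A sketch of the pattern, with MY_TX_KICK standing in for the driver's own timeout constant and example_kick_tx() a hypothetical restart helper:

    #include <linux/jiffies.h>
    #include <linux/netdevice.h>

    static void example_tx_watchdog(unsigned long data)
    {
            struct net_device *dev = (struct net_device *)data;

            if (time_after(jiffies, dev_trans_start(dev) + MY_TX_KICK))
                    example_kick_tx(dev);
    }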
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index 68b170a..a0c770e 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -396,8 +396,6 @@ void tulip_select_media(struct net_device *dev, int startup)
tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
mdelay(1);
-
- return;
}
/*
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index 966efa1..a63e64b 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -67,7 +67,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
*/
if (tulip_media_cap[dev->if_port] & MediaIsMII)
return;
- if (! tp->nwayset || time_after(jiffies, dev->trans_start + 1*HZ)) {
+ if (! tp->nwayset || time_after(jiffies, dev_trans_start(dev) + 1*HZ)) {
tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff);
iowrite32(tp->csr6, ioaddr + CSR6);
iowrite32(0x30, ioaddr + CSR12);
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 3810db9..254643e 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -605,7 +605,7 @@ static void tulip_tx_timeout(struct net_device *dev)
out_unlock:
spin_unlock_irqrestore (&tp->lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -707,8 +707,6 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&tp->lock, flags);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -991,15 +989,15 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
u16 hash_table[32];
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- netdev_for_each_mc_addr(mclist, dev) {
- int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
+ netdev_for_each_mc_addr(ha, dev) {
+ int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
set_bit_le(index, hash_table);
}
@@ -1019,13 +1017,13 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- netdev_for_each_mc_addr(mclist, dev) {
- eaddrs = (u16 *)mclist->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (u16 *) ha->addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
@@ -1062,7 +1060,7 @@ static void set_rx_mode(struct net_device *dev)
} else if (tp->flags & MC_HASH_ONLY) {
/* Some work-alikes have only a 64-entry hash filter table. */
/* Should verify correctness on big-endian/__powerpc__ */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
if (netdev_mc_count(dev) > 64) {
/* Arbitrary non-effective limit. */
tp->csr6 |= AcceptAllMulticast;
@@ -1070,18 +1068,21 @@ static void set_rx_mode(struct net_device *dev)
} else {
u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
int filterbit;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (tp->flags & COMET_MAC_ADDR)
- filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
+ filterbit = ether_crc_le(ETH_ALEN,
+ ha->addr);
else
- filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ filterbit = ether_crc(ETH_ALEN,
+ ha->addr) >> 26;
filterbit &= 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
if (tulip_debug > 2)
dev_info(&dev->dev,
"Added filter for %pM %08x bit %d\n",
- mclist->dmi_addr,
- ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
+ ha->addr,
+ ether_crc(ETH_ALEN, ha->addr),
+ filterbit);
}
if (mc_filter[0] == tp->mc_filter[0] &&
mc_filter[1] == tp->mc_filter[1])
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index a589dd3..96de582 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -1040,11 +1040,11 @@ static void uli526x_timer(unsigned long data)
/* TX polling kick monitor */
if ( db->tx_packet_cnt &&
- time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) {
+ time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) {
outl(0x1, dev->base_addr + DCR1); // Tx polling again
// TX Timeout
- if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) {
+ if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
db->reset_TXtimeout++;
db->wait_reset = 1;
printk( "%s: Tx timeout - resetting\n",
@@ -1393,7 +1393,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
static void send_filter_frame(struct net_device *dev, int mc_cnt)
{
struct uli526x_board_info *db = netdev_priv(dev);
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
struct tx_desc *txptr;
u16 * addrptr;
u32 * suptr;
@@ -1416,8 +1416,8 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
*suptr++ = 0xffff << FLT_SHIFT;
/* fit the multicast address */
- netdev_for_each_mc_addr(mcptr, dev) {
- addrptr = (u16 *) mcptr->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrptr = (u16 *) ha->addr;
*suptr++ = addrptr[0] << FLT_SHIFT;
*suptr++ = addrptr[1] << FLT_SHIFT;
*suptr++ = addrptr[2] << FLT_SHIFT;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 98dbf6c..608b279 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -626,7 +626,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
mdio_delay(mdio_addr);
}
- return;
}
@@ -969,9 +968,8 @@ static void tx_timeout(struct net_device *dev)
enable_irq(dev->irq);
netif_wake_queue(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
np->stats.tx_errors++;
- return;
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
@@ -1055,8 +1053,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
}
spin_unlock_irq(&np->lock);
- dev->trans_start = jiffies;
-
if (debug > 4) {
printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d\n",
dev->name, np->cur_tx, entry);
@@ -1366,13 +1362,15 @@ static u32 __set_rx_mode(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
- filterbit &= 0x3f;
- mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
+ netdev_for_each_mc_addr(ha, dev) {
+ int filbit;
+
+ filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
+ filbit &= 0x3f;
+ mc_filter[filbit >> 5] |= 1 << (filbit & 31);
}
rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
}
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index acfeeb9..a439e93 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -350,9 +350,9 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
#ifdef DEBUG
print_binary(status);
- printk("tx status 0x%08x 0x%08x \n",
+ printk("tx status 0x%08x 0x%08x\n",
card->tx_buffer[0], card->tx_buffer[4]);
- printk("rx status 0x%08x 0x%08x \n",
+ printk("rx status 0x%08x 0x%08x\n",
card->rx_buffer[0], card->rx_buffer[4]);
#endif
/* Handle shared irq and hotplug */
@@ -462,7 +462,7 @@ static int xircom_open(struct net_device *dev)
struct xircom_private *xp = netdev_priv(dev);
int retval;
enter("xircom_open");
- pr_info("xircom cardbus adaptor found, registering as %s, using irq %i \n",
+ pr_info("xircom cardbus adaptor found, registering as %s, using irq %i\n",
dev->name, dev->irq);
retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
if (retval) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4326520..97b2553 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -109,6 +109,9 @@ struct tun_struct {
struct tap_filter txflt;
struct socket socket;
+ struct socket_wq wq;
+
+ int vnet_hdr_sz;
#ifdef TUN_DEBUG
int debug;
@@ -323,7 +326,7 @@ static void tun_net_uninit(struct net_device *dev)
/* Inform the methods they need to stop using the dev.
*/
if (tfile) {
- wake_up_all(&tun->socket.wait);
+ wake_up_all(&tun->wq.wait);
if (atomic_dec_and_test(&tfile->count))
__tun_detach(tun);
}
@@ -393,12 +396,11 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Enqueue packet */
skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
- dev->trans_start = jiffies;
/* Notify and wake up reader process */
if (tun->flags & TUN_FASYNC)
kill_fasync(&tun->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tun->socket.wait, POLLIN |
+ wake_up_interruptible_poll(&tun->wq.wait, POLLIN |
POLLRDNORM | POLLRDBAND);
return NETDEV_TX_OK;
@@ -415,7 +417,6 @@ static void tun_net_mclist(struct net_device *dev)
* _rx_ path and has nothing to do with the _tx_ path.
* In rx path we always accept everything userspace gives us.
*/
- return;
}
#define MIN_MTU 68
@@ -498,7 +499,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
- poll_wait(file, &tun->socket.wait, wait);
+ poll_wait(file, &tun->wq.wait, wait);
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
@@ -563,7 +564,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
}
if (tun->flags & TUN_VNET_HDR) {
- if ((len -= sizeof(gso)) > count)
+ if ((len -= tun->vnet_hdr_sz) > count)
return -EINVAL;
if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
@@ -575,7 +576,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
if (gso.hdr_len > len)
return -EINVAL;
- offset += sizeof(gso);
+ offset += tun->vnet_hdr_sz;
}
if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
@@ -718,7 +719,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
if (tun->flags & TUN_VNET_HDR) {
struct virtio_net_hdr gso = { 0 }; /* no info leak */
- if ((len -= sizeof(gso)) < 0)
+ if ((len -= tun->vnet_hdr_sz) < 0)
return -EINVAL;
if (skb_is_gso(skb)) {
@@ -749,7 +750,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
sizeof(gso))))
return -EFAULT;
- total += sizeof(gso);
+ total += tun->vnet_hdr_sz;
}
len = min_t(int, skb->len, len);
@@ -773,7 +774,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
- add_wait_queue(&tun->socket.wait, &wait);
+ add_wait_queue(&tun->wq.wait, &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
@@ -804,7 +805,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
}
current->state = TASK_RUNNING;
- remove_wait_queue(&tun->socket.wait, &wait);
+ remove_wait_queue(&tun->wq.wait, &wait);
return ret;
}
@@ -861,6 +862,7 @@ static struct rtnl_link_ops tun_link_ops __read_mostly = {
static void tun_sock_write_space(struct sock *sk)
{
struct tun_struct *tun;
+ wait_queue_head_t *wqueue;
if (!sock_writeable(sk))
return;
@@ -868,8 +870,9 @@ static void tun_sock_write_space(struct sock *sk)
if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
return;
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+ wqueue = sk_sleep(sk);
+ if (wqueue && waitqueue_active(wqueue))
+ wake_up_interruptible_sync_poll(wqueue, POLLOUT |
POLLWRNORM | POLLWRBAND);
tun = tun_sk(sk)->tun;
@@ -1033,13 +1036,15 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->dev = dev;
tun->flags = flags;
tun->txflt.count = 0;
+ tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
err = -ENOMEM;
sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
if (!sk)
goto err_free_dev;
- init_waitqueue_head(&tun->socket.wait);
+ tun->socket.wq = &tun->wq;
+ init_waitqueue_head(&tun->wq.wait);
tun->socket.ops = &tun_socket_ops;
sock_init_data(&tun->socket, sk);
sk->sk_write_space = tun_sock_write_space;
@@ -1174,6 +1179,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
struct sock_fprog fprog;
struct ifreq ifr;
int sndbuf;
+ int vnet_hdr_sz;
int ret;
if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
@@ -1319,6 +1325,25 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun->socket.sk->sk_sndbuf = sndbuf;
break;
+ case TUNGETVNETHDRSZ:
+ vnet_hdr_sz = tun->vnet_hdr_sz;
+ if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
+ ret = -EFAULT;
+ break;
+
+ case TUNSETVNETHDRSZ:
+ if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ tun->vnet_hdr_sz = vnet_hdr_sz;
+ break;
+
case TUNATTACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
@@ -1342,7 +1367,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
default:
ret = -EINVAL;
break;
- };
+ }
unlock:
rtnl_unlock();
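The new TUNGETVNETHDRSZ/TUNSETVNETHDRSZ ioctls make the virtio-net header that tun prepends to each packet configurable rather than fixed at sizeof(struct virtio_net_hdr). A rough userspace sketch, assuming the ioctl numbers are exported through <linux/if_tun.h> together with this change and that fd is a tun/tap descriptor opened with IFF_VNET_HDR:

    #include <sys/ioctl.h>
    #include <linux/if_tun.h>

    static int bump_vnet_hdr(int fd, int new_sz)
    {
            int cur;

            if (ioctl(fd, TUNGETVNETHDRSZ, &cur) < 0)
                    return -1;
            /* the kernel rejects anything smaller than
             * sizeof(struct virtio_net_hdr) with -EINVAL */
            return ioctl(fd, TUNSETVNETHDRSZ, &new_sz);
    }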
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 98d818d..22bde49 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -881,8 +881,6 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
wmb();
iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
- dev->trans_start = jiffies;
-
/* If we don't have room to put the worst case packet on the
* queue, then we must stop the queue. We need 2 extra
* descriptors -- one to prevent ring wrap, and one for the
@@ -920,11 +918,11 @@ typhoon_set_rx_mode(struct net_device *dev)
/* Too many to match, or accept all multicasts. */
filter |= TYPHOON_RX_FILTER_ALL_MCAST;
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 1b0aef3..932602d 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1999,7 +1999,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
static void ucc_geth_set_multi(struct net_device *dev)
{
struct ucc_geth_private *ugeth;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct ucc_fast __iomem *uf_regs;
struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
@@ -2028,16 +2028,16 @@ static void ucc_geth_set_multi(struct net_device *dev)
out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* Only support group multicast for now.
*/
- if (!(dmi->dmi_addr[0] & 1))
+ if (!(ha->addr[0] & 1))
continue;
/* Ask CPM to run CRC and set bit in
* filter mask.
*/
- hw_add_addr_in_hash(ugeth, dmi->dmi_addr);
+ hw_add_addr_in_hash(ugeth, ha->addr);
}
}
}
@@ -3148,8 +3148,6 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set bd status and length */
out_be32((u32 __iomem *)bd, bd_status);
- dev->trans_start = jiffies;
-
/* Move to next BD in the ring */
if (!(bd_status & T_W))
bd += sizeof(struct qe_bd);
@@ -3883,7 +3881,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
}
if (netif_msg_probe(&debug))
- printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n",
+ printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
ug_info->uf_info.irq);
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 35f56fc..31b7331 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -224,10 +224,9 @@ static int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
cmd, value, index, size);
if (data) {
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmemdup(data, size, GFP_KERNEL);
if (!buf)
goto out;
- memcpy(buf, data, size);
}
err = usb_control_msg(
@@ -322,8 +321,29 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* get the packet length */
size = (u16) (header & 0x0000ffff);
- if ((skb->len) - ((size + 1) & 0xfffe) == 0)
+ if ((skb->len) - ((size + 1) & 0xfffe) == 0) {
+ u8 alignment = (u32)skb->data & 0x3;
+ if (alignment != 0x2) {
+ /*
+ * not 16bit aligned so use the room provided by
+ * the 32 bit header to align the data
+ *
+ * note we want 16bit alignment as the MAC header is
+ * 14 bytes, so the IP header lands on a 32bit boundary
+ * and accessing its fields through a cast to the IP
+ * header struct won't cause unaligned accesses.
+ */
+ u8 realignment = (alignment + 2) & 0x3;
+ memmove(skb->data - realignment,
+ skb->data,
+ size);
+ skb->data -= realignment;
+ skb_set_tail_pointer(skb, size);
+ }
return 2;
+ }
+
if (size > ETH_FRAME_LEN) {
netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
size);
@@ -331,7 +351,18 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
ax_skb = skb_clone(skb, GFP_ATOMIC);
if (ax_skb) {
+ u8 alignment = (u32)packet & 0x3;
ax_skb->len = size;
+
+ if (alignment != 0x2) {
+ /*
+ * not 16bit aligned, so use the room provided by
+ * the 32 bit header to align the data
+ */
+ u8 realignment = (alignment + 2) & 0x3;
+ memmove(packet - realignment, packet, size);
+ packet -= realignment;
+ }
ax_skb->data = packet;
skb_set_tail_pointer(ax_skb, size);
usbnet_skb_return(dev, ax_skb);
@@ -558,16 +589,14 @@ static void asix_set_multicast(struct net_device *net)
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 crc_bits;
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- netdev_for_each_mc_addr(mc_list, net) {
- crc_bits =
- ether_crc(ETH_ALEN,
- mc_list->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
data->multi_filter[crc_bits >> 3] |=
1 << (crc_bits & 7);
}
@@ -794,16 +823,14 @@ static void ax88172_set_multicast(struct net_device *net)
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 crc_bits;
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- netdev_for_each_mc_addr(mc_list, net) {
- crc_bits =
- ether_crc(ETH_ALEN,
- mc_list->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
data->multi_filter[crc_bits >> 3] |=
1 << (crc_bits & 7);
}
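asix above, and dm9601, mcs7830 and pegasus below, fold the kmalloc()+memcpy() pair used to copy control-message payloads into a single kmemdup() call; the allocation flags and the failure case stay the same. A minimal sketch:

    #include <linux/slab.h>
    #include <linux/string.h>

    /* duplicate a caller-supplied buffer for a USB control transfer */
    static void *dup_for_ctrl_msg(const void *data, size_t size)
    {
            /* old pattern:
             *      buf = kmalloc(size, GFP_KERNEL);
             *      if (buf)
             *              memcpy(buf, data, size);
             */
            return kmemdup(data, size, GFP_KERNEL);
    }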
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 602e123..97687d3 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -629,7 +629,7 @@ static void catc_multicast(unsigned char *addr, u8 *multicast)
static void catc_set_multicast_list(struct net_device *netdev)
{
struct catc *catc = netdev_priv(netdev);
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
u8 broadcast[6];
u8 rx = RxEnable | RxPolarity | RxMultiCast;
@@ -647,8 +647,8 @@ static void catc_set_multicast_list(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI) {
memset(catc->multicast, 0xff, 64);
} else {
- netdev_for_each_mc_addr(mc, netdev) {
- u32 crc = ether_crc_le(6, mc->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ u32 crc = ether_crc_le(6, ha->addr);
if (!catc->is_f5u011) {
catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
} else {
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 3547cf1..b3fe0de 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -64,6 +64,11 @@ static int is_wireless_rndis(struct usb_interface_descriptor *desc)
#endif
+static const u8 mbm_guid[16] = {
+ 0xa3, 0x17, 0xa8, 0x8b, 0x04, 0x5e, 0x4f, 0x01,
+ 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
+};
+
/*
* probes control interface, claims data interface, collects the bulk
* endpoints, activates data interface (if needed), maybe sets MTU.
@@ -79,6 +84,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
int status;
int rndis;
struct usb_driver *driver = driver_of(intf);
+ struct usb_cdc_mdlm_desc *desc = NULL;
+ struct usb_cdc_mdlm_detail_desc *detail = NULL;
if (sizeof dev->data < sizeof *info)
return -EDOM;
@@ -229,6 +236,34 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
* side link address we were given.
*/
break;
+ case USB_CDC_MDLM_TYPE:
+ if (desc) {
+ dev_dbg(&intf->dev, "extra MDLM descriptor\n");
+ goto bad_desc;
+ }
+
+ desc = (void *)buf;
+
+ if (desc->bLength != sizeof(*desc))
+ goto bad_desc;
+
+ if (memcmp(&desc->bGUID, mbm_guid, 16))
+ goto bad_desc;
+ break;
+ case USB_CDC_MDLM_DETAIL_TYPE:
+ if (detail) {
+ dev_dbg(&intf->dev, "extra MDLM detail descriptor\n");
+ goto bad_desc;
+ }
+
+ detail = (void *)buf;
+
+ if (detail->bGuidDescriptorType == 0) {
+ if (detail->bLength < (sizeof(*detail) + 1))
+ goto bad_desc;
+ } else
+ goto bad_desc;
+ break;
}
next_desc:
len -= buf [0]; /* bLength */
@@ -543,80 +578,10 @@ static const struct usb_device_id products [] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
}, {
- /* Ericsson F3507g */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3507g ver. 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3607gw */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3607gw ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1905, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3607gw ver 3 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3307 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190a, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3307 ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1909, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson C3607w */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson C3607w ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Toshiba F3507g */
- USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Toshiba F3607gw */
- USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130c, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Toshiba F3607gw ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x1311, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Dell F3507g */
- USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Dell F3607gw */
- USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8183, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Dell F3607gw ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8184, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
+ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&mbm_info,
+
},
{ }, // END
};
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 5dfed92..02b622e 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -93,10 +93,9 @@ static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
netdev_dbg(dev->net, "dm_write() reg=0x%02x, length=%d\n", reg, length);
if (data) {
- buf = kmalloc(length, GFP_KERNEL);
+ buf = kmemdup(data, length, GFP_KERNEL);
if (!buf)
goto out;
- memcpy(buf, data, length);
}
err = usb_control_msg(dev->udev,
@@ -387,10 +386,10 @@ static void dm9601_set_multicast(struct net_device *net)
netdev_mc_count(net) > DM_MAX_MCAST) {
rx_ctl |= 0x04;
} else if (!netdev_mc_empty(net)) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(mc_list, net) {
- u32 crc = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, net) {
+ u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26;
hashes[crc >> 3] |= 1 << (crc & 0x7);
}
}
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index be0cc99..9964df1 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -834,8 +834,6 @@ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
} else {
net->stats.tx_packets++;
net->stats.tx_bytes += skb->len;
- /* And tell the kernel when the last transmit started. */
- net->trans_start = jiffies;
}
dev_kfree_skb(skb);
/* we're done */
@@ -1474,7 +1472,6 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
spin_unlock_irqrestore(&serial->serial_lock, flags);
/* done */
- return;
}
/* how many characters in the buffer */
@@ -1994,7 +1991,6 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
hso_kick_transmit(serial);
D1(" ");
- return;
}
/* called for writing diag or CS serial port */
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 418825d..197c352 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -128,17 +128,13 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
if (rx_urb == NULL)
goto free_tx_urb;
- tx_buf = usb_buffer_alloc(iphone->udev,
- IPHETH_BUF_SIZE,
- GFP_KERNEL,
- &tx_urb->transfer_dma);
+ tx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
+ GFP_KERNEL, &tx_urb->transfer_dma);
if (tx_buf == NULL)
goto free_rx_urb;
- rx_buf = usb_buffer_alloc(iphone->udev,
- IPHETH_BUF_SIZE,
- GFP_KERNEL,
- &rx_urb->transfer_dma);
+ rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
+ GFP_KERNEL, &rx_urb->transfer_dma);
if (rx_buf == NULL)
goto free_tx_buf;
@@ -150,8 +146,8 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
return 0;
free_tx_buf:
- usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
- tx_urb->transfer_dma);
+ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
+ tx_urb->transfer_dma);
free_rx_urb:
usb_free_urb(rx_urb);
free_tx_urb:
@@ -162,10 +158,10 @@ error_nomem:
static void ipheth_free_urbs(struct ipheth_device *iphone)
{
- usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
- iphone->rx_urb->transfer_dma);
- usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
- iphone->tx_urb->transfer_dma);
+ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
+ iphone->rx_urb->transfer_dma);
+ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
+ iphone->tx_urb->transfer_dma);
usb_free_urb(iphone->rx_urb);
usb_free_urb(iphone->tx_urb);
}
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index c4c334d..d6078b8 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -856,7 +856,6 @@ skip:
{
kaweth->stats.tx_packets++;
kaweth->stats.tx_bytes += skb->len;
- net->trans_start = jiffies;
}
spin_unlock_irq(&kaweth->device_lock);
@@ -1156,13 +1155,13 @@ err_fw:
if (!kaweth->irq_urb)
goto err_tx_and_rx;
- kaweth->intbuffer = usb_buffer_alloc( kaweth->dev,
+ kaweth->intbuffer = usb_alloc_coherent( kaweth->dev,
INTBUFFERSIZE,
GFP_KERNEL,
&kaweth->intbufferhandle);
if (!kaweth->intbuffer)
goto err_tx_and_rx_and_irq;
- kaweth->rx_buf = usb_buffer_alloc( kaweth->dev,
+ kaweth->rx_buf = usb_alloc_coherent( kaweth->dev,
KAWETH_BUF_SIZE,
GFP_KERNEL,
&kaweth->rxbufferhandle);
@@ -1203,9 +1202,9 @@ err_fw:
err_intfdata:
usb_set_intfdata(intf, NULL);
- usb_buffer_free(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
+ usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
err_all_but_rxbuf:
- usb_buffer_free(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
+ usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
err_tx_and_rx_and_irq:
usb_free_urb(kaweth->irq_urb);
err_tx_and_rx:
@@ -1242,8 +1241,8 @@ static void kaweth_disconnect(struct usb_interface *intf)
usb_free_urb(kaweth->tx_urb);
usb_free_urb(kaweth->irq_urb);
- usb_buffer_free(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
- usb_buffer_free(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
+ usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
+ usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
free_netdev(netdev);
}
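ipheth and kaweth move from the old usb_buffer_alloc()/usb_buffer_free() names to usb_alloc_coherent()/usb_free_coherent(); the arguments are unchanged, only the names are. A sketch of the pair, with BUF_SIZE standing in for the driver's own buffer size constant:

    #include <linux/usb.h>
    #include <linux/gfp.h>

    static void *example_alloc_dma_buf(struct usb_device *udev, struct urb *urb)
    {
            /* formerly usb_buffer_alloc() */
            return usb_alloc_coherent(udev, BUF_SIZE, GFP_KERNEL,
                                      &urb->transfer_dma);
    }

    static void example_free_dma_buf(struct usb_device *udev, struct urb *urb,
                                     void *buf)
    {
            /* formerly usb_buffer_free() */
            usb_free_coherent(udev, BUF_SIZE, buf, urb->transfer_dma);
    }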
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 9f24e3f..a6281e3 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -142,12 +142,10 @@ static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *
int ret;
void *buffer;
- buffer = kmalloc(size, GFP_NOIO);
+ buffer = kmemdup(data, size, GFP_NOIO);
if (buffer == NULL)
return -ENOMEM;
- memcpy(buffer, data, size);
-
ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ,
MCS7830_WR_BMREQ, 0x0000, index, buffer,
size, MCS7830_CTRL_TIMEOUT);
@@ -453,12 +451,12 @@ static void mcs7830_data_set_multicast(struct net_device *net)
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 crc_bits;
/* Build the multicast hash filter. */
- netdev_for_each_mc_addr(mc_list, net) {
- crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
}
}
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 4183877..974d17f 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -203,13 +203,12 @@ static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
char *buffer;
DECLARE_WAITQUEUE(wait, current);
- buffer = kmalloc(size, GFP_KERNEL);
+ buffer = kmemdup(data, size, GFP_KERNEL);
if (!buffer) {
netif_warn(pegasus, drv, pegasus->net,
"out of memory in %s\n", __func__);
return -ENOMEM;
}
- memcpy(buffer, data, size);
add_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -255,13 +254,12 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
char *tmp;
DECLARE_WAITQUEUE(wait, current);
- tmp = kmalloc(1, GFP_KERNEL);
+ tmp = kmemdup(&data, 1, GFP_KERNEL);
if (!tmp) {
netif_warn(pegasus, drv, pegasus->net,
"out of memory in %s\n", __func__);
return -ENOMEM;
}
- memcpy(tmp, &data, 1);
add_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
while (pegasus->flags & ETH_REGS_CHANGED)
@@ -808,7 +806,7 @@ static void write_bulk_callback(struct urb *urb)
break;
}
- net->trans_start = jiffies;
+ net->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(net);
}
@@ -909,7 +907,6 @@ static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb,
} else {
pegasus->stats.tx_packets++;
pegasus->stats.tx_bytes += skb->len;
- net->trans_start = jiffies;
}
dev_kfree_skb(skb);
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index b90d876..29f5211 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -256,7 +256,7 @@ PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
DEFAULT_GPIO_RESET )
PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x092a,
+PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x093a,
DEFAULT_GPIO_RESET | PEGASUS_II )
PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
DEFAULT_GPIO_RESET)
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index dd8a4ad..28d3ee1 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -104,8 +104,10 @@ static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg,
int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
{
struct cdc_state *info = (void *) &dev->data;
+ struct usb_cdc_notification notification;
int master_ifnum;
int retval;
+ int partial;
unsigned count;
__le32 rsp;
u32 xid = 0, msg_len, request_id;
@@ -133,13 +135,17 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
if (unlikely(retval < 0 || xid == 0))
return retval;
- // FIXME Seems like some devices discard responses when
- // we time out and cancel our "get response" requests...
- // so, this is fragile. Probably need to poll for status.
+ /* Some devices don't respond on the control channel until
+ * polled on the status channel, so do that first. */
+ retval = usb_interrupt_msg(
+ dev->udev,
+ usb_rcvintpipe(dev->udev, dev->status->desc.bEndpointAddress),
+ &notification, sizeof(notification), &partial,
+ RNDIS_CONTROL_TIMEOUT_MS);
+ if (unlikely(retval < 0))
+ return retval;
- /* ignore status endpoint, just poll the control channel;
- * the request probably completed immediately
- */
+ /* Poll the control channel; the request probably completed immediately */
rsp = buf->msg_type | RNDIS_MSG_COMPLETION;
for (count = 0; count < 10; count++) {
memset(buf, 0, CONTROL_BUFFER_SIZE);
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 35b98b1..753ee6e 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -445,14 +445,14 @@ static void smsc75xx_set_multicast(struct net_device *netdev)
netif_dbg(dev, drv, dev->net, "receive all multicast enabled");
pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF;
} else if (!netdev_mc_empty(dev->net)) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF;
- netdev_for_each_mc_addr(mc_list, netdev) {
- u32 bitnum = smsc75xx_hash(mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ u32 bitnum = smsc75xx_hash(ha->addr);
pdata->multicast_hash_table[bitnum / 32] |=
(1 << (bitnum % 32));
}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 3135af6..12a3c88 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -385,13 +385,13 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
pdata->mac_cr |= MAC_CR_MCPAS_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_);
} else if (!netdev_mc_empty(dev->net)) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
pdata->mac_cr |= MAC_CR_HPFILT_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
- netdev_for_each_mc_addr(mc_list, netdev) {
- u32 bitnum = smsc95xx_hash(mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ u32 bitnum = smsc95xx_hash(ha->addr);
u32 mask = 0x01 << (bitnum & 0x1F);
if (bitnum & 0x20)
hash_hi |= mask;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7177abc..a95c73d 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1069,12 +1069,15 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
* NOTE: strictly conforming cdc-ether devices should expect
* the ZLP here, but ignore the one-byte packet.
*/
- if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
- urb->transfer_buffer_length++;
- if (skb_tailroom(skb)) {
- skb->data[skb->len] = 0;
- __skb_put(skb, 1);
- }
+ if (length % dev->maxpacket == 0) {
+ if (!(info->flags & FLAG_SEND_ZLP)) {
+ urb->transfer_buffer_length++;
+ if (skb_tailroom(skb)) {
+ skb->data[skb->len] = 0;
+ __skb_put(skb, 1);
+ }
+ } else
+ urb->transfer_flags |= URB_ZERO_PACKET;
}
spin_lock_irqsave(&dev->txq.lock, flags);
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 388751a..4930f9d 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1209,7 +1209,7 @@ static void rhine_reset_task(struct work_struct *work)
spin_unlock_bh(&rp->lock);
enable_irq(rp->pdev->irq);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev);
}
@@ -1294,8 +1294,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
netif_stop_queue(dev);
- dev->trans_start = jiffies;
-
spin_unlock_irqrestore(&rp->lock, flags);
if (debug > 4) {
@@ -1703,11 +1701,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
rx_mode = 0x0C;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
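The via-velocity hunks that follow drop the driver's private MII register and bit names in favour of the generic definitions from <linux/mii.h>; the register numbers and bit masks are identical, so behaviour is unchanged. The correspondences used below, old driver-local name on the left:

    MII_REG_BMCR    / BMCR_AUTO, BMCR_REAUTO, BMCR_SPEED1G
        -> MII_BMCR / BMCR_ANENABLE, BMCR_ANRESTART, BMCR_SPEED1000
    MII_REG_BMSR    / BMSR_LNK
        -> MII_BMSR / BMSR_LSTATUS
    MII_REG_ANAR    / ANAR_TX, ANAR_TXFD, ANAR_10, ANAR_10FD, ANAR_PAUSE, ANAR_ASMDIR
        -> MII_ADVERTISE / ADVERTISE_100HALF, ADVERTISE_100FULL,
           ADVERTISE_10HALF, ADVERTISE_10FULL,
           ADVERTISE_PAUSE_CAP, ADVERTISE_PAUSE_ASYM
    MII_REG_G1000CR / G1000CR_1000, G1000CR_1000FD
        -> MII_CTRL1000 / ADVERTISE_1000HALF, ADVERTISE_1000FULL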
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index bc278d4..42dffd3 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -719,30 +719,30 @@ static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
u32 status = 0;
u16 ANAR;
- if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
+ if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
status |= VELOCITY_LINK_FAIL;
- if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
- else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
+ else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
status |= (VELOCITY_SPEED_1000);
else {
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
- if (ANAR & ANAR_TXFD)
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if (ANAR & ADVERTISE_100FULL)
status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
- else if (ANAR & ANAR_TX)
+ else if (ANAR & ADVERTISE_100HALF)
status |= VELOCITY_SPEED_100;
- else if (ANAR & ANAR_10FD)
+ else if (ANAR & ADVERTISE_10FULL)
status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
else
status |= (VELOCITY_SPEED_10);
}
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
- if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
- == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
- if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
+ == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
status |= VELOCITY_AUTONEG_ENABLE;
}
}
@@ -801,23 +801,23 @@ static void set_mii_flow_control(struct velocity_info *vptr)
/*Enable or Disable PAUSE in ANAR */
switch (vptr->options.flow_cntl) {
case FLOW_CNTL_TX:
- MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
case FLOW_CNTL_RX:
- MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
case FLOW_CNTL_TX_RX:
- MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
case FLOW_CNTL_DISABLE:
- MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
default:
break;
@@ -832,10 +832,10 @@ static void set_mii_flow_control(struct velocity_info *vptr)
*/
static void mii_set_auto_on(struct velocity_info *vptr)
{
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
- MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
+ MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
else
- MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
}
static u32 check_connection_type(struct mac_regs __iomem *regs)
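Note: the same renaming applies to auto-negotiation control, where BMCR_AUTO/BMCR_REAUTO become BMCR_ANENABLE/BMCR_ANRESTART. A minimal sketch of the restart-or-enable logic, written against hypothetical mii_read()/mii_write() callbacks rather than the driver's MII_REG_BITS_* macros:

        #include <linux/mii.h>

        /* If auto-negotiation is already enabled, restart it; otherwise
         * enable it, exactly as mii_set_auto_on() does above. */
        static void example_mii_autoneg_on(void *priv,
                                           u16 (*mii_read)(void *priv, int reg),
                                           void (*mii_write)(void *priv, int reg, u16 val))
        {
                u16 bmcr = mii_read(priv, MII_BMCR);

                if (bmcr & BMCR_ANENABLE)
                        mii_write(priv, MII_BMCR, bmcr | BMCR_ANRESTART);
                else
                        mii_write(priv, MII_BMCR, bmcr | BMCR_ANENABLE);
        }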
@@ -860,11 +860,11 @@ static u32 check_connection_type(struct mac_regs __iomem *regs)
else
status |= VELOCITY_SPEED_100;
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
- if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
- == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
- if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
+ == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
status |= VELOCITY_AUTONEG_ENABLE;
}
}
@@ -905,7 +905,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
*/
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
- MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
+ MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
/*
* If connection type is AUTO
@@ -915,9 +915,9 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
/* clear force MAC mode bit */
BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
/* set duplex mode of MAC according to duplex mode of MII */
- MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
- MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
/* enable AUTO-NEGO mode */
mii_set_auto_on(vptr);
@@ -952,31 +952,31 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
}
- MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
else
BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
- /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
- velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
- ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
+ /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
+ velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
+ ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
if (mii_status & VELOCITY_SPEED_100) {
if (mii_status & VELOCITY_DUPLEX_FULL)
- ANAR |= ANAR_TXFD;
+ ANAR |= ADVERTISE_100FULL;
else
- ANAR |= ANAR_TX;
+ ANAR |= ADVERTISE_100HALF;
} else {
if (mii_status & VELOCITY_DUPLEX_FULL)
- ANAR |= ANAR_10FD;
+ ANAR |= ADVERTISE_10FULL;
else
- ANAR |= ANAR_10;
+ ANAR |= ADVERTISE_10HALF;
}
- velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
+ velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
/* enable AUTO-NEGO mode */
mii_set_auto_on(vptr);
- /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
+ /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
}
/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
@@ -1126,7 +1126,7 @@ static void velocity_set_multi(struct net_device *dev)
struct mac_regs __iomem *regs = vptr->mac_regs;
u8 rx_mode;
int i;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
writel(0xffffffff, &regs->MARCAM[0]);
@@ -1142,8 +1142,8 @@ static void velocity_set_multi(struct net_device *dev)
mac_get_cam_mask(regs, vptr->mCAMmask);
i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- mac_set_cam(regs, i + offset, mclist->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ mac_set_cam(regs, i + offset, ha->addr);
vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
i++;
}
@@ -1178,36 +1178,36 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
/*
* Reset to hardware default
*/
- MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
/*
* Turn on ECHODIS bit in NWay-forced full mode and turn it
* off it in NWay-forced half mode for NWay-forced v.s.
* legacy-forced issue.
*/
if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
- MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
else
- MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
/*
* Turn on Link/Activity LED enable bit for CIS8201
*/
- MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
+ MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
break;
case PHYID_VT3216_32BIT:
case PHYID_VT3216_64BIT:
/*
* Reset to hardware default
*/
- MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
/*
* Turn on ECHODIS bit in NWay-forced full mode and turn it
* off it in NWay-forced half mode for NWay-forced v.s.
* legacy-forced issue
*/
if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
- MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
else
- MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
break;
case PHYID_MARVELL_1000:
@@ -1219,15 +1219,15 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
/*
* Reset to hardware default
*/
- MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
break;
default:
;
}
- velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
- if (BMCR & BMCR_ISO) {
- BMCR &= ~BMCR_ISO;
- velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
+ velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
+ if (BMCR & BMCR_ISOLATE) {
+ BMCR &= ~BMCR_ISOLATE;
+ velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
}
}
@@ -2606,7 +2606,6 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
td_ptr->td_buf[0].size |= TD_QUEUE;
mac_tx_queue_wake(vptr->mac_regs, qnum);
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&vptr->lock, flags);
out:
return NETDEV_TX_OK;
@@ -2953,13 +2952,13 @@ static int velocity_set_wol(struct velocity_info *vptr)
if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
- MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
+ MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
- MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
}
if (vptr->mii_status & VELOCITY_SPEED_1000)
- MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index ef4a0f6..c381911 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1240,86 +1240,16 @@ struct velocity_context {
u32 pattern[8];
};
-
-/*
- * MII registers.
- */
-
-
/*
* Registers in the MII (offset unit is WORD)
*/
-#define MII_REG_BMCR 0x00 // physical address
-#define MII_REG_BMSR 0x01 //
-#define MII_REG_PHYID1 0x02 // OUI
-#define MII_REG_PHYID2 0x03 // OUI + Module ID + REV ID
-#define MII_REG_ANAR 0x04 //
-#define MII_REG_ANLPAR 0x05 //
-#define MII_REG_G1000CR 0x09 //
-#define MII_REG_G1000SR 0x0A //
-#define MII_REG_MODCFG 0x10 //
-#define MII_REG_TCSR 0x16 //
-#define MII_REG_PLED 0x1B //
-// NS, MYSON only
-#define MII_REG_PCR 0x17 //
-// ESI only
-#define MII_REG_PCSR 0x17 //
-#define MII_REG_AUXCR 0x1C //
-
// Marvell 88E1000/88E1000S
#define MII_REG_PSCR 0x10 // PHY specific control register
//
-// Bits in the BMCR register
-//
-#define BMCR_RESET 0x8000 //
-#define BMCR_LBK 0x4000 //
-#define BMCR_SPEED100 0x2000 //
-#define BMCR_AUTO 0x1000 //
-#define BMCR_PD 0x0800 //
-#define BMCR_ISO 0x0400 //
-#define BMCR_REAUTO 0x0200 //
-#define BMCR_FDX 0x0100 //
-#define BMCR_SPEED1G 0x0040 //
-//
-// Bits in the BMSR register
-//
-#define BMSR_AUTOCM 0x0020 //
-#define BMSR_LNK 0x0004 //
-
-//
-// Bits in the ANAR register
-//
-#define ANAR_ASMDIR 0x0800 // Asymmetric PAUSE support
-#define ANAR_PAUSE 0x0400 // Symmetric PAUSE Support
-#define ANAR_T4 0x0200 //
-#define ANAR_TXFD 0x0100 //
-#define ANAR_TX 0x0080 //
-#define ANAR_10FD 0x0040 //
-#define ANAR_10 0x0020 //
-//
-// Bits in the ANLPAR register
-//
-#define ANLPAR_ASMDIR 0x0800 // Asymmetric PAUSE support
-#define ANLPAR_PAUSE 0x0400 // Symmetric PAUSE Support
-#define ANLPAR_T4 0x0200 //
-#define ANLPAR_TXFD 0x0100 //
-#define ANLPAR_TX 0x0080 //
-#define ANLPAR_10FD 0x0040 //
-#define ANLPAR_10 0x0020 //
-
-//
-// Bits in the G1000CR register
-//
-#define G1000CR_1000FD 0x0200 // PHY is 1000-T Full-duplex capable
-#define G1000CR_1000 0x0100 // PHY is 1000-T Half-duplex capable
-
-//
-// Bits in the G1000SR register
+// Bits in the Silicon revision register
//
-#define G1000SR_1000FD 0x0800 // LP PHY is 1000-T Full-duplex capable
-#define G1000SR_1000 0x0400 // LP PHY is 1000-T Half-duplex capable
#define TCSR_ECHODIS 0x2000 //
#define AUXCR_MDPPS 0x0004 //
@@ -1338,7 +1268,6 @@ struct velocity_context {
#define PHYID_REV_ID_MASK 0x0000000FUL
-#define PHYID_GET_PHY_REV_ID(i) ((i) & PHYID_REV_ID_MASK)
#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK)
#define MII_REG_BITS_ON(x,i,p) do {\
@@ -1362,8 +1291,8 @@ struct velocity_context {
#define MII_GET_PHY_ID(p) ({\
u32 id;\
- velocity_mii_read((p),MII_REG_PHYID2,(u16 *) &id);\
- velocity_mii_read((p),MII_REG_PHYID1,((u16 *) &id)+1);\
+ velocity_mii_read((p),MII_PHYSID2,(u16 *) &id);\
+ velocity_mii_read((p),MII_PHYSID1,((u16 *) &id)+1);\
(id);})
/*
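Note: the rewritten MII_GET_PHY_ID() macro still composes the 32-bit PHY identifier through a u16-pointer store, which assumes a little-endian layout. A sketch that builds the same value explicitly (mii_read() is a hypothetical 16-bit MII read helper):

        #include <linux/mii.h>
        #include <linux/types.h>

        /* PHY ID: MII_PHYSID1 in the upper 16 bits, MII_PHYSID2 in the lower. */
        static u32 example_read_phy_id(void *priv, u16 (*mii_read)(void *priv, int reg))
        {
                u32 id;

                id = mii_read(priv, MII_PHYSID1);
                id = (id << 16) | mii_read(priv, MII_PHYSID2);
                return id;
        }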
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b0577dd..b0a85d0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -40,8 +40,7 @@ module_param(gso, bool, 0444);
#define VIRTNET_SEND_COMMAND_SG_MAX 2
-struct virtnet_info
-{
+struct virtnet_info {
struct virtio_device *vdev;
struct virtqueue *rvq, *svq, *cvq;
struct net_device *dev;
@@ -62,6 +61,10 @@ struct virtnet_info
/* Chain pages by the private ptr. */
struct page *pages;
+
+ /* fragments + linear part + virtio header */
+ struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
+ struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
};
struct skb_vnet_hdr {
@@ -324,10 +327,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
struct sk_buff *skb;
struct skb_vnet_hdr *hdr;
- struct scatterlist sg[2];
int err;
- sg_init_table(sg, 2);
skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
if (unlikely(!skb))
return -ENOMEM;
@@ -335,11 +336,11 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
skb_put(skb, MAX_PACKET_LEN);
hdr = skb_vnet_hdr(skb);
- sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
+ sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);
- skb_to_sgvec(skb, sg + 1, 0, skb->len);
+ skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb);
+ err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, 2, skb);
if (err < 0)
dev_kfree_skb(skb);
@@ -348,13 +349,11 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
- struct scatterlist sg[MAX_SKB_FRAGS + 2];
struct page *first, *list = NULL;
char *p;
int i, err, offset;
- sg_init_table(sg, MAX_SKB_FRAGS + 2);
- /* page in sg[MAX_SKB_FRAGS + 1] is list tail */
+ /* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
first = get_a_page(vi, gfp);
if (!first) {
@@ -362,7 +361,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
give_pages(vi, list);
return -ENOMEM;
}
- sg_set_buf(&sg[i], page_address(first), PAGE_SIZE);
+ sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);
/* chain new page in list head to match sg */
first->private = (unsigned long)list;
@@ -376,17 +375,17 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
}
p = page_address(first);
- /* sg[0], sg[1] share the same page */
- /* a separated sg[0] for virtio_net_hdr only during to QEMU bug*/
- sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr));
+ /* vi->rx_sg[0], vi->rx_sg[1] share the same page */
+ /* a separate vi->rx_sg[0] for virtio_net_hdr only, due to a QEMU bug */
+ sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));
- /* sg[1] for data packet, from offset */
+ /* vi->rx_sg[1] for data packet, from offset */
offset = sizeof(struct padded_vnet_hdr);
- sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset);
+ sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);
/* chain first in list head */
first->private = (unsigned long)list;
- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
+ err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
first);
if (err < 0)
give_pages(vi, first);
@@ -397,16 +396,15 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
struct page *page;
- struct scatterlist sg;
int err;
page = get_a_page(vi, gfp);
if (!page)
return -ENOMEM;
- sg_init_one(&sg, page_address(page), PAGE_SIZE);
+ sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
- err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page);
+ err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, 1, page);
if (err < 0)
give_pages(vi, page);
@@ -515,12 +513,9 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
- struct scatterlist sg[2+MAX_SKB_FRAGS];
struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
- sg_init_table(sg, 2+MAX_SKB_FRAGS);
-
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -554,12 +549,13 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
/* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
- sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr);
+ sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
else
- sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
+ sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);
- hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
- return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
+ hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
+ return vi->svq->vq_ops->add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
+ 0, skb);
}
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -722,7 +718,6 @@ static void virtnet_set_rx_mode(struct net_device *dev)
struct scatterlist sg[2];
u8 promisc, allmulti;
struct virtio_net_ctrl_mac *mac_data;
- struct dev_addr_list *addr;
struct netdev_hw_addr *ha;
int uc_count;
int mc_count;
@@ -779,8 +774,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
mac_data->entries = mc_count;
i = 0;
- netdev_for_each_mc_addr(addr, dev)
- memcpy(&mac_data->macs[i++][0], addr->da_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
sg_set_buf(&sg[1], mac_data,
sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
@@ -942,6 +937,8 @@ static int virtnet_probe(struct virtio_device *vdev)
vdev->priv = vi;
vi->pages = NULL;
INIT_DELAYED_WORK(&vi->refill, refill_work);
+ sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
+ sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
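Note: the virtio_net change replaces per-call, on-stack scatterlists with two arrays embedded in struct virtnet_info and initialised once in virtnet_probe(). A rough sketch of that pattern, with made-up example_* names standing in for the driver's own functions:

        #include <linux/scatterlist.h>
        #include <linux/skbuff.h>

        /* Per-device context holding a preinitialised scatterlist: room for
         * the virtio header, the linear part and the page fragments. */
        struct example_ctx {
                struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
        };

        static void example_init(struct example_ctx *ctx)
        {
                sg_init_table(ctx->rx_sg, ARRAY_SIZE(ctx->rx_sg));
        }

        static int example_fill(struct example_ctx *ctx, struct sk_buff *skb,
                                void *hdr, unsigned int hdr_len)
        {
                sg_set_buf(ctx->rx_sg, hdr, hdr_len);
                /* returns the number of entries used, including the header */
                return skb_to_sgvec(skb, ctx->rx_sg + 1, 0, skb->len) + 1;
        }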
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index cff3485..989b742 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -992,7 +992,6 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
tq->tx_ring.next2fill);
}
- netdev->trans_start = jiffies;
return NETDEV_TX_OK;
@@ -1174,7 +1173,6 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
netif_receive_skb(skb);
}
- adapter->netdev->last_rx = jiffies;
ctx->skb = NULL;
}
@@ -1371,13 +1369,12 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
rq->rx_ring[1].size);
- bi = kmalloc(sz, GFP_KERNEL);
+ bi = kzalloc(sz, GFP_KERNEL);
if (!bi) {
printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
adapter->netdev->name);
goto err;
}
- memset(bi, 0, sz);
rq->buf_info[0] = bi;
rq->buf_info[1] = bi + rq->rx_ring[0].size;
@@ -1675,11 +1672,11 @@ vmxnet3_copy_mc(struct net_device *netdev)
/* We may be called with BH disabled */
buf = kmalloc(sz, GFP_ATOMIC);
if (buf) {
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
int i = 0;
- netdev_for_each_mc_addr(mc, netdev)
- memcpy(buf + i++ * ETH_ALEN, mc->dmi_addr,
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(buf + i++ * ETH_ALEN, ha->addr,
ETH_ALEN);
}
}
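Note: besides dropping the obsolete trans_start/last_rx updates, the vmxnet3 hunk folds a kmalloc()+memset() pair into kzalloc(). Equivalent sketch:

        #include <linux/slab.h>

        static void *example_alloc_zeroed(size_t sz)
        {
                /* was: p = kmalloc(sz, GFP_KERNEL); if (p) memset(p, 0, sz); */
                return kzalloc(sz, GFP_KERNEL);
        }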
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index a21a25d..297f0d2 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -183,8 +183,6 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
pci_save_state(hldev->pdev);
-
- return;
}
/*
@@ -342,8 +340,6 @@ void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
hldev->minor_revision =
(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
-
- return;
}
/*
@@ -357,8 +353,10 @@ __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
switch (host_type) {
case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
+ if (func_id == 0) {
+ access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
+ VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
+ }
break;
case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
@@ -426,8 +424,6 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
hldev->first_vp_id = i;
break;
}
-
- return;
}
/*
@@ -633,8 +629,10 @@ vxge_hw_device_initialize(
__vxge_hw_device_pci_e_init(hldev);
status = __vxge_hw_device_reg_addr_get(hldev);
- if (status != VXGE_HW_OK)
+ if (status != VXGE_HW_OK) {
+ vfree(hldev);
goto exit;
+ }
__vxge_hw_device_id_get(hldev);
__vxge_hw_device_host_info_get(hldev);
@@ -1213,19 +1211,16 @@ __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
/* link this RxD block with previous one */
__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
}
-
- return;
}
/*
- * __vxge_hw_ring_initial_replenish - Initial replenish of RxDs
+ * vxge_hw_ring_replenish - Initial replenish of RxDs
* This function replenishes the RxDs from reserve array to work array
*/
enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
+vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
void *rxd;
- int i = 0;
struct __vxge_hw_channel *channel;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -1246,11 +1241,6 @@ vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
}
vxge_hw_ring_rxd_post(ring, rxd);
- if (min_flag) {
- i++;
- if (i == VXGE_HW_RING_MIN_BUFF_ALLOCATION)
- break;
- }
}
status = VXGE_HW_OK;
exit:
@@ -1355,7 +1345,7 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
* Currently we don't have a case when the 1) is done without the 2).
*/
if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring, 1);
+ status = vxge_hw_ring_replenish(ring);
if (status != VXGE_HW_OK) {
__vxge_hw_ring_delete(vp);
goto exit;
@@ -1417,7 +1407,7 @@ enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
goto exit;
if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring, 1);
+ status = vxge_hw_ring_replenish(ring);
if (status != VXGE_HW_OK)
goto exit;
}
@@ -2320,8 +2310,6 @@ __vxge_hw_fifo_mempool_item_alloc(
txdl_priv->first_txdp = txdp;
txdl_priv->next_txdl_priv = NULL;
txdl_priv->alloc_frags = 0;
-
- return;
}
/*
@@ -2578,7 +2566,6 @@ __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
writeq(0, &vpath_reg->rts_access_steer_data1);
wmb();
- return;
}
@@ -3486,7 +3473,6 @@ __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
writeq(val64, &vp_reg->prc_cfg4);
- return;
}
/*
@@ -3905,7 +3891,6 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
}
}
- return;
}
/*
* __vxge_hw_vpath_initialize
@@ -5039,8 +5024,6 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
if (status == VXGE_HW_OK)
__vxge_hw_blockpool_blocks_remove(blockpool);
}
-
- return;
}
/*
@@ -5096,6 +5079,4 @@ __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
}
__vxge_hw_blockpool_blocks_remove(blockpool);
-
- return;
}
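Note: apart from removing redundant trailing return statements, the vxge-config.c change plugs a leak in vxge_hw_device_initialize(): the vmalloc()'d device structure is now vfree()'d when register-address setup fails. A generic sketch of that error-path rule (the setup() callback and the out parameter are illustrative):

        #include <linux/vmalloc.h>

        static int example_init(size_t sz, int (*setup)(void *dev), void **out)
        {
                void *hldev = vmalloc(sz);
                int status;

                if (!hldev)
                        return -ENOMEM;

                status = setup(hldev);
                if (status) {
                        vfree(hldev);   /* was leaked on this error path before the fix */
                        return status;
                }

                *out = hldev;
                return 0;
        }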
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 13f5416..4ae2625 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -765,10 +765,18 @@ struct vxge_hw_device_hw_info {
#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
#define VXGE_HW_VH_NORMAL_FUNCTION 7
u64 function_mode;
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 0
-#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 1
+#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
#define VXGE_HW_FUNCTION_MODE_SRIOV 2
#define VXGE_HW_FUNCTION_MODE_MRIOV 3
+#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
+#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
+#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
+#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10
+
u32 func_id;
u64 vpath_mask;
struct vxge_hw_device_version fw_version;
@@ -1915,20 +1923,32 @@ static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
gfp_t flags;
void *vaddr;
unsigned long misaligned = 0;
+ int realloc_flag = 0;
*p_dma_acch = *p_dmah = NULL;
if (in_interrupt())
flags = GFP_ATOMIC | GFP_DMA;
else
flags = GFP_KERNEL | GFP_DMA;
-
- size += VXGE_CACHE_LINE_SIZE;
-
+realloc:
vaddr = kmalloc((size), flags);
if (vaddr == NULL)
return vaddr;
- misaligned = (unsigned long)VXGE_ALIGN(*((u64 *)&vaddr),
+ misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
VXGE_CACHE_LINE_SIZE);
+ if (realloc_flag)
+ goto out;
+
+ if (misaligned) {
+ /* misaligned, free current one and try allocating
+ * size + VXGE_CACHE_LINE_SIZE memory
+ */
+ kfree((void *) vaddr);
+ size += VXGE_CACHE_LINE_SIZE;
+ realloc_flag = 1;
+ goto realloc;
+ }
+out:
*(unsigned long *)p_dma_acch = misaligned;
vaddr = (void *)((u8 *)vaddr + misaligned);
return vaddr;
@@ -2254,4 +2274,6 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
struct vxge_hw_rth_hash_types *hash_type,
u16 bucket_size);
+enum vxge_hw_status
+__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
#endif
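Note: the vxge_os_dma_malloc() rework above retries the allocation with extra slack only when the first kmalloc() result is not cache-line aligned, instead of always over-allocating. A simplified sketch of the same idea using IS_ALIGNED()/ALIGN(); CACHE_LINE stands in for VXGE_CACHE_LINE_SIZE and the helper name is made up:

        #include <linux/kernel.h>
        #include <linux/slab.h>

        #define CACHE_LINE      64

        /* Returns an aligned pointer; *offset records how far the returned
         * pointer sits past the real allocation, so the caller can kfree()
         * the original address later. */
        static void *example_aligned_alloc(size_t size, unsigned long *offset)
        {
                void *vaddr = kmalloc(size, GFP_KERNEL);

                if (!vaddr)
                        return NULL;

                if (!IS_ALIGNED((unsigned long)vaddr, CACHE_LINE)) {
                        kfree(vaddr);
                        vaddr = kmalloc(size + CACHE_LINE, GFP_KERNEL);
                        if (!vaddr)
                                return NULL;
                }

                *offset = ALIGN((unsigned long)vaddr, CACHE_LINE) -
                          (unsigned long)vaddr;
                return (u8 *)vaddr + *offset;
        }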
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index aaf374c..cadef85 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -109,7 +109,7 @@ static void vxge_ethtool_gregs(struct net_device *dev,
int index, offset;
enum vxge_hw_status status;
u64 reg;
- u8 *reg_space = (u8 *) space;
+ u64 *reg_space = (u64 *) space;
struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
pci_get_drvdata(vdev->pdev);
@@ -129,8 +129,7 @@ static void vxge_ethtool_gregs(struct net_device *dev,
__func__, __LINE__);
return;
}
-
- memcpy((reg_space + offset), &reg, 8);
+ *reg_space++ = reg;
}
}
}
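Note: the ethtool register-dump hunk replaces a byte-wise memcpy() of each 64-bit register with a typed store through an incrementing u64 pointer. Minimal sketch, assuming space is the ethtool regs buffer and read_reg() is a hypothetical 64-bit register accessor:

        #include <linux/types.h>

        static void example_dump_regs(void *space, unsigned int count,
                                      u64 (*read_reg)(unsigned int index))
        {
                u64 *reg_space = space;
                unsigned int i;

                for (i = 0; i < count; i++)
                        *reg_space++ = read_reg(i);
        }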
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index ba6d0da..b504bd5 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -445,7 +445,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
ring->ndev->name, __func__, __LINE__);
ring->pkts_processed = 0;
- vxge_hw_ring_replenish(ringh, 0);
+ vxge_hw_ring_replenish(ringh);
do {
prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1118,7 +1118,7 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
*/
static void vxge_set_multicast(struct net_device *dev)
{
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct vxgedev *vdev;
int i, mcast_cnt = 0;
struct __vxge_hw_device *hldev;
@@ -1218,8 +1218,8 @@ static void vxge_set_multicast(struct net_device *dev)
}
/* Add new ones */
- netdev_for_each_mc_addr(mclist, dev) {
- memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
vpath_idx++) {
mac_info.vpath_no = vpath_idx;
@@ -1364,28 +1364,26 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
- int msix_id, alarm_msix_id;
- int tim_msix_id[4] = {[0 ...3] = 0};
+ int msix_id = 0;
+ int tim_msix_id[4] = {0, 1, 0, 0};
+ int alarm_msix_id = VXGE_ALARM_MSIX_ID;
vxge_hw_vpath_intr_enable(vpath->handle);
if (vdev->config.intr_type == INTA)
vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
else {
- msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
- alarm_msix_id =
- VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
-
- tim_msix_id[0] = msix_id;
- tim_msix_id[1] = msix_id + 1;
vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
alarm_msix_id);
+ msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
/* enable the alarm vector */
- vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id);
+ msix_id = (vpath->handle->vpath->hldev->first_vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
+ vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
}
}
@@ -1406,12 +1404,13 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
if (vdev->config.intr_type == INTA)
vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
else {
- msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
+ msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
/* disable the alarm vector */
- msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
+ msix_id = (vpath->handle->vpath->hldev->first_vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
}
}
@@ -1765,7 +1764,6 @@ static void vxge_netpoll(struct net_device *dev)
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...", __func__, __LINE__);
- return;
}
#endif
@@ -2224,19 +2222,18 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
enum vxge_hw_status status;
struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
struct vxgedev *vdev = vpath->vdev;
- int alarm_msix_id =
- VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
+ int msix_id = (vpath->handle->vpath->vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle,
- alarm_msix_id);
+ vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
vdev->exec_mode);
if (status == VXGE_HW_OK) {
vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
- alarm_msix_id);
+ msix_id);
continue;
}
vxge_debug_intr(VXGE_ERR,
@@ -2249,18 +2246,17 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
static int vxge_alloc_msix(struct vxgedev *vdev)
{
int j, i, ret = 0;
- int intr_cnt = 0;
- int alarm_msix_id = 0, msix_intr_vect = 0;
+ int msix_intr_vect = 0, temp;
vdev->intr_cnt = 0;
+start:
/* Tx/Rx MSIX Vectors count */
vdev->intr_cnt = vdev->no_of_vpath * 2;
/* Alarm MSIX Vectors count */
vdev->intr_cnt++;
- intr_cnt = (vdev->max_vpath_supported * 2) + 1;
- vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry),
+ vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry),
GFP_KERNEL);
if (!vdev->entries) {
vxge_debug_init(VXGE_ERR,
@@ -2269,8 +2265,9 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
return -ENOMEM;
}
- vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry),
- GFP_KERNEL);
+ vdev->vxge_entries =
+ kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry),
+ GFP_KERNEL);
if (!vdev->vxge_entries) {
vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
VXGE_DRIVER_NAME);
@@ -2278,9 +2275,7 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
return -ENOMEM;
}
- /* Last vector in the list is used for alarm */
- alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
- for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) {
+ for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
@@ -2298,47 +2293,31 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
}
/* Initialize the alarm vector */
- vdev->entries[j].entry = alarm_msix_id;
- vdev->vxge_entries[j].entry = alarm_msix_id;
+ vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
+ vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
vdev->vxge_entries[j].in_use = 0;
- ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
- /* if driver request exceeeds available irq's, request with a small
- * number.
- */
- if (ret > 0) {
- vxge_debug_init(VXGE_ERR,
- "%s: MSI-X enable failed for %d vectors, available: %d",
- VXGE_DRIVER_NAME, intr_cnt, ret);
- vdev->max_vpath_supported = vdev->no_of_vpath;
- intr_cnt = (vdev->max_vpath_supported * 2) + 1;
-
- /* Reset the alarm vector setting */
- vdev->entries[j].entry = 0;
- vdev->vxge_entries[j].entry = 0;
-
- /* Initialize the alarm vector with new setting */
- vdev->entries[intr_cnt - 1].entry = alarm_msix_id;
- vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id;
- vdev->vxge_entries[intr_cnt - 1].in_use = 0;
-
- ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
- if (!ret)
- vxge_debug_init(VXGE_ERR,
- "%s: MSI-X enabled for %d vectors",
- VXGE_DRIVER_NAME, intr_cnt);
- }
+ ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
- if (ret) {
+ if (ret > 0) {
vxge_debug_init(VXGE_ERR,
"%s: MSI-X enable failed for %d vectors, ret: %d",
- VXGE_DRIVER_NAME, intr_cnt, ret);
+ VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
kfree(vdev->entries);
kfree(vdev->vxge_entries);
vdev->entries = NULL;
vdev->vxge_entries = NULL;
+
+ if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3))
+ return -ENODEV;
+ /* Retry with fewer vectors by reducing the vpath count */
+ temp = (ret - 1)/2;
+ vxge_close_vpaths(vdev, temp);
+ vdev->no_of_vpath = temp;
+ goto start;
+ } else if (ret < 0)
return -ENODEV;
- }
+
return 0;
}
@@ -2346,43 +2325,26 @@ static int vxge_enable_msix(struct vxgedev *vdev)
{
int i, ret = 0;
- enum vxge_hw_status status;
/* 0 - Tx, 1 - Rx */
- int tim_msix_id[4];
- int alarm_msix_id = 0, msix_intr_vect = 0;
+ int tim_msix_id[4] = {0, 1, 0, 0};
+
vdev->intr_cnt = 0;
/* allocate msix vectors */
ret = vxge_alloc_msix(vdev);
if (!ret) {
- /* Last vector in the list is used for alarm */
- alarm_msix_id =
- VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
for (i = 0; i < vdev->no_of_vpath; i++) {
/* If fifo or ring are not enabled
the MSIX vector for that should be set to 0
Hence initializeing this array to all 0s.
*/
- memset(tim_msix_id, 0, sizeof(tim_msix_id));
- msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
- tim_msix_id[0] = msix_intr_vect;
-
- tim_msix_id[1] = msix_intr_vect + 1;
- vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1];
+ vdev->vpaths[i].ring.rx_vector_no =
+ (vdev->vpaths[i].device_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
- status = vxge_hw_vpath_msix_set(
- vdev->vpaths[i].handle,
- tim_msix_id, alarm_msix_id);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_msix_set "
- "failed with status : %x", status);
- kfree(vdev->entries);
- kfree(vdev->vxge_entries);
- pci_disable_msix(vdev->pdev);
- return -ENODEV;
- }
+ vxge_hw_vpath_msix_set(vdev->vpaths[i].handle,
+ tim_msix_id, VXGE_ALARM_MSIX_ID);
}
}
@@ -2393,7 +2355,7 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
{
int intr_cnt;
- for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1);
+ for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
intr_cnt++) {
if (vdev->vxge_entries[intr_cnt].in_use) {
synchronize_irq(vdev->entries[intr_cnt].vector);
@@ -2458,9 +2420,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
switch (msix_idx) {
case 0:
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge fn: %d vpath: %d Tx MSI-X: %d",
- vdev->ndev->name, pci_fun, vp_idx,
- vdev->entries[intr_cnt].entry);
+ "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
+ vdev->ndev->name,
+ vdev->entries[intr_cnt].entry,
+ pci_fun, vp_idx);
ret = request_irq(
vdev->entries[intr_cnt].vector,
vxge_tx_msix_handle, 0,
@@ -2472,9 +2435,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
break;
case 1:
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge fn: %d vpath: %d Rx MSI-X: %d",
- vdev->ndev->name, pci_fun, vp_idx,
- vdev->entries[intr_cnt].entry);
+ "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
+ vdev->ndev->name,
+ vdev->entries[intr_cnt].entry,
+ pci_fun, vp_idx);
ret = request_irq(
vdev->entries[intr_cnt].vector,
vxge_rx_msix_napi_handle,
@@ -2502,9 +2466,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
if (irq_req) {
/* We requested for this msix interrupt */
vdev->vxge_entries[intr_cnt].in_use = 1;
+ msix_idx += vdev->vpaths[vp_idx].device_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE;
vxge_hw_vpath_msix_unmask(
vdev->vpaths[vp_idx].handle,
- intr_idx);
+ msix_idx);
intr_cnt++;
}
@@ -2514,16 +2480,17 @@ static int vxge_add_isr(struct vxgedev *vdev)
vp_idx++;
}
- intr_cnt = vdev->max_vpath_supported * 2;
+ intr_cnt = vdev->no_of_vpath * 2;
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge Alarm fn: %d MSI-X: %d",
- vdev->ndev->name, pci_fun,
- vdev->entries[intr_cnt].entry);
+ "%s:vxge:MSI-X %d - Alarm - fn:%d",
+ vdev->ndev->name,
+ vdev->entries[intr_cnt].entry,
+ pci_fun);
/* For Alarm interrupts */
ret = request_irq(vdev->entries[intr_cnt].vector,
vxge_alarm_msix_handle, 0,
vdev->desc[intr_cnt],
- &vdev->vpaths[vp_idx]);
+ &vdev->vpaths[0]);
if (ret) {
vxge_debug_init(VXGE_ERR,
"%s: MSIX - %d Registration failed",
@@ -2536,16 +2503,19 @@ static int vxge_add_isr(struct vxgedev *vdev)
goto INTA_MODE;
}
+ msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
- intr_idx - 2);
+ msix_idx);
vdev->vxge_entries[intr_cnt].in_use = 1;
- vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx];
+ vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
}
INTA_MODE:
#endif
- snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
if (vdev->config.intr_type == INTA) {
+ snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
+ "%s:vxge:INTA", vdev->ndev->name);
vxge_hw_device_set_intr_type(vdev->devh,
VXGE_HW_INTR_MODE_IRQLINE);
vxge_hw_vpath_tti_ci_set(vdev->devh,
@@ -2844,7 +2814,6 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
for (i = 0; i < vdev->no_of_vpath; i++)
netif_napi_del(&vdev->vpaths[i].ring.napi);
}
- return;
}
int do_vxge_close(struct net_device *dev, int do_io)
@@ -3529,8 +3498,6 @@ static void verify_bandwidth(void)
for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
bw_percentage[i] = bw_percentage[0];
}
-
- return;
}
/*
@@ -3995,6 +3962,36 @@ static void vxge_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
}
+static inline u32 vxge_get_num_vfs(u64 function_mode)
+{
+ u32 num_functions = 0;
+
+ switch (function_mode) {
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
+ case VXGE_HW_FUNCTION_MODE_SRIOV_8:
+ num_functions = 8;
+ break;
+ case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
+ num_functions = 1;
+ break;
+ case VXGE_HW_FUNCTION_MODE_SRIOV:
+ case VXGE_HW_FUNCTION_MODE_MRIOV:
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
+ num_functions = 17;
+ break;
+ case VXGE_HW_FUNCTION_MODE_SRIOV_4:
+ num_functions = 4;
+ break;
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
+ num_functions = 2;
+ break;
+ case VXGE_HW_FUNCTION_MODE_MRIOV_8:
+ num_functions = 8; /* TODO */
+ break;
+ }
+ return num_functions;
+}
+
/**
* vxge_probe
* @pdev : structure containing the PCI related information of the device.
@@ -4022,14 +4019,19 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
u8 *macaddr;
struct vxge_mac_addrs *entry;
static int bus = -1, device = -1;
+ u32 host_type;
u8 new_device = 0;
+ enum vxge_hw_status is_privileged;
+ u32 function_mode;
+ u32 num_vfs = 0;
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
attr.pdev = pdev;
- if (bus != pdev->bus->number)
- new_device = 1;
- if (device != PCI_SLOT(pdev->devfn))
+ /* In SRIOV-17 mode, functions of the same adapter
+ * can be deployed on different buses */
+ if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
+ (device != PCI_SLOT(pdev->devfn))))
new_device = 1;
bus = pdev->bus->number;
@@ -4046,9 +4048,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
driver_config->total_dev_cnt);
driver_config->config_dev_cnt = 0;
driver_config->total_dev_cnt = 0;
- driver_config->g_no_cpus = 0;
}
-
+ /* Make the CPU-based vpath count calculation apply
+ * to individual functions as well.
+ */
+ driver_config->g_no_cpus = 0;
driver_config->vpath_per_dev = max_config_vpath;
driver_config->total_dev_cnt++;
@@ -4161,6 +4165,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
"%s:%d Vpath mask = %llx", __func__, __LINE__,
(unsigned long long)vpath_mask);
+ function_mode = ll_config.device_hw_info.function_mode;
+ host_type = ll_config.device_hw_info.host_type;
+ is_privileged = __vxge_hw_device_is_privilaged(host_type,
+ ll_config.device_hw_info.func_id);
+
/* Check how many vpaths are available */
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!((vpath_mask) & vxge_mBIT(i)))
@@ -4168,14 +4177,18 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
max_vpath_supported++;
}
+ if (new_device)
+ num_vfs = vxge_get_num_vfs(function_mode) - 1;
+
/* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
- if ((VXGE_HW_FUNCTION_MODE_SRIOV ==
- ll_config.device_hw_info.function_mode) &&
- (max_config_dev > 1) && (pdev->is_physfn)) {
- ret = pci_enable_sriov(pdev, max_config_dev - 1);
- if (ret)
- vxge_debug_ll_config(VXGE_ERR,
- "Failed to enable SRIOV: %d \n", ret);
+ if (is_sriov(function_mode) && (max_config_dev > 1) &&
+ (ll_config.intr_type != INTA) &&
+ (is_privileged == VXGE_HW_OK)) {
+ ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
+ ? (max_config_dev - 1) : num_vfs);
+ if (ret)
+ vxge_debug_ll_config(VXGE_ERR,
+ "Failed in enabling SRIOV mode: %d\n", ret);
}
/*
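Note: the vxge-main.c changes re-base MSI-X numbering on a fixed VXGE_ALARM_MSIX_ID and, when pci_enable_msix() reports fewer vectors than requested, shrink the vpath count and retry rather than giving up. A simplified sketch of that sizing loop (entry numbering and bookkeeping are reduced to a bare minimum; at this kernel version pci_enable_msix() returns a positive count of available vectors on partial failure):

        #include <linux/pci.h>
        #include <linux/slab.h>

        /* Two vectors (Tx/Rx) per vpath plus one alarm vector. */
        static int example_alloc_msix(struct pci_dev *pdev, int *no_of_vpath)
        {
                struct msix_entry *entries;
                int cnt, i, ret;

        retry:
                cnt = *no_of_vpath * 2 + 1;
                entries = kcalloc(cnt, sizeof(*entries), GFP_KERNEL);
                if (!entries)
                        return -ENOMEM;

                for (i = 0; i < cnt; i++)
                        entries[i].entry = i;

                ret = pci_enable_msix(pdev, entries, cnt);
                if (ret > 0) {
                        kfree(entries);
                        if (ret < 3)
                                return -ENODEV;
                        *no_of_vpath = (ret - 1) / 2;   /* fewer vpaths, fewer vectors */
                        goto retry;
                } else if (ret < 0) {
                        kfree(entries);
                        return -ENODEV;
                }

                kfree(entries); /* a real driver keeps this table for request_irq() */
                return 0;
        }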
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 7c83ba4..60276b2 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -31,6 +31,7 @@
#define PCI_DEVICE_ID_TITAN_UNI 0x5833
#define VXGE_USE_DEFAULT 0xffffffff
#define VXGE_HW_VPATH_MSIX_ACTIVE 4
+#define VXGE_ALARM_MSIX_ID 2
#define VXGE_HW_RXSYNC_FREQ_CNT 4
#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ)
#define VXGE_LL_RX_COPY_THRESHOLD 256
@@ -89,6 +90,11 @@
#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
+#define is_sriov(function_mode) \
+ ((function_mode == VXGE_HW_FUNCTION_MODE_SRIOV) || \
+ (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \
+ (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_4))
+
enum vxge_reset_event {
/* reset events */
VXGE_LL_VPATH_RESET = 0,
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 2c012f4..6cc1dd7 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -231,11 +231,8 @@ void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
- 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&channel->common_reg->set_msix_mask_vect[msix_id%4]);
-
- return;
}
/**
@@ -252,11 +249,8 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
- 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&channel->common_reg->clear_msix_mask_vect[msix_id%4]);
-
- return;
}
/**
@@ -331,8 +325,6 @@ void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
val64 = readq(&hldev->common_reg->titan_general_int_status);
vxge_hw_device_unmask_all(hldev);
-
- return;
}
/**
@@ -364,8 +356,6 @@ void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
vxge_hw_vpath_intr_disable(
VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
}
-
- return;
}
/**
@@ -385,8 +375,6 @@ void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->titan_mask_all_int);
-
- return;
}
/**
@@ -406,8 +394,6 @@ void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->titan_mask_all_int);
-
- return;
}
/**
@@ -649,8 +635,6 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
&hldev->common_reg->tim_int_status1);
}
-
- return;
}
/*
@@ -878,7 +862,7 @@ void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
channel = &ring->channel;
- rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+ rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
if (ring->stats->common_stats.usage_cnt > 0)
ring->stats->common_stats.usage_cnt--;
@@ -902,7 +886,7 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
channel = &ring->channel;
wmb();
- rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+ rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
vxge_hw_channel_dtr_post(channel, rxdh);
@@ -966,6 +950,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
struct __vxge_hw_channel *channel;
struct vxge_hw_ring_rxd_1 *rxdp;
enum vxge_hw_status status = VXGE_HW_OK;
+ u64 control_0, own;
channel = &ring->channel;
@@ -977,8 +962,12 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
goto exit;
}
+ control_0 = rxdp->control_0;
+ own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+ *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
+
/* check whether it is not the end */
- if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {
+ if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
0);
@@ -986,8 +975,6 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
++ring->cmpl_cnt;
vxge_hw_channel_dtr_complete(channel);
- *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
-
vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
ring->stats->common_stats.usage_cnt++;
@@ -1035,12 +1022,13 @@ enum vxge_hw_status vxge_hw_ring_handle_tcode(
* such as unknown UPV6 header), Drop it !!!
*/
- if (t_code == 0 || t_code == 5) {
+ if (t_code == VXGE_HW_RING_T_CODE_OK ||
+ t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
status = VXGE_HW_OK;
goto exit;
}
- if (t_code > 0xF) {
+ if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
status = VXGE_HW_ERR_INVALID_TCODE;
goto exit;
}
@@ -2216,29 +2204,24 @@ exit:
* This API will associate a given MSIX vector numbers with the four TIM
* interrupts and alarm interrupt.
*/
-enum vxge_hw_status
+void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
int alarm_msix_id)
{
u64 val64;
struct __vxge_hw_virtualpath *vpath = vp->vpath;
struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
- u32 first_vp_id = vpath->hldev->first_vp_id;
+ u32 vp_id = vp->vpath->vp_id;
val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
- (first_vp_id * 4) + tim_msix_id[0]) |
+ (vp_id * 4) + tim_msix_id[0]) |
VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
- (first_vp_id * 4) + tim_msix_id[1]) |
- VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
- (first_vp_id * 4) + tim_msix_id[2]);
-
- val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
- (first_vp_id * 4) + tim_msix_id[3]);
+ (vp_id * 4) + tim_msix_id[1]);
writeq(val64, &vp_reg->interrupt_cfg0);
writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
- (first_vp_id * 4) + alarm_msix_id),
+ (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
&vp_reg->interrupt_cfg2);
if (vpath->hldev->config.intr_mode ==
@@ -2258,8 +2241,6 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
0, 32), &vp_reg->one_shot_vect3_en);
}
-
- return VXGE_HW_OK;
}
/**
@@ -2279,11 +2260,8 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
struct __vxge_hw_device *hldev = vp->vpath->hldev;
__vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
- (msix_id / 4)), 0, 32),
+ (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
-
- return;
}
/**
@@ -2305,19 +2283,15 @@ vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
if (hldev->config.intr_mode ==
VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
- (msix_id/4)), 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->
clr_msix_one_shot_vec[msix_id%4]);
} else {
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
- (msix_id/4)), 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->
clear_msix_mask_vect[msix_id%4]);
}
-
- return;
}
/**
@@ -2337,11 +2311,8 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
struct __vxge_hw_device *hldev = vp->vpath->hldev;
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
- (msix_id/4)), 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
-
- return;
}
/**
@@ -2358,8 +2329,6 @@ vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
__vxge_hw_pio_mem_write32_upper(
(u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
&vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
-
- return;
}
/**
@@ -2398,8 +2367,6 @@ void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
&hldev->common_reg->tim_int_mask1);
}
-
- return;
}
/**
@@ -2436,8 +2403,6 @@ void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
&hldev->common_reg->tim_int_mask1);
}
-
- return;
}
/**
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 861c853..c252f3d 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1866,6 +1866,51 @@ struct vxge_hw_ring_rxd_info {
u32 rth_hash_type;
u32 rth_value;
};
+/**
+ * enum vxge_hw_ring_tcode - Transfer codes returned by adapter
+ * @VXGE_HW_RING_T_CODE_OK: Transfer ok.
+ * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation
+ * configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation
+ * configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum
+ * presentation configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error unparseable packet,
+ * such as unknown IPv6 header.
+ * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error frame integrity
+ * error, such as FCS or ECC).
+ * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error the RxD buffer(
+ * s) were not appropriately sized and data loss occurred.
+ * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error RxD corrupted.
+ * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow the contents of
+ * Segment1 exceeded the capacity of Buffer1 and the remainder
+ * was placed in Buffer2. Segment2 now starts in Buffer3.
+ * No data loss or errors occurred.
+ * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0 one of the RxDs
+ * assigned buffers has a size of 0 bytes.
+ * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped either due to
+ * VPath Reset or because of a VPIN mismatch.
+ * @VXGE_HW_RING_T_CODE_UNUSED: Unused
+ * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors more than one
+ * transfer code condition occurred.
+ *
+ * Transfer codes returned by adapter.
+ */
+enum vxge_hw_ring_tcode {
+ VXGE_HW_RING_T_CODE_OK = 0x0,
+ VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1,
+ VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2,
+ VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3,
+ VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5,
+ VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6,
+ VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7,
+ VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8,
+ VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9,
+ VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA,
+ VXGE_HW_RING_T_CODE_FRM_DROP = 0xC,
+ VXGE_HW_RING_T_CODE_UNUSED = 0xE,
+ VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
+};
/**
* enum enum vxge_hw_ring_hash_type - RTH hash types
@@ -1910,7 +1955,7 @@ vxge_hw_ring_rxd_post_post(
void *rxdh);
enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle, u16 min_flag);
+vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle);
void
vxge_hw_ring_rxd_post_post_wmb(
@@ -2042,7 +2087,6 @@ void vxge_hw_fifo_txdl_free(
#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
-#define VXGE_HW_RING_MIN_BUFF_ALLOCATION 64
/*
* struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
@@ -2332,7 +2376,7 @@ enum vxge_hw_status vxge_hw_vpath_alarm_process(
struct __vxge_hw_vpath_handle *vpath_handle,
u32 skip_alarms);
-enum vxge_hw_status
+void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
int *tim_msix_id, int alarm_msix_id);
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 77c2a75..5da7ab1 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -17,7 +17,7 @@
#define VXGE_VERSION_MAJOR "2"
#define VXGE_VERSION_MINOR "0"
-#define VXGE_VERSION_FIX "6"
-#define VXGE_VERSION_BUILD "18937"
+#define VXGE_VERSION_FIX "8"
+#define VXGE_VERSION_BUILD "20182"
#define VXGE_VERSION_FOR "k"
#endif
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index cd8cb95..cf9e15f 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -634,11 +634,12 @@ static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
}
} else { /* chan->protocol == ETH_P_X25 */
switch (skb->data[0]) {
- case 0: break;
- case 1: /* Connect request */
+ case X25_IFACE_DATA:
+ break;
+ case X25_IFACE_CONNECT:
cycx_x25_chan_connect(dev);
goto free_packet;
- case 2: /* Disconnect request */
+ case X25_IFACE_DISCONNECT:
cycx_x25_chan_disconnect(dev);
goto free_packet;
default:
@@ -1406,7 +1407,8 @@ static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
reset_timer(dev);
if (chan->protocol == ETH_P_X25)
- cycx_x25_chan_send_event(dev, 1);
+ cycx_x25_chan_send_event(dev,
+ X25_IFACE_CONNECT);
break;
case WAN_CONNECTING:
@@ -1424,7 +1426,8 @@ static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
}
if (chan->protocol == ETH_P_X25)
- cycx_x25_chan_send_event(dev, 2);
+ cycx_x25_chan_send_event(dev,
+ X25_IFACE_DISCONNECT);
netif_wake_queue(dev);
break;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index a4859f7..d45b08d 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1175,8 +1175,6 @@ static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb,
spin_unlock(&dpriv->lock);
#endif
- dev->trans_start = jiffies;
-
if (debug > 2)
dscc4_tx_print(dev, dpriv, "Xmit");
/* To be cleaned(unsigned int)/optimized. Later, ok ? */
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index 4dde2ea..a3ea27c 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -658,7 +658,6 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
writew(len, &desc->len);
writeb(ST_TX_EOM, &desc->stat);
- dev->trans_start = jiffies;
port->txin = next_desc(port, port->txin, 1);
sca_outw(desc_offset(port, port->txin, 1),
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index aad9ed4..ea476cb 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -585,7 +585,6 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
writew(len, &desc->len);
writeb(ST_TX_EOM, &desc->stat);
- dev->trans_start = jiffies;
port->txin = (port->txin + 1) % card->tx_ring_buffers;
sca_outl(desc_offset(port, port->txin, 1),
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index c7adbb7..70527e5 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -49,14 +49,14 @@ static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
static void x25_connected(struct net_device *dev, int reason)
{
- x25_connect_disconnect(dev, reason, 1);
+ x25_connect_disconnect(dev, reason, X25_IFACE_CONNECT);
}
static void x25_disconnected(struct net_device *dev, int reason)
{
- x25_connect_disconnect(dev, reason, 2);
+ x25_connect_disconnect(dev, reason, X25_IFACE_DISCONNECT);
}
@@ -71,7 +71,7 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
return NET_RX_DROP;
ptr = skb->data;
- *ptr = 0;
+ *ptr = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, dev);
return netif_rx(skb);
@@ -94,13 +94,13 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
/* X.25 to LAPB */
switch (skb->data[0]) {
- case 0: /* Data to be transmitted */
+ case X25_IFACE_DATA: /* Data to be transmitted */
skb_pull(skb, 1);
if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
dev_kfree_skb(skb);
return NETDEV_TX_OK;
- case 1:
+ case X25_IFACE_CONNECT:
if ((result = lapb_connect_request(dev))!= LAPB_OK) {
if (result == LAPB_CONNECTED)
/* Send connect confirm. msg to level 3 */
@@ -112,7 +112,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
}
break;
- case 2:
+ case X25_IFACE_DISCONNECT:
if ((result = lapb_disconnect_request(dev)) != LAPB_OK) {
if (result == LAPB_NOTCONNECTED)
/* Send disconnect confirm. msg to level 3 */
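Note: these X.25 hunks replace the bare 0/1/2 first-byte codes with the named X25_IFACE_* constants. A short sketch of the convention (the helper is illustrative):

        #include <linux/skbuff.h>
        #include <linux/if_x25.h>

        /* The first byte of each pseudo-packet exchanged with the X.25 layer
         * selects data, connect or disconnect handling. */
        static int example_x25_first_byte(struct sk_buff *skb)
        {
                switch (skb->data[0]) {
                case X25_IFACE_DATA:            /* payload follows this byte */
                        return 0;
                case X25_IFACE_CONNECT:         /* level 3 requests a connection */
                        return 1;
                case X25_IFACE_DISCONNECT:
                        return 2;
                default:
                        return -1;
                }
        }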
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 0c2cdde..88e3630 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -891,7 +891,6 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
- dev->trans_start = jiffies;
if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 98e2f99..4d4dc38 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -139,7 +139,7 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
return NET_RX_DROP;
ptr = skb->data;
- *ptr = 0x00;
+ *ptr = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, dev);
return netif_rx(skb);
@@ -161,14 +161,14 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
goto drop;
switch (skb->data[0]) {
- case 0x00:
+ case X25_IFACE_DATA:
break;
- case 0x01:
+ case X25_IFACE_CONNECT:
if ((err = lapb_connect_request(dev)) != LAPB_OK)
printk(KERN_ERR "lapbeth: lapb_connect_request "
"error: %d\n", err);
goto drop;
- case 0x02:
+ case X25_IFACE_DISCONNECT:
if ((err = lapb_disconnect_request(dev)) != LAPB_OK)
printk(KERN_ERR "lapbeth: lapb_disconnect_request "
"err: %d\n", err);
@@ -225,7 +225,7 @@ static void lapbeth_connected(struct net_device *dev, int reason)
}
ptr = skb_put(skb, 1);
- *ptr = 0x01;
+ *ptr = X25_IFACE_CONNECT;
skb->protocol = x25_type_trans(skb, dev);
netif_rx(skb);
@@ -242,7 +242,7 @@ static void lapbeth_disconnected(struct net_device *dev, int reason)
}
ptr = skb_put(skb, 1);
- *ptr = 0x02;
+ *ptr = X25_IFACE_DISCONNECT;
skb->protocol = x25_type_trans(skb, dev);
netif_rx(skb);
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index b278503..e2c6f7f 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1506,8 +1506,6 @@ static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
/* send now! */
LMC_CSR_WRITE (sc, csr_txpoll, 0);
- dev->trans_start = jiffies;
-
spin_unlock_irqrestore(&sc->lmc_lock, flags);
lmc_trace(dev, "lmc_start_xmit_out");
@@ -2103,7 +2101,7 @@ static void lmc_driver_timeout(struct net_device *dev)
printk("%s: Xmitter busy|\n", dev->name);
sc->extra_stats.tx_tbusy_calls++;
- if (jiffies - dev->trans_start < TX_TIMEOUT)
+ if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
goto bug_out;
/*
@@ -2135,7 +2133,7 @@ static void lmc_driver_timeout(struct net_device *dev)
sc->lmc_device->stats.tx_errors++;
sc->extra_stats.tx_ProcTimeout++; /* -baz */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
bug_out:
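
The hunks above stop open-coding dev->trans_start = jiffies in the xmit paths and read the stamp through dev_trans_start() in the watchdog instead. A minimal sketch of that pattern, with a hypothetical driver and timeout value:

#include <linux/netdevice.h>
#include <linux/jiffies.h>

#define MY_TX_TIMEOUT (4 * HZ)		/* assumed value for this sketch */

static void my_tx_timeout(struct net_device *dev)
{
	/* dev_trans_start() returns the last transmission start time
	 * recorded by the networking core for this device. */
	if (time_before(jiffies, dev_trans_start(dev) + MY_TX_TIMEOUT))
		return;			/* queue is not actually stuck yet */

	dev->stats.tx_errors++;
	netif_wake_queue(dev);		/* kick the stalled queue */
}
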
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 3f744c6..c6aa66e 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -396,7 +396,7 @@ static void tx1_dma_buf_check(pc300_t * card, int ch)
u16 next_bd = card->chan[ch].tx_next_bd;
u32 scabase = card->hw.scabase;
- printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd);
+ printk ("\nnfree_tx_bd = %d\n", card->chan[ch].nfree_tx_bd);
printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch,
first_bd, TX_BD_ADDR(ch, first_bd),
next_bd, TX_BD_ADDR(ch, next_bd));
@@ -1790,7 +1790,7 @@ static void cpc_tx_timeout(struct net_device *dev)
cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) &
~(CPLD_REG2_FALC_LED1 << (2 * ch)));
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
CPC_UNLOCK(card, flags);
netif_wake_queue(dev);
}
@@ -1849,7 +1849,6 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
if (d->trace_on) {
cpc_trace(dev, skb, 'T');
}
- dev->trans_start = jiffies;
/* Start transmission */
CPC_LOCK(card, flags);
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 4917a94..4293889 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -366,7 +366,7 @@ static void cpc_tty_close(struct tty_struct *tty, struct file *flip)
int res;
if (!tty || !tty->driver_data ) {
- CPC_TTY_DBG("hdlx-tty: no TTY in close \n");
+ CPC_TTY_DBG("hdlx-tty: no TTY in close\n");
return;
}
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 31c41af..43ae6f4 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -1352,7 +1352,7 @@ static int sdla_set_config(struct net_device *dev, struct ifmap *map)
return(-EINVAL);
if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){
- printk(KERN_WARNING "SDLA: io-port 0x%04lx in use \n", dev->base_addr);
+ printk(KERN_WARNING "SDLA: io-port 0x%04lx in use\n", dev->base_addr);
return(-EINVAL);
}
base = map->base_addr;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 541c700..db73a7b 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -298,7 +298,6 @@ static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
desc->stat = PACKET_FULL;
writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
port->card->plx + PLX_DOORBELL_TO_CARD);
- dev->trans_start = jiffies;
port->tx_out = (port->tx_out + 1) % TX_BUFFERS;
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 80d5c58..166e77d 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -29,12 +29,12 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
-#include <linux/x25.h>
#include <linux/lapb.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/slab.h>
+#include <net/x25device.h>
#include "x25_asy.h"
#include <net/x25device.h>
@@ -315,15 +315,15 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
}
switch (skb->data[0]) {
- case 0x00:
+ case X25_IFACE_DATA:
break;
- case 0x01: /* Connection request .. do nothing */
+ case X25_IFACE_CONNECT: /* Connection request .. do nothing */
err = lapb_connect_request(dev);
if (err != LAPB_OK)
printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err);
kfree_skb(skb);
return NETDEV_TX_OK;
- case 0x02: /* Disconnect request .. do nothing - hang up ?? */
+ case X25_IFACE_DISCONNECT: /* do nothing - hang up ?? */
err = lapb_disconnect_request(dev);
if (err != LAPB_OK)
printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err);
@@ -411,7 +411,7 @@ static void x25_asy_connected(struct net_device *dev, int reason)
}
ptr = skb_put(skb, 1);
- *ptr = 0x01;
+ *ptr = X25_IFACE_CONNECT;
skb->protocol = x25_type_trans(skb, sl->dev);
netif_rx(skb);
@@ -430,7 +430,7 @@ static void x25_asy_disconnected(struct net_device *dev, int reason)
}
ptr = skb_put(skb, 1);
- *ptr = 0x02;
+ *ptr = X25_IFACE_DISCONNECT;
skb->protocol = x25_type_trans(skb, sl->dev);
netif_rx(skb);
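
The x25/lapb hunks above replace the magic first-byte values 0x00/0x01/0x02 with named constants. A condensed sketch of the dispatch, assuming the X25_IFACE_* definitions from <linux/if_x25.h> introduced by this series:

#include <linux/if_x25.h>
#include <linux/skbuff.h>

static int x25_pseudo_hdr_type(const struct sk_buff *skb)
{
	switch (skb->data[0]) {
	case X25_IFACE_DATA:		/* payload follows the marker byte */
		return 0;
	case X25_IFACE_CONNECT:		/* level 3 requests a connection */
		return 1;
	case X25_IFACE_DISCONNECT:	/* level 3 requests a teardown */
		return 2;
	default:
		return -1;		/* unknown pseudo-header byte */
	}
}
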
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index d8322d2..746a5ee 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -395,7 +395,6 @@ wd_reset_8390(struct net_device *dev)
outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5);
if (ei_debug > 1) printk("reset done\n");
- return;
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 6180772..d86e8f3 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -83,6 +83,21 @@
#define D_SUBMODULE control
#include "debug-levels.h"
+static int i2400m_idle_mode_disabled; /* 0 (idle mode enabled) by default */
+module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
+MODULE_PARM_DESC(idle_mode_disabled,
+ "If true, the device will not enable idle mode negotiation "
+ "with the base station (when connected) to save power.");
+
+/* 0 (power saving enabled) by default */
+static int i2400m_power_save_disabled;
+module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644);
+MODULE_PARM_DESC(power_save_disabled,
+ "If true, the driver will not tell the device to enter "
+ "power saving mode when it reports it is ready for it. "
+ "False by default (so the device is told to do power "
+ "saving).");
+
int i2400m_passive_mode; /* 0 (passive mode disabled) by default */
module_param_named(passive_mode, i2400m_passive_mode, int, 0644);
MODULE_PARM_DESC(passive_mode,
@@ -346,7 +361,7 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
i2400m_state);
i2400m_reset(i2400m, I2400M_RT_WARM);
break;
- };
+ }
d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n",
i2400m, ss, i2400m_state);
}
@@ -395,7 +410,7 @@ void i2400m_report_tlv_media_status(struct i2400m *i2400m,
default:
dev_err(dev, "HW BUG? unknown media status %u\n",
status);
- };
+ }
d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n",
i2400m, ms, status);
}
@@ -524,7 +539,7 @@ void i2400m_report_hook(struct i2400m *i2400m,
}
}
break;
- };
+ }
d_fnend(3, dev, "(i2400m %p l3l4_hdr %p size %zu) = void\n",
i2400m, l3l4_hdr, size);
}
@@ -567,8 +582,7 @@ void i2400m_msg_ack_hook(struct i2400m *i2400m,
size);
}
break;
- };
- return;
+ }
}
@@ -740,7 +754,7 @@ struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m,
break;
default:
ack_timeout = HZ;
- };
+ }
if (unlikely(i2400m->trace_msg_from_user))
wimax_msg(&i2400m->wimax_dev, "echo", buf, buf_len, GFP_KERNEL);
@@ -1419,5 +1433,4 @@ void i2400m_dev_shutdown(struct i2400m *i2400m)
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
- return;
}
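
The two parameters above move out of driver.c and become static to the file that consumes them. A small sketch of the pattern, with made-up names:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_feature_disabled;	/* 0 (feature enabled) by default */
module_param_named(feature_disabled, example_feature_disabled, int, 0644);
MODULE_PARM_DESC(feature_disabled,
		 "If true, the example feature is not negotiated.");
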
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 94dc83c..9c8b78d 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -75,25 +75,6 @@
#include "debug-levels.h"
-int i2400m_idle_mode_disabled; /* 0 (idle mode enabled) by default */
-module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
-MODULE_PARM_DESC(idle_mode_disabled,
- "If true, the device will not enable idle mode negotiation "
- "with the base station (when connected) to save power.");
-
-int i2400m_rx_reorder_disabled; /* 0 (rx reorder enabled) by default */
-module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
-MODULE_PARM_DESC(rx_reorder_disabled,
- "If true, RX reordering will be disabled.");
-
-int i2400m_power_save_disabled; /* 0 (power saving enabled) by default */
-module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644);
-MODULE_PARM_DESC(power_save_disabled,
- "If true, the driver will not tell the device to enter "
- "power saving mode when it reports it is ready for it. "
- "False by default (so the device is told to do power "
- "saving).");
-
static char i2400m_debug_params[128];
module_param_string(debug, i2400m_debug_params, sizeof(i2400m_debug_params),
0644);
@@ -395,6 +376,16 @@ retry:
result = i2400m_dev_initialize(i2400m);
if (result < 0)
goto error_dev_initialize;
+
+ /* We don't want any unwanted error recovery triggered
+ * from any other context, so if anything went wrong before we come
+ * here, let's keep i2400m->error_recovery untouched and leave it to
+ * dev_reset_handle(). See dev_reset_handle(). */
+
+ atomic_dec(&i2400m->error_recovery);
+ /* Everything has worked so far, so we are now ready to
+ * take error recovery if it is required. */
+
/* At this point, reports will come for the device and set it
* to the right state if it is different than UNINITIALIZED */
d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
@@ -403,10 +394,10 @@ retry:
error_dev_initialize:
error_check_mac_addr:
+error_fw_check:
i2400m->ready = 0;
wmb(); /* see i2400m->ready's documentation */
flush_workqueue(i2400m->work_queue);
-error_fw_check:
if (i2400m->bus_dev_stop)
i2400m->bus_dev_stop(i2400m);
error_bus_dev_start:
@@ -436,7 +427,8 @@ int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags)
result = __i2400m_dev_start(i2400m, bm_flags);
if (result >= 0) {
i2400m->updown = 1;
- wmb(); /* see i2400m->updown's documentation */
+ i2400m->alive = 1;
+ wmb(); /* see i2400m->updown and i2400m->alive's doc */
}
}
mutex_unlock(&i2400m->init_mutex);
@@ -497,7 +489,8 @@ void i2400m_dev_stop(struct i2400m *i2400m)
if (i2400m->updown) {
__i2400m_dev_stop(i2400m);
i2400m->updown = 0;
- wmb(); /* see i2400m->updown's documentation */
+ i2400m->alive = 0;
+ wmb(); /* see i2400m->updown and i2400m->alive's doc */
}
mutex_unlock(&i2400m->init_mutex);
}
@@ -617,12 +610,12 @@ int i2400m_post_reset(struct i2400m *i2400m)
error_dev_start:
if (i2400m->bus_release)
i2400m->bus_release(i2400m);
-error_bus_setup:
/* even if the device was up, it could not be recovered, so we
* mark it as down. */
i2400m->updown = 0;
wmb(); /* see i2400m->updown's documentation */
mutex_unlock(&i2400m->init_mutex);
+error_bus_setup:
d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
return result;
}
@@ -669,6 +662,9 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
+ i2400m->boot_mode = 1;
+ wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */
+
result = 0;
if (mutex_trylock(&i2400m->init_mutex) == 0) {
/* We are still in i2400m_dev_start() [let it fail] or
@@ -679,39 +675,68 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
complete(&i2400m->msg_completion);
goto out;
}
- if (i2400m->updown == 0) {
- dev_info(dev, "%s: device is down, doing nothing\n", reason);
- goto out_unlock;
- }
+
dev_err(dev, "%s: reinitializing driver\n", reason);
- __i2400m_dev_stop(i2400m);
- result = __i2400m_dev_start(i2400m,
- I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
- if (result < 0) {
+ rmb();
+ if (i2400m->updown) {
+ __i2400m_dev_stop(i2400m);
i2400m->updown = 0;
wmb(); /* see i2400m->updown's documentation */
- dev_err(dev, "%s: cannot start the device: %d\n",
- reason, result);
- result = -EUCLEAN;
}
-out_unlock:
+
+ if (i2400m->alive) {
+ result = __i2400m_dev_start(i2400m,
+ I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
+ if (result < 0) {
+ dev_err(dev, "%s: cannot start the device: %d\n",
+ reason, result);
+ result = -EUCLEAN;
+ if (atomic_read(&i2400m->bus_reset_retries)
+ >= I2400M_BUS_RESET_RETRIES) {
+ result = -ENODEV;
+ dev_err(dev, "tried too many times to "
+ "reset the device, giving up\n");
+ }
+ }
+ }
+
if (i2400m->reset_ctx) {
ctx->result = result;
complete(&ctx->completion);
}
mutex_unlock(&i2400m->init_mutex);
if (result == -EUCLEAN) {
+ /*
+ * We come here because the reset during operational mode
+ * wasn't successfully done and we need to proceed to a bus
+ * reset. For the dev_reset_handle() to be able to handle
+ * the reset event later properly, we restore boot_mode back
+ * to the state before the previous reset, i.e. just like we are
+ * issuing the bus reset for the first time
+ */
+ i2400m->boot_mode = 0;
+ wmb();
+
+ atomic_inc(&i2400m->bus_reset_retries);
/* ops, need to clean up [w/ init_mutex not held] */
result = i2400m_reset(i2400m, I2400M_RT_BUS);
if (result >= 0)
result = -ENODEV;
+ } else {
+ rmb();
+ if (i2400m->alive) {
+ /* great, we expect the device state to be up and
+ * dev_start() actually brought the device state up */
+ i2400m->updown = 1;
+ wmb();
+ atomic_set(&i2400m->bus_reset_retries, 0);
+ }
}
out:
i2400m_put(i2400m);
kfree(iw);
d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
ws, i2400m, reason);
- return;
}
@@ -729,14 +754,72 @@ out:
*/
int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
{
- i2400m->boot_mode = 1;
- wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */
return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
GFP_ATOMIC, &reason, sizeof(reason));
}
EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
+/*
+ * The actual work of error recovery.
+ *
+ * The current implementation of error recovery is to trigger a bus reset.
+ */
+static
+void __i2400m_error_recovery(struct work_struct *ws)
+{
+ struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
+ struct i2400m *i2400m = iw->i2400m;
+
+ i2400m_reset(i2400m, I2400M_RT_BUS);
+
+ i2400m_put(i2400m);
+ kfree(iw);
+ return;
+}
+
+/*
+ * Schedule a work struct for error recovery.
+ *
+ * The intention of error recovery is to bring back the device to some
+ * known state whenever TX sees -110 (-ETIMEDOUT) on copying the data to
+ * the device. The TX failure could mean the device bus is stuck, so the
+ * current error recovery implementation is to trigger a bus reset to the
+ * device in the hope of bringing it back.
+ *
+ * The actual work of error recovery has to be in a thread context because
+ * it is kicked off in the TX thread (i2400ms->tx_workqueue) which is to be
+ * destroyed by the error recovery mechanism (currently a bus reset).
+ *
+ * Also, there may already be a queue of TX works that all hit
+ * the -ETIMEDOUT error condition because the device is stuck already.
+ * Since bus reset is used as the error recovery mechanism and we don't
+ * want consecutive bus resets simply because multiple TX works
+ * in the queue all hit the same device erratum, the flag "error_recovery"
+ * is introduced to prevent unwanted consecutive bus resets.
+ *
+ * Error recovery shall only be invoked again if the previous one has completed.
+ * The flag error_recovery is set when the error recovery mechanism is scheduled,
+ * and is checked when we need to schedule another error recovery. If it is
+ * already in place, we shouldn't schedule another one.
+ */
+void i2400m_error_recovery(struct i2400m *i2400m)
+{
+ struct device *dev = i2400m_dev(i2400m);
+
+ if (atomic_add_return(1, &i2400m->error_recovery) == 1) {
+ if (i2400m_schedule_work(i2400m, __i2400m_error_recovery,
+ GFP_ATOMIC, NULL, 0) < 0) {
+ dev_err(dev, "run out of memory for "
+ "scheduling an error recovery ?\n");
+ atomic_dec(&i2400m->error_recovery);
+ }
+ } else
+ atomic_dec(&i2400m->error_recovery);
+ return;
+}
+EXPORT_SYMBOL_GPL(i2400m_error_recovery);
+
/*
* Alloc the command and ack buffers for boot mode
*
@@ -803,6 +886,13 @@ void i2400m_init(struct i2400m *i2400m)
mutex_init(&i2400m->init_mutex);
/* wake_tx_ws is initialized in i2400m_tx_setup() */
+ atomic_set(&i2400m->bus_reset_retries, 0);
+
+ i2400m->alive = 0;
+
+ /* initialize error_recovery to 1 to denote that we
+ * are not yet ready to take any error recovery */
+ atomic_set(&i2400m->error_recovery, 1);
}
EXPORT_SYMBOL_GPL(i2400m_init);
@@ -996,7 +1086,6 @@ void __exit i2400m_driver_exit(void)
/* for scheds i2400m_dev_reset_handle() */
flush_scheduled_work();
i2400m_barker_db_exit();
- return;
}
module_exit(i2400m_driver_exit);
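
A condensed sketch of the error_recovery gate added above: only the caller that finds the counter at 0 gets to schedule a recovery, and dev_start() re-arms the gate by decrementing the counter once the device is up. The struct and names are illustrative only:

#include <linux/types.h>
#include <linux/atomic.h>	/* <asm/atomic.h> on older trees */

struct recovery_ctx {
	atomic_t error_recovery;	/* 1 = not ready or in flight, 0 = ready */
};

static bool try_begin_recovery(struct recovery_ctx *ctx)
{
	if (atomic_add_return(1, &ctx->error_recovery) == 1)
		return true;		/* we own this recovery attempt */
	atomic_dec(&ctx->error_recovery);
	return false;			/* one is already in progress */
}

static void end_recovery(struct recovery_ctx *ctx)
{
	atomic_dec(&ctx->error_recovery);	/* re-arm the gate */
}
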
diff --git a/drivers/net/wimax/i2400m/i2400m-sdio.h b/drivers/net/wimax/i2400m/i2400m-sdio.h
index b9c4bed..360d4fb 100644
--- a/drivers/net/wimax/i2400m/i2400m-sdio.h
+++ b/drivers/net/wimax/i2400m/i2400m-sdio.h
@@ -99,7 +99,10 @@ enum {
*
* @tx_workqueue: workqeueue used for data TX; we don't use the
* system's workqueue as that might cause deadlocks with code in
- * the bus-generic driver.
+ * the bus-generic driver. Read/write access to the queue
+ * is protected with a spinlock (tx_lock in struct i2400m) to avoid
+ * the queue being destroyed in the middle of a read/write
+ * operation.
*
* @debugfs_dentry: dentry for the SDIO specific debugfs files
*
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 820b128..fa74777 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -160,6 +160,16 @@
#include <linux/wimax/i2400m.h>
#include <asm/byteorder.h>
+enum {
+/* netdev interface */
+ /*
+ * Out of NWG spec (R1_v1.2.2), 3.3.3 ASN Bearer Plane MTU Size
+ *
+ * The MTU is 1400 or less
+ */
+ I2400M_MAX_MTU = 1400,
+};
+
/* Misc constants */
enum {
/* Size of the Boot Mode Command buffer */
@@ -167,6 +177,11 @@ enum {
I2400M_BM_ACK_BUF_SIZE = 256,
};
+enum {
+ /* Maximum number of bus reset can be retried */
+ I2400M_BUS_RESET_RETRIES = 3,
+};
+
/**
* struct i2400m_poke_table - Hardware poke table for the Intel 2400m
*
@@ -227,6 +242,11 @@ struct i2400m_barker_db;
* so we have a tx_blk_size variable that the bus layer sets to
* tell the engine how much of that we need.
*
+ * @bus_tx_room_min: [fill] Minimum room required while allocating
+ * TX queue's buffer space for message header. SDIO requires
+ * 224 bytes and USB 16 bytes. Refer bus specific driver code
+ * for details.
+ *
* @bus_pl_size_max: [fill] Maximum payload size.
*
* @bus_setup: [optional fill] Function called by the bus-generic code
@@ -397,7 +417,7 @@ struct i2400m_barker_db;
*
* @tx_size_max: biggest TX message sent.
*
- * @rx_lock: spinlock to protect RX members
+ * @rx_lock: spinlock to protect RX members and rx_roq_refcount.
*
* @rx_pl_num: total number of payloads received
*
@@ -421,6 +441,10 @@ struct i2400m_barker_db;
* delivered. Then the driver can release them to the host. See
* drivers/net/i2400m/rx.c for details.
*
+ * @rx_roq_refcount: refcount for rx_roq. Counts every access to
+ * rx_roq, thus preventing rx_roq from being destroyed while it
+ * is being accessed. rx_roq_refcount is protected by rx_lock.
+ *
* @rx_reports: reports received from the device that couldn't be
* processed because the driver wasn't still ready; when ready,
* they are pulled from here and chewed.
@@ -507,6 +531,38 @@ struct i2400m_barker_db;
* same.
*
* @pm_notifier: used to register for PM events
+ *
+ * @bus_reset_retries: counter for the number of bus resets attempted for
+ * this boot. It's not for tracking the number of bus resets during
+ * the whole driver life cycle (from insmod to rmmod) but for the
+ * number of dev_start() calls executed until dev_start() returns success
+ * (i.e. a good boot means a dev_stop() followed by a successful
+ * dev_start()). dev_reset_handle() increments this counter whenever
+ * it triggers a bus reset. It checks this counter to decide if a
+ * subsequent bus reset should be retried. dev_reset_handle() retries
+ * the bus reset until dev_start() succeeds or the counter reaches
+ * I2400M_BUS_RESET_RETRIES. The counter is cleared to 0 in
+ * dev_reset_handle() when dev_start() returns success,
+ * i.e. a successful boot has completed.
+ *
+ * @alive: flag to denote if the device *should* be alive. This flag is
+ * everything like @updown (see doc for @updown) except reflecting
+ * the device state *we expect* rather than the actual state as denoted
+ * by @updown. It is set to 1 whenever @updown is set to 1 in dev_start().
+ * Then the device is expected to be alive all the time
+ * (i2400m->alive remains 1) until the driver is removed. Therefore
+ * all the device reboot events detected can still be handled properly
+ * by either dev_reset_handle() or .pre_reset/.post_reset as long as
+ * the driver is present. It is set to 0 along with @updown in dev_stop().
+ *
+ * @error_recovery: flag to denote if we are ready to take an error recovery.
+ * 0 for ready to take an error recovery; 1 for not ready. It is
+ * initialized to 1 in probe() since we do not take any error
+ * recovery during probe(). It is decremented by 1 whenever dev_start()
+ * succeeds, to indicate we are ready to take error recovery from then on.
+ * It is checked every time we want to schedule an error recovery. If an
+ * error recovery is already in place (error_recovery was set to 1), we
+ * should not schedule another one until the last one is done.
*/
struct i2400m {
struct wimax_dev wimax_dev; /* FIRST! See doc */
@@ -522,6 +578,7 @@ struct i2400m {
wait_queue_head_t state_wq; /* Woken up when on state updates */
size_t bus_tx_block_size;
+ size_t bus_tx_room_min;
size_t bus_pl_size_max;
unsigned bus_bm_retries;
@@ -550,10 +607,12 @@ struct i2400m {
tx_num, tx_size_acc, tx_size_min, tx_size_max;
/* RX stuff */
- spinlock_t rx_lock; /* protect RX state */
+ /* protect RX state and rx_roq_refcount */
+ spinlock_t rx_lock;
unsigned rx_pl_num, rx_pl_max, rx_pl_min,
rx_num, rx_size_acc, rx_size_min, rx_size_max;
- struct i2400m_roq *rx_roq; /* not under rx_lock! */
+ struct i2400m_roq *rx_roq; /* access is refcounted */
+ struct kref rx_roq_refcount; /* refcount access to rx_roq */
u8 src_mac_addr[ETH_HLEN];
struct list_head rx_reports; /* under rx_lock! */
struct work_struct rx_report_ws;
@@ -581,6 +640,16 @@ struct i2400m {
struct i2400m_barker_db *barker;
struct notifier_block pm_notifier;
+
+ /* counting bus reset retries in this boot */
+ atomic_t bus_reset_retries;
+
+ /* if the device is expected to be alive */
+ unsigned alive;
+
+ /* 0 if we are ready for error recovery; 1 if not ready */
+ atomic_t error_recovery;
+
};
@@ -803,6 +872,7 @@ void i2400m_put(struct i2400m *i2400m)
extern int i2400m_dev_reset_handle(struct i2400m *, const char *);
extern int i2400m_pre_reset(struct i2400m *);
extern int i2400m_post_reset(struct i2400m *);
+extern void i2400m_error_recovery(struct i2400m *);
/*
* _setup()/_release() are called by the probe/disconnect functions of
@@ -815,7 +885,6 @@ extern int i2400m_rx(struct i2400m *, struct sk_buff *);
extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
extern void i2400m_tx_msg_sent(struct i2400m *);
-extern int i2400m_power_save_disabled;
/*
* Utility functions
@@ -922,10 +991,5 @@ extern int i2400m_barker_db_init(const char *);
extern void i2400m_barker_db_exit(void);
-/* Module parameters */
-
-extern int i2400m_idle_mode_disabled;
-extern int i2400m_rx_reorder_disabled;
-
#endif /* #ifndef __I2400M_H__ */
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index b811c2f..94742e1 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -84,17 +84,15 @@
enum {
/* netdev interface */
- /*
- * Out of NWG spec (R1_v1.2.2), 3.3.3 ASN Bearer Plane MTU Size
- *
- * The MTU is 1400 or less
- */
- I2400M_MAX_MTU = 1400,
/* 20 secs? yep, this is the maximum timeout that the device
* might take to get out of IDLE / negotiate it with the base
* station. We add 1sec for good measure. */
I2400M_TX_TIMEOUT = 21 * HZ,
- I2400M_TX_QLEN = 5,
+ /*
+ * Experimentation has determined that 20 is a good value
+ * for minimizing the jitter in the throughput.
+ */
+ I2400M_TX_QLEN = 20,
};
@@ -255,7 +253,6 @@ void i2400m_net_wake_stop(struct i2400m *i2400m)
kfree_skb(wake_tx_skb);
}
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
- return;
}
@@ -434,7 +431,6 @@ void i2400m_tx_timeout(struct net_device *net_dev)
* this, there might be data pending to be sent or not...
*/
net_dev->stats.tx_errors++;
- return;
}
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index fa2e11e..6537593 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -155,6 +155,11 @@
#define D_SUBMODULE rx
#include "debug-levels.h"
+static int i2400m_rx_reorder_disabled; /* 0 (rx reorder enabled) by default */
+module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
+MODULE_PARM_DESC(rx_reorder_disabled,
+ "If true, RX reordering will be disabled.");
+
struct i2400m_report_hook_args {
struct sk_buff *skb_rx;
const struct i2400m_l3l4_hdr *l3l4_hdr;
@@ -300,20 +305,18 @@ void i2400m_rx_ctl_ack(struct i2400m *i2400m,
d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
goto error_waiter_cancelled;
}
- if (ack_skb == NULL) {
+ if (IS_ERR(ack_skb))
dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
- i2400m->ack_skb = ERR_PTR(-ENOMEM);
- } else
- i2400m->ack_skb = ack_skb;
+ i2400m->ack_skb = ack_skb;
spin_unlock_irqrestore(&i2400m->rx_lock, flags);
complete(&i2400m->msg_completion);
return;
error_waiter_cancelled:
- kfree_skb(ack_skb);
+ if (!IS_ERR(ack_skb))
+ kfree_skb(ack_skb);
error_no_waiter:
spin_unlock_irqrestore(&i2400m->rx_lock, flags);
- return;
}
@@ -718,7 +721,6 @@ void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
out:
d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n",
i2400m, roq, skb, sn, nsn);
- return;
}
@@ -743,12 +745,12 @@ unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
unsigned new_nws, nsn_itr;
new_nws = __i2400m_roq_nsn(roq, sn);
- if (unlikely(new_nws >= 1024) && d_test(1)) {
- dev_err(dev, "SW BUG? __update_ws new_nws %u (sn %u ws %u)\n",
- new_nws, sn, roq->ws);
- WARN_ON(1);
- i2400m_roq_log_dump(i2400m, roq);
- }
+ /*
+ * For type 2 (update_window_start) RX messages, there is no
+ * need to check if the normalized sequence number is greater than 1023.
+ * Simply insert and deliver all packets to the host up to the
+ * window start.
+ */
skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
@@ -798,7 +800,6 @@ void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq)
}
roq->ws = 0;
d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq);
- return;
}
@@ -837,7 +838,6 @@ void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
}
d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
i2400m, roq, skb, lbn);
- return;
}
@@ -863,7 +863,6 @@ void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS,
old_ws, len, sn, nsn, roq->ws);
d_fnstart(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn);
- return;
}
@@ -890,33 +889,52 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
i2400m, roq, skb, sn);
len = skb_queue_len(&roq->queue);
nsn = __i2400m_roq_nsn(roq, sn);
+ /*
+ * For type 3 (queue_update_window_start) RX messages, there is no
+ * need to check if the normalized sequence number is greater than 1023.
+ * Simply insert and deliver all packets to the host up to the
+ * window start.
+ */
old_ws = roq->ws;
- if (unlikely(nsn >= 1024)) {
- dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n",
- nsn, sn, roq->ws);
- i2400m_roq_log_dump(i2400m, roq);
- i2400m_reset(i2400m, I2400M_RT_WARM);
- } else {
- /* if the queue is empty, don't bother as we'd queue
- * it and inmediately unqueue it -- just deliver it */
- if (len == 0) {
- struct i2400m_roq_data *roq_data;
- roq_data = (struct i2400m_roq_data *) &skb->cb;
- i2400m_net_erx(i2400m, skb, roq_data->cs);
- }
- else
- __i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
- __i2400m_roq_update_ws(i2400m, roq, sn + 1);
- i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
- old_ws, len, sn, nsn, roq->ws);
- }
+ /* If the queue is empty, don't bother as we'd queue
+ * it and immediately unqueue it -- just deliver it.
+ */
+ if (len == 0) {
+ struct i2400m_roq_data *roq_data;
+ roq_data = (struct i2400m_roq_data *) &skb->cb;
+ i2400m_net_erx(i2400m, skb, roq_data->cs);
+ } else
+ __i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
+
+ __i2400m_roq_update_ws(i2400m, roq, sn + 1);
+ i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
+ old_ws, len, sn, nsn, roq->ws);
+
d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
i2400m, roq, skb, sn);
- return;
}
/*
+ * This routine destroys the memory allocated for rx_roq when no
+ * other thread is accessing it. Access to rx_roq is refcounted by
+ * rx_roq_refcount, hence the memory must only be destroyed when
+ * rx_roq_refcount drops to zero, which is when this routine gets
+ * executed as the kref release callback.
+ */
+void i2400m_rx_roq_destroy(struct kref *ref)
+{
+ unsigned itr;
+ struct i2400m *i2400m
+ = container_of(ref, struct i2400m, rx_roq_refcount);
+ for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
+ __skb_queue_purge(&i2400m->rx_roq[itr].queue);
+ kfree(i2400m->rx_roq[0].log);
+ kfree(i2400m->rx_roq);
+ i2400m->rx_roq = NULL;
+}
+
+/*
* Receive and send up an extended data packet
*
* @i2400m: device descriptor
@@ -969,6 +987,7 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
unsigned ro_needed, ro_type, ro_cin, ro_sn;
struct i2400m_roq *roq;
struct i2400m_roq_data *roq_data;
+ unsigned long flags;
BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));
@@ -1007,7 +1026,16 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
roq = &i2400m->rx_roq[ro_cin];
+ if (i2400m->rx_roq == NULL) {
+ kfree_skb(skb); /* rx_roq is already destroyed */
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ goto error;
+ }
+ kref_get(&i2400m->rx_roq_refcount);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+
roq_data = (struct i2400m_roq_data *) &skb->cb;
roq_data->sn = ro_sn;
roq_data->cs = cs;
@@ -1034,6 +1062,10 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
default:
dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type);
}
+
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
}
else
i2400m_net_erx(i2400m, skb, cs);
@@ -1041,7 +1073,6 @@ error_skb_clone:
error:
d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
"size %zu) = void\n", i2400m, skb_rx, single_last, payload, size);
- return;
}
@@ -1344,6 +1375,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
__i2400m_roq_init(&i2400m->rx_roq[itr]);
i2400m->rx_roq[itr].log = &rd[itr];
}
+ kref_init(&i2400m->rx_roq_refcount);
}
return 0;
@@ -1357,12 +1389,12 @@ error_roq_alloc:
/* Tear down the RX queue and infrastructure */
void i2400m_rx_release(struct i2400m *i2400m)
{
+ unsigned long flags;
+
if (i2400m->rx_reorder) {
- unsigned itr;
- for(itr = 0; itr < I2400M_RO_CIN + 1; itr++)
- __skb_queue_purge(&i2400m->rx_roq[itr].queue);
- kfree(i2400m->rx_roq[0].log);
- kfree(i2400m->rx_roq);
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
}
/* at this point, nothing can be received... */
i2400m_report_hook_flush(i2400m);
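
The rx.c changes above refcount every access to rx_roq under rx_lock so the release path cannot free it mid-use. A sketch of the same rx_lock + kref pattern with made-up names; the lookup takes a reference under the lock, and the release callback frees the object when the count reaches zero:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct roq_holder {
	spinlock_t lock;
	struct kref refcount;
	void *roq;			/* the reorder queues */
};

static void roq_destroy(struct kref *ref)
{
	struct roq_holder *h = container_of(ref, struct roq_holder, refcount);

	kfree(h->roq);
	h->roq = NULL;
}

static void *roq_get(struct roq_holder *h)
{
	unsigned long flags;
	void *roq;

	spin_lock_irqsave(&h->lock, flags);
	roq = h->roq;
	if (roq)
		kref_get(&h->refcount);
	spin_unlock_irqrestore(&h->lock, flags);
	return roq;
}

static void roq_put(struct roq_holder *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	kref_put(&h->refcount, roq_destroy);
	spin_unlock_irqrestore(&h->lock, flags);
}
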
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
index d619da3..8b809c2 100644
--- a/drivers/net/wimax/i2400m/sdio-rx.c
+++ b/drivers/net/wimax/i2400m/sdio-rx.c
@@ -197,7 +197,6 @@ error_alloc_skb:
error_get_size:
error_bad_size:
d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret);
- return;
}
@@ -229,7 +228,6 @@ void i2400ms_irq(struct sdio_func *func)
i2400ms_rx(i2400ms);
error_no_irq:
d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms);
- return;
}
diff --git a/drivers/net/wimax/i2400m/sdio-tx.c b/drivers/net/wimax/i2400m/sdio-tx.c
index de66d06..b53cd1c 100644
--- a/drivers/net/wimax/i2400m/sdio-tx.c
+++ b/drivers/net/wimax/i2400m/sdio-tx.c
@@ -98,6 +98,10 @@ void i2400ms_tx_submit(struct work_struct *ws)
tx_msg_size, result);
}
+ if (result == -ETIMEDOUT) {
+ i2400m_error_recovery(i2400m);
+ break;
+ }
d_printf(2, dev, "TX: %zub submitted\n", tx_msg_size);
}
@@ -114,13 +118,17 @@ void i2400ms_bus_tx_kick(struct i2400m *i2400m)
{
struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
struct device *dev = &i2400ms->func->dev;
+ unsigned long flags;
d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m);
/* schedule tx work, this is because tx may block, therefore
* it has to run in a thread context.
*/
- queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ if (i2400ms->tx_workqueue != NULL)
+ queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
@@ -130,27 +138,40 @@ int i2400ms_tx_setup(struct i2400ms *i2400ms)
int result;
struct device *dev = &i2400ms->func->dev;
struct i2400m *i2400m = &i2400ms->i2400m;
+ struct workqueue_struct *tx_workqueue;
+ unsigned long flags;
d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
INIT_WORK(&i2400ms->tx_worker, i2400ms_tx_submit);
snprintf(i2400ms->tx_wq_name, sizeof(i2400ms->tx_wq_name),
"%s-tx", i2400m->wimax_dev.name);
- i2400ms->tx_workqueue =
+ tx_workqueue =
create_singlethread_workqueue(i2400ms->tx_wq_name);
- if (NULL == i2400ms->tx_workqueue) {
+ if (tx_workqueue == NULL) {
dev_err(dev, "TX: failed to create workqueue\n");
result = -ENOMEM;
} else
result = 0;
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ i2400ms->tx_workqueue = tx_workqueue;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
return result;
}
void i2400ms_tx_release(struct i2400ms *i2400ms)
{
- if (i2400ms->tx_workqueue) {
- destroy_workqueue(i2400ms->tx_workqueue);
- i2400ms->tx_workqueue = NULL;
- }
+ struct i2400m *i2400m = &i2400ms->i2400m;
+ struct workqueue_struct *tx_workqueue;
+ unsigned long flags;
+
+ tx_workqueue = i2400ms->tx_workqueue;
+
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ i2400ms->tx_workqueue = NULL;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
+ if (tx_workqueue)
+ destroy_workqueue(tx_workqueue);
}
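
The sdio-tx.c changes above make the TX kick and the release path agree on tx_workqueue under tx_lock, so a kick can never queue work onto a workqueue that is being destroyed. A sketch of that hand-off with illustrative names:

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct tx_ctx {
	spinlock_t lock;
	struct workqueue_struct *wq;
	struct work_struct work;
};

static void tx_kick(struct tx_ctx *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->lock, flags);
	if (t->wq)				/* still alive? */
		queue_work(t->wq, &t->work);
	spin_unlock_irqrestore(&t->lock, flags);
}

static void tx_release(struct tx_ctx *t)
{
	struct workqueue_struct *wq;
	unsigned long flags;

	spin_lock_irqsave(&t->lock, flags);
	wq = t->wq;
	t->wq = NULL;				/* no new work after this point */
	spin_unlock_irqrestore(&t->lock, flags);

	if (wq)
		destroy_workqueue(wq);		/* flushes any pending work */
}
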
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 7632f80..9bfc26e 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -483,6 +483,13 @@ int i2400ms_probe(struct sdio_func *func,
sdio_set_drvdata(func, i2400ms);
i2400m->bus_tx_block_size = I2400MS_BLK_SIZE;
+ /*
+ * Room required in the TX queue for an SDIO message to accommodate
+ * the smallest payload while allocating header space is 224 bytes,
+ * which is the smallest message size (the block size, 256 bytes)
+ * minus the smallest message header size (32 bytes).
+ */
+ i2400m->bus_tx_room_min = I2400MS_BLK_SIZE - I2400M_PL_ALIGN * 2;
i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX;
i2400m->bus_setup = i2400ms_bus_setup;
i2400m->bus_dev_start = i2400ms_bus_dev_start;
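
Worked out from the comment above, under the driver's stated assumptions (a 256-byte SDIO block size and 16-byte payload alignment, giving a 32-byte minimum header):

enum {
	EXAMPLE_BLK_SIZE	= 256,	/* I2400MS_BLK_SIZE in this driver */
	EXAMPLE_PL_ALIGN	= 16,	/* I2400M_PL_ALIGN in this driver */
	EXAMPLE_TX_ROOM_MIN	= EXAMPLE_BLK_SIZE - EXAMPLE_PL_ALIGN * 2,	/* = 224 */
};
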
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index b0cb906..3f819ef 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -258,8 +258,10 @@ enum {
* Doc says maximum transaction is 16KiB. If we had 16KiB en
* route and 16KiB being queued, it boils down to needing
* 32KiB.
+ * 32KiB is insufficient for 1400 MTU, hence increasing
+ * tx buffer size to 64KiB.
*/
- I2400M_TX_BUF_SIZE = 32768,
+ I2400M_TX_BUF_SIZE = 65536,
/**
* Message header and payload descriptors have to be 16
* aligned (16 + 4 * N = 16 * M). If we take that average sent
@@ -270,10 +272,21 @@ enum {
* at the end there are less, we pad up to the nearest
* multiple of 16.
*/
- I2400M_TX_PLD_MAX = 12,
+ /*
+ * According to Intel Wimax i3200, i5x50 and i6x50 specification
+ * documents, the maximum number of payloads per message can be
+ * up to 60. Increasing the number of payloads to 60 per message
+ * helps to accommodate smaller payloads in a single transaction.
+ */
+ I2400M_TX_PLD_MAX = 60,
I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr)
+ I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld),
I2400M_TX_SKIP = 0x80000000,
+ /*
+ * According to Intel Wimax i3200, i5x50 and i6x50 specification
+ * documents, the maximum size of each message can be up to 16KiB.
+ */
+ I2400M_TX_MSG_SIZE = 16384,
};
#define TAIL_FULL ((void *)~(unsigned long)NULL)
@@ -328,6 +341,14 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
* @padding: ensure that there is at least this many bytes of free
* contiguous space in the fifo. This is needed because later on
* we might need to add padding.
+ * @try_head: specify whether to allocate head room or tail room space
+ * in the TX FIFO. This boolean is required to avoid a system hang
+ * due to an infinite loop caused by i2400m_tx_fifo_push().
+ * The caller must always try to allocate tail room space first by
+ * calling this routine with try_head = 0. If there
+ * is not enough tail room space but there is enough head room space
+ * (i2400m_tx_fifo_push() returns TAIL_FULL), try to allocate head
+ * room space by calling this routine again with try_head = 1.
*
* Returns:
*
@@ -359,6 +380,48 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
* fail and return TAIL_FULL and let the caller figure out if we wants to
* skip the tail room and try to allocate from the head.
*
+ * There is a corner case, wherein i2400m_tx_new() can get into
+ * an infinite loop calling i2400m_tx_fifo_push().
+ * In certain situations, tx_in would have reached the top of the TX FIFO
+ * and i2400m_tx_tail_room() returns 0, as described below:
+ *
+ * N ___________ tail room is zero
+ * |<- IN ->|
+ * | |
+ * | |
+ * | |
+ * | data |
+ * |<- OUT ->|
+ * | |
+ * | |
+ * | head room |
+ * 0 -----------
+ * At such a time, when the tail room in the TX FIFO is zero, a request
+ * to add a payload to the TX FIFO calls:
+ * i2400m_tx()
+ * ->calls i2400m_tx_close()
+ * ->calls i2400m_tx_skip_tail()
+ * goto try_new;
+ * ->calls i2400m_tx_new()
+ * |----> [try_head:]
+ * infinite loop | ->calls i2400m_tx_fifo_push()
+ * | if (tail_room < needed)
+ * | if (head_room => needed)
+ * | return TAIL_FULL;
+ * |<---- goto try_head;
+ *
+ * i2400m_tx() calls i2400m_tx_close() to close the message, since there
+ * is no tail room to accommodate the payload, and calls
+ * i2400m_tx_skip_tail() to skip the tail space. i2400m_tx() then calls
+ * i2400m_tx_new() to allocate space for a new message header, which calls
+ * i2400m_tx_fifo_push(); that returns TAIL_FULL, since there is no tail
+ * space for the message header but there is enough head space.
+ * i2400m_tx_new() keeps retrying i2400m_tx_fifo_push(), ending up in a
+ * loop that freezes the system.
+ *
+ * This corner case is avoided by using a try_head boolean,
+ * as an argument to i2400m_tx_fifo_push().
+ *
* Note:
*
* Assumes i2400m->tx_lock is taken, and we use that as a barrier
@@ -367,7 +430,8 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
* pop data off the queue
*/
static
-void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
+void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size,
+ size_t padding, bool try_head)
{
struct device *dev = i2400m_dev(i2400m);
size_t room, tail_room, needed_size;
@@ -382,9 +446,21 @@ void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
}
/* Is there space at the tail? */
tail_room = __i2400m_tx_tail_room(i2400m);
- if (tail_room < needed_size) {
- if (i2400m->tx_out % I2400M_TX_BUF_SIZE
- < i2400m->tx_in % I2400M_TX_BUF_SIZE) {
+ if (!try_head && tail_room < needed_size) {
+ /*
+ * If the tail room space is not enough to push the message
+ * in the TX FIFO, then there are two possibilities:
+ * 1. There is enough head room space to accommodate
+ * this message in the TX FIFO.
+ * 2. There is not enough space in either the head room or the
+ * tail room of the TX FIFO to accommodate the message.
+ * In case (1), return TAIL_FULL so that the caller can decide
+ * whether to push the message into the head room space.
+ * In case (2), return NULL, indicating that the TX FIFO
+ * cannot accommodate the message.
+ */
+ if (room - tail_room >= needed_size) {
d_printf(2, dev, "fifo push %zu/%zu: tail full\n",
size, padding);
return TAIL_FULL; /* There might be head space */
@@ -485,14 +561,25 @@ void i2400m_tx_new(struct i2400m *i2400m)
{
struct device *dev = i2400m_dev(i2400m);
struct i2400m_msg_hdr *tx_msg;
+ bool try_head = 0;
BUG_ON(i2400m->tx_msg != NULL);
+ /*
+ * In certain situations, the TX queue might have enough space to
+ * accommodate the new message header (I2400M_TX_PLD_SIZE) but
+ * might not have enough space to accommodate the payloads.
+ * Adding bus_tx_room_min padding while allocating a new TX message
+ * increases the chances of including at least one payload of
+ * size <= bus_tx_room_min.
+ */
try_head:
- tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE, 0);
+ tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE,
+ i2400m->bus_tx_room_min, try_head);
if (tx_msg == NULL)
goto out;
else if (tx_msg == TAIL_FULL) {
i2400m_tx_skip_tail(i2400m);
d_printf(2, dev, "new TX message: tail full, trying head\n");
+ try_head = 1;
goto try_head;
}
memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
@@ -566,7 +653,7 @@ void i2400m_tx_close(struct i2400m *i2400m)
aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size);
padding = aligned_size - tx_msg_moved->size;
if (padding > 0) {
- pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0);
+ pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0, 0);
if (unlikely(WARN_ON(pad_buf == NULL
|| pad_buf == TAIL_FULL))) {
/* This should not happen -- append should verify
@@ -632,6 +719,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
unsigned long flags;
size_t padded_len;
void *ptr;
+ bool try_head = 0;
unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
|| pl_type == I2400M_PT_RESET_COLD;
@@ -643,9 +731,11 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
* current one is out of payload slots or we have a singleton,
* close it and start a new one */
spin_lock_irqsave(&i2400m->tx_lock, flags);
- result = -ESHUTDOWN;
- if (i2400m->tx_buf == NULL)
+ /* If tx_buf is NULL, device is shutdown */
+ if (i2400m->tx_buf == NULL) {
+ result = -ESHUTDOWN;
goto error_tx_new;
+ }
try_new:
if (unlikely(i2400m->tx_msg == NULL))
i2400m_tx_new(i2400m);
@@ -659,7 +749,13 @@ try_new:
}
if (i2400m->tx_msg == NULL)
goto error_tx_new;
- if (i2400m->tx_msg->size + padded_len > I2400M_TX_BUF_SIZE / 2) {
+ /*
+ * Check if this skb will fit in the TX queue's current active
+ * TX message. The total message size must not exceed the maximum
+ * size of each message, I2400M_TX_MSG_SIZE. If it would exceed that,
+ * close the current message and push this skb into a new message.
+ */
+ if (i2400m->tx_msg->size + padded_len > I2400M_TX_MSG_SIZE) {
d_printf(2, dev, "TX: message too big, going new\n");
i2400m_tx_close(i2400m);
i2400m_tx_new(i2400m);
@@ -669,11 +765,12 @@ try_new:
/* So we have a current message header; now append space for
* the message -- if there is not enough, try the head */
ptr = i2400m_tx_fifo_push(i2400m, padded_len,
- i2400m->bus_tx_block_size);
+ i2400m->bus_tx_block_size, try_head);
if (ptr == TAIL_FULL) { /* Tail is full, try head */
d_printf(2, dev, "pl append: tail full\n");
i2400m_tx_close(i2400m);
i2400m_tx_skip_tail(i2400m);
+ try_head = 1;
goto try_new;
} else if (ptr == NULL) { /* All full */
result = -ENOSPC;
@@ -689,7 +786,7 @@ try_new:
pl_type, buf_len);
tx_msg->num_pls = le16_to_cpu(num_pls+1);
tx_msg->size += padded_len;
- d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u \n",
+ d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u\n",
padded_len, tx_msg->size, num_pls+1);
d_printf(2, dev,
"TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n",
@@ -860,25 +957,43 @@ EXPORT_SYMBOL_GPL(i2400m_tx_msg_sent);
* i2400m_tx_setup - Initialize the TX queue and infrastructure
*
* Make sure we reset the TX sequence to zero, as when this function
- * is called, the firmware has been just restarted.
+ * is called, the firmware has been just restarted. The same rationale
+ * applies to tx_in, tx_out, tx_msg_size and tx_msg: we reset them since
+ * the memory for the TX queue is reallocated.
*/
int i2400m_tx_setup(struct i2400m *i2400m)
{
- int result;
+ int result = 0;
+ void *tx_buf;
+ unsigned long flags;
/* Do this here only once -- can't do on
* i2400m_hard_start_xmit() as we'll cause race conditions if
* the WS was scheduled on another CPU */
INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work);
- i2400m->tx_sequence = 0;
- i2400m->tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_KERNEL);
- if (i2400m->tx_buf == NULL)
+ tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_ATOMIC);
+ if (tx_buf == NULL) {
result = -ENOMEM;
- else
- result = 0;
+ goto error_kmalloc;
+ }
+
+ /*
+ * Fail the build if we can't fit at least two maximum size messages
+ * on the TX FIFO [one being delivered while one is constructed].
+ */
+ BUILD_BUG_ON(2 * I2400M_TX_MSG_SIZE > I2400M_TX_BUF_SIZE);
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ i2400m->tx_sequence = 0;
+ i2400m->tx_in = 0;
+ i2400m->tx_out = 0;
+ i2400m->tx_msg_size = 0;
+ i2400m->tx_msg = NULL;
+ i2400m->tx_buf = tx_buf;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
/* Huh? the bus layer has to define this... */
BUG_ON(i2400m->bus_tx_block_size == 0);
+error_kmalloc:
return result;
}
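
A condensed sketch of the try_head retry described in the tx.c comments above; fifo_push() and fifo_skip_tail() stand in for i2400m_tx_fifo_push() and i2400m_tx_skip_tail(), so the second pass can only target the head room and the loop terminates:

#include <linux/types.h>

#define TAIL_FULL ((void *)~(unsigned long)0)	/* same sentinel trick as the driver */

struct fifo;					/* opaque; stands in for struct i2400m */
void *fifo_push(struct fifo *f, size_t size, size_t pad, bool try_head);
void fifo_skip_tail(struct fifo *f);

static void *alloc_msg_space(struct fifo *f, size_t size, size_t pad)
{
	bool try_head = false;
	void *ptr;

retry:
	ptr = fifo_push(f, size, pad, try_head);
	if (ptr == TAIL_FULL) {
		fifo_skip_tail(f);	/* give up on the tail slack */
		try_head = true;	/* only one more pass is possible */
		goto retry;
	}
	return ptr;			/* payload space, or NULL if completely full */
}
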
diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c
index 7b6a1d9..d44b545 100644
--- a/drivers/net/wimax/i2400m/usb-notif.c
+++ b/drivers/net/wimax/i2400m/usb-notif.c
@@ -178,7 +178,6 @@ error_submit:
out:
d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n",
urb, urb->status, urb->actual_length);
- return;
}
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index d8c4d64..0d5081d 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -82,6 +82,8 @@ MODULE_PARM_DESC(debug,
/* Our firmware file name */
static const char *i2400mu_bus_fw_names_5x50[] = {
+#define I2400MU_FW_FILE_NAME_v1_5 "i2400m-fw-usb-1.5.sbcf"
+ I2400MU_FW_FILE_NAME_v1_5,
#define I2400MU_FW_FILE_NAME_v1_4 "i2400m-fw-usb-1.4.sbcf"
I2400MU_FW_FILE_NAME_v1_4,
NULL,
@@ -467,6 +469,13 @@ int i2400mu_probe(struct usb_interface *iface,
usb_set_intfdata(iface, i2400mu);
i2400m->bus_tx_block_size = I2400MU_BLK_SIZE;
+ /*
+ * Room required in the TX queue for a USB message to accommodate
+ * the smallest payload while allocating header space is 16 bytes.
+ * Adding this room for the new TX message increases the
+ * chances of including any payload with size <= 16 bytes.
+ */
+ i2400m->bus_tx_room_min = I2400MU_BLK_SIZE;
i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX;
i2400m->bus_setup = NULL;
i2400m->bus_dev_start = i2400mu_bus_dev_start;
@@ -505,7 +514,7 @@ int i2400mu_probe(struct usb_interface *iface,
iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */
device_init_wakeup(dev, 1);
usb_dev->autosuspend_delay = 15 * HZ;
- usb_dev->autosuspend_disabled = 0;
+ usb_enable_autosuspend(usb_dev);
#endif
result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT);
@@ -778,4 +787,5 @@ MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
MODULE_DESCRIPTION("Driver for USB based Intel Wireless WiMAX Connection 2400M "
"(5x50 & 6050)");
MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_4);
+MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_5);
+MODULE_FIRMWARE(I6050U_FW_FILE_NAME_v1_5);
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 5889436..174e344 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -5,6 +5,7 @@
menuconfig WLAN
bool "Wireless LAN"
depends on !S390
+ depends on NET
select WIRELESS
default y
---help---
@@ -38,6 +39,12 @@ config LIBERTAS_THINFIRM
---help---
A library for Marvell Libertas 8xxx devices using thinfirm.
+config LIBERTAS_THINFIRM_DEBUG
+ bool "Enable full debugging output in the Libertas thin firmware module."
+ depends on LIBERTAS_THINFIRM
+ ---help---
+ Debugging support.
+
config LIBERTAS_THINFIRM_USB
tristate "Marvell Libertas 8388 USB 802.11b/g cards with thin firmware"
depends on LIBERTAS_THINFIRM && USB
@@ -210,90 +217,7 @@ config USB_NET_RNDIS_WLAN
If you choose to build a module, it'll be called rndis_wlan.
-config RTL8180
- tristate "Realtek 8180/8185 PCI support"
- depends on MAC80211 && PCI && EXPERIMENTAL
- select EEPROM_93CX6
- ---help---
- This is a driver for RTL8180 and RTL8185 based cards.
- These are PCI based chips found in cards such as:
-
- (RTL8185 802.11g)
- A-Link WL54PC
-
- (RTL8180 802.11b)
- Belkin F5D6020 v3
- Belkin F5D6020 v3
- Dlink DWL-610
- Dlink DWL-510
- Netgear MA521
- Level-One WPC-0101
- Acer Aspire 1357 LMi
- VCTnet PC-11B1
- Ovislink AirLive WL-1120PCM
- Mentor WL-PCI
- Linksys WPC11 v4
- TrendNET TEW-288PI
- D-Link DWL-520 Rev D
- Repotec RP-WP7126
- TP-Link TL-WN250/251
- Zonet ZEW1000
- Longshine LCS-8031-R
- HomeLine HLW-PCC200
- GigaFast WF721-AEX
- Planet WL-3553
- Encore ENLWI-PCI1-NT
- TrendNET TEW-266PC
- Gigabyte GN-WLMR101
- Siemens-fujitsu Amilo D1840W
- Edimax EW-7126
- PheeNet WL-11PCIR
- Tonze PC-2100T
- Planet WL-8303
- Dlink DWL-650 v M1
- Edimax EW-7106
- Q-Tec 770WC
- Topcom Skyr@cer 4011b
- Roper FreeLan 802.11b (edition 2004)
- Wistron Neweb Corp CB-200B
- Pentagram HorNET
- QTec 775WC
- TwinMOS Booming B Series
- Micronet SP906BB
- Sweex LC700010
- Surecom EP-9428
- Safecom SWLCR-1100
-
- Thanks to Realtek for their support!
-
-config RTL8187
- tristate "Realtek 8187 and 8187B USB support"
- depends on MAC80211 && USB
- select EEPROM_93CX6
- ---help---
- This is a driver for RTL8187 and RTL8187B based cards.
- These are USB based chips found in devices such as:
-
- Netgear WG111v2
- Level 1 WNC-0301USB
- Micronet SP907GK V5
- Encore ENUWI-G2
- Trendnet TEW-424UB
- ASUS P5B Deluxe/P5K Premium motherboards
- Toshiba Satellite Pro series of laptops
- Asus Wireless Link
- Linksys WUSB54GC-EU v2
- (v1 = rt73usb; v3 is rt2070-based,
- use staging/rt3070 or try rt2800usb)
-
- Thanks to Realtek for their support!
-
-# If possible, automatically enable LEDs for RTL8187.
-
-config RTL8187_LEDS
- bool
- depends on RTL8187 && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = RTL8187)
- default y
+source "drivers/net/wireless/rtl818x/Kconfig"
config ADM8211
tristate "ADMtek ADM8211 support"
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index ab61d2b..880ad9d 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1318,21 +1318,19 @@ static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
}
static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
- unsigned int bit_nr, i;
+ unsigned int bit_nr;
u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0; i < mc_count; i++) {
- if (!mclist)
- break;
- bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
bit_nr &= 0x3F;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- mclist = mclist->next;
}
return mc_filter[0] | ((u64)(mc_filter[1]) << 32);
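
The adm8211 hunk above converts the multicast walk from dev_addr_list to netdev_hw_addr_list_for_each(). A small sketch of the same 64-bit hash-filter construction, assuming ether_crc() from <linux/crc32.h> as the driver uses:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>

static u64 build_mc_filter(struct netdev_hw_addr_list *mc_list)
{
	struct netdev_hw_addr *ha;
	u32 mc_filter[2] = { 0, 0 };
	unsigned int bit_nr;

	netdev_hw_addr_list_for_each(ha, mc_list) {
		bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;	/* top 6 CRC bits */
		bit_nr &= 0x3F;
		mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
	}
	return mc_filter[0] | ((u64)mc_filter[1] << 32);
}
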
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index dc5018a..a441aad 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2876,7 +2876,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
ai->wep_capable = (cap_rid.softCap & cpu_to_le16(0x02)) ? 1 : 0;
ai->max_wep_idx = (cap_rid.softCap & cpu_to_le16(0x80)) ? 3 : 0;
- airo_print_info(dev->name, "Firmware version %x.%x.%02x",
+ airo_print_info(dev->name, "Firmware version %x.%x.%02d",
((le16_to_cpu(cap_rid.softVer) >> 8) & 0xF),
(le16_to_cpu(cap_rid.softVer) & 0xFF),
le16_to_cpu(cap_rid.softSubVer));
@@ -3193,19 +3193,26 @@ static void airo_print_status(const char *devname, u16 status)
{
u8 reason = status & 0xFF;
- switch (status) {
+ switch (status & 0xFF00) {
case STAT_NOBEACON:
- airo_print_dbg(devname, "link lost (missed beacons)");
- break;
- case STAT_MAXRETRIES:
- case STAT_MAXARL:
- airo_print_dbg(devname, "link lost (max retries)");
- break;
- case STAT_FORCELOSS:
- airo_print_dbg(devname, "link lost (local choice)");
- break;
- case STAT_TSFSYNC:
- airo_print_dbg(devname, "link lost (TSF sync lost)");
+ switch (status) {
+ case STAT_NOBEACON:
+ airo_print_dbg(devname, "link lost (missed beacons)");
+ break;
+ case STAT_MAXRETRIES:
+ case STAT_MAXARL:
+ airo_print_dbg(devname, "link lost (max retries)");
+ break;
+ case STAT_FORCELOSS:
+ airo_print_dbg(devname, "link lost (local choice)");
+ break;
+ case STAT_TSFSYNC:
+ airo_print_dbg(devname, "link lost (TSF sync lost)");
+ break;
+ default:
+ airo_print_dbg(devname, "unknow status %x\n", status);
+ break;
+ }
break;
case STAT_DEAUTH:
airo_print_dbg(devname, "deauthenticated (reason: %d)", reason);
@@ -3221,7 +3228,11 @@ static void airo_print_status(const char *devname, u16 status)
airo_print_dbg(devname, "authentication failed (reason: %d)",
reason);
break;
+ case STAT_ASSOC:
+ case STAT_REASSOC:
+ break;
default:
+ airo_print_dbg(devname, "unknow status %x\n", status);
break;
}
}
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 0fb4199..8a2d4af 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1223,7 +1223,6 @@ static void at76_rx_callback(struct urb *urb)
priv->rx_tasklet.data = (unsigned long)urb;
tasklet_schedule(&priv->rx_tasklet);
- return;
}
static int at76_submit_rx_urb(struct at76_priv *priv)
@@ -1889,6 +1888,7 @@ static void at76_dwork_hw_scan(struct work_struct *work)
}
static int at76_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct at76_priv *priv = hw->priv;
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 4e7a7fd..0a75be0 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -3,7 +3,7 @@ menuconfig ATH_COMMON
depends on CFG80211
---help---
This will enable the support for the Atheros wireless drivers.
- ath5k, ath9k and ar9170 drivers share some common code, this option
+ ath5k, ath9k, ath9k_htc and ar9170 drivers share some common code; this option
enables the common ath.ko module which shares common helpers.
For more information and documentation on this module you can visit:
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index dc662b7..4f845f8 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -109,41 +109,6 @@ struct ar9170_rxstream_mpdu_merge {
bool has_plcp;
};
-#define AR9170_NUM_TID 16
-#define WME_BA_BMP_SIZE 64
-#define AR9170_NUM_MAX_AGG_LEN (2 * WME_BA_BMP_SIZE)
-
-#define WME_AC_BE 2
-#define WME_AC_BK 3
-#define WME_AC_VI 1
-#define WME_AC_VO 0
-
-#define TID_TO_WME_AC(_tid) \
- ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
- (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
- (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
- WME_AC_VO)
-
-#define BAW_WITHIN(_start, _bawsz, _seqno) \
- ((((_seqno) - (_start)) & 0xfff) < (_bawsz))
-
-enum ar9170_tid_state {
- AR9170_TID_STATE_INVALID,
- AR9170_TID_STATE_SHUTDOWN,
- AR9170_TID_STATE_PROGRESS,
- AR9170_TID_STATE_COMPLETE,
-};
-
-struct ar9170_sta_tid {
- struct list_head list;
- struct sk_buff_head queue;
- u8 addr[ETH_ALEN];
- u16 ssn;
- u16 tid;
- enum ar9170_tid_state state;
- bool active;
-};
-
struct ar9170_tx_queue_stats {
unsigned int len;
unsigned int limit;
@@ -152,14 +117,11 @@ struct ar9170_tx_queue_stats {
#define AR9170_QUEUE_TIMEOUT 64
#define AR9170_TX_TIMEOUT 8
-#define AR9170_BA_TIMEOUT 4
#define AR9170_JANITOR_DELAY 128
#define AR9170_TX_INVALID_RATE 0xffffffff
-#define AR9170_NUM_TX_STATUS 128
-#define AR9170_NUM_TX_AGG_MAX 30
-#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
-#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
+#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
+#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
struct ar9170 {
struct ieee80211_hw *hw;
@@ -234,11 +196,6 @@ struct ar9170 {
struct sk_buff_head tx_pending[__AR9170_NUM_TXQ];
struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
struct delayed_work tx_janitor;
- /* tx ampdu */
- struct sk_buff_head tx_status_ampdu;
- spinlock_t tx_ampdu_list_lock;
- struct list_head tx_ampdu_list;
- atomic_t tx_ampdu_pending;
/* rxstream mpdu merge */
struct ar9170_rxstream_mpdu_merge rx_mpdu;
@@ -250,11 +207,6 @@ struct ar9170 {
u8 global_ampdu_factor;
};
-struct ar9170_sta_info {
- struct ar9170_sta_tid agg[AR9170_NUM_TID];
- unsigned int ampdu_max_len;
-};
-
struct ar9170_tx_info {
unsigned long timeout;
};
diff --git a/drivers/net/wireless/ath/ar9170/cmd.h b/drivers/net/wireless/ath/ar9170/cmd.h
index 826c45e..ec8134b4 100644
--- a/drivers/net/wireless/ath/ar9170/cmd.h
+++ b/drivers/net/wireless/ath/ar9170/cmd.h
@@ -79,7 +79,7 @@ __regwrite_out : \
if (__nreg) { \
if (IS_ACCEPTING_CMD(__ar)) \
__err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
- 8 * __nreg, \
+ 8 * __nreg, \
(u8 *) &__ar->cmdbuf[1], \
0, NULL); \
__nreg = 0; \
diff --git a/drivers/net/wireless/ath/ar9170/eeprom.h b/drivers/net/wireless/ath/ar9170/eeprom.h
index d2c8cc8..6c46638 100644
--- a/drivers/net/wireless/ath/ar9170/eeprom.h
+++ b/drivers/net/wireless/ath/ar9170/eeprom.h
@@ -127,8 +127,8 @@ struct ar9170_eeprom {
__le16 checksum;
__le16 version;
u8 operating_flags;
-#define AR9170_OPFLAG_5GHZ 1
-#define AR9170_OPFLAG_2GHZ 2
+#define AR9170_OPFLAG_5GHZ 1
+#define AR9170_OPFLAG_2GHZ 2
u8 misc;
__le16 reg_domain[2];
u8 mac_address[6];
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 0a1d4c2..06f1f3c 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -425,5 +425,6 @@ enum ar9170_txq {
#define AR9170_TXQ_DEPTH 32
#define AR9170_TX_MAX_PENDING 128
+#define AR9170_RX_STREAM_MAX_SIZE 65535
#endif /* __AR9170_HW_H */
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index c536929..2abc875 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -50,10 +50,6 @@ static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
-static int modparam_ht;
-module_param_named(ht, modparam_ht, bool, S_IRUGO);
-MODULE_PARM_DESC(ht, "enable MPDU aggregation.");
-
#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
.bitrate = (_bitrate), \
.flags = (_flags), \
@@ -182,7 +178,6 @@ static struct ieee80211_supported_band ar9170_band_5GHz = {
};
static void ar9170_tx(struct ar9170 *ar);
-static bool ar9170_tx_ampdu(struct ar9170 *ar);
static inline u16 ar9170_get_seq_h(struct ieee80211_hdr *hdr)
{
@@ -195,21 +190,7 @@ static inline u16 ar9170_get_seq(struct sk_buff *skb)
return ar9170_get_seq_h((void *) txc->frame_data);
}
-static inline u16 ar9170_get_tid_h(struct ieee80211_hdr *hdr)
-{
- return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
-}
-
-static inline u16 ar9170_get_tid(struct sk_buff *skb)
-{
- struct ar9170_tx_control *txc = (void *) skb->data;
- return ar9170_get_tid_h((struct ieee80211_hdr *) txc->frame_data);
-}
-
-#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff)
-#define GET_NEXT_SEQ_FROM_SKB(skb) (GET_NEXT_SEQ(ar9170_get_seq(skb)))
-
-#if (defined AR9170_QUEUE_DEBUG) || (defined AR9170_TXAGG_DEBUG)
+#ifdef AR9170_QUEUE_DEBUG
static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
{
struct ar9170_tx_control *txc = (void *) skb->data;
@@ -236,7 +217,7 @@ static void __ar9170_dump_txqueue(struct ar9170 *ar,
wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
skb_queue_walk(queue, skb) {
- printk(KERN_DEBUG "index:%d => \n", i++);
+ printk(KERN_DEBUG "index:%d =>\n", i++);
ar9170_print_txheader(ar, skb);
}
if (i != skb_queue_len(queue))
@@ -244,7 +225,7 @@ static void __ar9170_dump_txqueue(struct ar9170 *ar,
"mismatch %d != %d\n", skb_queue_len(queue), i);
printk(KERN_DEBUG "---[ end ]---\n");
}
-#endif /* AR9170_QUEUE_DEBUG || AR9170_TXAGG_DEBUG */
+#endif /* AR9170_QUEUE_DEBUG */
#ifdef AR9170_QUEUE_DEBUG
static void ar9170_dump_txqueue(struct ar9170 *ar,
@@ -275,20 +256,6 @@ static void __ar9170_dump_txstats(struct ar9170 *ar)
}
#endif /* AR9170_QUEUE_STOP_DEBUG */
-#ifdef AR9170_TXAGG_DEBUG
-static void ar9170_dump_tx_status_ampdu(struct ar9170 *ar)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ar->tx_status_ampdu.lock, flags);
- printk(KERN_DEBUG "%s: A-MPDU tx_status queue => \n",
- wiphy_name(ar->hw->wiphy));
- __ar9170_dump_txqueue(ar, &ar->tx_status_ampdu);
- spin_unlock_irqrestore(&ar->tx_status_ampdu.lock, flags);
-}
-
-#endif /* AR9170_TXAGG_DEBUG */
-
/* caller must guarantee exclusive access for _bin_ queue. */
static void ar9170_recycle_expired(struct ar9170 *ar,
struct sk_buff_head *queue,
@@ -308,7 +275,7 @@ static void ar9170_recycle_expired(struct ar9170 *ar,
if (time_is_before_jiffies(arinfo->timeout)) {
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
- "recycle \n", wiphy_name(ar->hw->wiphy),
+ "recycle\n", wiphy_name(ar->hw->wiphy),
jiffies, arinfo->timeout);
ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
@@ -360,70 +327,6 @@ static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
ieee80211_tx_status_irqsafe(ar->hw, skb);
}
-static void ar9170_tx_fake_ampdu_status(struct ar9170 *ar)
-{
- struct sk_buff_head success;
- struct sk_buff *skb;
- unsigned int i;
- unsigned long queue_bitmap = 0;
-
- skb_queue_head_init(&success);
-
- while (skb_queue_len(&ar->tx_status_ampdu) > AR9170_NUM_TX_STATUS)
- __skb_queue_tail(&success, skb_dequeue(&ar->tx_status_ampdu));
-
- ar9170_recycle_expired(ar, &ar->tx_status_ampdu, &success);
-
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: collected %d A-MPDU frames.\n",
- wiphy_name(ar->hw->wiphy), skb_queue_len(&success));
- __ar9170_dump_txqueue(ar, &success);
-#endif /* AR9170_TXAGG_DEBUG */
-
- while ((skb = __skb_dequeue(&success))) {
- struct ieee80211_tx_info *txinfo;
-
- queue_bitmap |= BIT(skb_get_queue_mapping(skb));
-
- txinfo = IEEE80211_SKB_CB(skb);
- ieee80211_tx_info_clear_status(txinfo);
-
- txinfo->flags |= IEEE80211_TX_STAT_ACK;
- txinfo->status.rates[0].count = 1;
-
- skb_pull(skb, sizeof(struct ar9170_tx_control));
- ieee80211_tx_status_irqsafe(ar->hw, skb);
- }
-
- for_each_set_bit(i, &queue_bitmap, BITS_PER_BYTE) {
-#ifdef AR9170_QUEUE_STOP_DEBUG
- printk(KERN_DEBUG "%s: wake queue %d\n",
- wiphy_name(ar->hw->wiphy), i);
- __ar9170_dump_txstats(ar);
-#endif /* AR9170_QUEUE_STOP_DEBUG */
- ieee80211_wake_queue(ar->hw, i);
- }
-
- if (queue_bitmap)
- ar9170_tx(ar);
-}
-
-static void ar9170_tx_ampdu_callback(struct ar9170 *ar, struct sk_buff *skb)
-{
- struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
- struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
-
- arinfo->timeout = jiffies +
- msecs_to_jiffies(AR9170_BA_TIMEOUT);
-
- skb_queue_tail(&ar->tx_status_ampdu, skb);
- ar9170_tx_fake_ampdu_status(ar);
-
- if (atomic_dec_and_test(&ar->tx_ampdu_pending) &&
- !list_empty(&ar->tx_ampdu_list))
- ar9170_tx_ampdu(ar);
-}
-
void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -447,14 +350,10 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
} else {
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- ar9170_tx_ampdu_callback(ar, skb);
- } else {
- arinfo->timeout = jiffies +
- msecs_to_jiffies(AR9170_TX_TIMEOUT);
+ arinfo->timeout = jiffies +
+ msecs_to_jiffies(AR9170_TX_TIMEOUT);
- skb_queue_tail(&ar->tx_status[queue], skb);
- }
+ skb_queue_tail(&ar->tx_status[queue], skb);
}
if (!ar->tx_stats[queue].len &&
@@ -524,38 +423,6 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
return NULL;
}
-static void ar9170_handle_block_ack(struct ar9170 *ar, u16 count, u16 r)
-{
- struct sk_buff *skb;
- struct ieee80211_tx_info *txinfo;
-
- while (count) {
- skb = ar9170_get_queued_skb(ar, NULL, &ar->tx_status_ampdu, r);
- if (!skb)
- break;
-
- txinfo = IEEE80211_SKB_CB(skb);
- ieee80211_tx_info_clear_status(txinfo);
-
- /* FIXME: maybe more ? */
- txinfo->status.rates[0].count = 1;
-
- skb_pull(skb, sizeof(struct ar9170_tx_control));
- ieee80211_tx_status_irqsafe(ar->hw, skb);
- count--;
- }
-
-#ifdef AR9170_TXAGG_DEBUG
- if (count) {
- printk(KERN_DEBUG "%s: got %d more failed mpdus, but no more "
- "suitable frames left in tx_status queue.\n",
- wiphy_name(ar->hw->wiphy), count);
-
- ar9170_dump_tx_status_ampdu(ar);
- }
-#endif /* AR9170_TXAGG_DEBUG */
-}
-
/*
* This worker tries to keeps an maintain tx_status queues.
* So we can guarantee that incoming tx_status reports are
@@ -592,8 +459,6 @@ static void ar9170_tx_janitor(struct work_struct *work)
resched = true;
}
- ar9170_tx_fake_ampdu_status(ar);
-
if (!resched)
return;
@@ -673,10 +538,6 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
case 0xc5:
/* BlockACK events */
- ar9170_handle_block_ack(ar,
- le16_to_cpu(cmd->ba_fail_cnt.failed),
- le16_to_cpu(cmd->ba_fail_cnt.rate));
- ar9170_tx_fake_ampdu_status(ar);
break;
case 0xc6:
@@ -689,7 +550,8 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
/* firmware debug */
case 0xca:
- printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4, (char *)buf + 4);
+ printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4,
+ (char *)buf + 4);
break;
case 0xcb:
len -= 4;
@@ -926,7 +788,6 @@ static void ar9170_rx_phy_status(struct ar9170 *ar,
/* TODO: we could do something with phy_errors */
status->signal = ar->noise[0] + phy->rssi_combined;
- status->noise = ar->noise[0];
}
static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len)
@@ -1247,7 +1108,6 @@ static int ar9170_op_start(struct ieee80211_hw *hw)
ar->global_ampdu_density = 6;
ar->global_ampdu_factor = 3;
- atomic_set(&ar->tx_ampdu_pending, 0);
ar->bad_hw_nagger = jiffies;
err = ar->open(ar);
@@ -1310,40 +1170,10 @@ static void ar9170_op_stop(struct ieee80211_hw *hw)
skb_queue_purge(&ar->tx_pending[i]);
skb_queue_purge(&ar->tx_status[i]);
}
- skb_queue_purge(&ar->tx_status_ampdu);
mutex_unlock(&ar->mutex);
}
-static void ar9170_tx_indicate_immba(struct ar9170 *ar, struct sk_buff *skb)
-{
- struct ar9170_tx_control *txc = (void *) skb->data;
-
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_AMPDU);
-}
-
-static void ar9170_tx_copy_phy(struct ar9170 *ar, struct sk_buff *dst,
- struct sk_buff *src)
-{
- struct ar9170_tx_control *dst_txc, *src_txc;
- struct ieee80211_tx_info *dst_info, *src_info;
- struct ar9170_tx_info *dst_arinfo, *src_arinfo;
-
- src_txc = (void *) src->data;
- src_info = IEEE80211_SKB_CB(src);
- src_arinfo = (void *) src_info->rate_driver_data;
-
- dst_txc = (void *) dst->data;
- dst_info = IEEE80211_SKB_CB(dst);
- dst_arinfo = (void *) dst_info->rate_driver_data;
-
- dst_txc->phy_control = src_txc->phy_control;
-
- /* same MCS for the whole aggregate */
- memcpy(dst_info->driver_rates, src_info->driver_rates,
- sizeof(dst_info->driver_rates));
-}
-
static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
@@ -1420,14 +1250,7 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
txc->phy_control |=
cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- if (unlikely(!info->control.sta))
- goto err_out;
-
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
- } else {
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
- }
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
}
return 0;
@@ -1537,158 +1360,6 @@ static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
}
-static bool ar9170_tx_ampdu(struct ar9170 *ar)
-{
- struct sk_buff_head agg;
- struct ar9170_sta_tid *tid_info = NULL, *tmp;
- struct sk_buff *skb, *first = NULL;
- unsigned long flags, f2;
- unsigned int i = 0;
- u16 seq, queue, tmpssn;
- bool run = false;
-
- skb_queue_head_init(&agg);
-
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- if (list_empty(&ar->tx_ampdu_list)) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: aggregation list is empty.\n",
- wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
- goto out_unlock;
- }
-
- list_for_each_entry_safe(tid_info, tmp, &ar->tx_ampdu_list, list) {
- if (tid_info->state != AR9170_TID_STATE_COMPLETE) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: dangling aggregation entry!\n",
- wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
- continue;
- }
-
- if (++i > 64) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: enough frames aggregated.\n",
- wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
- break;
- }
-
- queue = TID_TO_WME_AC(tid_info->tid);
-
- if (skb_queue_len(&ar->tx_pending[queue]) >=
- AR9170_NUM_TX_AGG_MAX) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: queue %d full.\n",
- wiphy_name(ar->hw->wiphy), queue);
-#endif /* AR9170_TXAGG_DEBUG */
- continue;
- }
-
- list_del_init(&tid_info->list);
-
- spin_lock_irqsave(&tid_info->queue.lock, f2);
- tmpssn = seq = tid_info->ssn;
- first = skb_peek(&tid_info->queue);
-
- if (likely(first))
- tmpssn = ar9170_get_seq(first);
-
- if (unlikely(tmpssn != seq)) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: ssn mismatch [%d != %d]\n.",
- wiphy_name(ar->hw->wiphy), seq, tmpssn);
-#endif /* AR9170_TXAGG_DEBUG */
- tid_info->ssn = tmpssn;
- }
-
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: generate A-MPDU for tid:%d ssn:%d with "
- "%d queued frames.\n", wiphy_name(ar->hw->wiphy),
- tid_info->tid, tid_info->ssn,
- skb_queue_len(&tid_info->queue));
- __ar9170_dump_txqueue(ar, &tid_info->queue);
-#endif /* AR9170_TXAGG_DEBUG */
-
- while ((skb = skb_peek(&tid_info->queue))) {
- if (unlikely(ar9170_get_seq(skb) != seq))
- break;
-
- __skb_unlink(skb, &tid_info->queue);
- tid_info->ssn = seq = GET_NEXT_SEQ(seq);
-
- if (unlikely(skb_get_queue_mapping(skb) != queue)) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: tid:%d(q:%d) queue:%d "
- "!match.\n", wiphy_name(ar->hw->wiphy),
- tid_info->tid,
- TID_TO_WME_AC(tid_info->tid),
- skb_get_queue_mapping(skb));
-#endif /* AR9170_TXAGG_DEBUG */
- dev_kfree_skb_any(skb);
- continue;
- }
-
- if (unlikely(first == skb)) {
- ar9170_tx_prepare_phy(ar, skb);
- __skb_queue_tail(&agg, skb);
- first = skb;
- } else {
- ar9170_tx_copy_phy(ar, skb, first);
- __skb_queue_tail(&agg, skb);
- }
-
- if (unlikely(skb_queue_len(&agg) ==
- AR9170_NUM_TX_AGG_MAX))
- break;
- }
-
- if (skb_queue_empty(&tid_info->queue))
- tid_info->active = false;
- else
- list_add_tail(&tid_info->list,
- &ar->tx_ampdu_list);
-
- spin_unlock_irqrestore(&tid_info->queue.lock, f2);
-
- if (unlikely(skb_queue_empty(&agg))) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: queued empty list!\n",
- wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
- continue;
- }
-
- /*
- * tell the FW/HW that this is the last frame,
- * that way it will wait for the immediate block ack.
- */
- ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
-
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
- wiphy_name(ar->hw->wiphy));
- __ar9170_dump_txqueue(ar, &agg);
-#endif /* AR9170_TXAGG_DEBUG */
-
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-
- spin_lock_irqsave(&ar->tx_pending[queue].lock, flags);
- skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
- spin_unlock_irqrestore(&ar->tx_pending[queue].lock, flags);
- run = true;
-
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- }
-
-out_unlock:
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- __skb_queue_purge(&agg);
-
- return run;
-}
-
static void ar9170_tx(struct ar9170 *ar)
{
struct sk_buff *skb;
@@ -1728,7 +1399,7 @@ static void ar9170_tx(struct ar9170 *ar)
printk(KERN_DEBUG "%s: queue %d full\n",
wiphy_name(ar->hw->wiphy), i);
- printk(KERN_DEBUG "%s: stuck frames: ===> \n",
+ printk(KERN_DEBUG "%s: stuck frames: ===>\n",
wiphy_name(ar->hw->wiphy));
ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
ar9170_dump_txqueue(ar, &ar->tx_status[i]);
@@ -1763,9 +1434,6 @@ static void ar9170_tx(struct ar9170 *ar)
arinfo->timeout = jiffies +
msecs_to_jiffies(AR9170_TX_TIMEOUT);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- atomic_inc(&ar->tx_ampdu_pending);
-
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: send frame q:%d =>\n",
wiphy_name(ar->hw->wiphy), i);
@@ -1774,9 +1442,6 @@ static void ar9170_tx(struct ar9170 *ar)
err = ar->tx(ar, skb);
if (unlikely(err)) {
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- atomic_dec(&ar->tx_ampdu_pending);
-
frames_failed++;
dev_kfree_skb_any(skb);
} else {
@@ -1823,94 +1488,11 @@ static void ar9170_tx(struct ar9170 *ar)
msecs_to_jiffies(AR9170_JANITOR_DELAY));
}
-static bool ar9170_tx_ampdu_queue(struct ar9170 *ar, struct sk_buff *skb)
-{
- struct ieee80211_tx_info *txinfo;
- struct ar9170_sta_info *sta_info;
- struct ar9170_sta_tid *agg;
- struct sk_buff *iter;
- unsigned long flags, f2;
- unsigned int max;
- u16 tid, seq, qseq;
- bool run = false, queue = false;
-
- tid = ar9170_get_tid(skb);
- seq = ar9170_get_seq(skb);
- txinfo = IEEE80211_SKB_CB(skb);
- sta_info = (void *) txinfo->control.sta->drv_priv;
- agg = &sta_info->agg[tid];
- max = sta_info->ampdu_max_len;
-
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
-
- if (unlikely(agg->state != AR9170_TID_STATE_COMPLETE)) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: BlockACK session not fully initialized "
- "for ESS:%pM tid:%d state:%d.\n",
- wiphy_name(ar->hw->wiphy), agg->addr, agg->tid,
- agg->state);
-#endif /* AR9170_TXAGG_DEBUG */
- goto err_unlock;
- }
-
- if (!agg->active) {
- agg->active = true;
- agg->ssn = seq;
- queue = true;
- }
-
- /* check if seq is within the BA window */
- if (unlikely(!BAW_WITHIN(agg->ssn, max, seq))) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: frame with tid:%d seq:%d does not "
- "fit into BA window (%d - %d)\n",
- wiphy_name(ar->hw->wiphy), tid, seq, agg->ssn,
- (agg->ssn + max) & 0xfff);
-#endif /* AR9170_TXAGG_DEBUG */
- goto err_unlock;
- }
-
- spin_lock_irqsave(&agg->queue.lock, f2);
-
- skb_queue_reverse_walk(&agg->queue, iter) {
- qseq = ar9170_get_seq(iter);
-
- if (GET_NEXT_SEQ(qseq) == seq) {
- __skb_queue_after(&agg->queue, iter, skb);
- goto queued;
- }
- }
-
- __skb_queue_head(&agg->queue, skb);
-
-queued:
- spin_unlock_irqrestore(&agg->queue.lock, f2);
-
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: new aggregate %p queued.\n",
- wiphy_name(ar->hw->wiphy), skb);
- __ar9170_dump_txqueue(ar, &agg->queue);
-#endif /* AR9170_TXAGG_DEBUG */
-
- if (skb_queue_len(&agg->queue) >= AR9170_NUM_TX_AGG_MAX)
- run = true;
-
- if (queue)
- list_add_tail(&agg->list, &ar->tx_ampdu_list);
-
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- return run;
-
-err_unlock:
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- dev_kfree_skb_irq(skb);
- return false;
-}
-
int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ar9170 *ar = hw->priv;
struct ieee80211_tx_info *info;
+ unsigned int queue;
if (unlikely(!IS_STARTED(ar)))
goto err_free;
@@ -1918,18 +1500,10 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (unlikely(ar9170_tx_prepare(ar, skb)))
goto err_free;
+ queue = skb_get_queue_mapping(skb);
info = IEEE80211_SKB_CB(skb);
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- bool run = ar9170_tx_ampdu_queue(ar, skb);
-
- if (run || !atomic_read(&ar->tx_ampdu_pending))
- ar9170_tx_ampdu(ar);
- } else {
- unsigned int queue = skb_get_queue_mapping(skb);
-
- ar9170_tx_prepare_phy(ar, skb);
- skb_queue_tail(&ar->tx_pending[queue], skb);
- }
+ ar9170_tx_prepare_phy(ar, skb);
+ skb_queue_tail(&ar->tx_pending[queue], skb);
ar9170_tx(ar);
return NETDEV_TX_OK;
@@ -2046,21 +1620,17 @@ out:
return err;
}
-static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
- struct dev_addr_list *mclist)
+static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
{
u64 mchash;
- int i;
+ struct netdev_hw_addr *ha;
/* always get broadcast frames */
mchash = 1ULL << (0xff >> 2);
- for (i = 0; i < mc_count; i++) {
- if (WARN_ON(!mclist))
- break;
- mchash |= 1ULL << (mclist->dmi_addr[5] >> 2);
- mclist = mclist->next;
- }
+ netdev_hw_addr_list_for_each(ha, mc_list)
+ mchash |= 1ULL << (ha->addr[5] >> 2);
return mchash;
}
@@ -2330,57 +1900,6 @@ out:
return err;
}
-static int ar9170_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct ar9170 *ar = hw->priv;
- struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
- unsigned int i;
-
- memset(sta_info, 0, sizeof(*sta_info));
-
- if (!sta->ht_cap.ht_supported)
- return 0;
-
- if (sta->ht_cap.ampdu_density > ar->global_ampdu_density)
- ar->global_ampdu_density = sta->ht_cap.ampdu_density;
-
- if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor)
- ar->global_ampdu_factor = sta->ht_cap.ampdu_factor;
-
- for (i = 0; i < AR9170_NUM_TID; i++) {
- sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
- sta_info->agg[i].active = false;
- sta_info->agg[i].ssn = 0;
- sta_info->agg[i].tid = i;
- INIT_LIST_HEAD(&sta_info->agg[i].list);
- skb_queue_head_init(&sta_info->agg[i].queue);
- }
-
- sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
-
- return 0;
-}
-
-static int ar9170_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
- unsigned int i;
-
- if (!sta->ht_cap.ht_supported)
- return 0;
-
- for (i = 0; i < AR9170_NUM_TID; i++) {
- sta_info->agg[i].state = AR9170_TID_STATE_INVALID;
- skb_queue_purge(&sta_info->agg[i].queue);
- }
-
- return 0;
-}
-
static int ar9170_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -2423,55 +1942,7 @@ static int ar9170_ampdu_action(struct ieee80211_hw *hw,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
- struct ar9170 *ar = hw->priv;
- struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
- struct ar9170_sta_tid *tid_info = &sta_info->agg[tid];
- unsigned long flags;
-
- if (!modparam_ht)
- return -EOPNOTSUPP;
-
switch (action) {
- case IEEE80211_AMPDU_TX_START:
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- if (tid_info->state != AR9170_TID_STATE_SHUTDOWN ||
- !list_empty(&tid_info->list)) {
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_INFO "%s: A-MPDU [ESS:[%pM] tid:[%d]] "
- "is in a very bad state!\n",
- wiphy_name(hw->wiphy), sta->addr, tid);
-#endif /* AR9170_TXAGG_DEBUG */
- return -EBUSY;
- }
-
- *ssn = tid_info->ssn;
- tid_info->state = AR9170_TID_STATE_PROGRESS;
- tid_info->active = false;
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
-
- case IEEE80211_AMPDU_TX_STOP:
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- tid_info->state = AR9170_TID_STATE_SHUTDOWN;
- list_del_init(&tid_info->list);
- tid_info->active = false;
- skb_queue_purge(&tid_info->queue);
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
-
- case IEEE80211_AMPDU_TX_OPERATIONAL:
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_INFO "%s: A-MPDU for %pM [tid:%d] Operational.\n",
- wiphy_name(hw->wiphy), sta->addr, tid);
-#endif /* AR9170_TXAGG_DEBUG */
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- sta_info->agg[tid].state = AR9170_TID_STATE_COMPLETE;
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- break;
-
case IEEE80211_AMPDU_RX_START:
case IEEE80211_AMPDU_RX_STOP:
/* Handled by firmware */
@@ -2497,8 +1968,6 @@ static const struct ieee80211_ops ar9170_ops = {
.bss_info_changed = ar9170_op_bss_info_changed,
.get_tsf = ar9170_op_get_tsf,
.set_key = ar9170_set_key,
- .sta_add = ar9170_sta_add,
- .sta_remove = ar9170_sta_remove,
.get_stats = ar9170_get_stats,
.ampdu_action = ar9170_ampdu_action,
};
@@ -2516,7 +1985,7 @@ void *ar9170_alloc(size_t priv_size)
* tends to split the streams into separate rx descriptors.
*/
- skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
+ skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
if (!skb)
goto err_nomem;
@@ -2531,8 +2000,6 @@ void *ar9170_alloc(size_t priv_size)
mutex_init(&ar->mutex);
spin_lock_init(&ar->cmdlock);
spin_lock_init(&ar->tx_stats_lock);
- spin_lock_init(&ar->tx_ampdu_list_lock);
- skb_queue_head_init(&ar->tx_status_ampdu);
for (i = 0; i < __AR9170_NUM_TXQ; i++) {
skb_queue_head_init(&ar->tx_status[i]);
skb_queue_head_init(&ar->tx_pending[i]);
@@ -2540,7 +2007,6 @@ void *ar9170_alloc(size_t priv_size)
ar9170_rx_reset_rx_mpdu(ar);
INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
- INIT_LIST_HEAD(&ar->tx_ampdu_list);
/* all hw supports 2.4 GHz, so set channel to 1 by default */
ar->channel = &ar9170_2ghz_chantable[0];
@@ -2551,19 +2017,10 @@ void *ar9170_alloc(size_t priv_size)
BIT(NL80211_IFTYPE_ADHOC);
ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM;
-
- if (modparam_ht) {
- ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
- } else {
- ar9170_band_2GHz.ht_cap.ht_supported = false;
- ar9170_band_5GHz.ht_cap.ht_supported = false;
- }
+ IEEE80211_HW_SIGNAL_DBM;
ar->hw->queues = __AR9170_NUM_TXQ;
ar->hw->extra_tx_headroom = 8;
- ar->hw->sta_data_size = sizeof(struct ar9170_sta_info);
ar->hw->max_rates = 1;
ar->hw->max_rate_tries = 3;
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e1c2fca..82ab532 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -42,6 +42,7 @@
#include <linux/usb.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
+#include <linux/device.h>
#include <net/mac80211.h>
#include "ar9170.h"
#include "cmd.h"
@@ -67,18 +68,28 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0x1001) },
/* TP-Link TL-WN821N v2 */
{ USB_DEVICE(0x0cf3, 0x1002) },
+ /* 3Com Dual Band 802.11n USB Adapter */
+ { USB_DEVICE(0x0cf3, 0x1010) },
+ /* H3C Dual Band 802.11n USB Adapter */
+ { USB_DEVICE(0x0cf3, 0x1011) },
/* Cace Airpcap NX */
{ USB_DEVICE(0xcace, 0x0300) },
/* D-Link DWA 160 A1 */
{ USB_DEVICE(0x07d1, 0x3c10) },
/* D-Link DWA 160 A2 */
{ USB_DEVICE(0x07d1, 0x3a09) },
+ /* Netgear WNA1000 */
+ { USB_DEVICE(0x0846, 0x9040) },
/* Netgear WNDA3100 */
{ USB_DEVICE(0x0846, 0x9010) },
/* Netgear WN111 v2 */
{ USB_DEVICE(0x0846, 0x9001) },
/* Zydas ZD1221 */
{ USB_DEVICE(0x0ace, 0x1221) },
+ /* Proxim ORiNOCO 802.11n USB */
+ { USB_DEVICE(0x1435, 0x0804) },
+ /* WNC Generic 11n USB Dongle */
+ { USB_DEVICE(0x1435, 0x0326) },
/* ZyXEL NWD271N */
{ USB_DEVICE(0x0586, 0x3417) },
/* Z-Com UB81 BG */
@@ -99,6 +110,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0409, 0x0249) },
/* AVM FRITZ!WLAN USB Stick N 2.4 */
{ USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY },
+ /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
+ { USB_DEVICE(0x1668, 0x1200) },
/* terminate */
{}
@@ -202,7 +215,7 @@ resubmit:
return;
free:
- usb_buffer_free(aru->udev, 64, urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(aru->udev, 64, urb->transfer_buffer, urb->transfer_dma);
}
static void ar9170_usb_rx_completed(struct urb *urb)
@@ -283,7 +296,7 @@ static int ar9170_usb_alloc_rx_irq_urb(struct ar9170_usb *aru)
if (!urb)
goto out;
- ibuf = usb_buffer_alloc(aru->udev, 64, GFP_KERNEL, &urb->transfer_dma);
+ ibuf = usb_alloc_coherent(aru->udev, 64, GFP_KERNEL, &urb->transfer_dma);
if (!ibuf)
goto out;
@@ -296,8 +309,8 @@ static int ar9170_usb_alloc_rx_irq_urb(struct ar9170_usb *aru)
err = usb_submit_urb(urb, GFP_KERNEL);
if (err) {
usb_unanchor_urb(urb);
- usb_buffer_free(aru->udev, 64, urb->transfer_buffer,
- urb->transfer_dma);
+ usb_free_coherent(aru->udev, 64, urb->transfer_buffer,
+ urb->transfer_dma);
}
out:
@@ -731,10 +744,10 @@ static void ar9170_usb_firmware_failed(struct ar9170_usb *aru)
/* unbind anything failed */
if (parent)
- down(&parent->sem);
+ device_lock(parent);
device_release_driver(&aru->udev->dev);
if (parent)
- up(&parent->sem);
+ device_unlock(parent);
usb_put_dev(aru->udev);
}
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 71fc960..d32f282 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -48,6 +48,12 @@ enum ath_device_state {
ATH_HW_INITIALIZED,
};
+enum ath_bus_type {
+ ATH_PCI,
+ ATH_AHB,
+ ATH_USB,
+};
+
struct reg_dmn_pair_mapping {
u16 regDmnEnum;
u16 reg_5ghz_ctl;
@@ -65,17 +71,30 @@ struct ath_regulatory {
struct reg_dmn_pair_mapping *regpair;
};
+/**
+ * struct ath_ops - Register read/write operations
+ *
+ * @read: Register read
+ * @write: Register write
+ * @enable_write_buffer: Enable multiple register writes
+ * @disable_write_buffer: Disable multiple register writes
+ * @write_flush: Flush buffered register writes
+ */
struct ath_ops {
unsigned int (*read)(void *, u32 reg_offset);
- void (*write)(void *, u32 val, u32 reg_offset);
+ void (*write)(void *, u32 val, u32 reg_offset);
+ void (*enable_write_buffer)(void *);
+ void (*disable_write_buffer)(void *);
+ void (*write_flush) (void *);
};
struct ath_common;
struct ath_bus_ops {
- void (*read_cachesize)(struct ath_common *common, int *csz);
- bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
- void (*bt_coex_prep)(struct ath_common *common);
+ enum ath_bus_type ath_bus_type;
+ void (*read_cachesize)(struct ath_common *common, int *csz);
+ bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
+ void (*bt_coex_prep)(struct ath_common *common);
};
struct ath_common {
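The new buffering hooks in struct ath_ops are optional; a bus glue that batches several register writes would be expected to bracket them roughly as in the sketch below. This is an illustration only, not code from this patch: the common->ops wiring, the opaque sc handle and the register offsets passed in are assumptions.

/* Hedged sketch: batch two register writes through the optional hooks. */
static void example_buffered_writes(struct ath_common *common, void *sc,
				    u32 reg_a, u32 reg_b)
{
	const struct ath_ops *ops = common->ops;	/* assumed wiring */

	if (ops->enable_write_buffer)
		ops->enable_write_buffer(sc);		/* start collecting writes */

	ops->write(sc, 0x1, reg_a);
	ops->write(sc, 0x2, reg_b);

	if (ops->write_flush)
		ops->write_flush(sc);			/* push buffered writes out */
	if (ops->disable_write_buffer)
		ops->disable_write_buffer(sc);
}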
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index 090dc6d..cc09595 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -12,5 +12,6 @@ ath5k-y += attach.o
ath5k-y += base.o
ath5k-y += led.o
ath5k-y += rfkill.o
+ath5k-y += ani.o
ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
new file mode 100644
index 0000000..f2311ab
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -0,0 +1,744 @@
+/*
+ * Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath5k.h"
+#include "base.h"
+#include "reg.h"
+#include "debug.h"
+#include "ani.h"
+
+/**
+ * DOC: Basic ANI Operation
+ *
+ * Adaptive Noise Immunity (ANI) controls five noise immunity parameters
+ * depending on the amount of interference in the environment, increasing
+ * or reducing sensitivity as necessary.
+ *
+ * The parameters are:
+ * - "noise immunity"
+ * - "spur immunity"
+ * - "firstep level"
+ * - "OFDM weak signal detection"
+ * - "CCK weak signal detection"
+ *
+ * Basically we look at the amount of OFDM and CCK timing errors we get and then
+ * raise or lower immunity accordingly by setting one or more of these
+ * parameters.
+ * Newer chipsets have PHY error counters in hardware which will generate a MIB
+ * interrupt when they overflow. Older hardware has to enable PHY error frames
+ * by setting an RX flag and then count every single PHY error. When a specified
+ * threshold of errors has been reached we will raise immunity.
+ * Also we regularly check the amount of errors and lower or raise immunity as
+ * necessary.
+ */
+
+
+/*** ANI parameter control ***/
+
+/**
+ * ath5k_ani_set_noise_immunity_level() - Set noise immunity level
+ *
+ * @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
+ */
+void
+ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
+{
+ /* TODO:
+ * ANI documents suggest the following five levels to use, but the HAL
+ * and ath9k only use the last two levels, making this
+ * essentially an on/off option. There *may* be a reason for this (???),
+ * so I stick with the HAL version for now...
+ */
+#if 0
+ const s8 hi[] = { -18, -18, -16, -14, -12 };
+ const s8 lo[] = { -52, -56, -60, -64, -70 };
+ const s8 sz[] = { -34, -41, -48, -55, -62 };
+ const s8 fr[] = { -70, -72, -75, -78, -80 };
+#else
+ const s8 sz[] = { -55, -62 };
+ const s8 lo[] = { -64, -70 };
+ const s8 hi[] = { -14, -12 };
+ const s8 fr[] = { -78, -80 };
+#endif
+ if (level < 0 || level >= ARRAY_SIZE(sz)) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "level out of range %d", level);
+ return;
+ }
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
+ AR5K_PHY_DESIRED_SIZE_TOT, sz[level]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
+ AR5K_PHY_AGCCOARSE_LO, lo[level]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
+ AR5K_PHY_AGCCOARSE_HI, hi[level]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
+ AR5K_PHY_SIG_FIRPWR, fr[level]);
+
+ ah->ah_sc->ani_state.noise_imm_level = level;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
+}
+
+
+/**
+ * ath5k_ani_set_spur_immunity_level() - Set spur immunity level
+ *
+ * @level: level between 0 and @max_spur_level (the maximum level is dependent
+ * on the chip revision).
+ */
+void
+ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
+{
+ const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
+
+ if (level < 0 || level >= ARRAY_SIZE(val) ||
+ level > ah->ah_sc->ani_state.max_spur_level) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "level out of range %d", level);
+ return;
+ }
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
+ AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1, val[level]);
+
+ ah->ah_sc->ani_state.spur_level = level;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
+}
+
+
+/**
+ * ath5k_ani_set_firstep_level() - Set "firstep" level
+ *
+ * @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
+ */
+void
+ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
+{
+ const int val[] = { 0, 4, 8 };
+
+ if (level < 0 || level >= ARRAY_SIZE(val)) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "level out of range %d", level);
+ return;
+ }
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
+ AR5K_PHY_SIG_FIRSTEP, val[level]);
+
+ ah->ah_sc->ani_state.firstep_level = level;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
+}
+
+
+/**
+ * ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal
+ * detection
+ *
+ * @on: turn on or off
+ */
+void
+ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
+{
+ const int m1l[] = { 127, 50 };
+ const int m2l[] = { 127, 40 };
+ const int m1[] = { 127, 0x4d };
+ const int m2[] = { 127, 0x40 };
+ const int m2cnt[] = { 31, 16 };
+ const int m2lcnt[] = { 63, 48 };
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_M1, m1l[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_M2, m2l[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
+ AR5K_PHY_WEAK_OFDM_HIGH_THR_M1, m1[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
+ AR5K_PHY_WEAK_OFDM_HIGH_THR_M2, m2[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
+ AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT, m2cnt[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT, m2lcnt[on]);
+
+ if (on)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
+ else
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
+
+ ah->ah_sc->ani_state.ofdm_weak_sig = on;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
+ on ? "on" : "off");
+}
+
+
+/**
+ * ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection
+ *
+ * @on: turn on or off
+ */
+void
+ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
+{
+ const int val[] = { 8, 6 };
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR,
+ AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]);
+ ah->ah_sc->ani_state.cck_weak_sig = on;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
+ on ? "on" : "off");
+}
+
+
+/*** ANI algorithm ***/
+
+/**
+ * ath5k_ani_raise_immunity() - Increase noise immunity
+ *
+ * @ofdm_trigger: If this is true we are called because of too many OFDM errors;
+ * in that case the algorithm will tune more parameters.
+ *
+ * Try to raise noise immunity (=decrease sensitivity) in several steps
+ * depending on the average RSSI of the beacons we received.
+ */
+static void
+ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
+ bool ofdm_trigger)
+{
+ int rssi = ah->ah_beacon_rssi_avg.avg;
+
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "raise immunity (%s)",
+ ofdm_trigger ? "ODFM" : "CCK");
+
+ /* first: raise noise immunity */
+ if (as->noise_imm_level < ATH5K_ANI_MAX_NOISE_IMM_LVL) {
+ ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level + 1);
+ return;
+ }
+
+ /* only OFDM: raise spur immunity level */
+ if (ofdm_trigger &&
+ as->spur_level < ah->ah_sc->ani_state.max_spur_level) {
+ ath5k_ani_set_spur_immunity_level(ah, as->spur_level + 1);
+ return;
+ }
+
+ /* AP mode */
+ if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
+ if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
+ ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
+ return;
+ }
+
+ /* STA and IBSS mode */
+
+ /* TODO: for IBSS mode it would be better to keep a beacon RSSI average
+ * per neighbour node and use the minimum of these, to make sure we
+ * don't shut out a remote node by raising immunity too high. */
+
+ if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "beacon RSSI high");
+ /* only OFDM: beacon RSSI is high, we can disable OFDM weak
+ * signal detection */
+ if (ofdm_trigger && as->ofdm_weak_sig == true) {
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
+ ath5k_ani_set_spur_immunity_level(ah, 0);
+ return;
+ }
+ /* as a last resort or CCK: raise firstep level */
+ if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL) {
+ ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
+ return;
+ }
+ } else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
+ /* beacon RSSI in mid range, we need OFDM weak signal detect,
+ * but can raise firstep level */
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "beacon RSSI mid");
+ if (ofdm_trigger && as->ofdm_weak_sig == false)
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
+ if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
+ ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
+ return;
+ } else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) {
+ /* beacon RSSI is low. in B/G mode turn off OFDM weak signal
+ * detect and zero firstep level to maximize CCK sensitivity */
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "beacon RSSI low, 2GHz");
+ if (ofdm_trigger && as->ofdm_weak_sig == true)
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
+ if (as->firstep_level > 0)
+ ath5k_ani_set_firstep_level(ah, 0);
+ return;
+ }
+
+ /* TODO: why not?:
+ if (as->cck_weak_sig == true) {
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
+ }
+ */
+}
+
+
+/**
+ * ath5k_ani_lower_immunity() - Decrease noise immunity
+ *
+ * Try to lower noise immunity (=increase sensitivity) in several steps
+ * depending on the average RSSI of the beacons we received.
+ */
+static void
+ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+{
+ int rssi = ah->ah_beacon_rssi_avg.avg;
+
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "lower immunity");
+
+ if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
+ /* AP mode */
+ if (as->firstep_level > 0) {
+ ath5k_ani_set_firstep_level(ah, as->firstep_level - 1);
+ return;
+ }
+ } else {
+ /* STA and IBSS mode (see TODO above) */
+ if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
+ /* beacon signal is high, leave OFDM weak signal
+ * detection off or it may oscillate
+ * TODO: who said it's off??? */
+ } else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
+ /* beacon RSSI is mid-range: turn on OFDM weak signal
+ * detection and next, lower firstep level */
+ if (as->ofdm_weak_sig == false) {
+ ath5k_ani_set_ofdm_weak_signal_detection(ah,
+ true);
+ return;
+ }
+ if (as->firstep_level > 0) {
+ ath5k_ani_set_firstep_level(ah,
+ as->firstep_level - 1);
+ return;
+ }
+ } else {
+ /* beacon signal is low: only reduce firstep level */
+ if (as->firstep_level > 0) {
+ ath5k_ani_set_firstep_level(ah,
+ as->firstep_level - 1);
+ return;
+ }
+ }
+ }
+
+ /* all modes */
+ if (as->spur_level > 0) {
+ ath5k_ani_set_spur_immunity_level(ah, as->spur_level - 1);
+ return;
+ }
+
+ /* finally, reduce noise immunity */
+ if (as->noise_imm_level > 0) {
+ ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level - 1);
+ return;
+ }
+}
+
+
+/**
+ * ath5k_hw_ani_get_listen_time() - Calculate time spent listening
+ *
+ * Return an approximation of the time spent "listening" in milliseconds (ms)
+ * since the last call of this function by deducting the cycles spent
+ * transmitting and receiving from the total cycle count.
+ * Save profile count values for debugging/statistics and because we might want
+ * to use them later.
+ *
+ * We assume no one else clears these registers!
+ */
+static int
+ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+{
+ int listen;
+
+ /* freeze */
+ ath5k_hw_reg_write(ah, AR5K_MIBC_FMC, AR5K_MIBC);
+ /* read */
+ as->pfc_cycles = ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE);
+ as->pfc_busy = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR);
+ as->pfc_tx = ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX);
+ as->pfc_rx = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX);
+ /* clear */
+ ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
+ ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
+ ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
+ ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
+ /* un-freeze */
+ ath5k_hw_reg_write(ah, 0, AR5K_MIBC);
+
+ /* TODO: where does 44000 come from? (11g clock rate?) */
+ listen = (as->pfc_cycles - as->pfc_rx - as->pfc_tx) / 44000;
+
+ if (as->pfc_cycles == 0 || listen < 0)
+ return 0;
+ return listen;
+}
+
+
+/**
+ * ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
+ *
+ * Clear the PHY error counters as soon as possible, since this might be called
+ * from a MIB interrupt and we want to make sure we don't get interrupted again.
+ * Add the count of CCK and OFDM errors to our internal state, so it can be used
+ * by the algorithm later.
+ *
+ * Will be called from interrupt and tasklet context.
+ * Returns 0 if both counters are zero.
+ */
+static int
+ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
+ struct ath5k_ani_state *as)
+{
+ unsigned int ofdm_err, cck_err;
+
+ if (!ah->ah_capabilities.cap_has_phyerr_counters)
+ return 0;
+
+ ofdm_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1);
+ cck_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2);
+
+ /* reset counters first, we might be in a hurry (interrupt) */
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
+ AR5K_PHYERR_CNT1);
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
+ AR5K_PHYERR_CNT2);
+
+ ofdm_err = ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - ofdm_err);
+ cck_err = ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - cck_err);
+
+ /* sometimes both can be zero, especially when there is a superfluous
+ * second interrupt. Detect that here and return 0. */
+ if (ofdm_err <= 0 && cck_err <= 0)
+ return 0;
+
+ /* avoid negative values should one of the registers overflow */
+ if (ofdm_err > 0) {
+ as->ofdm_errors += ofdm_err;
+ as->sum_ofdm_errors += ofdm_err;
+ }
+ if (cck_err > 0) {
+ as->cck_errors += cck_err;
+ as->sum_cck_errors += cck_err;
+ }
+ return 1;
+}
+
+
+/**
+ * ath5k_ani_period_restart() - Restart ANI period
+ *
+ * Just reset counters, so they are clear for the next "ani period".
+ */
+static void
+ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+{
+ /* keep last values for debugging */
+ as->last_ofdm_errors = as->ofdm_errors;
+ as->last_cck_errors = as->cck_errors;
+ as->last_listen = as->listen_time;
+
+ as->ofdm_errors = 0;
+ as->cck_errors = 0;
+ as->listen_time = 0;
+}
+
+
+/**
+ * ath5k_ani_calibration() - The main ANI calibration function
+ *
+ * We count OFDM and CCK errors relative to the time where we did not send or
+ * receive ("listen" time) and raise or lower immunity accordingly.
+ * This is called regularly (every second) from the calibration timer, but also
+ * when an error threshold has been reached.
+ *
+ * In order to synchronize access from different contexts, this should be
+ * called only indirectly by scheduling the ANI tasklet!
+ */
+void
+ath5k_ani_calibration(struct ath5k_hw *ah)
+{
+ struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
+ int listen, ofdm_high, ofdm_low, cck_high, cck_low;
+
+ if (as->ani_mode != ATH5K_ANI_MODE_AUTO)
+ return;
+
+ /* get listen time since last call and add it to the counter because we
+ * might not have restarted the "ani period" last time */
+ listen = ath5k_hw_ani_get_listen_time(ah, as);
+ as->listen_time += listen;
+
+ ath5k_ani_save_and_clear_phy_errors(ah, as);
+
+ ofdm_high = as->listen_time * ATH5K_ANI_OFDM_TRIG_HIGH / 1000;
+ cck_high = as->listen_time * ATH5K_ANI_CCK_TRIG_HIGH / 1000;
+ ofdm_low = as->listen_time * ATH5K_ANI_OFDM_TRIG_LOW / 1000;
+ cck_low = as->listen_time * ATH5K_ANI_CCK_TRIG_LOW / 1000;
+
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "listen %d (now %d)", as->listen_time, listen);
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "check high ofdm %d/%d cck %d/%d",
+ as->ofdm_errors, ofdm_high, as->cck_errors, cck_high);
+
+ if (as->ofdm_errors > ofdm_high || as->cck_errors > cck_high) {
+ /* too many PHY errors - we have to raise immunity */
+ bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
+ ath5k_ani_raise_immunity(ah, as, ofdm_flag);
+ ath5k_ani_period_restart(ah, as);
+
+ } else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
+ /* If more than 5 (TODO: why 5?) periods have passed and we got
+ * relatively little errors we can try to lower immunity */
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "check low ofdm %d/%d cck %d/%d",
+ as->ofdm_errors, ofdm_low, as->cck_errors, cck_low);
+
+ if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
+ ath5k_ani_lower_immunity(ah, as);
+
+ ath5k_ani_period_restart(ah, as);
+ }
+}
+
+
+/*** INTERRUPT HANDLER ***/
+
+/**
+ * ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
+ *
+ * Just read & reset the registers quickly, so they don't generate more
+ * interrupts, save the counters and schedule the tasklet to decide whether
+ * to raise immunity or not.
+ *
+ * We just need to handle PHY error counters, ath5k_hw_update_mib_counters()
+ * should take care of all "normal" MIB interrupts.
+ */
+void
+ath5k_ani_mib_intr(struct ath5k_hw *ah)
+{
+ struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
+
+ /* nothing to do here if HW does not have PHY error counters - they
+ * can't be the reason for the MIB interrupt then */
+ if (!ah->ah_capabilities.cap_has_phyerr_counters)
+ return;
+
+ /* not in use but clear anyway */
+ ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
+ ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
+
+ if (ah->ah_sc->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
+ return;
+
+ /* if one of the errors triggered, we can get a superfluous second
+ * interrupt, even though we have already reset the register. the
+ * function detects that so we can return early */
+ if (ath5k_ani_save_and_clear_phy_errors(ah, as) == 0)
+ return;
+
+ if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH ||
+ as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
+ tasklet_schedule(&ah->ah_sc->ani_tasklet);
+}
+
+
+/**
+ * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
+ *
+ * This is used by hardware without PHY error counters to report PHY errors
+ * on a frame-by-frame basis, instead of the interrupt.
+ */
+void
+ath5k_ani_phy_error_report(struct ath5k_hw *ah,
+ enum ath5k_phy_error_code phyerr)
+{
+ struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
+
+ if (phyerr == AR5K_RX_PHY_ERROR_OFDM_TIMING) {
+ as->ofdm_errors++;
+ if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH)
+ tasklet_schedule(&ah->ah_sc->ani_tasklet);
+ } else if (phyerr == AR5K_RX_PHY_ERROR_CCK_TIMING) {
+ as->cck_errors++;
+ if (as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
+ tasklet_schedule(&ah->ah_sc->ani_tasklet);
+ }
+}
+
+
+/*** INIT ***/
+
+/**
+ * ath5k_enable_phy_err_counters() - Enable PHY error counters
+ *
+ * Enable PHY error counters for OFDM and CCK timing errors.
+ */
+static void
+ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
+{
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
+ AR5K_PHYERR_CNT1);
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
+ AR5K_PHYERR_CNT2);
+ ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_OFDM, AR5K_PHYERR_CNT1_MASK);
+ ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_CCK, AR5K_PHYERR_CNT2_MASK);
+
+ /* not in use */
+ ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
+ ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
+}
+
+
+/**
+ * ath5k_disable_phy_err_counters() - Disable PHY error counters
+ *
+ * Disable PHY error counters for OFDM and CCK timing errors.
+ */
+static void
+ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
+{
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1_MASK);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2_MASK);
+
+ /* not in use */
+ ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
+ ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
+}
+
+
+/**
+ * ath5k_ani_init() - Initialize ANI
+ * @mode: Which mode to use (auto, manual high, manual low, off)
+ *
+ * Initialize ANI according to mode.
+ */
+void
+ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
+{
+ /* ANI is only possible on 5212 and newer */
+ if (ah->ah_version < AR5K_AR5212)
+ return;
+
+ /* clear old state information */
+ memset(&ah->ah_sc->ani_state, 0, sizeof(ah->ah_sc->ani_state));
+
+ /* older hardware has more spur levels than newer */
+ if (ah->ah_mac_srev < AR5K_SREV_AR2414)
+ ah->ah_sc->ani_state.max_spur_level = 7;
+ else
+ ah->ah_sc->ani_state.max_spur_level = 2;
+
+ /* initial values for our ani parameters */
+ if (mode == ATH5K_ANI_MODE_OFF) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI off\n");
+ } else if (mode == ATH5K_ANI_MODE_MANUAL_LOW) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "ANI manual low -> high sensitivity\n");
+ ath5k_ani_set_noise_immunity_level(ah, 0);
+ ath5k_ani_set_spur_immunity_level(ah, 0);
+ ath5k_ani_set_firstep_level(ah, 0);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
+ ath5k_ani_set_cck_weak_signal_detection(ah, true);
+ } else if (mode == ATH5K_ANI_MODE_MANUAL_HIGH) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "ANI manual high -> low sensitivity\n");
+ ath5k_ani_set_noise_immunity_level(ah,
+ ATH5K_ANI_MAX_NOISE_IMM_LVL);
+ ath5k_ani_set_spur_immunity_level(ah,
+ ah->ah_sc->ani_state.max_spur_level);
+ ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
+ } else if (mode == ATH5K_ANI_MODE_AUTO) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI auto\n");
+ ath5k_ani_set_noise_immunity_level(ah, 0);
+ ath5k_ani_set_spur_immunity_level(ah, 0);
+ ath5k_ani_set_firstep_level(ah, 0);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
+ }
+
+ /* newer hardware has PHY error counter registers which we can use to
+ * get OFDM and CCK error counts. older hardware has to set rxfilter and
+ * report every single PHY error by calling ath5k_ani_phy_error_report()
+ */
+ if (mode == ATH5K_ANI_MODE_AUTO) {
+ if (ah->ah_capabilities.cap_has_phyerr_counters)
+ ath5k_enable_phy_err_counters(ah);
+ else
+ ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) |
+ AR5K_RX_FILTER_PHYERR);
+ } else {
+ if (ah->ah_capabilities.cap_has_phyerr_counters)
+ ath5k_disable_phy_err_counters(ah);
+ else
+ ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) &
+ ~AR5K_RX_FILTER_PHYERR);
+ }
+
+ ah->ah_sc->ani_state.ani_mode = mode;
+}
+
+
+/*** DEBUG ***/
+
+#ifdef CONFIG_ATH5K_DEBUG
+
+void
+ath5k_ani_print_counters(struct ath5k_hw *ah)
+{
+ /* clears too */
+ printk(KERN_NOTICE "ACK fail\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_ACK_FAIL));
+ printk(KERN_NOTICE "RTS fail\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_RTS_FAIL));
+ printk(KERN_NOTICE "RTS success\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_RTS_OK));
+ printk(KERN_NOTICE "FCS error\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
+
+ /* no clear */
+ printk(KERN_NOTICE "tx\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX));
+ printk(KERN_NOTICE "rx\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX));
+ printk(KERN_NOTICE "busy\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR));
+ printk(KERN_NOTICE "cycles\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE));
+
+ printk(KERN_NOTICE "AR5K_PHYERR_CNT1\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1));
+ printk(KERN_NOTICE "AR5K_PHYERR_CNT2\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2));
+ printk(KERN_NOTICE "AR5K_OFDM_FIL_CNT\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
+ printk(KERN_NOTICE "AR5K_CCK_FIL_CNT\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
+}
+
+#endif
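The calibration above scales its error thresholds by the measured listen time: ATH5K_ANI_OFDM_TRIG_HIGH and friends are expressed per 1000 ms of listening, so a shorter interval lowers the trigger proportionally. A minimal standalone sketch of that arithmetic (the /1000 scaling mirrors ath5k_ani_calibration(); the numbers in the comment are only a worked example):

/* With ATH5K_ANI_OFDM_TRIG_HIGH == 500 and 300 ms of listen time the
 * "raise immunity" threshold becomes 300 * 500 / 1000 = 150 OFDM errors. */
static inline unsigned int ani_scaled_threshold(unsigned int listen_time_ms,
						unsigned int trigger_per_second)
{
	return listen_time_ms * trigger_per_second / 1000;
}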
diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h
new file mode 100644
index 0000000..55cf26d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ani.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef ANI_H
+#define ANI_H
+
+/* these thresholds are relative to the ATH5K_ANI_LISTEN_PERIOD */
+#define ATH5K_ANI_LISTEN_PERIOD 100
+#define ATH5K_ANI_OFDM_TRIG_HIGH 500
+#define ATH5K_ANI_OFDM_TRIG_LOW 200
+#define ATH5K_ANI_CCK_TRIG_HIGH 200
+#define ATH5K_ANI_CCK_TRIG_LOW 100
+
+/* average beacon RSSI thresholds */
+#define ATH5K_ANI_RSSI_THR_HIGH 40
+#define ATH5K_ANI_RSSI_THR_LOW 7
+
+/* maximum available levels */
+#define ATH5K_ANI_MAX_FIRSTEP_LVL 2
+#define ATH5K_ANI_MAX_NOISE_IMM_LVL 1
+
+
+/**
+ * enum ath5k_ani_mode - mode for ANI / noise sensitivity
+ *
+ * @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
+ * algorithm after it has been on auto mode.
+ * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
+ * maximizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
+ * minimizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
+ * amount of OFDM and CCK frame errors (default).
+ */
+enum ath5k_ani_mode {
+ ATH5K_ANI_MODE_OFF = 0,
+ ATH5K_ANI_MODE_MANUAL_LOW = 1,
+ ATH5K_ANI_MODE_MANUAL_HIGH = 2,
+ ATH5K_ANI_MODE_AUTO = 3
+};
+
+
+/**
+ * struct ath5k_ani_state - ANI state and associated counters
+ *
+ * @max_spur_level: the maximum spur level is chip dependent
+ */
+struct ath5k_ani_state {
+ enum ath5k_ani_mode ani_mode;
+
+ /* state */
+ int noise_imm_level;
+ int spur_level;
+ int firstep_level;
+ bool ofdm_weak_sig;
+ bool cck_weak_sig;
+
+ int max_spur_level;
+
+ /* used by the algorithm */
+ unsigned int listen_time;
+ unsigned int ofdm_errors;
+ unsigned int cck_errors;
+
+ /* debug/statistics only: numbers from last ANI calibration */
+ unsigned int pfc_tx;
+ unsigned int pfc_rx;
+ unsigned int pfc_busy;
+ unsigned int pfc_cycles;
+ unsigned int last_listen;
+ unsigned int last_ofdm_errors;
+ unsigned int last_cck_errors;
+ unsigned int sum_ofdm_errors;
+ unsigned int sum_cck_errors;
+};
+
+void ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode);
+void ath5k_ani_mib_intr(struct ath5k_hw *ah);
+void ath5k_ani_calibration(struct ath5k_hw *ah);
+void ath5k_ani_phy_error_report(struct ath5k_hw *ah,
+ enum ath5k_phy_error_code phyerr);
+
+/* for manual control */
+void ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level);
+void ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level);
+void ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level);
+void ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on);
+void ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on);
+
+void ath5k_ani_print_counters(struct ath5k_hw *ah);
+
+#endif /* ANI_H */
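
For reference, the manual-control entry points declared above can be driven directly once the automatic algorithm is stopped. Below is a sketch of one possible manual configuration (illustrative only, not part of the patch; example_manual_ani_setup is a hypothetical helper inside the driver, assuming a valid ath5k_hw pointer):

	/* illustrative sketch, not part of the patch: one possible manual setup */
	static void example_manual_ani_setup(struct ath5k_hw *ah)
	{
		/* stop the automatic algorithm first */
		ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);

		/* then pin the immunity parameters at the desired levels */
		ath5k_ani_set_noise_immunity_level(ah, ATH5K_ANI_MAX_NOISE_IMM_LVL);
		ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
		ath5k_ani_set_spur_immunity_level(ah,
				ah->ah_sc->ani_state.max_spur_level);
		ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
	}
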
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index ac67f02..2785946 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -202,7 +202,8 @@
#define AR5K_TUNE_MAX_TXPOWER 63
#define AR5K_TUNE_DEFAULT_TXPOWER 25
#define AR5K_TUNE_TPC_TXPOWER false
-#define AR5K_TUNE_HWTXTRIES 4
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
#define AR5K_INIT_CARR_SENSE_EN 1
@@ -614,28 +615,6 @@ struct ath5k_rx_status {
#define AR5K_BEACON_ENA 0x00800000 /*enable beacon xmit*/
#define AR5K_BEACON_RESET_TSF 0x01000000 /*force a TSF reset*/
-#if 0
-/**
- * struct ath5k_beacon_state - Per-station beacon timer state.
- * @bs_interval: in TU's, can also include the above flags
- * @bs_cfp_max_duration: if non-zero hw is setup to coexist with a
- * Point Coordination Function capable AP
- */
-struct ath5k_beacon_state {
- u32 bs_next_beacon;
- u32 bs_next_dtim;
- u32 bs_interval;
- u8 bs_dtim_period;
- u8 bs_cfp_period;
- u16 bs_cfp_max_duration;
- u16 bs_cfp_du_remain;
- u16 bs_tim_offset;
- u16 bs_sleep_duration;
- u16 bs_bmiss_threshold;
- u32 bs_cfp_next;
-};
-#endif
-
/*
* TSF to TU conversion:
@@ -822,9 +801,9 @@ struct ath5k_athchan_2ghz {
* @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
* We currently do increments on interrupt by
* (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
- * @AR5K_INT_MIB: Indicates the Management Information Base counters should be
- * checked. We should do this with ath5k_hw_update_mib_counters() but
- * it seems we should also then do some noise immunity work.
+ * @AR5K_INT_MIB: Indicates that either the Management Information Base counters or
+ * one of the PHY error counters reached the maximum value and should be
+ * read and cleared.
* @AR5K_INT_RXPHY: RX PHY Error
* @AR5K_INT_RXKCM: RX Key cache miss
* @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
@@ -912,10 +891,11 @@ enum ath5k_int {
AR5K_INT_NOCARD = 0xffffffff
};
-/* Software interrupts used for calibration */
-enum ath5k_software_interrupt {
- AR5K_SWI_FULL_CALIBRATION = 0x01,
- AR5K_SWI_SHORT_CALIBRATION = 0x02,
+/* mask which calibration is active at the moment */
+enum ath5k_calibration_mask {
+ AR5K_CALIBRATION_FULL = 0x01,
+ AR5K_CALIBRATION_SHORT = 0x02,
+ AR5K_CALIBRATION_ANI = 0x04,
};
/*
@@ -1004,6 +984,8 @@ struct ath5k_capabilities {
struct {
u8 q_tx_num;
} cap_queues;
+
+ bool cap_has_phyerr_counters;
};
/* size of noise floor history (keep it a power of two) */
@@ -1014,6 +996,15 @@ struct ath5k_nfcal_hist
s16 nfval[ATH5K_NF_CAL_HIST_MAX]; /* last few noise floors */
};
+/**
+ * struct ath5k_avg_val - Helper structure for average calculation
+ * @avg: contains the actual average value
+ * @avg_weight: is used internally during calculation to prevent rounding errors
+ */
+struct ath5k_avg_val {
+ int avg;
+ int avg_weight;
+};
/***************************************\
HARDWARE ABSTRACTION LAYER STRUCTURE
@@ -1028,7 +1019,6 @@ struct ath5k_nfcal_hist
/* TODO: Clean up and merge with ath5k_softc */
struct ath5k_hw {
- u32 ah_magic;
struct ath_common common;
struct ath5k_softc *ah_sc;
@@ -1036,7 +1026,6 @@ struct ath5k_hw {
enum ath5k_int ah_imr;
- enum nl80211_iftype ah_op_mode;
struct ieee80211_channel *ah_current_channel;
bool ah_turbo;
bool ah_calibration;
@@ -1049,7 +1038,6 @@ struct ath5k_hw {
u32 ah_phy;
u32 ah_mac_srev;
u16 ah_mac_version;
- u16 ah_mac_revision;
u16 ah_phy_revision;
u16 ah_radio_5ghz_revision;
u16 ah_radio_2ghz_revision;
@@ -1071,8 +1059,6 @@ struct ath5k_hw {
u8 ah_def_ant;
bool ah_software_retry;
- int ah_gpio_npins;
-
struct ath5k_capabilities ah_capabilities;
struct ath5k_txq_info ah_txq[AR5K_NUM_TX_QUEUES];
@@ -1123,17 +1109,18 @@ struct ath5k_hw {
struct ath5k_nfcal_hist ah_nfcal_hist;
+ /* average beacon RSSI in our BSS (used by ANI) */
+ struct ath5k_avg_val ah_beacon_rssi_avg;
+
/* noise floor from last periodic calibration */
s32 ah_noise_floor;
/* Calibration timestamp */
- unsigned long ah_cal_tstamp;
-
- /* Calibration interval (secs) */
- u8 ah_cal_intval;
+ unsigned long ah_cal_next_full;
+ unsigned long ah_cal_next_ani;
- /* Software interrupt mask */
- u8 ah_swi_mask;
+ /* Calibration mask */
+ u8 ah_cal_mask;
/*
* Function pointers
@@ -1141,9 +1128,9 @@ struct ath5k_hw {
int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc,
u32 size, unsigned int flags);
int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
- unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
+ unsigned int, unsigned int, int, enum ath5k_pkt_type,
unsigned int, unsigned int, unsigned int, unsigned int,
- unsigned int, unsigned int, unsigned int);
+ unsigned int, unsigned int, unsigned int, unsigned int);
int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
unsigned int, unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int);
@@ -1158,158 +1145,145 @@ struct ath5k_hw {
*/
/* Attach/Detach Functions */
-extern int ath5k_hw_attach(struct ath5k_softc *sc);
-extern void ath5k_hw_detach(struct ath5k_hw *ah);
+int ath5k_hw_attach(struct ath5k_softc *sc);
+void ath5k_hw_detach(struct ath5k_hw *ah);
/* LED functions */
-extern int ath5k_init_leds(struct ath5k_softc *sc);
-extern void ath5k_led_enable(struct ath5k_softc *sc);
-extern void ath5k_led_off(struct ath5k_softc *sc);
-extern void ath5k_unregister_leds(struct ath5k_softc *sc);
+int ath5k_init_leds(struct ath5k_softc *sc);
+void ath5k_led_enable(struct ath5k_softc *sc);
+void ath5k_led_off(struct ath5k_softc *sc);
+void ath5k_unregister_leds(struct ath5k_softc *sc);
/* Reset Functions */
-extern int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial);
-extern int ath5k_hw_on_hold(struct ath5k_hw *ah);
-extern int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, struct ieee80211_channel *channel, bool change_channel);
+int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial);
+int ath5k_hw_on_hold(struct ath5k_hw *ah);
+int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
+ struct ieee80211_channel *channel, bool change_channel);
+int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+ bool is_set);
/* Power management functions */
-extern int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, bool set_chip, u16 sleep_duration);
/* DMA Related Functions */
-extern void ath5k_hw_start_rx_dma(struct ath5k_hw *ah);
-extern int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah);
-extern u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah);
-extern void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
-extern int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
-extern int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue);
-extern u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
-extern int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
+void ath5k_hw_start_rx_dma(struct ath5k_hw *ah);
+int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah);
+u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah);
+void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
+int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue);
+u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
u32 phys_addr);
-extern int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase);
+int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase);
/* Interrupt handling */
-extern bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
-extern int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
-extern enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum
-ath5k_int new_mask);
-extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_low_level_stats *stats);
+bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
+int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
+enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask);
+void ath5k_hw_update_mib_counters(struct ath5k_hw *ah);
/* EEPROM access functions */
-extern int ath5k_eeprom_init(struct ath5k_hw *ah);
-extern void ath5k_eeprom_detach(struct ath5k_hw *ah);
-extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
-extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
+int ath5k_eeprom_init(struct ath5k_hw *ah);
+void ath5k_eeprom_detach(struct ath5k_hw *ah);
+int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
/* Protocol Control Unit Functions */
-extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
-extern void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
+extern int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype opmode);
+void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
/* BSSID Functions */
-extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
-extern void ath5k_hw_set_associd(struct ath5k_hw *ah);
-extern void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
+int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
+void ath5k_hw_set_associd(struct ath5k_hw *ah);
+void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
/* Receive start/stop functions */
-extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
-extern void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
+void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
+void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
/* RX Filter functions */
-extern void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1);
-extern int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index);
-extern int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index);
-extern u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
-extern void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
+void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1);
+u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
+void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
/* Beacon control functions */
-extern u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah);
-extern u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
-extern void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
-extern void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
-extern void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
-#if 0
-extern int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah, const struct ath5k_beacon_state *state);
-extern void ath5k_hw_reset_beacon(struct ath5k_hw *ah);
-extern int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr);
-#endif
+u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
+void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
+void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
+void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
/* ACK bit rate */
void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high);
-/* ACK/CTS Timeouts */
-extern int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout);
-extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah);
-extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout);
-extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah);
/* Clock rate related functions */
unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah);
/* Key table (WEP) functions */
-extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
-extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry);
-extern int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry, const struct ieee80211_key_conf *key, const u8 *mac);
-extern int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac);
+int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
+int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
+ const struct ieee80211_key_conf *key, const u8 *mac);
+int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac);
/* Queue Control Unit, DFS Control Unit Functions */
-extern int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, struct ath5k_txq_info *queue_info);
-extern int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
- const struct ath5k_txq_info *queue_info);
-extern int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
- enum ath5k_tx_queue queue_type,
- struct ath5k_txq_info *queue_info);
-extern u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
-extern void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
-extern int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
-extern unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah);
-extern int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
+int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
+ struct ath5k_txq_info *queue_info);
+int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
+ const struct ath5k_txq_info *queue_info);
+int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
+ enum ath5k_tx_queue queue_type,
+ struct ath5k_txq_info *queue_info);
+u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
+void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
/* Hardware Descriptor Functions */
-extern int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
+int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
/* GPIO Functions */
-extern void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
-extern int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
-extern int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
-extern u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
-extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
-extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level);
+void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
+int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
+int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
+u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
+int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
+void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
+ u32 interrupt_level);
/* rfkill Functions */
-extern void ath5k_rfkill_hw_start(struct ath5k_hw *ah);
-extern void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
+void ath5k_rfkill_hw_start(struct ath5k_hw *ah);
+void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
/* Misc functions */
int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
-extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result);
-extern int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
-extern int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
+int ath5k_hw_get_capability(struct ath5k_hw *ah,
+ enum ath5k_capability_type cap_type, u32 capability,
+ u32 *result);
+int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
+int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
/* Initial register settings functions */
-extern int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
+int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
/* Initialize RF */
-extern int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
- struct ieee80211_channel *channel,
- unsigned int mode);
-extern int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq);
-extern enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
-extern int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
+int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel,
+ unsigned int mode);
+int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq);
+enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
+int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
/* PHY/RF channel functions */
-extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
-extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
+bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
+int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
/* PHY calibration */
void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
-extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel);
-extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq);
-extern s16 ath5k_hw_get_noise_floor(struct ath5k_hw *ah);
-extern void ath5k_hw_calibration_poll(struct ath5k_hw *ah);
+int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel);
/* Spur mitigation */
bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
- struct ieee80211_channel *channel);
+ struct ieee80211_channel *channel);
void ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
- struct ieee80211_channel *channel);
+ struct ieee80211_channel *channel);
/* Misc PHY functions */
-extern u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
-extern int ath5k_hw_phy_disable(struct ath5k_hw *ah);
+u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
+int ath5k_hw_phy_disable(struct ath5k_hw *ah);
/* Antenna control */
-extern void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
-extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant);
-extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah);
+void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
/* TX power setup */
-extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 ee_mode, u8 txpower);
-extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
+int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+ u8 ee_mode, u8 txpower);
+int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
/*
* Functions used internaly
@@ -1335,29 +1309,6 @@ static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
iowrite32(val, ah->ah_iobase + reg);
}
-#if defined(_ATH5K_RESET) || defined(_ATH5K_PHY)
-/*
- * Check if a register write has been completed
- */
-static int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag,
- u32 val, bool is_set)
-{
- int i;
- u32 data;
-
- for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
- data = ath5k_hw_reg_read(ah, reg);
- if (is_set && (data & flag))
- break;
- else if ((data & flag) == val)
- break;
- udelay(15);
- }
-
- return (i <= 0) ? -EAGAIN : 0;
-}
-#endif
-
static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
{
u32 retval = 0, bit, i;
@@ -1370,9 +1321,27 @@ static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
return retval;
}
-static inline int ath5k_pad_size(int hdrlen)
+#define AVG_SAMPLES 8
+#define AVG_FACTOR 1000
+
+/**
+ * ath5k_moving_average - Exponentially weighted moving average
+ * @avg: average structure
+ * @val: current value
+ *
+ * This implementation makes use of a struct ath5k_avg_val to prevent rounding
+ * errors.
+ */
+static inline struct ath5k_avg_val
+ath5k_moving_average(const struct ath5k_avg_val avg, const int val)
{
- return (hdrlen < 24) ? 0 : hdrlen & 3;
+ struct ath5k_avg_val new;
+ new.avg_weight = avg.avg_weight ?
+ (((avg.avg_weight * ((AVG_SAMPLES) - 1)) +
+ (val * (AVG_FACTOR))) / (AVG_SAMPLES)) :
+ (val * (AVG_FACTOR));
+ new.avg = new.avg_weight / (AVG_FACTOR);
+ return new;
}
#endif
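
The fixed-point average is easiest to see with concrete numbers. Here is a small userspace sketch of the same arithmetic as ath5k_moving_average() (not part of the patch):

	#include <stdio.h>

	#define AVG_SAMPLES	8
	#define AVG_FACTOR	1000

	int main(void)
	{
		/* assume the running average currently sits at 40 ... */
		int weight = 40 * AVG_FACTOR;
		/* ... and the next beacon RSSI sample is 20 */
		int val = 20;

		/* same update as ath5k_moving_average():
		 * weight' = (7 * weight + 1000 * val) / 8 = 37500 */
		weight = ((weight * (AVG_SAMPLES - 1)) + (val * AVG_FACTOR)) / AVG_SAMPLES;

		/* 37500 / 1000 = 37: the average moves 1/8 of the way towards 20,
		 * and keeping the scaled weight preserves the fractional part */
		printf("avg = %d\n", weight / AVG_FACTOR);
		return 0;
	}
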
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index dc0786c..e0c244b 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -114,7 +114,6 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
/*
* HW information
*/
- ah->ah_op_mode = NL80211_IFTYPE_STATION;
ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT;
ah->ah_turbo = false;
ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
@@ -124,6 +123,9 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
ah->ah_cw_min = AR5K_TUNE_CWMIN;
ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
ah->ah_software_retry = false;
+ ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
+ ah->ah_noise_floor = -95; /* until first NF calibration is run */
+ sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
/*
* Find the mac version
@@ -149,7 +151,6 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
/* Get MAC, PHY and RADIO revisions */
ah->ah_mac_srev = srev;
ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
- ah->ah_mac_revision = AR5K_REG_MS(srev, AR5K_SREV_REV);
ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
0xffffffff;
ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
@@ -328,7 +329,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
/* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN);
ath5k_hw_set_associd(ah);
- ath5k_hw_set_opmode(ah);
+ ath5k_hw_set_opmode(ah, sc->opmode);
ath5k_hw_rfgain_opt_init(ah);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 3abbe75..5f04cf3 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -59,8 +59,8 @@
#include "base.h"
#include "reg.h"
#include "debug.h"
+#include "ani.h"
-static u8 ath5k_calinterval = 10; /* Calibrate PHY every 10 secs (TODO: Fixme) */
static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -199,7 +199,7 @@ static void __devexit ath5k_pci_remove(struct pci_dev *pdev);
static int ath5k_pci_suspend(struct device *dev);
static int ath5k_pci_resume(struct device *dev);
-SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
+static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
#define ATH5K_PM_OPS (&ath5k_pm_ops)
#else
#define ATH5K_PM_OPS NULL
@@ -231,7 +231,7 @@ static void ath5k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mc_list);
+ struct netdev_hw_addr_list *mc_list);
static void ath5k_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *new_flags,
@@ -242,6 +242,8 @@ static int ath5k_set_key(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key);
static int ath5k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
+static int ath5k_get_survey(struct ieee80211_hw *hw,
+ int idx, struct survey_info *survey);
static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
static void ath5k_reset_tsf(struct ieee80211_hw *hw);
@@ -267,6 +269,7 @@ static const struct ieee80211_ops ath5k_hw_ops = {
.configure_filter = ath5k_configure_filter,
.set_key = ath5k_set_key,
.get_stats = ath5k_get_stats,
+ .get_survey = ath5k_get_survey,
.conf_tx = NULL,
.get_tsf = ath5k_get_tsf,
.set_tsf = ath5k_set_tsf,
@@ -308,7 +311,7 @@ static int ath5k_rxbuf_setup(struct ath5k_softc *sc,
struct ath5k_buf *bf);
static int ath5k_txbuf_setup(struct ath5k_softc *sc,
struct ath5k_buf *bf,
- struct ath5k_txq *txq);
+ struct ath5k_txq *txq, int padsize);
static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
struct ath5k_buf *bf)
{
@@ -365,6 +368,7 @@ static void ath5k_beacon_send(struct ath5k_softc *sc);
static void ath5k_beacon_config(struct ath5k_softc *sc);
static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
static void ath5k_tasklet_beacon(unsigned long data);
+static void ath5k_tasklet_ani(unsigned long data);
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
@@ -544,8 +548,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
SET_IEEE80211_DEV(hw, &pdev->dev);
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM;
+ IEEE80211_HW_SIGNAL_DBM;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
@@ -830,6 +833,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc);
tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
+ tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
ret = ath5k_eeprom_read_mac(ah, mac);
if (ret) {
@@ -1138,8 +1142,6 @@ ath5k_mode_setup(struct ath5k_softc *sc)
struct ath5k_hw *ah = sc->ah;
u32 rfilt;
- ah->ah_op_mode = sc->opmode;
-
/* configure rx filter */
rfilt = sc->filter_flags;
ath5k_hw_set_rx_filter(ah, rfilt);
@@ -1148,8 +1150,9 @@ ath5k_mode_setup(struct ath5k_softc *sc)
ath5k_hw_set_bssid_mask(ah, sc->bssidmask);
/* configure operational mode */
- ath5k_hw_set_opmode(ah);
+ ath5k_hw_set_opmode(ah, sc->opmode);
+ ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d\n", sc->opmode);
ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}
@@ -1272,7 +1275,7 @@ static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
- struct ath5k_txq *txq)
+ struct ath5k_txq *txq, int padsize)
{
struct ath5k_hw *ah = sc->ah;
struct ath5k_desc *ds = bf->desc;
@@ -1324,7 +1327,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
sc->vif, pktlen, info));
}
ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
- ieee80211_get_hdrlen_from_skb(skb),
+ ieee80211_get_hdrlen_from_skb(skb), padsize,
get_hw_packet_type(skb),
(sc->power_level * 2),
hw_rate,
@@ -1636,7 +1639,6 @@ ath5k_txq_cleanup(struct ath5k_softc *sc)
sc->txqs[i].link);
}
}
- ieee80211_wake_queues(sc->hw); /* XXX move to callers */
for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
if (sc->txqs[i].setup)
@@ -1807,6 +1809,86 @@ ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
}
static void
+ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct ath5k_hw *ah = sc->ah;
+ struct ath_common *common = ath5k_hw_common(ah);
+
+ /* only beacons from our BSSID */
+ if (!ieee80211_is_beacon(mgmt->frame_control) ||
+ memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
+ return;
+
+ ah->ah_beacon_rssi_avg = ath5k_moving_average(ah->ah_beacon_rssi_avg,
+ rssi);
+
+ /* in IBSS mode we should keep RSSI statistics per neighbour */
+ /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
+}
+
+/*
+ * Compute padding position. skb must contain an IEEE 802.11 frame.
+ */
+static int ath5k_common_padpos(struct sk_buff *skb)
+{
+ struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
+ __le16 frame_control = hdr->frame_control;
+ int padpos = 24;
+
+ if (ieee80211_has_a4(frame_control)) {
+ padpos += ETH_ALEN;
+ }
+ if (ieee80211_is_data_qos(frame_control)) {
+ padpos += IEEE80211_QOS_CTL_LEN;
+ }
+
+ return padpos;
+}
+
+/*
+ * This function expects an 802.11 frame and returns the number of
+ * bytes added, or -1 if we don't have enough header room.
+ */
+
+static int ath5k_add_padding(struct sk_buff *skb)
+{
+ int padpos = ath5k_common_padpos(skb);
+ int padsize = padpos & 3;
+
+ if (padsize && skb->len>padpos) {
+
+ if (skb_headroom(skb) < padsize)
+ return -1;
+
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data+padsize, padpos);
+ return padsize;
+ }
+
+ return 0;
+}
+
+/*
+ * This function expects an 802.11 frame and returns the number of
+ * bytes removed.
+ */
+
+static int ath5k_remove_padding(struct sk_buff *skb)
+{
+ int padpos = ath5k_common_padpos(skb);
+ int padsize = padpos & 3;
+
+ if (padsize && skb->len>=padpos+padsize) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ return padsize;
+ }
+
+ return 0;
+}
+
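
Because 802.11 header lengths are always even, the "padpos & 3" used by both helpers above equals the number of bytes needed to reach the next 4-byte boundary. A standalone sanity check of that arithmetic (illustrative only, not part of the patch):

	#include <assert.h>

	int main(void)
	{
		assert((24 & 3) == 0);	/* plain data frame: already aligned */
		assert((26 & 3) == 2);	/* QoS data frame:   pad 2 bytes     */
		assert((30 & 3) == 2);	/* 4-address frame:  pad 2 bytes     */
		assert((32 & 3) == 0);	/* 4-address QoS:    already aligned */
		return 0;
	}
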
+static void
ath5k_tasklet_rx(unsigned long data)
{
struct ieee80211_rx_status *rxs;
@@ -1819,8 +1901,6 @@ ath5k_tasklet_rx(unsigned long data)
struct ath5k_buf *bf;
struct ath5k_desc *ds;
int ret;
- int hdrlen;
- int padsize;
int rx_flag;
spin_lock(&sc->rxbuflock);
@@ -1845,18 +1925,24 @@ ath5k_tasklet_rx(unsigned long data)
break;
else if (unlikely(ret)) {
ATH5K_ERR(sc, "error in processing rx descriptor\n");
+ sc->stats.rxerr_proc++;
spin_unlock(&sc->rxbuflock);
return;
}
- if (unlikely(rs.rs_more)) {
- ATH5K_WARN(sc, "unsupported jumbo\n");
- goto next;
- }
+ sc->stats.rx_all_count++;
if (unlikely(rs.rs_status)) {
- if (rs.rs_status & AR5K_RXERR_PHY)
+ if (rs.rs_status & AR5K_RXERR_CRC)
+ sc->stats.rxerr_crc++;
+ if (rs.rs_status & AR5K_RXERR_FIFO)
+ sc->stats.rxerr_fifo++;
+ if (rs.rs_status & AR5K_RXERR_PHY) {
+ sc->stats.rxerr_phy++;
+ if (rs.rs_phyerr > 0 && rs.rs_phyerr < 32)
+ sc->stats.rxerr_phy_code[rs.rs_phyerr]++;
goto next;
+ }
if (rs.rs_status & AR5K_RXERR_DECRYPT) {
/*
* Decrypt error. If the error occurred
@@ -1868,12 +1954,14 @@ ath5k_tasklet_rx(unsigned long data)
*
* XXX do key cache faulting
*/
+ sc->stats.rxerr_decrypt++;
if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
!(rs.rs_status & AR5K_RXERR_CRC))
goto accept;
}
if (rs.rs_status & AR5K_RXERR_MIC) {
rx_flag |= RX_FLAG_MMIC_ERROR;
+ sc->stats.rxerr_mic++;
goto accept;
}
@@ -1883,6 +1971,12 @@ ath5k_tasklet_rx(unsigned long data)
sc->opmode != NL80211_IFTYPE_MONITOR)
goto next;
}
+
+ if (unlikely(rs.rs_more)) {
+ sc->stats.rxerr_jumbo++;
+ goto next;
+
+ }
accept:
next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
@@ -1905,12 +1999,8 @@ accept:
* bytes and we can optimize this a bit. In addition, we must
* not try to remove padding from short control frames that do
* not have payload. */
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- padsize = ath5k_pad_size(hdrlen);
- if (padsize) {
- memmove(skb->data + padsize, skb->data, hdrlen);
- skb_pull(skb, padsize);
- }
+ ath5k_remove_padding(skb);
+
rxs = IEEE80211_SKB_RXCB(skb);
/*
@@ -1939,10 +2029,15 @@ accept:
rxs->freq = sc->curchan->center_freq;
rxs->band = sc->curband->band;
- rxs->noise = sc->ah->ah_noise_floor;
- rxs->signal = rxs->noise + rs.rs_rssi;
+ rxs->signal = sc->ah->ah_noise_floor + rs.rs_rssi;
rxs->antenna = rs.rs_antenna;
+
+ if (rs.rs_antenna > 0 && rs.rs_antenna < 5)
+ sc->stats.antenna_rx[rs.rs_antenna]++;
+ else
+ sc->stats.antenna_rx[0]++; /* invalid */
+
rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
@@ -1952,6 +2047,8 @@ accept:
ath5k_debug_dump_skb(sc, skb, "RX ", 0);
+ ath5k_update_beacon_rssi(sc, skb, rs.rs_rssi);
+
/* check beacons in IBSS mode */
if (sc->opmode == NL80211_IFTYPE_ADHOC)
ath5k_check_ibss_tsf(sc, skb, rxs);
@@ -1988,6 +2085,17 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
list_for_each_entry_safe(bf, bf0, &txq->q, list) {
ds = bf->desc;
+ /*
+ * It's possible that the hardware can say the buffer is
+ * completed when it hasn't yet loaded the ds_link from
+ * host memory and moved on. If there are more TX
+ * descriptors in the queue, wait for TXDP to change
+ * before processing this one.
+ */
+ if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr &&
+ !list_is_last(&bf->list, &txq->q))
+ break;
+
ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
if (unlikely(ret == -EINPROGRESS))
break;
@@ -1997,6 +2105,7 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
break;
}
+ sc->stats.tx_all_count++;
skb = bf->skb;
info = IEEE80211_SKB_CB(skb);
bf->skb = NULL;
@@ -2022,14 +2131,31 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
info->status.rates[ts.ts_final_idx].count++;
if (unlikely(ts.ts_status)) {
- sc->ll_stats.dot11ACKFailureCount++;
- if (ts.ts_status & AR5K_TXERR_FILT)
+ sc->stats.ack_fail++;
+ if (ts.ts_status & AR5K_TXERR_FILT) {
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ sc->stats.txerr_filt++;
+ }
+ if (ts.ts_status & AR5K_TXERR_XRETRY)
+ sc->stats.txerr_retry++;
+ if (ts.ts_status & AR5K_TXERR_FIFO)
+ sc->stats.txerr_fifo++;
} else {
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ts.ts_rssi;
}
+ /*
+ * Remove MAC header padding before giving the frame
+ * back to mac80211.
+ */
+ ath5k_remove_padding(skb);
+
+ if (ts.ts_antenna > 0 && ts.ts_antenna < 5)
+ sc->stats.antenna_tx[ts.ts_antenna]++;
+ else
+ sc->stats.antenna_tx[0]++; /* invalid */
+
ieee80211_tx_status(sc->hw, skb);
spin_lock(&sc->txbuflock);
@@ -2073,6 +2199,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
int ret = 0;
u8 antenna;
u32 flags;
+ const int padsize = 0;
bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
@@ -2120,7 +2247,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
* from tx power (value is in dB units already) */
ds->ds_data = bf->skbaddr;
ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
- ieee80211_get_hdrlen_from_skb(skb),
+ ieee80211_get_hdrlen_from_skb(skb), padsize,
AR5K_PKT_TYPE_BEACON, (sc->power_level * 2),
ieee80211_get_tx_rate(sc->hw, info)->hw_value,
1, AR5K_TXKEYIX_INVALID,
@@ -2407,9 +2534,6 @@ ath5k_init(struct ath5k_softc *sc)
*/
ath5k_stop_locked(sc);
- /* Set PHY calibration interval */
- ah->ah_cal_intval = ath5k_calinterval;
-
/*
* The basic interface to setting the hardware in a good
* state is ``reset''. On return the hardware is known to
@@ -2421,7 +2545,8 @@ ath5k_init(struct ath5k_softc *sc)
sc->curband = &sc->sbands[sc->curchan->band];
sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
- AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_SWI;
+ AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
+
ret = ath5k_reset(sc, NULL);
if (ret)
goto done;
@@ -2435,8 +2560,7 @@ ath5k_init(struct ath5k_softc *sc)
for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
ath5k_hw_reset_key(ah, i);
- /* Set ack to be sent at low bit-rates */
- ath5k_hw_set_ack_bitrate_high(ah, false);
+ ath5k_hw_set_ack_bitrate_high(ah, true);
ret = 0;
done:
mmiowb();
@@ -2533,12 +2657,33 @@ ath5k_stop_hw(struct ath5k_softc *sc)
tasklet_kill(&sc->restq);
tasklet_kill(&sc->calib);
tasklet_kill(&sc->beacontq);
+ tasklet_kill(&sc->ani_tasklet);
ath5k_rfkill_hw_stop(sc->ah);
return ret;
}
+static void
+ath5k_intr_calibration_poll(struct ath5k_hw *ah)
+{
+ if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
+ /* run ANI only when full calibration is not active */
+ ah->ah_cal_next_ani = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
+ tasklet_schedule(&ah->ah_sc->ani_tasklet);
+
+ } else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
+ ah->ah_cal_next_full = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+ tasklet_schedule(&ah->ah_sc->calib);
+ }
+ /* we could use SWI to generate enough interrupts to meet our
+ * calibration interval requirements, if necessary:
+ * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
+}
+
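
The poll above keeps two deadlines against the jiffies clock and re-arms whichever one has expired, so each calibration runs at most once per interval. The same pattern in isolation, as a hypothetical helper (a sketch only; interval_due is not a real driver function and assumes kernel context with <linux/jiffies.h>):

	/* hypothetical helper, not part of the patch: return true (and re-arm
	 * the deadline) at most once every interval_ms */
	static inline bool interval_due(unsigned long *deadline,
					unsigned int interval_ms)
	{
		if (!time_is_before_eq_jiffies(*deadline))
			return false;		/* deadline not reached yet */

		*deadline = jiffies + msecs_to_jiffies(interval_ms);
		return true;			/* caller schedules its tasklet */
	}
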
static irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
@@ -2562,7 +2707,20 @@ ath5k_intr(int irq, void *dev_id)
*/
tasklet_schedule(&sc->restq);
} else if (unlikely(status & AR5K_INT_RXORN)) {
- tasklet_schedule(&sc->restq);
+ /*
+ * Receive buffers are full. Either the bus is busy or
+ * the CPU is not fast enough to process all received
+ * frames.
+ * Older chipsets need a reset to come out of this
+ * condition, but we treat it as RX for newer chips.
+ * We don't know exactly which versions need a reset -
+ * this guess is copied from the HAL.
+ */
+ sc->stats.rxorn_intr++;
+ if (ah->ah_mac_srev < AR5K_SREV_AR5212)
+ tasklet_schedule(&sc->restq);
+ else
+ tasklet_schedule(&sc->rxtq);
} else {
if (status & AR5K_INT_SWBA) {
tasklet_hi_schedule(&sc->beacontq);
@@ -2587,15 +2745,10 @@ ath5k_intr(int irq, void *dev_id)
if (status & AR5K_INT_BMISS) {
/* TODO */
}
- if (status & AR5K_INT_SWI) {
- tasklet_schedule(&sc->calib);
- }
if (status & AR5K_INT_MIB) {
- /*
- * These stats are also used for ANI i think
- * so how about updating them more often ?
- */
- ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
+ sc->stats.mib_intr++;
+ ath5k_hw_update_mib_counters(ah);
+ ath5k_ani_mib_intr(ah);
}
if (status & AR5K_INT_GPIO)
tasklet_schedule(&sc->rf_kill.toggleq);
@@ -2606,7 +2759,7 @@ ath5k_intr(int irq, void *dev_id)
if (unlikely(!counter))
ATH5K_WARN(sc, "too many interrupts, giving up for now\n");
- ath5k_hw_calibration_poll(ah);
+ ath5k_intr_calibration_poll(ah);
return IRQ_HANDLED;
}
@@ -2630,8 +2783,7 @@ ath5k_tasklet_calibrate(unsigned long data)
struct ath5k_hw *ah = sc->ah;
/* Only full calibration for now */
- if (ah->ah_swi_mask != AR5K_SWI_FULL_CALIBRATION)
- return;
+ ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
/* Stop queues so that calibration
* doesn't interfere with tx */
@@ -2647,18 +2799,29 @@ ath5k_tasklet_calibrate(unsigned long data)
* to load new gain values.
*/
ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
- ath5k_reset_wake(sc);
+ ath5k_reset(sc, sc->curchan);
}
if (ath5k_hw_phy_calibrate(ah, sc->curchan))
ATH5K_ERR(sc, "calibration of channel %u failed\n",
ieee80211_frequency_to_channel(
sc->curchan->center_freq));
- ah->ah_swi_mask = 0;
-
/* Wake queues */
ieee80211_wake_queues(sc->hw);
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+}
+
+
+static void
+ath5k_tasklet_ani(unsigned long data)
+{
+ struct ath5k_softc *sc = (void *)data;
+ struct ath5k_hw *ah = sc->ah;
+
+ ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
+ ath5k_ani_calibration(ah);
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
}
@@ -2680,7 +2843,6 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath5k_softc *sc = hw->priv;
struct ath5k_buf *bf;
unsigned long flags;
- int hdrlen;
int padsize;
ath5k_debug_dump_skb(sc, skb, "TX ", 1);
@@ -2692,17 +2854,11 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
* the hardware expects the header padded to 4 byte boundaries
* if this is not the case we add the padding after the header
*/
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- padsize = ath5k_pad_size(hdrlen);
- if (padsize) {
-
- if (skb_headroom(skb) < padsize) {
- ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough"
- " headroom to pad %d\n", hdrlen, padsize);
- goto drop_packet;
- }
- skb_push(skb, padsize);
- memmove(skb->data, skb->data+padsize, hdrlen);
+ padsize = ath5k_add_padding(skb);
+ if (padsize < 0) {
+ ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
+ " headroom to pad");
+ goto drop_packet;
}
spin_lock_irqsave(&sc->txbuflock, flags);
@@ -2721,7 +2877,7 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
bf->skb = skb;
- if (ath5k_txbuf_setup(sc, bf, txq)) {
+ if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
bf->skb = NULL;
spin_lock_irqsave(&sc->txbuflock, flags);
list_add_tail(&bf->list, &sc->txbuf);
@@ -2768,6 +2924,8 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
goto err;
}
+ ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode);
+
/*
* Change channels and update the h/w rate map if we're switching;
* e.g. 11a to 11b/g.
@@ -2836,6 +2994,8 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
goto end;
}
+ ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", sc->opmode);
+
ath5k_hw_set_lladdr(sc->ah, vif->addr);
ath5k_mode_setup(sc);
@@ -2906,7 +3066,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
* then we must allow the user to set how many tx antennas we
* have available
*/
- ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
+ ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
unlock:
mutex_unlock(&sc->lock);
@@ -2914,22 +3074,20 @@ unlock:
}
static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
u32 mfilt[2], val;
- int i;
u8 pos;
+ struct netdev_hw_addr *ha;
mfilt[0] = 0;
mfilt[1] = 1;
- for (i = 0; i < mc_count; i++) {
- if (!mclist)
- break;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
/* calculate XOR of eight 6-bit values */
- val = get_unaligned_le32(mclist->dmi_addr + 0);
+ val = get_unaligned_le32(ha->addr + 0);
pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
- val = get_unaligned_le32(mclist->dmi_addr + 3);
+ val = get_unaligned_le32(ha->addr + 3);
pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
pos &= 0x3f;
mfilt[pos / 32] |= (1 << (pos % 32));
@@ -2937,8 +3095,7 @@ static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
* but not sure, needs testing, if we do use this we'd
* neet to inform below to not reset the mcast */
/* ath5k_hw_set_mcast_filterindex(ah,
- * mclist->dmi_addr[5]); */
- mclist = mclist->next;
+ * ha->addr[5]); */
}
return ((u64)(mfilt[1]) << 32) | mfilt[0];
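
The multicast filter folds each address into a single 6-bit hash position by XORing eight 6-bit chunks of the 48-bit address, then sets that bit in the 64-bit filter. A standalone userspace sketch of the same folding (not part of the patch; le32_at is a stand-in for get_unaligned_le32(), and the array is padded so the read at addr + 3 stays in bounds; the extra byte never reaches the low 6 bits anyway):

	#include <stdio.h>

	/* stand-in for get_unaligned_le32() */
	static unsigned int le32_at(const unsigned char *p)
	{
		return p[0] | p[1] << 8 | (unsigned int)p[2] << 16 |
		       (unsigned int)p[3] << 24;
	}

	int main(void)
	{
		/* padded to 8 bytes so the read at addr + 3 stays in bounds */
		const unsigned char addr[8] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
		unsigned int mfilt[2] = { 0, 0 };
		unsigned int val, pos;

		val = le32_at(addr + 0);
		pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
		val = le32_at(addr + 3);
		pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
		pos &= 0x3f;				/* 6-bit position: 0..63 */
		mfilt[pos / 32] |= 1u << (pos % 32);

		printf("hash bit %d -> filter %08x:%08x\n", pos, mfilt[1], mfilt[0]);
		return 0;
	}
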
@@ -3124,12 +3281,30 @@ ath5k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
/* Force update */
- ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
+ ath5k_hw_update_mib_counters(sc->ah);
+
+ stats->dot11ACKFailureCount = sc->stats.ack_fail;
+ stats->dot11RTSFailureCount = sc->stats.rts_fail;
+ stats->dot11RTSSuccessCount = sc->stats.rts_ok;
+ stats->dot11FCSErrorCount = sc->stats.fcs_error;
+
+ return 0;
+}
+
+static int ath5k_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath5k_softc *sc = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (idx != 0)
+ return -ENOENT;
- memcpy(stats, &sc->ll_stats, sizeof(sc->ll_stats));
+ survey->channel = conf->channel;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = sc->ah->ah_noise_floor;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 7e1a88a..56221bc 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -50,6 +50,7 @@
#include "ath5k.h"
#include "debug.h"
+#include "ani.h"
#include "../regd.h"
#include "../ath.h"
@@ -105,6 +106,38 @@ struct ath5k_rfkill {
struct tasklet_struct toggleq;
};
+/* statistics */
+struct ath5k_statistics {
+ /* antenna use */
+ unsigned int antenna_rx[5]; /* frames count per antenna RX */
+ unsigned int antenna_tx[5]; /* frames count per antenna TX */
+
+ /* frame errors */
+ unsigned int rx_all_count; /* all RX frames, including errors */
+ unsigned int tx_all_count; /* all TX frames, including errors */
+ unsigned int rxerr_crc;
+ unsigned int rxerr_phy;
+ unsigned int rxerr_phy_code[32];
+ unsigned int rxerr_fifo;
+ unsigned int rxerr_decrypt;
+ unsigned int rxerr_mic;
+ unsigned int rxerr_proc;
+ unsigned int rxerr_jumbo;
+ unsigned int txerr_retry;
+ unsigned int txerr_fifo;
+ unsigned int txerr_filt;
+
+ /* MIB counters */
+ unsigned int ack_fail;
+ unsigned int rts_fail;
+ unsigned int rts_ok;
+ unsigned int fcs_error;
+ unsigned int beacons;
+
+ unsigned int mib_intr;
+ unsigned int rxorn_intr;
+};
+
#if CHAN_DEBUG
#define ATH_CHAN_MAX (26+26+26+200+200)
#else
@@ -117,7 +150,6 @@ struct ath5k_softc {
struct pci_dev *pdev; /* for dma mapping */
void __iomem *iobase; /* address of the device */
struct mutex lock; /* dev-level lock */
- struct ieee80211_low_level_stats ll_stats;
struct ieee80211_hw *hw; /* IEEE 802.11 common */
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
struct ieee80211_channel channels[ATH_CHAN_MAX];
@@ -191,6 +223,11 @@ struct ath5k_softc {
int power_level; /* Requested tx power in dbm */
bool assoc; /* associate state */
bool enable_beacon; /* true if beacons are on */
+
+ struct ath5k_statistics stats;
+
+ struct ath5k_ani_state ani_state;
+ struct tasklet_struct ani_tasklet; /* ANI calibration */
};
#define ath5k_hw_hasbssidmask(_ah) \
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 367a6c7..74f0071 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -102,9 +102,6 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
}
}
- /* GPIO */
- ah->ah_gpio_npins = AR5K_NUM_GPIO;
-
/* Set number of supported TX queues */
if (ah->ah_version == AR5K_AR5210)
ah->ah_capabilities.cap_queues.q_tx_num =
@@ -112,6 +109,12 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
else
ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
+ /* newer hardware has PHY error counters */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
+ ah->ah_capabilities.cap_has_phyerr_counters = true;
+ else
+ ah->ah_capabilities.cap_has_phyerr_counters = false;
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 747508c..6fb5c5f 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -69,6 +69,7 @@ module_param_named(debug, ath5k_debug, uint, 0);
#include <linux/seq_file.h>
#include "reg.h"
+#include "ani.h"
static struct dentry *ath5k_global_debugfs;
@@ -307,6 +308,7 @@ static const struct {
{ ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
{ ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
{ ATH5K_DEBUG_TRACE, "trace", "trace function calls" },
+ { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
{ ATH5K_DEBUG_ANY, "all", "show all debug levels" },
};
@@ -364,6 +366,369 @@ static const struct file_operations fops_debug = {
};
+/* debugfs: antenna */
+
+static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ char buf[700];
+ unsigned int len = 0;
+ unsigned int i;
+ unsigned int v;
+
+ len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
+ sc->ah->ah_ant_mode);
+ len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
+ sc->ah->ah_def_ant);
+ len += snprintf(buf+len, sizeof(buf)-len, "tx antenna\t%d\n",
+ sc->ah->ah_tx_ant);
+
+ len += snprintf(buf+len, sizeof(buf)-len, "\nANTENNA\t\tRX\tTX\n");
+ for (i = 1; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "[antenna %d]\t%d\t%d\n",
+ i, sc->stats.antenna_rx[i], sc->stats.antenna_tx[i]);
+ }
+ len += snprintf(buf+len, sizeof(buf)-len, "[invalid]\t%d\t%d\n",
+ sc->stats.antenna_rx[0], sc->stats.antenna_tx[0]);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_DEFAULT_ANTENNA);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_STA_ID1);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n",
+ (v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_STA_ID1_DESC_ANTENNA\t%d\n",
+ (v & AR5K_STA_ID1_DESC_ANTENNA) != 0);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_STA_ID1_RTS_DEF_ANTENNA\t%d\n",
+ (v & AR5K_STA_ID1_RTS_DEF_ANTENNA) != 0);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n",
+ (v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_AGCCTL);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n",
+ (v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_RESTART);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHY_RESTART_DIV_GC\t\t%x\n",
+ (v & AR5K_PHY_RESTART_DIV_GC) >> AR5K_PHY_RESTART_DIV_GC_S);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_FAST_ANT_DIV);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
+ (v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_antenna(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ unsigned int i;
+ char buf[20];
+
+ if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ return -EFAULT;
+
+ if (strncmp(buf, "diversity", 9) == 0) {
+ ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
+ printk(KERN_INFO "ath5k debug: enable diversity\n");
+ } else if (strncmp(buf, "fixed-a", 7) == 0) {
+ ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_A);
+ printk(KERN_INFO "ath5k debugfs: fixed antenna A\n");
+ } else if (strncmp(buf, "fixed-b", 7) == 0) {
+ ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_B);
+ printk(KERN_INFO "ath5k debug: fixed antenna B\n");
+ } else if (strncmp(buf, "clear", 5) == 0) {
+ for (i = 0; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
+ sc->stats.antenna_rx[i] = 0;
+ sc->stats.antenna_tx[i] = 0;
+ }
+ printk(KERN_INFO "ath5k debug: cleared antenna stats\n");
+ }
+ return count;
+}
+
+static const struct file_operations fops_antenna = {
+ .read = read_file_antenna,
+ .write = write_file_antenna,
+ .open = ath5k_debugfs_open,
+ .owner = THIS_MODULE,
+};
+
+
+/* debugfs: frameerrors */
+
+static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ struct ath5k_statistics *st = &sc->stats;
+ char buf[700];
+ unsigned int len = 0;
+ int i;
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "RX\n---------------------\n");
+ len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%d\t(%d%%)\n",
+ st->rxerr_crc,
+ st->rx_all_count > 0 ?
+ st->rxerr_crc*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "PHY\t%d\t(%d%%)\n",
+ st->rxerr_phy,
+ st->rx_all_count > 0 ?
+ st->rxerr_phy*100/st->rx_all_count : 0);
+ for (i = 0; i < 32; i++) {
+ if (st->rxerr_phy_code[i])
+ len += snprintf(buf+len, sizeof(buf)-len,
+ " phy_err[%d]\t%d\n",
+ i, st->rxerr_phy_code[i]);
+ }
+
+ len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
+ st->rxerr_fifo,
+ st->rx_all_count > 0 ?
+ st->rxerr_fifo*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "decrypt\t%d\t(%d%%)\n",
+ st->rxerr_decrypt,
+ st->rx_all_count > 0 ?
+ st->rxerr_decrypt*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "MIC\t%d\t(%d%%)\n",
+ st->rxerr_mic,
+ st->rx_all_count > 0 ?
+ st->rxerr_mic*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "process\t%d\t(%d%%)\n",
+ st->rxerr_proc,
+ st->rx_all_count > 0 ?
+ st->rxerr_proc*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "jumbo\t%d\t(%d%%)\n",
+ st->rxerr_jumbo,
+ st->rx_all_count > 0 ?
+ st->rxerr_jumbo*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%d]\n",
+ st->rx_all_count);
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nTX\n---------------------\n");
+ len += snprintf(buf+len, sizeof(buf)-len, "retry\t%d\t(%d%%)\n",
+ st->txerr_retry,
+ st->tx_all_count > 0 ?
+ st->txerr_retry*100/st->tx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
+ st->txerr_fifo,
+ st->tx_all_count > 0 ?
+ st->txerr_fifo*100/st->tx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "filter\t%d\t(%d%%)\n",
+ st->txerr_filt,
+ st->tx_all_count > 0 ?
+ st->txerr_filt*100/st->tx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%d]\n",
+ st->tx_all_count);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_frameerrors(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ struct ath5k_statistics *st = &sc->stats;
+ char buf[20];
+
+ if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ return -EFAULT;
+
+ if (strncmp(buf, "clear", 5) == 0) {
+ st->rxerr_crc = 0;
+ st->rxerr_phy = 0;
+ st->rxerr_fifo = 0;
+ st->rxerr_decrypt = 0;
+ st->rxerr_mic = 0;
+ st->rxerr_proc = 0;
+ st->rxerr_jumbo = 0;
+ st->rx_all_count = 0;
+ st->txerr_retry = 0;
+ st->txerr_fifo = 0;
+ st->txerr_filt = 0;
+ st->tx_all_count = 0;
+ printk(KERN_INFO "ath5k debug: cleared frameerrors stats\n");
+ }
+ return count;
+}
+
+static const struct file_operations fops_frameerrors = {
+ .read = read_file_frameerrors,
+ .write = write_file_frameerrors,
+ .open = ath5k_debugfs_open,
+ .owner = THIS_MODULE,
+};
+
+
+/* debugfs: ani */
+
+static ssize_t read_file_ani(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ struct ath5k_statistics *st = &sc->stats;
+ struct ath5k_ani_state *as = &sc->ani_state;
+
+ char buf[700];
+ unsigned int len = 0;
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "HW has PHY error counters:\t%s\n",
+ sc->ah->ah_capabilities.cap_has_phyerr_counters ?
+ "yes" : "no");
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "HW max spur immunity level:\t%d\n",
+ as->max_spur_level);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nANI state\n--------------------------------------------\n");
+ len += snprintf(buf+len, sizeof(buf)-len, "operating mode:\t\t\t");
+ switch (as->ani_mode) {
+ case ATH5K_ANI_MODE_OFF:
+ len += snprintf(buf+len, sizeof(buf)-len, "OFF\n");
+ break;
+ case ATH5K_ANI_MODE_MANUAL_LOW:
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "MANUAL LOW\n");
+ break;
+ case ATH5K_ANI_MODE_MANUAL_HIGH:
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "MANUAL HIGH\n");
+ break;
+ case ATH5K_ANI_MODE_AUTO:
+ len += snprintf(buf+len, sizeof(buf)-len, "AUTO\n");
+ break;
+ default:
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "??? (not good)\n");
+ break;
+ }
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "noise immunity level:\t\t%d\n",
+ as->noise_imm_level);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "spur immunity level:\t\t%d\n",
+ as->spur_level);
+ len += snprintf(buf+len, sizeof(buf)-len, "firstep level:\t\t\t%d\n",
+ as->firstep_level);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "OFDM weak signal detection:\t%s\n",
+ as->ofdm_weak_sig ? "on" : "off");
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "CCK weak signal detection:\t%s\n",
+ as->cck_weak_sig ? "on" : "off");
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nMIB INTERRUPTS:\t\t%u\n",
+ st->mib_intr);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "beacon RSSI average:\t%d\n",
+ sc->ah->ah_beacon_rssi_avg.avg);
+ len += snprintf(buf+len, sizeof(buf)-len, "profcnt tx\t\t%u\t(%d%%)\n",
+ as->pfc_tx,
+ as->pfc_cycles > 0 ?
+ as->pfc_tx*100/as->pfc_cycles : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "profcnt rx\t\t%u\t(%d%%)\n",
+ as->pfc_rx,
+ as->pfc_cycles > 0 ?
+ as->pfc_rx*100/as->pfc_cycles : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "profcnt busy\t\t%u\t(%d%%)\n",
+ as->pfc_busy,
+ as->pfc_cycles > 0 ?
+ as->pfc_busy*100/as->pfc_cycles : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "profcnt cycles\t\t%u\n",
+ as->pfc_cycles);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "listen time\t\t%d\tlast: %d\n",
+ as->listen_time, as->last_listen);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "OFDM errors\t\t%u\tlast: %u\tsum: %u\n",
+ as->ofdm_errors, as->last_ofdm_errors,
+ as->sum_ofdm_errors);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "CCK errors\t\t%u\tlast: %u\tsum: %u\n",
+ as->cck_errors, as->last_cck_errors,
+ as->sum_cck_errors);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHYERR_CNT1\t%x\t(=%d)\n",
+ ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1),
+ ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
+ ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1)));
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHYERR_CNT2\t%x\t(=%d)\n",
+ ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2),
+ ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
+ ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2)));
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_ani(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ char buf[20];
+
+ if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ return -EFAULT;
+
+ if (strncmp(buf, "sens-low", 8) == 0) {
+ ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_HIGH);
+ } else if (strncmp(buf, "sens-high", 9) == 0) {
+ ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_LOW);
+ } else if (strncmp(buf, "ani-off", 7) == 0) {
+ ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_OFF);
+ } else if (strncmp(buf, "ani-on", 6) == 0) {
+ ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_AUTO);
+ } else if (strncmp(buf, "noise-low", 9) == 0) {
+ ath5k_ani_set_noise_immunity_level(sc->ah, 0);
+ } else if (strncmp(buf, "noise-high", 10) == 0) {
+ ath5k_ani_set_noise_immunity_level(sc->ah,
+ ATH5K_ANI_MAX_NOISE_IMM_LVL);
+ } else if (strncmp(buf, "spur-low", 8) == 0) {
+ ath5k_ani_set_spur_immunity_level(sc->ah, 0);
+ } else if (strncmp(buf, "spur-high", 9) == 0) {
+ ath5k_ani_set_spur_immunity_level(sc->ah,
+ sc->ani_state.max_spur_level);
+ } else if (strncmp(buf, "fir-low", 7) == 0) {
+ ath5k_ani_set_firstep_level(sc->ah, 0);
+ } else if (strncmp(buf, "fir-high", 8) == 0) {
+ ath5k_ani_set_firstep_level(sc->ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
+ } else if (strncmp(buf, "ofdm-off", 8) == 0) {
+ ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, false);
+ } else if (strncmp(buf, "ofdm-on", 7) == 0) {
+ ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, true);
+ } else if (strncmp(buf, "cck-off", 7) == 0) {
+ ath5k_ani_set_cck_weak_signal_detection(sc->ah, false);
+ } else if (strncmp(buf, "cck-on", 6) == 0) {
+ ath5k_ani_set_cck_weak_signal_detection(sc->ah, true);
+ }
+ return count;
+}
+
+static const struct file_operations fops_ani = {
+ .read = read_file_ani,
+ .write = write_file_ani,
+ .open = ath5k_debugfs_open,
+ .owner = THIS_MODULE,
+};
+
+
/* init */
void
@@ -393,6 +758,20 @@ ath5k_debug_init_device(struct ath5k_softc *sc)
sc->debug.debugfs_reset = debugfs_create_file("reset", S_IWUSR,
sc->debug.debugfs_phydir, sc, &fops_reset);
+
+ sc->debug.debugfs_antenna = debugfs_create_file("antenna",
+ S_IWUSR | S_IRUSR,
+ sc->debug.debugfs_phydir, sc, &fops_antenna);
+
+ sc->debug.debugfs_frameerrors = debugfs_create_file("frameerrors",
+ S_IWUSR | S_IRUSR,
+ sc->debug.debugfs_phydir, sc,
+ &fops_frameerrors);
+
+ sc->debug.debugfs_ani = debugfs_create_file("ani",
+ S_IWUSR | S_IRUSR,
+ sc->debug.debugfs_phydir, sc,
+ &fops_ani);
}
void
@@ -408,6 +787,9 @@ ath5k_debug_finish_device(struct ath5k_softc *sc)
debugfs_remove(sc->debug.debugfs_registers);
debugfs_remove(sc->debug.debugfs_beacon);
debugfs_remove(sc->debug.debugfs_reset);
+ debugfs_remove(sc->debug.debugfs_antenna);
+ debugfs_remove(sc->debug.debugfs_frameerrors);
+ debugfs_remove(sc->debug.debugfs_ani);
debugfs_remove(sc->debug.debugfs_phydir);
}
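The commands handled by write_file_frameerrors() and write_file_ani() above are plain strings written to the new debugfs entries. A minimal userspace sketch of driving them follows (illustrative only, not part of the patch; the per-PHY debugfs path is an assumption based on the usual mac80211 layout, so adjust "phy0" as needed):

/* Illustrative userspace sketch, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int ath5k_dbg_write(const char *file, const char *cmd)
{
	char path[128];
	ssize_t n;
	int fd;

	/* assumed location of the per-PHY ath5k debugfs directory */
	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/ieee80211/phy0/ath5k/%s", file);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return -1;
	}
	n = write(fd, cmd, strlen(cmd));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	ath5k_dbg_write("frameerrors", "clear");	/* reset RX/TX error stats */
	ath5k_dbg_write("ani", "ani-off");		/* ATH5K_ANI_MODE_OFF */
	ath5k_dbg_write("ani", "ani-on");		/* back to ATH5K_ANI_MODE_AUTO */
	return 0;
}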
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 66f69f0..ddd5b3a 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -74,6 +74,9 @@ struct ath5k_dbg_info {
struct dentry *debugfs_registers;
struct dentry *debugfs_beacon;
struct dentry *debugfs_reset;
+ struct dentry *debugfs_antenna;
+ struct dentry *debugfs_frameerrors;
+ struct dentry *debugfs_ani;
};
/**
@@ -113,6 +116,7 @@ enum ath5k_debug_level {
ATH5K_DEBUG_DUMP_TX = 0x00000200,
ATH5K_DEBUG_DUMPBANDS = 0x00000400,
ATH5K_DEBUG_TRACE = 0x00001000,
+ ATH5K_DEBUG_ANI = 0x00002000,
ATH5K_DEBUG_ANY = 0xffffffff
};
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index dc30a2b..7d7b646 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -35,7 +35,8 @@
*/
static int
ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- unsigned int pkt_len, unsigned int hdr_len, enum ath5k_pkt_type type,
+ unsigned int pkt_len, unsigned int hdr_len, int padsize,
+ enum ath5k_pkt_type type,
unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
unsigned int rtscts_rate, unsigned int rtscts_duration)
@@ -71,7 +72,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
/* Verify and set frame length */
/* remove padding we might have added before */
- frame_len = pkt_len - ath5k_pad_size(hdr_len) + FCS_LEN;
+ frame_len = pkt_len - padsize + FCS_LEN;
if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
return -EINVAL;
@@ -100,7 +101,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN);
}
- /*Diferences between 5210-5211*/
+ /*Differences between 5210-5211*/
if (ah->ah_version == AR5K_AR5210) {
switch (type) {
case AR5K_PKT_TYPE_BEACON:
@@ -165,6 +166,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
*/
static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
+ int padsize,
enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
unsigned int tx_tries0, unsigned int key_index,
unsigned int antenna_mode, unsigned int flags,
@@ -206,7 +208,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
/* Verify and set frame length */
/* remove padding we might have added before */
- frame_len = pkt_len - ath5k_pad_size(hdr_len) + FCS_LEN;
+ frame_len = pkt_len - padsize + FCS_LEN;
if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
return -EINVAL;
@@ -229,7 +231,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
tx_ctl->tx_control_1 |= AR5K_REG_SM(type,
AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
- tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0 + AR5K_TUNE_HWTXTRIES,
+ tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0,
AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
@@ -643,6 +645,7 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
rs->rs_status |= AR5K_RXERR_PHY;
rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1,
AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
+ ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
}
if (rx_status->rx_status_1 &
@@ -668,12 +671,6 @@ int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
ah->ah_version != AR5K_AR5212)
return -ENOTSUPP;
- /* XXX: What is this magic value and where is it used ? */
- if (ah->ah_version == AR5K_AR5212)
- ah->ah_magic = AR5K_EEPROM_MAGIC_5212;
- else if (ah->ah_version == AR5K_AR5211)
- ah->ah_magic = AR5K_EEPROM_MAGIC_5211;
-
if (ah->ah_version == AR5K_AR5212) {
ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index 56158c8..64538fb 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -112,15 +112,32 @@ struct ath5k_hw_rx_error {
#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00
#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8
-/* PHY Error codes */
-#define AR5K_DESC_RX_PHY_ERROR_NONE 0x00
-#define AR5K_DESC_RX_PHY_ERROR_TIMING 0x20
-#define AR5K_DESC_RX_PHY_ERROR_PARITY 0x40
-#define AR5K_DESC_RX_PHY_ERROR_RATE 0x60
-#define AR5K_DESC_RX_PHY_ERROR_LENGTH 0x80
-#define AR5K_DESC_RX_PHY_ERROR_64QAM 0xa0
-#define AR5K_DESC_RX_PHY_ERROR_SERVICE 0xc0
-#define AR5K_DESC_RX_PHY_ERROR_TRANSMITOVR 0xe0
+/**
+ * enum ath5k_phy_error_code - PHY Error codes
+ */
+enum ath5k_phy_error_code {
+ AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun */
+ AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */
+ AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */
+ AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */
+ AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */
+ AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect */
+ AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */
+ AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */
+ /* these are specific to the 5212 */
+ AR5K_RX_PHY_ERROR_OFDM_TIMING = 17,
+ AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18,
+ AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19,
+ AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL = 20,
+ AR5K_RX_PHY_ERROR_OFDM_POWER_DROP = 21,
+ AR5K_RX_PHY_ERROR_OFDM_SERVICE = 22,
+ AR5K_RX_PHY_ERROR_OFDM_RESTART = 23,
+ AR5K_RX_PHY_ERROR_CCK_TIMING = 25,
+ AR5K_RX_PHY_ERROR_CCK_HEADER_CRC = 26,
+ AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL = 27,
+ AR5K_RX_PHY_ERROR_CCK_SERVICE = 30,
+ AR5K_RX_PHY_ERROR_CCK_RESTART = 31,
+};
/*
* 5210/5211 hardware 2-word TX control descriptor
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 67665cd..ed02636 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -331,7 +331,8 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
ee->ee_x_gain[mode] = (val >> 1) & 0xf;
ee->ee_xpd[mode] = val & 0x1;
- if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0)
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
+ mode != AR5K_EEPROM_MODE_11B)
ee->ee_fixed_bias[mode] = (val >> 13) & 0x1;
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) {
@@ -341,6 +342,7 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
if (mode == AR5K_EEPROM_MODE_11A)
ee->ee_xr_power[mode] = val & 0x3f;
else {
+ /* b_DB_11[bg] and b_OB_11[bg] */
ee->ee_ob[mode][0] = val & 0x7;
ee->ee_db[mode][0] = (val >> 3) & 0x7;
}
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 473a483..c4a6d5f 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -24,9 +24,6 @@
* SERDES infos are present */
#define AR5K_EEPROM_MAGIC 0x003d /* EEPROM Magic number */
#define AR5K_EEPROM_MAGIC_VALUE 0x5aa5 /* Default - found on EEPROM */
-#define AR5K_EEPROM_MAGIC_5212 0x0000145c /* 5212 */
-#define AR5K_EEPROM_MAGIC_5211 0x0000145b /* 5211 */
-#define AR5K_EEPROM_MAGIC_5210 0x0000145a /* 5210 */
#define AR5K_EEPROM_IS_HB63 0x000b /* Talon detect */
@@ -78,9 +75,9 @@
#define AR5K_EEPROM_HDR_11A(_v) (((_v) >> AR5K_EEPROM_MODE_11A) & 0x1)
#define AR5K_EEPROM_HDR_11B(_v) (((_v) >> AR5K_EEPROM_MODE_11B) & 0x1)
#define AR5K_EEPROM_HDR_11G(_v) (((_v) >> AR5K_EEPROM_MODE_11G) & 0x1)
-#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz (?) */
-#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for a/XR mode (eeprom_init) */
-#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7)
+#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz */
+#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for < 2W power consumption */
+#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7) /* Device type (1 Cardbus, 2 PCI, 3 MiniPCI, 4 AP) */
#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */
#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5Ghz */
@@ -101,7 +98,7 @@
#define AR5K_EEPROM_MISC1 AR5K_EEPROM_INFO(5)
#define AR5K_EEPROM_TARGET_PWRSTART(_v) ((_v) & 0xfff)
-#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1)
+#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1) /* has 32KHz crystal for sleep mode */
#define AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(_v) (((_v) >> 15) & 0x1)
#define AR5K_EEPROM_MISC2 AR5K_EEPROM_INFO(6)
@@ -114,26 +111,27 @@
#define AR5K_EEPROM_MISC4 AR5K_EEPROM_INFO(8)
#define AR5K_EEPROM_CAL_DATA_START(_v) (((_v) >> 4) & 0xfff)
-#define AR5K_EEPROM_MASK_R0(_v) (((_v) >> 2) & 0x3)
-#define AR5K_EEPROM_MASK_R1(_v) ((_v) & 0x3)
+#define AR5K_EEPROM_MASK_R0(_v) (((_v) >> 2) & 0x3) /* modes supported by radio 0 (bit 1: G, bit 2: A) */
+#define AR5K_EEPROM_MASK_R1(_v) ((_v) & 0x3) /* modes supported by radio 1 (bit 1: G, bit 2: A) */
#define AR5K_EEPROM_MISC5 AR5K_EEPROM_INFO(9)
-#define AR5K_EEPROM_COMP_DIS(_v) ((_v) & 0x1)
-#define AR5K_EEPROM_AES_DIS(_v) (((_v) >> 1) & 0x1)
-#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1)
-#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1)
-#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf)
-#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1)
-#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf)
+#define AR5K_EEPROM_COMP_DIS(_v) ((_v) & 0x1) /* disable compression */
+#define AR5K_EEPROM_AES_DIS(_v) (((_v) >> 1) & 0x1) /* disable AES */
+#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1) /* disable fast frames */
+#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1) /* disable bursting */
+#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf) /* max number of QCUs. defaults to 10 */
+#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1) /* enable heavy clipping */
+#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf) /* key cache size. defaults to 128 */
#define AR5K_EEPROM_MISC6 AR5K_EEPROM_INFO(10)
-#define AR5K_EEPROM_TX_CHAIN_DIS ((_v) & 0x8)
-#define AR5K_EEPROM_RX_CHAIN_DIS (((_v) >> 3) & 0x8)
-#define AR5K_EEPROM_FCC_MID_EN (((_v) >> 6) & 0x1)
-#define AR5K_EEPROM_JAP_U1EVEN_EN (((_v) >> 7) & 0x1)
-#define AR5K_EEPROM_JAP_U2_EN (((_v) >> 8) & 0x1)
-#define AR5K_EEPROM_JAP_U1ODD_EN (((_v) >> 9) & 0x1)
-#define AR5K_EEPROM_JAP_11A_NEW_EN (((_v) >> 10) & 0x1)
+#define AR5K_EEPROM_TX_CHAIN_DIS ((_v) & 0x7) /* MIMO chains disabled for TX bitmask */
+#define AR5K_EEPROM_RX_CHAIN_DIS (((_v) >> 3) & 0x7) /* MIMO chains disabled for RX bitmask */
+#define AR5K_EEPROM_FCC_MID_EN (((_v) >> 6) & 0x1) /* 5.47-5.7GHz supported */
+#define AR5K_EEPROM_JAP_U1EVEN_EN (((_v) >> 7) & 0x1) /* Japan UNII1 band (5.15-5.25GHz) on even channels (5180, 5200, 5220, 5240) supported */
+#define AR5K_EEPROM_JAP_U2_EN (((_v) >> 8) & 0x1) /* Japan UNII2 band (5.25-5.35GHz) supported */
+#define AR5K_EEPROM_JAP_MID_EN (((_v) >> 9) & 0x1) /* Japan band from 5.47-5.7GHz supported */
+#define AR5K_EEPROM_JAP_U1ODD_EN (((_v) >> 10) & 0x1) /* Japan UNII1 band (5.15-5.25GHz) on odd channels (5170, 5190, 5210, 5230) supported */
+#define AR5K_EEPROM_JAP_11A_NEW_EN (((_v) >> 11) & 0x1) /* Japan A mode enabled (using even channels) */
/* calibration settings */
#define AR5K_EEPROM_MODES_11A(_v) AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4)
@@ -389,7 +387,49 @@ struct ath5k_edge_power {
bool flag;
};
-/* EEPROM calibration data */
+/**
+ * struct ath5k_eeprom_info - EEPROM calibration data
+ *
+ * @ee_regdomain: ath/regd.c takes care of COUNTRY_ERD and WORLDWIDE_ROAMING
+ * flags
+ * @ee_ant_gain: Antenna gain in 0.5dB steps signed [5211 only?]
+ * @ee_cck_ofdm_gain_delta: difference in gainF to output the same power for
+ * OFDM and CCK packets
+ * @ee_cck_ofdm_power_delta: power difference between OFDM (6Mbps) and CCK
+ * (11Mbps) rate in G mode. 0.1dB steps
+ * @ee_scaled_cck_delta: for Japan Channel 14: 0.1dB resolution
+ *
+ * @ee_i_cal: Initial I coefficient to correct I/Q mismatch in the receive path
+ * @ee_q_cal: Initial Q coefficient to correct I/Q mismatch in the receive path
+ * @ee_fixed_bias: use ee_ob and ee_db settings or use automatic control
+ * @ee_switch_settling: RX/TX Switch settling time
+ * @ee_atn_tx_rx: Difference in attenuation between TX and RX in 1dB steps
+ * @ee_ant_control: Antenna Control Settings
+ * @ee_ob: Bias current for Output stage of PA
+ * B/G mode: Index [0] is used for AR2112/5112, otherwise [1]
+ * A mode: [0] 5.15-5.25 [1] 5.25-5.50 [2] 5.50-5.70 [3] 5.70-5.85 GHz
+ * @ee_db: Bias current for Output stage of PA. see @ee_ob
+ * @ee_tx_end2xlna_enable: Time difference from when BB finishes sending a frame
+ * to when the external LNA is activated
+ * @ee_tx_end2xpa_disable: Time difference from when BB finishes sending a frame
+ * to when the external PA switch is deactivated
+ * @ee_tx_frm2xpa_enable: Time difference from when MAC sends frame to when
+ * external PA switch is activated
+ * @ee_thr_62: Clear Channel Assessment (CCA) sensitivity
+ * (IEEE 802.11a section 17.3.10.5)
+ * @ee_xlna_gain: Total gain of the LNA (information only)
+ * @ee_xpd: Use external (1) or internal power detector
+ * @ee_x_gain: Gain for external power detector output (differences in EEMAP
+ * versions!)
+ * @ee_i_gain: Initial gain value after reset
+ * @ee_margin_tx_rx: Margin in dB when final attenuation stage should be used
+ *
+ * @ee_false_detect: Backoff in Sensitivity (dB) on channels with spur signals
+ * @ee_noise_floor_thr: Noise floor threshold in 1dB steps
+ * @ee_adc_desired_size: Desired amplitude for ADC, used by AGC; in 0.5 dB steps
+ * @ee_pga_desired_size: Desired output of PGA (for BB gain) in 0.5 dB steps
+ * @ee_pd_gain_overlap: PD ADC curves need to overlap in 0.5dB steps (ee_map>=2)
+ */
struct ath5k_eeprom_info {
/* Header information */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index aefe84f..5212e27 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -39,16 +39,16 @@
* ath5k_hw_set_opmode - Set PCU operating mode
*
* @ah: The &struct ath5k_hw
+ * @op_mode: &enum nl80211_iftype operating mode
*
* Initialize PCU for the various operating modes (AP/STA etc)
- *
- * NOTE: ah->ah_op_mode must be set before calling this.
*/
-int ath5k_hw_set_opmode(struct ath5k_hw *ah)
+int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
struct ath_common *common = ath5k_hw_common(ah);
u32 pcu_reg, beacon_reg, low_id, high_id;
+ ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
/* Preserve rest settings */
pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
@@ -61,7 +61,7 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
ATH5K_TRACE(ah->ah_sc);
- switch (ah->ah_op_mode) {
+ switch (op_mode) {
case NL80211_IFTYPE_ADHOC:
pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
beacon_reg |= AR5K_BCR_ADHOC;
@@ -113,39 +113,26 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_update - Update mib counters (mac layer statistics)
+ * ath5k_hw_update - Update MIB counters (mac layer statistics)
*
* @ah: The &struct ath5k_hw
- * @stats: The &struct ieee80211_low_level_stats we use to track
- * statistics on the driver
*
- * Reads MIB counters from PCU and updates sw statistics. Must be
- * called after a MIB interrupt.
+ * Reads MIB counters from PCU and updates sw statistics. Is called after a
+ * MIB interrupt, because one of these counters might have reached its maximum
+ * and triggered the MIB interrupt, to let us read and clear the counter.
+ *
+ * Is called in interrupt context!
*/
-void ath5k_hw_update_mib_counters(struct ath5k_hw *ah,
- struct ieee80211_low_level_stats *stats)
+void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
{
- ATH5K_TRACE(ah->ah_sc);
+ struct ath5k_statistics *stats = &ah->ah_sc->stats;
/* Read-And-Clear */
- stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
- stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
- stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
- stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
-
- /* XXX: Should we use this to track beacon count ?
- * -we read it anyway to clear the register */
- ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
-
- /* Reset profile count registers on 5212*/
- if (ah->ah_version == AR5K_AR5212) {
- ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
- ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
- ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
- ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
- }
-
- /* TODO: Handle ANI stats */
+ stats->ack_fail += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
+ stats->rts_fail += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
+ stats->rts_ok += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
+ stats->fcs_error += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
+ stats->beacons += ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
}
/**
@@ -167,9 +154,9 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
else {
u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
if (high)
- AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
- else
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
+ else
+ AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
}
}
@@ -179,25 +166,12 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
\******************/
/**
- * ath5k_hw_het_ack_timeout - Get ACK timeout from PCU in usec
- *
- * @ah: The &struct ath5k_hw
- */
-unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
-
- return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_ACK));
-}
-
-/**
* ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
*
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
-int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
@@ -211,24 +185,12 @@ int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
}
/**
- * ath5k_hw_get_cts_timeout - Get CTS timeout from PCU in usec
- *
- * @ah: The &struct ath5k_hw
- */
-unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_CTS));
-}
-
-/**
* ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
*
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
-int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
@@ -290,7 +252,7 @@ unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah)
*
* @ah: The &struct ath5k_hw
*/
-unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+static unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
@@ -308,7 +270,7 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
*
* @ah: The &struct ath5k_hw
*/
-unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+static unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
@@ -417,7 +379,6 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
* (ACK etc).
*
* NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
- * TODO: Init ANI here
*/
void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{
@@ -451,42 +412,6 @@ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
}
-/*
- * Set multicast filter by index
- */
-int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
-{
-
- ATH5K_TRACE(ah->ah_sc);
- if (index >= 64)
- return -EINVAL;
- else if (index >= 32)
- AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER1,
- (1 << (index - 32)));
- else
- AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
-
- return 0;
-}
-
-/*
- * Clear Multicast filter by index
- */
-int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
-{
-
- ATH5K_TRACE(ah->ah_sc);
- if (index >= 64)
- return -EINVAL;
- else if (index >= 32)
- AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER1,
- (1 << (index - 32)));
- else
- AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
-
- return 0;
-}
-
/**
* ath5k_hw_get_rx_filter - Get current rx filter
*
@@ -571,18 +496,7 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
* Beacon control *
\****************/
-/**
- * ath5k_hw_get_tsf32 - Get a 32bit TSF
- *
- * @ah: The &struct ath5k_hw
- *
- * Returns lower 32 bits of current TSF
- */
-u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_reg_read(ah, AR5K_TSF_L32);
-}
+#define ATH5K_MAX_TSF_READ 10
/**
* ath5k_hw_get_tsf64 - Get the full 64bit TSF
@@ -593,10 +507,35 @@ u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
*/
u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
{
- u64 tsf = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+ u32 tsf_lower, tsf_upper1, tsf_upper2;
+ int i;
+
+ /*
+ * While reading TSF upper and then lower part, the clock is still
+ * counting (or jumping in case of IBSS merge) so we might get
+ * inconsistent values. To avoid this, we read the upper part again
+ * and check it has not been changed. We make the hypothesis that a
+ * maximum of 3 changes can happens in a row (we use 10 as a safe
+ * value).
+ *
+ * Impact on performance is pretty small, since in most cases, only
+ * 3 register reads are needed.
+ */
+
+ tsf_upper1 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+ for (i = 0; i < ATH5K_MAX_TSF_READ; i++) {
+ tsf_lower = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
+ tsf_upper2 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+ if (tsf_upper2 == tsf_upper1)
+ break;
+ tsf_upper1 = tsf_upper2;
+ }
+
+ WARN_ON(i == ATH5K_MAX_TSF_READ);
+
ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_reg_read(ah, AR5K_TSF_L32) | (tsf << 32);
+ return (((u64)tsf_upper1 << 32) | tsf_lower);
}
/**
@@ -651,7 +590,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
/*
* Set the additional timers by mode
*/
- switch (ah->ah_op_mode) {
+ switch (ah->ah_sc->opmode) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_STATION:
/* In STA mode timer1 is used as next wakeup
@@ -688,8 +627,8 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
* Set the beacon register and enable all timers.
*/
/* When in AP or Mesh Point mode zero timer0 to start TSF */
- if (ah->ah_op_mode == NL80211_IFTYPE_AP ||
- ah->ah_op_mode == NL80211_IFTYPE_MESH_POINT)
+ if (ah->ah_sc->opmode == NL80211_IFTYPE_AP ||
+ ah->ah_sc->opmode == NL80211_IFTYPE_MESH_POINT)
ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
@@ -722,203 +661,6 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
}
-#if 0
-/*
- * Set beacon timers
- */
-int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah,
- const struct ath5k_beacon_state *state)
-{
- u32 cfp_period, next_cfp, dtim, interval, next_beacon;
-
- /*
- * TODO: should be changed through *state
- * review struct ath5k_beacon_state struct
- *
- * XXX: These are used for cfp period bellow, are they
- * ok ? Is it O.K. for tsf here to be 0 or should we use
- * get_tsf ?
- */
- u32 dtim_count = 0; /* XXX */
- u32 cfp_count = 0; /* XXX */
- u32 tsf = 0; /* XXX */
-
- ATH5K_TRACE(ah->ah_sc);
- /* Return on an invalid beacon state */
- if (state->bs_interval < 1)
- return -EINVAL;
-
- interval = state->bs_interval;
- dtim = state->bs_dtim_period;
-
- /*
- * PCF support?
- */
- if (state->bs_cfp_period > 0) {
- /*
- * Enable PCF mode and set the CFP
- * (Contention Free Period) and timer registers
- */
- cfp_period = state->bs_cfp_period * state->bs_dtim_period *
- state->bs_interval;
- next_cfp = (cfp_count * state->bs_dtim_period + dtim_count) *
- state->bs_interval;
-
- AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
- AR5K_STA_ID1_DEFAULT_ANTENNA |
- AR5K_STA_ID1_PCF);
- ath5k_hw_reg_write(ah, cfp_period, AR5K_CFP_PERIOD);
- ath5k_hw_reg_write(ah, state->bs_cfp_max_duration,
- AR5K_CFP_DUR);
- ath5k_hw_reg_write(ah, (tsf + (next_cfp == 0 ? cfp_period :
- next_cfp)) << 3, AR5K_TIMER2);
- } else {
- /* Disable PCF mode */
- AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
- AR5K_STA_ID1_DEFAULT_ANTENNA |
- AR5K_STA_ID1_PCF);
- }
-
- /*
- * Enable the beacon timer register
- */
- ath5k_hw_reg_write(ah, state->bs_next_beacon, AR5K_TIMER0);
-
- /*
- * Start the beacon timers
- */
- ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_BEACON) &
- ~(AR5K_BEACON_PERIOD | AR5K_BEACON_TIM)) |
- AR5K_REG_SM(state->bs_tim_offset ? state->bs_tim_offset + 4 : 0,
- AR5K_BEACON_TIM) | AR5K_REG_SM(state->bs_interval,
- AR5K_BEACON_PERIOD), AR5K_BEACON);
-
- /*
- * Write new beacon miss threshold, if it appears to be valid
- * XXX: Figure out right values for min <= bs_bmiss_threshold <= max
- * and return if its not in range. We can test this by reading value and
- * setting value to a largest value and seeing which values register.
- */
-
- AR5K_REG_WRITE_BITS(ah, AR5K_RSSI_THR, AR5K_RSSI_THR_BMISS,
- state->bs_bmiss_threshold);
-
- /*
- * Set sleep control register
- * XXX: Didn't find this in 5210 code but since this register
- * exists also in ar5k's 5210 headers i leave it as common code.
- */
- AR5K_REG_WRITE_BITS(ah, AR5K_SLEEP_CTL, AR5K_SLEEP_CTL_SLDUR,
- (state->bs_sleep_duration - 3) << 3);
-
- /*
- * Set enhanced sleep registers on 5212
- */
- if (ah->ah_version == AR5K_AR5212) {
- if (state->bs_sleep_duration > state->bs_interval &&
- roundup(state->bs_sleep_duration, interval) ==
- state->bs_sleep_duration)
- interval = state->bs_sleep_duration;
-
- if (state->bs_sleep_duration > dtim && (dtim == 0 ||
- roundup(state->bs_sleep_duration, dtim) ==
- state->bs_sleep_duration))
- dtim = state->bs_sleep_duration;
-
- if (interval > dtim)
- return -EINVAL;
-
- next_beacon = interval == dtim ? state->bs_next_dtim :
- state->bs_next_beacon;
-
- ath5k_hw_reg_write(ah,
- AR5K_REG_SM((state->bs_next_dtim - 3) << 3,
- AR5K_SLEEP0_NEXT_DTIM) |
- AR5K_REG_SM(10, AR5K_SLEEP0_CABTO) |
- AR5K_SLEEP0_ENH_SLEEP_EN |
- AR5K_SLEEP0_ASSUME_DTIM, AR5K_SLEEP0);
-
- ath5k_hw_reg_write(ah, AR5K_REG_SM((next_beacon - 3) << 3,
- AR5K_SLEEP1_NEXT_TIM) |
- AR5K_REG_SM(10, AR5K_SLEEP1_BEACON_TO), AR5K_SLEEP1);
-
- ath5k_hw_reg_write(ah,
- AR5K_REG_SM(interval, AR5K_SLEEP2_TIM_PER) |
- AR5K_REG_SM(dtim, AR5K_SLEEP2_DTIM_PER), AR5K_SLEEP2);
- }
-
- return 0;
-}
-
-/*
- * Reset beacon timers
- */
-void ath5k_hw_reset_beacon(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
- /*
- * Disable beacon timer
- */
- ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
-
- /*
- * Disable some beacon register values
- */
- AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
- AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF);
- ath5k_hw_reg_write(ah, AR5K_BEACON_PERIOD, AR5K_BEACON);
-}
-
-/*
- * Wait for beacon queue to finish
- */
-int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr)
-{
- unsigned int i;
- int ret;
-
- ATH5K_TRACE(ah->ah_sc);
-
- /* 5210 doesn't have QCU*/
- if (ah->ah_version == AR5K_AR5210) {
- /*
- * Wait for beaconn queue to finish by checking
- * Control Register and Beacon Status Register.
- */
- for (i = AR5K_TUNE_BEACON_INTERVAL / 2; i > 0; i--) {
- if (!(ath5k_hw_reg_read(ah, AR5K_BSR) & AR5K_BSR_TXQ1F)
- ||
- !(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_BSR_TXQ1F))
- break;
- udelay(10);
- }
-
- /* Timeout... */
- if (i <= 0) {
- /*
- * Re-schedule the beacon queue
- */
- ath5k_hw_reg_write(ah, phys_addr, AR5K_NOQCU_TXDP1);
- ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
- AR5K_BCR);
-
- return -EIO;
- }
- ret = 0;
- } else {
- /*5211/5212*/
- ret = ath5k_hw_register_timeout(ah,
- AR5K_QUEUE_STATUS(AR5K_TX_QUEUE_ID_BEACON),
- AR5K_QCU_STS_FRMPENDCNT, 0, false);
-
- if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, AR5K_TX_QUEUE_ID_BEACON))
- return -EIO;
- }
-
- return ret;
-}
-#endif
-
/*********************\
* Key table functions *
@@ -971,19 +713,6 @@ int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
return 0;
}
-/*
- * Check if a table entry is valid
- */
-int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry)
-{
- ATH5K_TRACE(ah->ah_sc);
- AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
-
- /* Check the validation flag at the end of the entry */
- return ath5k_hw_reg_read(ah, AR5K_KEYTABLE_MAC1(entry)) &
- AR5K_KEYTABLE_VALID;
-}
-
static
int ath5k_keycache_type(const struct ieee80211_key_conf *key)
{
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 68e2bcc..1b81c47 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -20,8 +20,6 @@
*
*/
-#define _ATH5K_PHY
-
#include <linux/delay.h>
#include <linux/slab.h>
@@ -982,7 +980,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
return -EINVAL;
data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
- } else if ((c - (c % 5)) != 2 || c > 5435) {
+ } else if ((c % 5) != 2 || c > 5435) {
if (!(c % 20) && c >= 5120) {
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
data2 = ath5k_hw_bitswap(3, 2);
@@ -995,7 +993,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
} else
return -EINVAL;
} else {
- data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8);
+ data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
data2 = ath5k_hw_bitswap(0, 2);
}
@@ -1023,7 +1021,7 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
data0 = ath5k_hw_bitswap((c - 2272), 8);
data2 = 0;
/* ? 5GHz ? */
- } else if ((c - (c % 5)) != 2 || c > 5435) {
+ } else if ((c % 5) != 2 || c > 5435) {
if (!(c % 20) && c < 5120)
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
else if (!(c % 10))
@@ -1034,7 +1032,7 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
return -EINVAL;
data2 = ath5k_hw_bitswap(1, 2);
} else {
- data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8);
+ data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
data2 = ath5k_hw_bitswap(0, 2);
}
@@ -1105,28 +1103,6 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
PHY calibration
\*****************/
-void
-ath5k_hw_calibration_poll(struct ath5k_hw *ah)
-{
- /* Calibration interval in jiffies */
- unsigned long cal_intval;
-
- cal_intval = msecs_to_jiffies(ah->ah_cal_intval * 1000);
-
- /* Initialize timestamp if needed */
- if (!ah->ah_cal_tstamp)
- ah->ah_cal_tstamp = jiffies;
-
- /* For now we always do full calibration
- * Mark software interrupt mask and fire software
- * interrupt (bit gets auto-cleared) */
- if (time_is_before_eq_jiffies(ah->ah_cal_tstamp + cal_intval)) {
- ah->ah_cal_tstamp = jiffies;
- ah->ah_swi_mask = AR5K_SWI_FULL_CALIBRATION;
- AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI);
- }
-}
-
static int sign_extend(int val, const int nbits)
{
int order = BIT(nbits-1);
@@ -1191,7 +1167,7 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
* The median of the values in the history is then loaded into the
* hardware for its own use for RSSI and CCA measurements.
*/
-void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
+static void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 val;
@@ -1400,7 +1376,11 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
}
i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7;
- q_coffd = q_pwr >> 7;
+
+ if (ah->ah_version == AR5K_AR5211)
+ q_coffd = q_pwr >> 6;
+ else
+ q_coffd = q_pwr >> 7;
/* protect against divide by 0 and loss of sign bits */
if (i_coffd == 0 || q_coffd < 2)
@@ -1409,7 +1389,10 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
i_coff = (-iq_corr) / i_coffd;
i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
- q_coff = (i_pwr / q_coffd) - 128;
+ if (ah->ah_version == AR5K_AR5211)
+ q_coff = (i_pwr / q_coffd) - 64;
+ else
+ q_coff = (i_pwr / q_coffd) - 128;
q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
@@ -1769,7 +1752,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
* Antenna control *
\*****************/
-void /*TODO:Boundary check*/
+static void /*TODO:Boundary check*/
ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
{
ATH5K_TRACE(ah->ah_sc);
@@ -1778,16 +1761,6 @@ ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
}
-unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
-
- if (ah->ah_version != AR5K_AR5210)
- return ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA) & 0x7;
-
- return false; /*XXX: What do we return for 5210 ?*/
-}
-
/*
* Enable/disable fast rx antenna diversity
*/
@@ -1931,6 +1904,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
ah->ah_tx_ant = tx_ant;
ah->ah_ant_mode = ant_mode;
+ ah->ah_def_ant = def_ant;
sta_id1 |= use_def_for_tx ? AR5K_STA_ID1_DEFAULT_ANTENNA : 0;
sta_id1 |= update_def_on_tx ? AR5K_STA_ID1_DESC_ANTENNA : 0;
@@ -2171,8 +2145,6 @@ ath5k_get_chan_pcal_surrounding_piers(struct ath5k_hw *ah,
done:
*pcinfo_l = &pcinfo[idx_l];
*pcinfo_r = &pcinfo[idx_r];
-
- return;
}
/*
@@ -2441,19 +2413,6 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
pcdac_tmp = pcdac_high_pwr;
edge_flag = 0x40;
-#if 0
- /* If both min and max power limits are in lower
- * power curve's range, only use the low power curve.
- * TODO: min/max levels are related to target
- * power values requested from driver/user
- * XXX: Is this really needed ? */
- if (min_pwr < table_max[1] &&
- max_pwr < table_max[1]) {
- edge_flag = 0;
- pcdac_tmp = pcdac_low_pwr;
- max_pwr_idx = (table_max[1] - table_min[1])/2;
- }
-#endif
} else {
pcdac_low_pwr = ah->ah_txpower.tmpL[1]; /* Zeroed */
pcdac_high_pwr = ah->ah_txpower.tmpL[0];
@@ -2600,7 +2559,7 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
max_idx = (pdadc_n < table_size) ? pdadc_n : table_size;
/* Fill pdadc_out table */
- while (pdadc_0 < max_idx)
+ while (pdadc_0 < max_idx && pdadc_i < 128)
pdadc_out[pdadc_i++] = pdadc_tmp[pdadc_0++];
/* Need to extrapolate above this pdgain? */
@@ -3144,5 +3103,3 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
return ath5k_hw_txpower(ah, channel, ee_mode, txpower);
}
-
-#undef _ATH5K_PHY
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 9122a85..f5831da 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -517,23 +517,6 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
}
/*
- * Get slot time from DCU
- */
-unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
-{
- unsigned int slot_time_clock;
-
- ATH5K_TRACE(ah->ah_sc);
-
- if (ah->ah_version == AR5K_AR5210)
- slot_time_clock = ath5k_hw_reg_read(ah, AR5K_SLOT_TIME);
- else
- slot_time_clock = ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT);
-
- return ath5k_hw_clocktoh(ah, slot_time_clock & 0xffff);
-}
-
-/*
* Set slot time on DCU
*/
int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 1464f89..55b4ac6d 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -212,10 +212,10 @@
* MIB control register
*/
#define AR5K_MIBC 0x0040 /* Register Address */
-#define AR5K_MIBC_COW 0x00000001 /* Warn test indicator */
+#define AR5K_MIBC_COW 0x00000001 /* Counter Overflow Warning */
#define AR5K_MIBC_FMC 0x00000002 /* Freeze MIB Counters */
-#define AR5K_MIBC_CMC 0x00000004 /* Clean MIB Counters */
-#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe */
+#define AR5K_MIBC_CMC 0x00000004 /* Clear MIB Counters */
+#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe, increment all */
/*
* Timeout prescale register
@@ -1139,8 +1139,8 @@
#define AR5K_STA_ID1_DEFAULT_ANTENNA 0x00200000 /* Use default antenna */
#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */
#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS */
-#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Use 6Mbit/s for ACK/CTS */
-#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* Use 11b base rate for ACK/CTS [5211+] */
+#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Rate to use for ACK/CTS. 0: highest mandatory rate <= RX rate; 1: 1Mbps in B mode */
+#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* 802.11b base rate. 0: 1, 2, 5.5 and 11Mbps; 1: 1 and 2Mbps. [5211+] */
#define AR5K_STA_ID1_SELFGEN_DEF_ANT 0x04000000 /* Use def. antenna for self generated frames */
#define AR5K_STA_ID1_CRYPT_MIC_EN 0x08000000 /* Enable MIC */
#define AR5K_STA_ID1_KEYSRCH_MODE 0x10000000 /* Look up key when key id != 0 */
@@ -1516,7 +1516,14 @@
AR5K_NAV_5210 : AR5K_NAV_5211)
/*
- * RTS success register
+ * MIB counters:
+ *
+ * The maximum value is 0xc000; when it is reached we get a MIB interrupt.
+ * The counters can be controlled via AR5K_MIBC and are cleared on read.
+ */
+
+/*
+ * RTS success (MIB counter)
*/
#define AR5K_RTS_OK_5210 0x8090
#define AR5K_RTS_OK_5211 0x8088
@@ -1524,7 +1531,7 @@
AR5K_RTS_OK_5210 : AR5K_RTS_OK_5211)
/*
- * RTS failure register
+ * RTS failure (MIB counter)
*/
#define AR5K_RTS_FAIL_5210 0x8094
#define AR5K_RTS_FAIL_5211 0x808c
@@ -1532,7 +1539,7 @@
AR5K_RTS_FAIL_5210 : AR5K_RTS_FAIL_5211)
/*
- * ACK failure register
+ * ACK failure (MIB counter)
*/
#define AR5K_ACK_FAIL_5210 0x8098
#define AR5K_ACK_FAIL_5211 0x8090
@@ -1540,7 +1547,7 @@
AR5K_ACK_FAIL_5210 : AR5K_ACK_FAIL_5211)
/*
- * FCS failure register
+ * FCS failure (MIB counter)
*/
#define AR5K_FCS_FAIL_5210 0x809c
#define AR5K_FCS_FAIL_5211 0x8094
@@ -1667,11 +1674,17 @@
/*
* Profile count registers
+ *
+ * These registers can be cleared and frozen with ATH5K_MIBC, but they do not
+ * generate a MIB interrupt.
+ * Instead of overflowing, they shift by one bit to the right. All registers
+ * shift together, i.e. when one reaches the max, all shift at the same time,
+ * so the values stay consistent relative to each other.
*/
#define AR5K_PROFCNT_TX 0x80ec /* Tx count */
#define AR5K_PROFCNT_RX 0x80f0 /* Rx count */
-#define AR5K_PROFCNT_RXCLR 0x80f4 /* Clear Rx count */
-#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle count (?) */
+#define AR5K_PROFCNT_RXCLR 0x80f4 /* Busy count */
+#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle counter */
/*
* Quiet period control registers
@@ -1758,7 +1771,7 @@
#define AR5K_CCK_FIL_CNT 0x8128
/*
- * PHY Error Counters (?)
+ * PHY Error Counters (same masks as AR5K_PHY_ERR_FIL)
*/
#define AR5K_PHYERR_CNT1 0x812c
#define AR5K_PHYERR_CNT1_MASK 0x8130
@@ -1766,6 +1779,9 @@
#define AR5K_PHYERR_CNT2 0x8134
#define AR5K_PHYERR_CNT2_MASK 0x8138
+/* if the PHY Error Counters reach this maximum, we get MIB interrupts */
+#define ATH5K_PHYERR_CNT_MAX 0x00c00000
+
/*
* TSF Threshold register (?)
*/
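The expression printed by read_file_ani() earlier in this patch, ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - reg), implies that the hardware counter is pre-loaded with (MAX - trigger), so it hits ATH5K_PHYERR_CNT_MAX, and raises the MIB interrupt, exactly when "trigger" PHY errors have been counted. A small standalone sketch of that arithmetic (illustrative only; the trigger value of 500 is an assumed example, the driver uses ATH5K_ANI_OFDM_TRIG_HIGH):

/* Illustrative sketch, not part of the patch. */
#include <stdio.h>

#define PHYERR_CNT_MAX	0x00c00000	/* mirrors ATH5K_PHYERR_CNT_MAX */

/* value to pre-load into the counter so a MIB interrupt fires after
 * 'trigger' PHY errors */
static unsigned int phyerr_seed(unsigned int trigger)
{
	return PHYERR_CNT_MAX - trigger;
}

/* recover the number of errors counted so far from the raw register value,
 * i.e. the same arithmetic read_file_ani() prints */
static unsigned int phyerr_seen(unsigned int raw, unsigned int trigger)
{
	return trigger - (PHYERR_CNT_MAX - raw);
}

int main(void)
{
	unsigned int trigger = 500;			/* assumed example value */
	unsigned int raw = phyerr_seed(trigger) + 123;	/* pretend 123 errors occurred */

	printf("seed=0x%x errors=%u\n", phyerr_seed(trigger),
	       phyerr_seen(raw, trigger));
	return 0;
}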
@@ -1974,7 +1990,7 @@
#define AR5K_PHY_SETTLING 0x9844 /* Register Address */
#define AR5K_PHY_SETTLING_AGC 0x0000007f /* AGC settling time */
#define AR5K_PHY_SETTLING_AGC_S 0
-#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settlig time */
+#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settling time */
#define AR5K_PHY_SETTLING_SWITCH_S 7
/*
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index cbf28e3..307f80e 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -19,8 +19,6 @@
*
*/
-#define _ATH5K_RESET
-
/*****************************\
Reset functions and helpers
\*****************************/
@@ -34,6 +32,27 @@
#include "base.h"
#include "debug.h"
+/*
+ * Check if a register write has been completed
+ */
+int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+ bool is_set)
+{
+ int i;
+ u32 data;
+
+ for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
+ data = ath5k_hw_reg_read(ah, reg);
+ if (is_set && (data & flag))
+ break;
+ else if ((data & flag) == val)
+ break;
+ udelay(15);
+ }
+
+ return (i <= 0) ? -EAGAIN : 0;
+}
+
/**
* ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
*
@@ -221,8 +240,8 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
/*
* Sleep control
*/
-int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
- bool set_chip, u16 sleep_duration)
+static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
+ bool set_chip, u16 sleep_duration)
{
unsigned int i;
u32 staid, data;
@@ -608,7 +627,6 @@ static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 1);
}
- return;
}
/* TODO: Half/Quarter rate */
@@ -864,8 +882,6 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
/* Heavy clipping -disable for now */
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_1)
ath5k_hw_reg_write(ah, 0, AR5K_PHY_HEAVY_CLIP_ENABLE);
-
- return;
}
/*
@@ -1017,11 +1033,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
if (ret)
return ret;
- /*
- * Initialize operating mode
- */
- ah->ah_op_mode = op_mode;
-
/* PHY access enable */
if (ah->ah_mac_srev >= AR5K_SREV_AR5211)
ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
@@ -1192,7 +1203,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
ath5k_hw_set_associd(ah);
/* Set PCU config */
- ath5k_hw_set_opmode(ah);
+ ath5k_hw_set_opmode(ah, op_mode);
/* Clear any pending interrupts
* PISR/SISR Not available on 5210 */
@@ -1378,7 +1389,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
* external 32KHz crystal when sleeping if one
* exists */
if (ah->ah_version == AR5K_AR5212 &&
- ah->ah_op_mode != NL80211_IFTYPE_AP)
+ op_mode != NL80211_IFTYPE_AP)
ath5k_hw_set_sleep_clock(ah, true);
/*
@@ -1388,5 +1399,3 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
ath5k_hw_reset_tsf(ah);
return 0;
}
-
-#undef _ATH5K_RESET
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 5774cea..35f23bd 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -32,3 +32,24 @@ config ATH9K_DEBUGFS
Also required for changing debug message flags at run time.
+config ATH9K_HTC
+ tristate "Atheros HTC based wireless cards support"
+ depends on USB && MAC80211
+ select ATH9K_HW
+ select MAC80211_LEDS
+ select LEDS_CLASS
+ select NEW_LEDS
+ select ATH9K_COMMON
+ ---help---
+ Support for Atheros HTC based cards.
+ Chipsets supported: AR9271
+
+ For more information: http://wireless.kernel.org/en/users/Drivers/ath9k_htc
+
+ The built module will be ath9k_htc.
+
+config ATH9K_HTC_DEBUGFS
+ bool "Atheros ath9k_htc debugging"
+ depends on ATH9K_HTC && DEBUG_FS
+ ---help---
+ Say Y if you need access to ath9k_htc's statistics.
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 6b50d5e..dd112be 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -13,18 +13,38 @@ ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
obj-$(CONFIG_ATH9K) += ath9k.o
-ath9k_hw-y:= hw.o \
+ath9k_hw-y:= \
+ ar9002_hw.o \
+ ar9003_hw.o \
+ hw.o \
+ ar9003_phy.o \
+ ar9002_phy.o \
+ ar5008_phy.o \
+ ar9002_calib.o \
+ ar9003_calib.o \
+ calib.o \
eeprom.o \
eeprom_def.o \
eeprom_4k.o \
eeprom_9287.o \
- calib.o \
ani.o \
- phy.o \
btcoex.o \
mac.o \
+ ar9002_mac.o \
+ ar9003_mac.o \
+ ar9003_eeprom.o
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
ath9k_common-y:= common.o
+
+ath9k_htc-y += htc_hst.o \
+ hif_usb.o \
+ wmi.o \
+ htc_drv_txrx.o \
+ htc_drv_main.o \
+ htc_drv_beacon.o \
+ htc_drv_init.o
+
+obj-$(CONFIG_ATH9K_HTC) += ath9k_htc.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index ca4994f..85fdd26 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -47,6 +47,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
}
static struct ath_bus_ops ath_ahb_bus_ops = {
+ .ath_bus_type = ATH_AHB,
.read_cachesize = ath_ahb_read_cachesize,
.eeprom_read = ath_ahb_eeprom_read,
};
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 2a0cd64..ba8b20f 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "hw-ops.h"
static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
struct ath9k_channel *chan)
@@ -37,190 +38,6 @@ static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
return 0;
}
-static bool ath9k_hw_ani_control(struct ath_hw *ah,
- enum ath9k_ani_cmd cmd, int param)
-{
- struct ar5416AniState *aniState = ah->curani;
- struct ath_common *common = ath9k_hw_common(ah);
-
- switch (cmd & ah->ani_function) {
- case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
- u32 level = param;
-
- if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
- return false;
- }
-
- REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
- AR_PHY_DESIRED_SZ_TOT_DES,
- ah->totalSizeDesired[level]);
- REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
- AR_PHY_AGC_CTL1_COARSE_LOW,
- ah->coarse_low[level]);
- REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
- AR_PHY_AGC_CTL1_COARSE_HIGH,
- ah->coarse_high[level]);
- REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
- AR_PHY_FIND_SIG_FIRPWR,
- ah->firpwr[level]);
-
- if (level > aniState->noiseImmunityLevel)
- ah->stats.ast_ani_niup++;
- else if (level < aniState->noiseImmunityLevel)
- ah->stats.ast_ani_nidown++;
- aniState->noiseImmunityLevel = level;
- break;
- }
- case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
- const int m1ThreshLow[] = { 127, 50 };
- const int m2ThreshLow[] = { 127, 40 };
- const int m1Thresh[] = { 127, 0x4d };
- const int m2Thresh[] = { 127, 0x40 };
- const int m2CountThr[] = { 31, 16 };
- const int m2CountThrLow[] = { 63, 48 };
- u32 on = param ? 1 : 0;
-
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
- m1ThreshLow[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
- m2ThreshLow[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M1_THRESH,
- m1Thresh[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M2_THRESH,
- m2Thresh[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M2COUNT_THR,
- m2CountThr[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
- m2CountThrLow[on]);
-
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
- m1ThreshLow[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
- m2ThreshLow[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M1_THRESH,
- m1Thresh[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M2_THRESH,
- m2Thresh[on]);
-
- if (on)
- REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
- else
- REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
-
- if (!on != aniState->ofdmWeakSigDetectOff) {
- if (on)
- ah->stats.ast_ani_ofdmon++;
- else
- ah->stats.ast_ani_ofdmoff++;
- aniState->ofdmWeakSigDetectOff = !on;
- }
- break;
- }
- case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
- const int weakSigThrCck[] = { 8, 6 };
- u32 high = param ? 1 : 0;
-
- REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
- AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
- weakSigThrCck[high]);
- if (high != aniState->cckWeakSigThreshold) {
- if (high)
- ah->stats.ast_ani_cckhigh++;
- else
- ah->stats.ast_ani_ccklow++;
- aniState->cckWeakSigThreshold = high;
- }
- break;
- }
- case ATH9K_ANI_FIRSTEP_LEVEL:{
- const int firstep[] = { 0, 4, 8 };
- u32 level = param;
-
- if (level >= ARRAY_SIZE(firstep)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned) ARRAY_SIZE(firstep));
- return false;
- }
- REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
- AR_PHY_FIND_SIG_FIRSTEP,
- firstep[level]);
- if (level > aniState->firstepLevel)
- ah->stats.ast_ani_stepup++;
- else if (level < aniState->firstepLevel)
- ah->stats.ast_ani_stepdown++;
- aniState->firstepLevel = level;
- break;
- }
- case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
- const int cycpwrThr1[] =
- { 2, 4, 6, 8, 10, 12, 14, 16 };
- u32 level = param;
-
- if (level >= ARRAY_SIZE(cycpwrThr1)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned) ARRAY_SIZE(cycpwrThr1));
- return false;
- }
- REG_RMW_FIELD(ah, AR_PHY_TIMING5,
- AR_PHY_TIMING5_CYCPWR_THR1,
- cycpwrThr1[level]);
- if (level > aniState->spurImmunityLevel)
- ah->stats.ast_ani_spurup++;
- else if (level < aniState->spurImmunityLevel)
- ah->stats.ast_ani_spurdown++;
- aniState->spurImmunityLevel = level;
- break;
- }
- case ATH9K_ANI_PRESENT:
- break;
- default:
- ath_print(common, ATH_DBG_ANI,
- "invalid cmd %u\n", cmd);
- return false;
- }
-
- ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
- ath_print(common, ATH_DBG_ANI,
- "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
- "ofdmWeakSigDetectOff=%d\n",
- aniState->noiseImmunityLevel,
- aniState->spurImmunityLevel,
- !aniState->ofdmWeakSigDetectOff);
- ath_print(common, ATH_DBG_ANI,
- "cckWeakSigThreshold=%d, "
- "firstepLevel=%d, listenTime=%d\n",
- aniState->cckWeakSigThreshold,
- aniState->firstepLevel,
- aniState->listenTime);
- ath_print(common, ATH_DBG_ANI,
- "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
- aniState->cycleCount,
- aniState->ofdmPhyErrCount,
- aniState->cckPhyErrCount);
-
- return true;
-}
-
static void ath9k_hw_update_mibstats(struct ath_hw *ah,
struct ath9k_mib_stats *stats)
{
@@ -262,11 +79,17 @@ static void ath9k_ani_restart(struct ath_hw *ah)
"Writing ofdmbase=%u cckbase=%u\n",
aniState->ofdmPhyErrBase,
aniState->cckPhyErrBase);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
aniState->ofdmPhyErrCount = 0;
@@ -540,8 +363,14 @@ void ath9k_ani_reset(struct ath_hw *ah)
ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) &
~ATH9K_RX_FILTER_PHYERR);
ath9k_ani_restart(ah);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
void ath9k_hw_ani_monitor(struct ath_hw *ah,
@@ -639,6 +468,8 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_FILT_OFDM, 0);
REG_WRITE(ah, AR_FILT_CCK, 0);
REG_WRITE(ah, AR_MIBC,
@@ -646,6 +477,9 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
& 0x0f);
REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
/* Freeze the MIB counters, get the stats and then clear them */
@@ -809,20 +643,17 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
ath_print(common, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
ah->ani[0].cckPhyErrBase);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_PHY_ERR_1, ah->ani[0].ofdmPhyErrBase);
REG_WRITE(ah, AR_PHY_ERR_2, ah->ani[0].cckPhyErrBase);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
ath9k_enable_mib_counters(ah);
ah->aniperiod = ATH9K_ANI_PERIOD;
if (ah->config.enable_ani)
ah->proc_phyerr |= HAL_PROCESS_ANI;
}
-
-void ath9k_hw_ani_disable(struct ath_hw *ah)
-{
- ath_print(ath9k_hw_common(ah), ATH_DBG_ANI, "Disabling ANI\n");
-
- ath9k_hw_disable_mib_counters(ah);
- REG_WRITE(ah, AR_PHY_ERR_1, 0);
- REG_WRITE(ah, AR_PHY_ERR_2, 0);
-}
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 4e1ab94..3356762 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -118,6 +118,5 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, u32 *rxc_pcnt,
void ath9k_hw_procmibevent(struct ath_hw *ah);
void ath9k_hw_ani_setup(struct ath_hw *ah);
void ath9k_hw_ani_init(struct ath_hw *ah);
-void ath9k_hw_ani_disable(struct ath_hw *ah);
#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
new file mode 100644
index 0000000..025c31ac
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
@@ -0,0 +1,742 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_AR5008_H
+#define INITVALS_AR5008_H
+
+static const u32 ar5416Modes[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
+ { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
+ { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
+ { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de },
+ { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
+ { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
+ { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
+ { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
+ { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
+ { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
+ { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
+ { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
+ { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
+ { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
+ { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
+ { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
+ { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
+ { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
+ { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
+ { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
+ { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
+ { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
+ { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
+ { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
+ { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
+ { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
+ { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
+ { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
+ { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
+ { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
+ { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+};
+
+static const u32 ar5416Common[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00004030, 0x00000002 },
+ { 0x0000403c, 0x00000002 },
+ { 0x00007010, 0x00000000 },
+ { 0x00007038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00000000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x000080c0, 0x2a82301a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0xffffffff },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c4, 0x00000000 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008264, 0x88000010 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x00008300, 0x00000000 },
+ { 0x00008304, 0x00000000 },
+ { 0x00008308, 0x00000000 },
+ { 0x0000830c, 0x00000000 },
+ { 0x00008310, 0x00000000 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008318, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00070000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xad848e19 },
+ { 0x00009810, 0x7d14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x00009840, 0x206a002e },
+ { 0x0000984c, 0x1284233c },
+ { 0x00009854, 0x00000859 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x05100000 },
+ { 0x0000a920, 0x05100000 },
+ { 0x0000b920, 0x05100000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280b212 },
+ { 0x0000994c, 0x00020028 },
+ { 0x00009954, 0x5d50e188 },
+ { 0x00009958, 0x00081fff },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x0000c968, 0x000003ce },
+ { 0x00009970, 0x190fb515 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x001fff00 },
+ { 0x000099ac, 0x00000000 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000200 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x000000aa },
+ { 0x000099fc, 0x00001042 },
+ { 0x00009b00, 0x00000000 },
+ { 0x00009b04, 0x00000001 },
+ { 0x00009b08, 0x00000002 },
+ { 0x00009b0c, 0x00000003 },
+ { 0x00009b10, 0x00000004 },
+ { 0x00009b14, 0x00000005 },
+ { 0x00009b18, 0x00000008 },
+ { 0x00009b1c, 0x00000009 },
+ { 0x00009b20, 0x0000000a },
+ { 0x00009b24, 0x0000000b },
+ { 0x00009b28, 0x0000000c },
+ { 0x00009b2c, 0x0000000d },
+ { 0x00009b30, 0x00000010 },
+ { 0x00009b34, 0x00000011 },
+ { 0x00009b38, 0x00000012 },
+ { 0x00009b3c, 0x00000013 },
+ { 0x00009b40, 0x00000014 },
+ { 0x00009b44, 0x00000015 },
+ { 0x00009b48, 0x00000018 },
+ { 0x00009b4c, 0x00000019 },
+ { 0x00009b50, 0x0000001a },
+ { 0x00009b54, 0x0000001b },
+ { 0x00009b58, 0x0000001c },
+ { 0x00009b5c, 0x0000001d },
+ { 0x00009b60, 0x00000020 },
+ { 0x00009b64, 0x00000021 },
+ { 0x00009b68, 0x00000022 },
+ { 0x00009b6c, 0x00000023 },
+ { 0x00009b70, 0x00000024 },
+ { 0x00009b74, 0x00000025 },
+ { 0x00009b78, 0x00000028 },
+ { 0x00009b7c, 0x00000029 },
+ { 0x00009b80, 0x0000002a },
+ { 0x00009b84, 0x0000002b },
+ { 0x00009b88, 0x0000002c },
+ { 0x00009b8c, 0x0000002d },
+ { 0x00009b90, 0x00000030 },
+ { 0x00009b94, 0x00000031 },
+ { 0x00009b98, 0x00000032 },
+ { 0x00009b9c, 0x00000033 },
+ { 0x00009ba0, 0x00000034 },
+ { 0x00009ba4, 0x00000035 },
+ { 0x00009ba8, 0x00000035 },
+ { 0x00009bac, 0x00000035 },
+ { 0x00009bb0, 0x00000035 },
+ { 0x00009bb4, 0x00000035 },
+ { 0x00009bb8, 0x00000035 },
+ { 0x00009bbc, 0x00000035 },
+ { 0x00009bc0, 0x00000035 },
+ { 0x00009bc4, 0x00000035 },
+ { 0x00009bc8, 0x00000035 },
+ { 0x00009bcc, 0x00000035 },
+ { 0x00009bd0, 0x00000035 },
+ { 0x00009bd4, 0x00000035 },
+ { 0x00009bd8, 0x00000035 },
+ { 0x00009bdc, 0x00000035 },
+ { 0x00009be0, 0x00000035 },
+ { 0x00009be4, 0x00000035 },
+ { 0x00009be8, 0x00000035 },
+ { 0x00009bec, 0x00000035 },
+ { 0x00009bf0, 0x00000035 },
+ { 0x00009bf4, 0x00000035 },
+ { 0x00009bf8, 0x00000010 },
+ { 0x00009bfc, 0x0000001a },
+ { 0x0000a210, 0x40806333 },
+ { 0x0000a214, 0x00106c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x018830c6 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x00000bb5 },
+ { 0x0000a22c, 0x00000011 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c889af },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000001 },
+ { 0x0000a250, 0x0000a000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cc75380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000000 },
+ { 0x0000a26c, 0x0e79e5c6 },
+ { 0x0000b26c, 0x0e79e5c6 },
+ { 0x0000c26c, 0x0e79e5c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x051701ce },
+ { 0x0000a338, 0x00000000 },
+ { 0x0000a33c, 0x00000000 },
+ { 0x0000a340, 0x00000000 },
+ { 0x0000a344, 0x00000000 },
+ { 0x0000a348, 0x3fffffff },
+ { 0x0000a34c, 0x3fffffff },
+ { 0x0000a350, 0x3fffffff },
+ { 0x0000a354, 0x0003ffff },
+ { 0x0000a358, 0x79a8aa1f },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x08000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+};
+
+static const u32 ar5416Bank0[][2] = {
+ { 0x000098b0, 0x1e5795e5 },
+ { 0x000098e0, 0x02008020 },
+};
+
+static const u32 ar5416BB_RfGain[][3] = {
+ { 0x00009a00, 0x00000000, 0x00000000 },
+ { 0x00009a04, 0x00000040, 0x00000040 },
+ { 0x00009a08, 0x00000080, 0x00000080 },
+ { 0x00009a0c, 0x000001a1, 0x00000141 },
+ { 0x00009a10, 0x000001e1, 0x00000181 },
+ { 0x00009a14, 0x00000021, 0x000001c1 },
+ { 0x00009a18, 0x00000061, 0x00000001 },
+ { 0x00009a1c, 0x00000168, 0x00000041 },
+ { 0x00009a20, 0x000001a8, 0x000001a8 },
+ { 0x00009a24, 0x000001e8, 0x000001e8 },
+ { 0x00009a28, 0x00000028, 0x00000028 },
+ { 0x00009a2c, 0x00000068, 0x00000068 },
+ { 0x00009a30, 0x00000189, 0x000000a8 },
+ { 0x00009a34, 0x000001c9, 0x00000169 },
+ { 0x00009a38, 0x00000009, 0x000001a9 },
+ { 0x00009a3c, 0x00000049, 0x000001e9 },
+ { 0x00009a40, 0x00000089, 0x00000029 },
+ { 0x00009a44, 0x00000170, 0x00000069 },
+ { 0x00009a48, 0x000001b0, 0x00000190 },
+ { 0x00009a4c, 0x000001f0, 0x000001d0 },
+ { 0x00009a50, 0x00000030, 0x00000010 },
+ { 0x00009a54, 0x00000070, 0x00000050 },
+ { 0x00009a58, 0x00000191, 0x00000090 },
+ { 0x00009a5c, 0x000001d1, 0x00000151 },
+ { 0x00009a60, 0x00000011, 0x00000191 },
+ { 0x00009a64, 0x00000051, 0x000001d1 },
+ { 0x00009a68, 0x00000091, 0x00000011 },
+ { 0x00009a6c, 0x000001b8, 0x00000051 },
+ { 0x00009a70, 0x000001f8, 0x00000198 },
+ { 0x00009a74, 0x00000038, 0x000001d8 },
+ { 0x00009a78, 0x00000078, 0x00000018 },
+ { 0x00009a7c, 0x00000199, 0x00000058 },
+ { 0x00009a80, 0x000001d9, 0x00000098 },
+ { 0x00009a84, 0x00000019, 0x00000159 },
+ { 0x00009a88, 0x00000059, 0x00000199 },
+ { 0x00009a8c, 0x00000099, 0x000001d9 },
+ { 0x00009a90, 0x000000d9, 0x00000019 },
+ { 0x00009a94, 0x000000f9, 0x00000059 },
+ { 0x00009a98, 0x000000f9, 0x00000099 },
+ { 0x00009a9c, 0x000000f9, 0x000000d9 },
+ { 0x00009aa0, 0x000000f9, 0x000000f9 },
+ { 0x00009aa4, 0x000000f9, 0x000000f9 },
+ { 0x00009aa8, 0x000000f9, 0x000000f9 },
+ { 0x00009aac, 0x000000f9, 0x000000f9 },
+ { 0x00009ab0, 0x000000f9, 0x000000f9 },
+ { 0x00009ab4, 0x000000f9, 0x000000f9 },
+ { 0x00009ab8, 0x000000f9, 0x000000f9 },
+ { 0x00009abc, 0x000000f9, 0x000000f9 },
+ { 0x00009ac0, 0x000000f9, 0x000000f9 },
+ { 0x00009ac4, 0x000000f9, 0x000000f9 },
+ { 0x00009ac8, 0x000000f9, 0x000000f9 },
+ { 0x00009acc, 0x000000f9, 0x000000f9 },
+ { 0x00009ad0, 0x000000f9, 0x000000f9 },
+ { 0x00009ad4, 0x000000f9, 0x000000f9 },
+ { 0x00009ad8, 0x000000f9, 0x000000f9 },
+ { 0x00009adc, 0x000000f9, 0x000000f9 },
+ { 0x00009ae0, 0x000000f9, 0x000000f9 },
+ { 0x00009ae4, 0x000000f9, 0x000000f9 },
+ { 0x00009ae8, 0x000000f9, 0x000000f9 },
+ { 0x00009aec, 0x000000f9, 0x000000f9 },
+ { 0x00009af0, 0x000000f9, 0x000000f9 },
+ { 0x00009af4, 0x000000f9, 0x000000f9 },
+ { 0x00009af8, 0x000000f9, 0x000000f9 },
+ { 0x00009afc, 0x000000f9, 0x000000f9 },
+};
+
+static const u32 ar5416Bank1[][2] = {
+ { 0x000098b0, 0x02108421 },
+ { 0x000098ec, 0x00000008 },
+};
+
+static const u32 ar5416Bank2[][2] = {
+ { 0x000098b0, 0x0e73ff17 },
+ { 0x000098e0, 0x00000420 },
+};
+
+static const u32 ar5416Bank3[][3] = {
+ { 0x000098f0, 0x01400018, 0x01c00018 },
+};
+
+static const u32 ar5416Bank6[][3] = {
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x004210a2, 0x004210a2 },
+ { 0x0000989c, 0x0014008f, 0x0014008f },
+ { 0x0000989c, 0x00c40003, 0x00c40003 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000f1, 0x000000f1 },
+ { 0x0000989c, 0x00002081, 0x00002081 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank6TPC[][3] = {
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x00423022, 0x00423022 },
+ { 0x0000989c, 0x201400df, 0x201400df },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000e1, 0x000000e1 },
+ { 0x0000989c, 0x00007081, 0x00007081 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank7[][2] = {
+ { 0x0000989c, 0x00000500 },
+ { 0x0000989c, 0x00000800 },
+ { 0x000098cc, 0x0000000e },
+};
+
+static const u32 ar5416Addac[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000003 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x0000000c },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000030 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000060 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000058 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
+static const u32 ar5416Modes_9100[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
+ { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
+ { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
+ { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
+ { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
+ { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
+ { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
+ { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
+ { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
+ { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
+ { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
+ { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
+ { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
+#ifdef TB243
+ { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
+ { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
+ { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
+ { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
+#else
+ { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
+ { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
+ { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
+ { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
+#endif
+ { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
+ { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
+ { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
+ { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
+ { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
+ { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
+ { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
+ { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
+ { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
+ { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
+ { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
+ { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
+ { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
+ { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
+ { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+};
+
+#endif /* INITVALS_AR5008_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
new file mode 100644
index 0000000..b2c17c9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -0,0 +1,1374 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "../regd.h"
+#include "ar9002_phy.h"
+
+/* All code below is for non single-chip solutions */
+
+/**
+ * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters
+ * @rfBuf: RF bank scratch buffer to modify
+ * @reg32: value to place into the buffer
+ * @numBits: number of bits of @reg32 to place
+ * @firstBit: first bit position (1-based) within the bank
+ * @column: byte lane of each 32-bit bank word to modify
+ *
+ * Performs analog "swizzling" of parameters into their location.
+ * Used on external AR2133/AR5133 radios.
+ */
+static void ar5008_hw_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
+ u32 numBits, u32 firstBit,
+ u32 column)
+{
+ u32 tmp32, mask, arrayEntry, lastBit;
+ int32_t bitPosition, bitsLeft;
+
+ tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
+ arrayEntry = (firstBit - 1) / 8;
+ bitPosition = (firstBit - 1) % 8;
+ bitsLeft = numBits;
+ while (bitsLeft > 0) {
+ lastBit = (bitPosition + bitsLeft > 8) ?
+ 8 : bitPosition + bitsLeft;
+ mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
+ (column * 8);
+ rfBuf[arrayEntry] &= ~mask;
+ rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
+ (column * 8)) & mask;
+ bitsLeft -= 8 - bitPosition;
+ tmp32 = tmp32 >> (8 - bitPosition);
+ bitPosition = 0;
+ arrayEntry++;
+ }
+}
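
(Editorial aside, not part of the patch.) The helper above bit-reverses reg32 and then scatters it, eight bits per word, into byte lane `column` of consecutive 32-bit bank words starting at the 1-based position firstBit. Below is a minimal standalone sketch of the same packing; the bank size, first_bit = 181 and column = 3 are example values borrowed from the rf_pwd_icsyndiv case in ar5008_hw_force_bias() further down, and reverse_bits() is a stand-in for ath9k_hw_reverse_bits().

/* Illustrative sketch only -- mirrors the packing done by
 * ar5008_hw_phy_modify_rx_buffer(), built outside the driver. */
#include <stdio.h>
#include <stdint.h>

static uint32_t reverse_bits(uint32_t v, int nbits)
{
	uint32_t r = 0;
	int i;

	for (i = 0; i < nbits; i++)
		r |= ((v >> i) & 1) << (nbits - 1 - i);
	return r;
}

int main(void)
{
	uint32_t buf[32] = { 0 };		/* stand-in bank cache */
	uint32_t val = reverse_bits(2, 3);	/* e.g. new_bias = 2 */
	int first_bit = 181, column = 3, num_bits = 3;
	int entry = (first_bit - 1) / 8;
	int pos = (first_bit - 1) % 8;
	int left = num_bits;

	while (left > 0) {
		int last = (pos + left > 8) ? 8 : pos + left;
		uint32_t mask = (((1u << last) - 1) ^ ((1u << pos) - 1)) <<
				(column * 8);

		buf[entry] = (buf[entry] & ~mask) |
			     (((val << pos) << (column * 8)) & mask);
		left -= 8 - pos;
		val >>= 8 - pos;
		pos = 0;
		entry++;
	}

	printf("bank word 22 = 0x%08x\n", buf[22]);
	return 0;
}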
+
+/*
+ * Fix for an orientation sensitivity issue on the 2.4 GHz band by
+ * increasing rf_pwd_icsyndiv.
+ *
+ * Theoretical Rules:
+ * if 2 GHz band
+ * if forceBiasAuto
+ * if synth_freq < 2412
+ * bias = 0
+ * else if 2412 <= synth_freq <= 2422
+ * bias = 1
+ * else // synth_freq > 2422
+ * bias = 2
+ * else if forceBias > 0
+ * bias = forceBias & 7
+ * else
+ * no change, use value from ini file
+ * else
+ * no change, invalid band
+ *
+ * 1st Mod:
+ * 2422 also uses value of 2
+ * <approved>
+ *
+ * 2nd Mod:
+ * Less than 2412 uses value of 0, 2412 and above uses value of 2
+ */
+static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 tmp_reg;
+ int reg_writes = 0;
+ u32 new_bias = 0;
+
+ if (!AR_SREV_5416(ah) || synth_freq >= 3000)
+ return;
+
+ BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+ if (synth_freq < 2412)
+ new_bias = 0;
+ else if (synth_freq < 2422)
+ new_bias = 1;
+ else
+ new_bias = 2;
+
+ /* pre-reverse this field */
+ tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Force rf_pwd_icsyndiv to %1d on %4d\n",
+ new_bias, synth_freq);
+
+ /* swizzle rf_pwd_icsyndiv */
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
+
+ /* write Bank 6 with new params */
+ REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
+}
+
+/**
+ * ar5008_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
+ * @ah: atheros hardware structure
+ * @chan: the channel to tune to
+ *
+ * For the external AR2133/AR5133 radios, takes the MHz channel value and sets
+ * the channel on the radio. Assumes writes to the analog bus are enabled and
+ * that the bank6 register cache is in ah->analogBank6Data.
+ */
+static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 channelSel = 0;
+ u32 bModeSynth = 0;
+ u32 aModeRefSel = 0;
+ u32 reg32 = 0;
+ u16 freq;
+ struct chan_centers centers;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ if (freq < 4800) {
+ u32 txctl;
+
+ if (((freq - 2192) % 5) == 0) {
+ channelSel = ((freq - 672) * 2 - 3040) / 10;
+ bModeSynth = 0;
+ } else if (((freq - 2224) % 5) == 0) {
+ channelSel = ((freq - 704) * 2 - 3040) / 10;
+ bModeSynth = 1;
+ } else {
+ ath_print(common, ATH_DBG_FATAL,
+ "Invalid channel %u MHz\n", freq);
+ return -EINVAL;
+ }
+
+ channelSel = (channelSel << 2) & 0xff;
+ channelSel = ath9k_hw_reverse_bits(channelSel, 8);
+
+ txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+ if (freq == 2484) {
+
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+ } else {
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
+ }
+
+ } else if ((freq % 20) == 0 && freq >= 5120) {
+ channelSel =
+ ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else if ((freq % 10) == 0) {
+ channelSel =
+ ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
+ if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
+ aModeRefSel = ath9k_hw_reverse_bits(2, 2);
+ else
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else if ((freq % 5) == 0) {
+ channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else {
+ ath_print(common, ATH_DBG_FATAL,
+ "Invalid channel %u MHz\n", freq);
+ return -EINVAL;
+ }
+
+ ar5008_hw_force_bias(ah, freq);
+
+ reg32 =
+ (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
+ (1 << 5) | 0x1;
+
+ REG_WRITE(ah, AR_PHY(0x37), reg32);
+
+ ah->curchan = chan;
+ ah->curchan_rad_index = -1;
+
+ return 0;
+}
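
(Editorial aside, not part of the patch.) A worked example of the 2.4 GHz branch above: for a 2412 MHz channel, channelSel works out to 44, becomes 0xb0 after the shift-and-mask, 0x0d after bit reversal, and the word written to AR_PHY(0x37) is 0x0d21. The standalone sketch below redoes that arithmetic; reverse_bits() again stands in for ath9k_hw_reverse_bits().

/* Illustrative only: the 2.4 GHz synthesizer word that
 * ar5008_hw_set_channel() computes for 2412 MHz (channel 1). */
#include <stdio.h>
#include <stdint.h>

static uint32_t reverse_bits(uint32_t v, int nbits)
{
	uint32_t r = 0;
	int i;

	for (i = 0; i < nbits; i++)
		r |= ((v >> i) & 1) << (nbits - 1 - i);
	return r;
}

int main(void)
{
	uint16_t freq = 2412;
	uint32_t channelSel, reg32;
	uint32_t bModeSynth = 0, aModeRefSel = 0;

	/* (2412 - 2192) % 5 == 0, so the first branch is taken */
	channelSel = ((freq - 672) * 2 - 3040) / 10;	/* 44 */
	channelSel = (channelSel << 2) & 0xff;		/* 0xb0 */
	channelSel = reverse_bits(channelSel, 8);	/* 0x0d */

	reg32 = (channelSel << 8) | (aModeRefSel << 2) |
		(bModeSynth << 1) | (1 << 5) | 0x1;

	printf("AR_PHY(0x37) <- 0x%08x\n", reg32);	/* 0x00000d21 */
	return 0;
}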
+
+/**
+ * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
+ * @ah: atheros hardware structure
+ * @chan: the channel being configured
+ *
+ * For non single-chip solutions. Converts the spur frequency to a baseband
+ * offset for the given channel frequency and computes the register settings
+ * below.
+ */
+static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int bb_spur = AR_NO_SPUR;
+ int bin, cur_bin;
+ int spur_freq_sd;
+ int spur_delta_phase;
+ int denominator;
+ int upper, lower, cur_vit_mask;
+ int tmp, new;
+ int i;
+ int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
+ AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+ };
+ int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
+ AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+ };
+ int inc[4] = { 0, 100, 0, 0 };
+
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int8_t mask_amt;
+ int tmp_mask;
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+ if (AR_NO_SPUR == cur_bb_spur)
+ break;
+ cur_bb_spur = cur_bb_spur - (chan->channel * 10);
+ if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ }
+
+ if (AR_NO_SPUR == bb_spur)
+ return;
+
+ bin = bb_spur * 32;
+
+ tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+ new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+ AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+ AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+ AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
+
+ new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+ AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+ AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+ SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+ REG_WRITE(ah, AR_PHY_SPUR_REG, new);
+
+ spur_delta_phase = ((bb_spur * 524288) / 100) &
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+ denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
+ spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
+
+ new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+ SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+ SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+ REG_WRITE(ah, AR_PHY_TIMING11, new);
+
+ cur_bin = -6000;
+ upper = bin + 100;
+ lower = bin - 100;
+
+ for (i = 0; i < 4; i++) {
+ int pilot_mask = 0;
+ int chan_mask = 0;
+ int bp = 0;
+ for (bp = 0; bp < 30; bp++) {
+ if ((cur_bin > lower) && (cur_bin < upper)) {
+ pilot_mask = pilot_mask | 0x1 << bp;
+ chan_mask = chan_mask | 0x1 << bp;
+ }
+ cur_bin += 100;
+ }
+ cur_bin += inc[i];
+ REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
+ REG_WRITE(ah, chan_mask_reg[i], chan_mask);
+ }
+
+ cur_vit_mask = 6100;
+ upper = bin + 120;
+ lower = bin - 120;
+
+ for (i = 0; i < 123; i++) {
+ if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
+
+ /* workaround for gcc bug #37014 */
+ volatile int tmp_v = abs(cur_vit_mask - bin);
+
+ if (tmp_v < 75)
+ mask_amt = 1;
+ else
+ mask_amt = 0;
+ if (cur_vit_mask < 0)
+ mask_m[abs(cur_vit_mask / 100)] = mask_amt;
+ else
+ mask_p[cur_vit_mask / 100] = mask_amt;
+ }
+ cur_vit_mask -= 100;
+ }
+
+ tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
+ | (mask_m[48] << 26) | (mask_m[49] << 24)
+ | (mask_m[50] << 22) | (mask_m[51] << 20)
+ | (mask_m[52] << 18) | (mask_m[53] << 16)
+ | (mask_m[54] << 14) | (mask_m[55] << 12)
+ | (mask_m[56] << 10) | (mask_m[57] << 8)
+ | (mask_m[58] << 6) | (mask_m[59] << 4)
+ | (mask_m[60] << 2) | (mask_m[61] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
+
+ tmp_mask = (mask_m[31] << 28)
+ | (mask_m[32] << 26) | (mask_m[33] << 24)
+ | (mask_m[34] << 22) | (mask_m[35] << 20)
+ | (mask_m[36] << 18) | (mask_m[37] << 16)
+ | (mask_m[48] << 14) | (mask_m[39] << 12)
+ | (mask_m[40] << 10) | (mask_m[41] << 8)
+ | (mask_m[42] << 6) | (mask_m[43] << 4)
+ | (mask_m[44] << 2) | (mask_m[45] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
+
+ tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
+ | (mask_m[18] << 26) | (mask_m[18] << 24)
+ | (mask_m[20] << 22) | (mask_m[20] << 20)
+ | (mask_m[22] << 18) | (mask_m[22] << 16)
+ | (mask_m[24] << 14) | (mask_m[24] << 12)
+ | (mask_m[25] << 10) | (mask_m[26] << 8)
+ | (mask_m[27] << 6) | (mask_m[28] << 4)
+ | (mask_m[29] << 2) | (mask_m[30] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
+
+ tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
+ | (mask_m[2] << 26) | (mask_m[3] << 24)
+ | (mask_m[4] << 22) | (mask_m[5] << 20)
+ | (mask_m[6] << 18) | (mask_m[7] << 16)
+ | (mask_m[8] << 14) | (mask_m[9] << 12)
+ | (mask_m[10] << 10) | (mask_m[11] << 8)
+ | (mask_m[12] << 6) | (mask_m[13] << 4)
+ | (mask_m[14] << 2) | (mask_m[15] << 0);
+ REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
+
+ tmp_mask = (mask_p[15] << 28)
+ | (mask_p[14] << 26) | (mask_p[13] << 24)
+ | (mask_p[12] << 22) | (mask_p[11] << 20)
+ | (mask_p[10] << 18) | (mask_p[9] << 16)
+ | (mask_p[8] << 14) | (mask_p[7] << 12)
+ | (mask_p[6] << 10) | (mask_p[5] << 8)
+ | (mask_p[4] << 6) | (mask_p[3] << 4)
+ | (mask_p[2] << 2) | (mask_p[1] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
+
+ tmp_mask = (mask_p[30] << 28)
+ | (mask_p[29] << 26) | (mask_p[28] << 24)
+ | (mask_p[27] << 22) | (mask_p[26] << 20)
+ | (mask_p[25] << 18) | (mask_p[24] << 16)
+ | (mask_p[23] << 14) | (mask_p[22] << 12)
+ | (mask_p[21] << 10) | (mask_p[20] << 8)
+ | (mask_p[19] << 6) | (mask_p[18] << 4)
+ | (mask_p[17] << 2) | (mask_p[16] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
+
+ tmp_mask = (mask_p[45] << 28)
+ | (mask_p[44] << 26) | (mask_p[43] << 24)
+ | (mask_p[42] << 22) | (mask_p[41] << 20)
+ | (mask_p[40] << 18) | (mask_p[39] << 16)
+ | (mask_p[38] << 14) | (mask_p[37] << 12)
+ | (mask_p[36] << 10) | (mask_p[35] << 8)
+ | (mask_p[34] << 6) | (mask_p[33] << 4)
+ | (mask_p[32] << 2) | (mask_p[31] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
+
+ tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
+ | (mask_p[59] << 26) | (mask_p[58] << 24)
+ | (mask_p[57] << 22) | (mask_p[56] << 20)
+ | (mask_p[55] << 18) | (mask_p[54] << 16)
+ | (mask_p[53] << 14) | (mask_p[52] << 12)
+ | (mask_p[51] << 10) | (mask_p[50] << 8)
+ | (mask_p[49] << 6) | (mask_p[48] << 4)
+ | (mask_p[47] << 2) | (mask_p[46] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+}
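
(Editorial aside, not part of the patch.) The spur values programmed above are plain integer arithmetic on bb_spur, the spur offset in 100 kHz steps. The standalone sketch below runs the same math for a hypothetical spur 2 MHz above the channel centre on a 2.4 GHz channel (bb_spur = 20); the masking to the AR_PHY_TIMING11_* register fields that the driver applies is left out here.

/* Illustrative only: the spur arithmetic from ar5008_hw_spur_mitigate(),
 * computed outside the driver for bb_spur = 20 (i.e. +2.0 MHz). */
#include <stdio.h>

int main(void)
{
	int bb_spur = 20;			/* spur offset in 100 kHz units */
	int is2GHz = 1;
	int denominator = is2GHz ? 440 : 400;

	int bin = bb_spur * 32;					/* 640 */
	int spur_delta_phase = (bb_spur * 524288) / 100;	/* 104857 */
	int spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff; /* 93 */

	printf("bin=%d delta_phase=%d freq_sd=%d\n",
	       bin, spur_delta_phase, spur_freq_sd);
	return 0;
}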
+
+/**
+ * ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming
+ * @ah: atheros hardware structure
+ *
+ * Only required for older devices with external AR2133/AR5133 radios.
+ */
+static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
+{
+#define ATH_ALLOC_BANK(bank, size) do { \
+ bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
+ if (!bank) { \
+ ath_print(common, ATH_DBG_FATAL, \
+ "Cannot allocate RF banks\n"); \
+ return -ENOMEM; \
+ } \
+ } while (0)
+
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+ ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
+ ATH_ALLOC_BANK(ah->addac5416_21,
+ ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
+ ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
+
+ return 0;
+#undef ATH_ALLOC_BANK
+}
+
+
+/**
+ * ar5008_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
+ * @ah: atheros hardware structure
+ *
+ * Frees the scratch buffers allocated for the external AR2133/AR5133
+ * radio banks.
+ */
+static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah)
+{
+#define ATH_FREE_BANK(bank) do { \
+ kfree(bank); \
+ bank = NULL; \
+ } while (0)
+
+ BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+ ATH_FREE_BANK(ah->analogBank0Data);
+ ATH_FREE_BANK(ah->analogBank1Data);
+ ATH_FREE_BANK(ah->analogBank2Data);
+ ATH_FREE_BANK(ah->analogBank3Data);
+ ATH_FREE_BANK(ah->analogBank6Data);
+ ATH_FREE_BANK(ah->analogBank6TPCData);
+ ATH_FREE_BANK(ah->analogBank7Data);
+ ATH_FREE_BANK(ah->addac5416_21);
+ ATH_FREE_BANK(ah->bank6Temp);
+
+#undef ATH_FREE_BANK
+}
+
+/**
+ * ar5008_hw_set_rf_regs - programs rf registers based on EEPROM
+ * @ah: atheros hardware structure
+ * @chan: the channel being configured
+ * @modesIndex: mode index (column) into the initvals tables
+ *
+ * Used for the external AR2133/AR5133 radios.
+ *
+ * Reads the EEPROM header info from the device structure and programs
+ * all rf registers. This routine requires access to the analog
+ * rf device. This is not required for single-chip devices.
+ */
+static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 modesIndex)
+{
+ u32 eepMinorRev;
+ u32 ob5GHz = 0, db5GHz = 0;
+ u32 ob2GHz = 0, db2GHz = 0;
+ int regWrites = 0;
+
+ /*
+ * Software does not need to program bank data
+ * for single chip devices, that is AR9280 or anything
+ * after that.
+ */
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ return true;
+
+ /* Setup rf parameters */
+ eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
+
+ /* Setup Bank 0 Write */
+ RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1);
+
+ /* Setup Bank 1 Write */
+ RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1);
+
+ /* Setup Bank 2 Write */
+ RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1);
+
+ /* Setup Bank 3 Write */
+ RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3,
+ modesIndex);
+
+ /* Setup Bank 6 Write (TPC variant of the bank 6 data) */
+ {
+ int i;
+ for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) {
+ ah->analogBank6Data[i] =
+ INI_RA(&ah->iniBank6TPC, i, modesIndex);
+ }
+ }
+
+ /* Only the 5 or 2 GHz OB/DB need to be set for a mode */
+ if (eepMinorRev >= 2) {
+ if (IS_CHAN_2GHZ(chan)) {
+ ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
+ db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ ob2GHz, 3, 197, 0);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ db2GHz, 3, 194, 0);
+ } else {
+ ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5);
+ db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ ob5GHz, 3, 203, 0);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ db5GHz, 3, 200, 0);
+ }
+ }
+
+ /* Setup Bank 7 Write */
+ RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1);
+
+ /* Write Analog registers */
+ REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data,
+ regWrites);
+
+ return true;
+}
+
+static void ar5008_hw_init_bb(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 synthDelay;
+
+ synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_B(chan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+}
+
+static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
+{
+ int rx_chainmask, tx_chainmask;
+
+ rx_chainmask = ah->rxchainmask;
+ tx_chainmask = ah->txchainmask;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ switch (rx_chainmask) {
+ case 0x5:
+ DISABLE_REGWRITE_BUFFER(ah);
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+ ENABLE_REGWRITE_BUFFER(ah);
+ case 0x3:
+ if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
+ break;
+ }
+ case 0x1:
+ case 0x2:
+ case 0x7:
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
+ break;
+ default:
+ break;
+ }
+
+ REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (tx_chainmask == 0x5) {
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+ }
+ if (AR_SREV_9100(ah))
+ REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
+ REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
+}
+
+static void ar5008_hw_override_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 val;
+
+ /*
+ * Set the RX_ABORT and RX_DIS bits, and clear them only after
+ * RXE is set for the MAC. This prevents frames with corrupted
+ * descriptor status.
+ */
+ REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ val = REG_READ(ah, AR_PCU_MISC_MODE2);
+
+ if (!AR_SREV_9271(ah))
+ val &= ~AR_PCU_MISC_MODE2_HWWAR1;
+
+ if (AR_SREV_9287_10_OR_LATER(ah))
+ val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
+
+ REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
+ }
+
+ if (!AR_SREV_5416_20_OR_LATER(ah) ||
+ AR_SREV_9280_10_OR_LATER(ah))
+ return;
+ /*
+ * Disable BB clock gating.
+ * Necessary to avoid issues on AR5416 2.0.
+ */
+ REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
+
+ /*
+ * Disable RIFS search on some chips to avoid baseband
+ * hang issues.
+ */
+ if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
+ val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
+ val &= ~AR_PHY_RIFS_INIT_DELAY;
+ REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
+ }
+}
+
+static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 phymode;
+ u32 enableDacFifo = 0;
+
+ if (AR_SREV_9285_10_OR_LATER(ah))
+ enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
+ AR_PHY_FC_ENABLE_DAC_FIFO);
+
+ phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
+ | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo;
+
+ if (IS_CHAN_HT40(chan)) {
+ phymode |= AR_PHY_FC_DYN2040_EN;
+
+ if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
+ (chan->chanmode == CHANNEL_G_HT40PLUS))
+ phymode |= AR_PHY_FC_DYN2040_PRI_CH;
+
+ }
+ REG_WRITE(ah, AR_PHY_TURBO, phymode);
+
+ ath9k_hw_set11nmac2040(ah);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
+ REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
+
+
+static int ar5008_hw_process_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+ int i, regWrites = 0;
+ struct ieee80211_channel *channel = chan->chan;
+ u32 modesIndex, freqIndex;
+
+ switch (chan->chanmode) {
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ modesIndex = 1;
+ freqIndex = 1;
+ break;
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ modesIndex = 2;
+ freqIndex = 1;
+ break;
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_B:
+ modesIndex = 4;
+ freqIndex = 2;
+ break;
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ modesIndex = 3;
+ freqIndex = 2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (AR_SREV_9287_12_OR_LATER(ah)) {
+ /* Enable ASYNC FIFO */
+ REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
+ REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
+ REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
+ REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
+ }
+
+ /*
+ * Set correct baseband to analog shift setting to
+ * access analog chips.
+ */
+ REG_WRITE(ah, AR_PHY(0), 0x00000007);
+
+ /* Write ADDAC shifts */
+ REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
+ ah->eep_ops->set_addac(ah, chan);
+
+ if (AR_SREV_5416_22_OR_LATER(ah)) {
+ REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
+ } else {
+ struct ar5416IniArray temp;
+ u32 addacSize =
+ sizeof(u32) * ah->iniAddac.ia_rows *
+ ah->iniAddac.ia_columns;
+
+ /* For AR5416 2.0/2.1 */
+ memcpy(ah->addac5416_21,
+ ah->iniAddac.ia_array, addacSize);
+
+ /* override CLKDRV value at [row, column] = [31, 1] */
+ (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
+
+ temp.ia_array = ah->addac5416_21;
+ temp.ia_columns = ah->iniAddac.ia_columns;
+ temp.ia_rows = ah->iniAddac.ia_rows;
+ REG_WRITE_ARRAY(&temp, 1, regWrites);
+ }
+
+ REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ for (i = 0; i < ah->iniModes.ia_rows; i++) {
+ u32 reg = INI_RA(&ah->iniModes, i, 0);
+ u32 val = INI_RA(&ah->iniModes, i, modesIndex);
+
+ if (reg == AR_AN_TOP2 && ah->need_an_top2_fixup)
+ val &= ~AR_AN_TOP2_PWDCLKIND;
+
+ REG_WRITE(ah, reg, val);
+
+ if (reg >= 0x7800 && reg < 0x78a0
+ && ah->config.analog_shiftreg) {
+ udelay(100);
+ }
+
+ DO_DELAY(regWrites);
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9280(ah) || AR_SREV_9287_10_OR_LATER(ah))
+ REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
+
+ if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) ||
+ AR_SREV_9287_10_OR_LATER(ah))
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+
+ if (AR_SREV_9271_10(ah))
+ REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
+ modesIndex, regWrites);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ /* Write common array parameters */
+ for (i = 0; i < ah->iniCommon.ia_rows; i++) {
+ u32 reg = INI_RA(&ah->iniCommon, i, 0);
+ u32 val = INI_RA(&ah->iniCommon, i, 1);
+
+ REG_WRITE(ah, reg, val);
+
+ if (reg >= 0x7800 && reg < 0x78a0
+ && ah->config.analog_shiftreg) {
+ udelay(100);
+ }
+
+ DO_DELAY(regWrites);
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9271(ah)) {
+ if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == 1)
+ REG_WRITE_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
+ modesIndex, regWrites);
+ else
+ REG_WRITE_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
+ modesIndex, regWrites);
+ }
+
+ REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan)) {
+ REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
+ regWrites);
+ }
+
+ ar5008_hw_override_ini(ah, chan);
+ ar5008_hw_set_channel_regs(ah, chan);
+ ar5008_hw_init_chain_masks(ah);
+ ath9k_olc_init(ah);
+
+ /* Set TX power */
+ ah->eep_ops->set_txpower(ah, chan,
+ ath9k_regd_get_ctl(regulatory, chan),
+ channel->max_antenna_gain * 2,
+ channel->max_power * 2,
+ min((u32) MAX_RATE_POWER,
+ (u32) regulatory->power_limit));
+
+ /* Write analog registers */
+ if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+ "ar5416SetRfRegs failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u32 rfMode = 0;
+
+ if (chan == NULL)
+ return;
+
+ rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
+ ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+
+ if (!AR_SREV_9280_10_OR_LATER(ah))
+ rfMode |= (IS_CHAN_5GHZ(chan)) ?
+ AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
+
+ REG_WRITE(ah, AR_PHY_MODE, rfMode);
+}
+
+static void ar5008_hw_mark_phy_inactive(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+}
+
+static void ar5008_hw_set_delta_slope(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 coef_scaled, ds_coef_exp, ds_coef_man;
+ u32 clockMhzScaled = 0x64000000;
+ struct chan_centers centers;
+
+ if (IS_CHAN_HALF_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 1;
+ else if (IS_CHAN_QUARTER_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 2;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ coef_scaled = clockMhzScaled / centers.synth_center;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
+
+ coef_scaled = (9 * coef_scaled) / 10;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_HALFGI,
+ AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_HALFGI,
+ AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
+}
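
(Editorial aside, not part of the patch.) The coefficient handed to ath9k_hw_get_delta_slope_vals() above is simply the scaled 100 MHz clock divided by the synthesizer centre frequency, with the half-GI variant at 9/10 of that. The standalone sketch below shows the numbers for a full-rate channel centred at 5180 MHz; the mantissa/exponent split itself lives in ath9k_hw_get_delta_slope_vals() and is not reproduced here.

/* Illustrative only: the coefficient scaling in
 * ar5008_hw_set_delta_slope() for a 5180 MHz centre frequency. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t clockMhzScaled = 0x64000000;	/* 100 MHz scaled by 2^24 */
	uint32_t synth_center = 5180;		/* MHz */
	uint32_t coef_scaled, coef_halfgi;

	coef_scaled = clockMhzScaled / synth_center;	/* 323884 */
	/* the half-GI registers take 9/10 of the nominal coefficient */
	coef_halfgi = (9 * coef_scaled) / 10;		/* 291495 */

	printf("coef=%u coef_halfgi=%u\n", coef_scaled, coef_halfgi);
	return 0;
}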
+
+static bool ar5008_hw_rfbus_req(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
+ return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
+ AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
+}
+
+static void ar5008_hw_rfbus_done(struct ath_hw *ah)
+{
+ u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_B(ah->curchan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
+}
+
+static void ar5008_hw_enable_rfkill(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
+
+ REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
+ AR_GPIO_INPUT_MUX2_RFSILENT);
+
+ ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
+ REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
+}
+
+static void ar5008_restore_chainmask(struct ath_hw *ah)
+{
+ int rx_chainmask = ah->rxchainmask;
+
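+	/*
+	 * Only the two-chain configurations (0x5, 0x3) need the RX and
+	 * calibration chainmasks re-programmed here.
+	 */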
+ if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
+ }
+}
+
+static void ar5008_set_diversity(struct ath_hw *ah, bool value)
+{
+ u32 v = REG_READ(ah, AR_PHY_CCK_DETECT);
+ if (value)
+ v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ else
+ v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
+}
+
+static u32 ar9100_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ if (chan && IS_CHAN_5GHZ(chan))
+ return 0x1450;
+ return 0x1458;
+}
+
+static u32 ar9160_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan))
+ pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
+ else
+ pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
+
+ return pll;
+}
+
+static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan))
+ pll |= SM(0xa, AR_RTC_PLL_DIV);
+ else
+ pll |= SM(0xb, AR_RTC_PLL_DIV);
+
+ return pll;
+}
+
+static bool ar5008_hw_ani_control(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd, int param)
+{
+ struct ar5416AniState *aniState = ah->curani;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ switch (cmd & ah->ani_function) {
+ case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
+ return false;
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
+ AR_PHY_DESIRED_SZ_TOT_DES,
+ ah->totalSizeDesired[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
+ AR_PHY_AGC_CTL1_COARSE_LOW,
+ ah->coarse_low[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
+ AR_PHY_AGC_CTL1_COARSE_HIGH,
+ ah->coarse_high[level]);
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRPWR,
+ ah->firpwr[level]);
+
+ if (level > aniState->noiseImmunityLevel)
+ ah->stats.ast_ani_niup++;
+ else if (level < aniState->noiseImmunityLevel)
+ ah->stats.ast_ani_nidown++;
+ aniState->noiseImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
+ const int m1ThreshLow[] = { 127, 50 };
+ const int m2ThreshLow[] = { 127, 40 };
+ const int m1Thresh[] = { 127, 0x4d };
+ const int m2Thresh[] = { 127, 0x40 };
+ const int m2CountThr[] = { 31, 16 };
+ const int m2CountThrLow[] = { 63, 48 };
+ u32 on = param ? 1 : 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
+ m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
+ m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M1_THRESH,
+ m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2_THRESH,
+ m2Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2COUNT_THR,
+ m2CountThr[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
+ m2CountThrLow[on]);
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
+ m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
+ m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH,
+ m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH,
+ m2Thresh[on]);
+
+ if (on)
+ REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+
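+		/*
+		 * The ANI state tracks the "detection off" sense, hence the
+		 * negated comparison when updating the statistics.
+		 */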
+ if (!on != aniState->ofdmWeakSigDetectOff) {
+ if (on)
+ ah->stats.ast_ani_ofdmon++;
+ else
+ ah->stats.ast_ani_ofdmoff++;
+ aniState->ofdmWeakSigDetectOff = !on;
+ }
+ break;
+ }
+ case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
+ const int weakSigThrCck[] = { 8, 6 };
+ u32 high = param ? 1 : 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
+ AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
+ weakSigThrCck[high]);
+ if (high != aniState->cckWeakSigThreshold) {
+ if (high)
+ ah->stats.ast_ani_cckhigh++;
+ else
+ ah->stats.ast_ani_ccklow++;
+ aniState->cckWeakSigThreshold = high;
+ }
+ break;
+ }
+ case ATH9K_ANI_FIRSTEP_LEVEL:{
+ const int firstep[] = { 0, 4, 8 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(firstep)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(firstep));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP,
+ firstep[level]);
+ if (level > aniState->firstepLevel)
+ ah->stats.ast_ani_stepup++;
+ else if (level < aniState->firstepLevel)
+ ah->stats.ast_ani_stepdown++;
+ aniState->firstepLevel = level;
+ break;
+ }
+ case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
+ const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(cycpwrThr1)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(cycpwrThr1));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1,
+ cycpwrThr1[level]);
+ if (level > aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurup++;
+ else if (level < aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurdown++;
+ aniState->spurImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_PRESENT:
+ break;
+ default:
+ ath_print(common, ATH_DBG_ANI,
+ "invalid cmd %u\n", cmd);
+ return false;
+ }
+
+ ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
+ ath_print(common, ATH_DBG_ANI,
+ "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
+ "ofdmWeakSigDetectOff=%d\n",
+ aniState->noiseImmunityLevel,
+ aniState->spurImmunityLevel,
+ !aniState->ofdmWeakSigDetectOff);
+ ath_print(common, ATH_DBG_ANI,
+ "cckWeakSigThreshold=%d, "
+ "firstepLevel=%d, listenTime=%d\n",
+ aniState->cckWeakSigThreshold,
+ aniState->firstepLevel,
+ aniState->listenTime);
+ ath_print(common, ATH_DBG_ANI,
+ "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
+ aniState->cycleCount,
+ aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
+
+ return true;
+}
+
+static void ar5008_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int16_t nf;
+
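+	/*
+	 * Each minCCApwr field is a 9-bit two's-complement value; readings
+	 * with bit 8 set are sign-extended to a negative dBm value.
+	 */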
+ nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 0] is %d\n", nf);
+ nfarray[0] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 1] is %d\n", nf);
+ nfarray[1] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 2] is %d\n", nf);
+ nfarray[2] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 0] is %d\n", nf);
+ nfarray[3] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 1] is %d\n", nf);
+ nfarray[4] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 2] is %d\n", nf);
+ nfarray[5] = nf;
+}
+
+static void ar5008_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath9k_nfcal_hist *h;
+ int i, j;
+ int32_t val;
+ const u32 ar5416_cca_regs[6] = {
+ AR_PHY_CCA,
+ AR_PHY_CH1_CCA,
+ AR_PHY_CH2_CCA,
+ AR_PHY_EXT_CCA,
+ AR_PHY_CH1_EXT_CCA,
+ AR_PHY_CH2_EXT_CCA
+ };
+ u8 chainmask, rx_chain_status;
+
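+	/*
+	 * The chainmask below selects which of the six CCA registers are
+	 * loaded: bits 0-2 cover the control chains, bits 3-5 the
+	 * extension chains (0x9 = chain 0, 0x1B = chains 0-1, 0x3F = all).
+	 */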
+ rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+ chainmask = 0x9;
+ else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
+ if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
+ chainmask = 0x1B;
+ else
+ chainmask = 0x09;
+ } else {
+ if (rx_chain_status & 0x4)
+ chainmask = 0x3F;
+ else if (rx_chain_status & 0x2)
+ chainmask = 0x1B;
+ else
+ chainmask = 0x09;
+ }
+
+ h = ah->nfCalHist;
+
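+	/*
+	 * Load the last measured noise floor values (privNF) into the
+	 * 9-bit CCA min power fields and trigger a noise floor calibration.
+	 */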
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar5416_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
+ REG_WRITE(ah, ar5416_cca_regs[i], val);
+ }
+ }
+
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_ENABLE_NF);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+
+ for (j = 0; j < 5; j++) {
+ if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ AR_PHY_AGC_CONTROL_NF) == 0)
+ break;
+ udelay(50);
+ }
+
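+	/*
+	 * With the calibration triggered, reload the CCA registers with a
+	 * default of -50 dBm, presumably so the baseband is not left
+	 * capped by the values just loaded once the next measurement runs.
+	 */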
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar5416_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (-50) << 1) & 0x1ff);
+ REG_WRITE(ah, ar5416_cca_regs[i], val);
+ }
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
+
+void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ priv_ops->rf_set_freq = ar5008_hw_set_channel;
+ priv_ops->spur_mitigate_freq = ar5008_hw_spur_mitigate;
+
+ priv_ops->rf_alloc_ext_banks = ar5008_hw_rf_alloc_ext_banks;
+ priv_ops->rf_free_ext_banks = ar5008_hw_rf_free_ext_banks;
+ priv_ops->set_rf_regs = ar5008_hw_set_rf_regs;
+ priv_ops->set_channel_regs = ar5008_hw_set_channel_regs;
+ priv_ops->init_bb = ar5008_hw_init_bb;
+ priv_ops->process_ini = ar5008_hw_process_ini;
+ priv_ops->set_rfmode = ar5008_hw_set_rfmode;
+ priv_ops->mark_phy_inactive = ar5008_hw_mark_phy_inactive;
+ priv_ops->set_delta_slope = ar5008_hw_set_delta_slope;
+ priv_ops->rfbus_req = ar5008_hw_rfbus_req;
+ priv_ops->rfbus_done = ar5008_hw_rfbus_done;
+ priv_ops->enable_rfkill = ar5008_hw_enable_rfkill;
+ priv_ops->restore_chainmask = ar5008_restore_chainmask;
+ priv_ops->set_diversity = ar5008_set_diversity;
+ priv_ops->ani_control = ar5008_hw_ani_control;
+ priv_ops->do_getnf = ar5008_hw_do_getnf;
+ priv_ops->loadnf = ar5008_hw_loadnf;
+
+ if (AR_SREV_9100(ah))
+ priv_ops->compute_pll_control = ar9100_hw_compute_pll_control;
+ else if (AR_SREV_9160_10_OR_LATER(ah))
+ priv_ops->compute_pll_control = ar9160_hw_compute_pll_control;
+ else
+ priv_ops->compute_pll_control = ar5008_hw_compute_pll_control;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
new file mode 100644
index 0000000..0b94bd3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
@@ -0,0 +1,1254 @@
+
+static const u32 ar5416Common_9100[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00020010, 0x00000003 },
+ { 0x00020038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00004000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x000080c0, 0x2a82301a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008120, 0x08f04800 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0x00000000 },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c4, 0x00000000 },
+ { 0x000081d0, 0x00003210 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x00008300, 0x00000000 },
+ { 0x00008304, 0x00000000 },
+ { 0x00008308, 0x00000000 },
+ { 0x0000830c, 0x00000000 },
+ { 0x00008310, 0x00000000 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008318, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00000000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xad848e19 },
+ { 0x00009810, 0x7d14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x00009840, 0x206a01ae },
+ { 0x0000984c, 0x1284233c },
+ { 0x00009854, 0x00000859 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x05100000 },
+ { 0x0000a920, 0x05100000 },
+ { 0x0000b920, 0x05100000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280b212 },
+ { 0x0000994c, 0x00020028 },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x0000c968, 0x000003ce },
+ { 0x00009970, 0x190fb515 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x201fff00 },
+ { 0x000099ac, 0x006f0000 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000200 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x0cc80caa },
+ { 0x000099fc, 0x00001042 },
+ { 0x00009b00, 0x00000000 },
+ { 0x00009b04, 0x00000001 },
+ { 0x00009b08, 0x00000002 },
+ { 0x00009b0c, 0x00000003 },
+ { 0x00009b10, 0x00000004 },
+ { 0x00009b14, 0x00000005 },
+ { 0x00009b18, 0x00000008 },
+ { 0x00009b1c, 0x00000009 },
+ { 0x00009b20, 0x0000000a },
+ { 0x00009b24, 0x0000000b },
+ { 0x00009b28, 0x0000000c },
+ { 0x00009b2c, 0x0000000d },
+ { 0x00009b30, 0x00000010 },
+ { 0x00009b34, 0x00000011 },
+ { 0x00009b38, 0x00000012 },
+ { 0x00009b3c, 0x00000013 },
+ { 0x00009b40, 0x00000014 },
+ { 0x00009b44, 0x00000015 },
+ { 0x00009b48, 0x00000018 },
+ { 0x00009b4c, 0x00000019 },
+ { 0x00009b50, 0x0000001a },
+ { 0x00009b54, 0x0000001b },
+ { 0x00009b58, 0x0000001c },
+ { 0x00009b5c, 0x0000001d },
+ { 0x00009b60, 0x00000020 },
+ { 0x00009b64, 0x00000021 },
+ { 0x00009b68, 0x00000022 },
+ { 0x00009b6c, 0x00000023 },
+ { 0x00009b70, 0x00000024 },
+ { 0x00009b74, 0x00000025 },
+ { 0x00009b78, 0x00000028 },
+ { 0x00009b7c, 0x00000029 },
+ { 0x00009b80, 0x0000002a },
+ { 0x00009b84, 0x0000002b },
+ { 0x00009b88, 0x0000002c },
+ { 0x00009b8c, 0x0000002d },
+ { 0x00009b90, 0x00000030 },
+ { 0x00009b94, 0x00000031 },
+ { 0x00009b98, 0x00000032 },
+ { 0x00009b9c, 0x00000033 },
+ { 0x00009ba0, 0x00000034 },
+ { 0x00009ba4, 0x00000035 },
+ { 0x00009ba8, 0x00000035 },
+ { 0x00009bac, 0x00000035 },
+ { 0x00009bb0, 0x00000035 },
+ { 0x00009bb4, 0x00000035 },
+ { 0x00009bb8, 0x00000035 },
+ { 0x00009bbc, 0x00000035 },
+ { 0x00009bc0, 0x00000035 },
+ { 0x00009bc4, 0x00000035 },
+ { 0x00009bc8, 0x00000035 },
+ { 0x00009bcc, 0x00000035 },
+ { 0x00009bd0, 0x00000035 },
+ { 0x00009bd4, 0x00000035 },
+ { 0x00009bd8, 0x00000035 },
+ { 0x00009bdc, 0x00000035 },
+ { 0x00009be0, 0x00000035 },
+ { 0x00009be4, 0x00000035 },
+ { 0x00009be8, 0x00000035 },
+ { 0x00009bec, 0x00000035 },
+ { 0x00009bf0, 0x00000035 },
+ { 0x00009bf4, 0x00000035 },
+ { 0x00009bf8, 0x00000010 },
+ { 0x00009bfc, 0x0000001a },
+ { 0x0000a210, 0x40806333 },
+ { 0x0000a214, 0x00106c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x018830c6 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x001a0bb5 },
+ { 0x0000a22c, 0x00000000 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c889ae },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000001 },
+ { 0x0000a250, 0x0000a000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cc75380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000001 },
+ { 0x0000a26c, 0x0ebae9c6 },
+ { 0x0000b26c, 0x0ebae9c6 },
+ { 0x0000c26c, 0x0ebae9c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x050701ce },
+ { 0x0000a338, 0x00000000 },
+ { 0x0000a33c, 0x00000000 },
+ { 0x0000a340, 0x00000000 },
+ { 0x0000a344, 0x00000000 },
+ { 0x0000a348, 0x3fffffff },
+ { 0x0000a34c, 0x3fffffff },
+ { 0x0000a350, 0x3fffffff },
+ { 0x0000a354, 0x0003ffff },
+ { 0x0000a358, 0x79a8aa33 },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x0c000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+};
+
+static const u32 ar5416Bank0_9100[][2] = {
+ { 0x000098b0, 0x1e5795e5 },
+ { 0x000098e0, 0x02008020 },
+};
+
+static const u32 ar5416BB_RfGain_9100[][3] = {
+ { 0x00009a00, 0x00000000, 0x00000000 },
+ { 0x00009a04, 0x00000040, 0x00000040 },
+ { 0x00009a08, 0x00000080, 0x00000080 },
+ { 0x00009a0c, 0x000001a1, 0x00000141 },
+ { 0x00009a10, 0x000001e1, 0x00000181 },
+ { 0x00009a14, 0x00000021, 0x000001c1 },
+ { 0x00009a18, 0x00000061, 0x00000001 },
+ { 0x00009a1c, 0x00000168, 0x00000041 },
+ { 0x00009a20, 0x000001a8, 0x000001a8 },
+ { 0x00009a24, 0x000001e8, 0x000001e8 },
+ { 0x00009a28, 0x00000028, 0x00000028 },
+ { 0x00009a2c, 0x00000068, 0x00000068 },
+ { 0x00009a30, 0x00000189, 0x000000a8 },
+ { 0x00009a34, 0x000001c9, 0x00000169 },
+ { 0x00009a38, 0x00000009, 0x000001a9 },
+ { 0x00009a3c, 0x00000049, 0x000001e9 },
+ { 0x00009a40, 0x00000089, 0x00000029 },
+ { 0x00009a44, 0x00000170, 0x00000069 },
+ { 0x00009a48, 0x000001b0, 0x00000190 },
+ { 0x00009a4c, 0x000001f0, 0x000001d0 },
+ { 0x00009a50, 0x00000030, 0x00000010 },
+ { 0x00009a54, 0x00000070, 0x00000050 },
+ { 0x00009a58, 0x00000191, 0x00000090 },
+ { 0x00009a5c, 0x000001d1, 0x00000151 },
+ { 0x00009a60, 0x00000011, 0x00000191 },
+ { 0x00009a64, 0x00000051, 0x000001d1 },
+ { 0x00009a68, 0x00000091, 0x00000011 },
+ { 0x00009a6c, 0x000001b8, 0x00000051 },
+ { 0x00009a70, 0x000001f8, 0x00000198 },
+ { 0x00009a74, 0x00000038, 0x000001d8 },
+ { 0x00009a78, 0x00000078, 0x00000018 },
+ { 0x00009a7c, 0x00000199, 0x00000058 },
+ { 0x00009a80, 0x000001d9, 0x00000098 },
+ { 0x00009a84, 0x00000019, 0x00000159 },
+ { 0x00009a88, 0x00000059, 0x00000199 },
+ { 0x00009a8c, 0x00000099, 0x000001d9 },
+ { 0x00009a90, 0x000000d9, 0x00000019 },
+ { 0x00009a94, 0x000000f9, 0x00000059 },
+ { 0x00009a98, 0x000000f9, 0x00000099 },
+ { 0x00009a9c, 0x000000f9, 0x000000d9 },
+ { 0x00009aa0, 0x000000f9, 0x000000f9 },
+ { 0x00009aa4, 0x000000f9, 0x000000f9 },
+ { 0x00009aa8, 0x000000f9, 0x000000f9 },
+ { 0x00009aac, 0x000000f9, 0x000000f9 },
+ { 0x00009ab0, 0x000000f9, 0x000000f9 },
+ { 0x00009ab4, 0x000000f9, 0x000000f9 },
+ { 0x00009ab8, 0x000000f9, 0x000000f9 },
+ { 0x00009abc, 0x000000f9, 0x000000f9 },
+ { 0x00009ac0, 0x000000f9, 0x000000f9 },
+ { 0x00009ac4, 0x000000f9, 0x000000f9 },
+ { 0x00009ac8, 0x000000f9, 0x000000f9 },
+ { 0x00009acc, 0x000000f9, 0x000000f9 },
+ { 0x00009ad0, 0x000000f9, 0x000000f9 },
+ { 0x00009ad4, 0x000000f9, 0x000000f9 },
+ { 0x00009ad8, 0x000000f9, 0x000000f9 },
+ { 0x00009adc, 0x000000f9, 0x000000f9 },
+ { 0x00009ae0, 0x000000f9, 0x000000f9 },
+ { 0x00009ae4, 0x000000f9, 0x000000f9 },
+ { 0x00009ae8, 0x000000f9, 0x000000f9 },
+ { 0x00009aec, 0x000000f9, 0x000000f9 },
+ { 0x00009af0, 0x000000f9, 0x000000f9 },
+ { 0x00009af4, 0x000000f9, 0x000000f9 },
+ { 0x00009af8, 0x000000f9, 0x000000f9 },
+ { 0x00009afc, 0x000000f9, 0x000000f9 },
+};
+
+static const u32 ar5416Bank1_9100[][2] = {
+ { 0x000098b0, 0x02108421},
+ { 0x000098ec, 0x00000008},
+};
+
+static const u32 ar5416Bank2_9100[][2] = {
+ { 0x000098b0, 0x0e73ff17},
+ { 0x000098e0, 0x00000420},
+};
+
+static const u32 ar5416Bank3_9100[][3] = {
+ { 0x000098f0, 0x01400018, 0x01c00018 },
+};
+
+static const u32 ar5416Bank6_9100[][3] = {
+
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x004210a2, 0x004210a2 },
+ { 0x0000989c, 0x0014000f, 0x0014000f },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x000180d6, 0x000180d6 },
+ { 0x0000989c, 0x0000c0aa, 0x0000c0aa },
+ { 0x0000989c, 0x000000b1, 0x000000b1 },
+ { 0x0000989c, 0x00002000, 0x00002000 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank6TPC_9100[][3] = {
+
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x00423022, 0x00423022 },
+ { 0x0000989c, 0x2014008f, 0x2014008f },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000e1, 0x000000e1 },
+ { 0x0000989c, 0x00007080, 0x00007080 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank7_9100[][2] = {
+ { 0x0000989c, 0x00000500 },
+ { 0x0000989c, 0x00000800 },
+ { 0x000098cc, 0x0000000e },
+};
+
+static const u32 ar5416Addac_9100[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000010 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000015 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
+static const u32 ar5416Modes_9160[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
+ { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x00009850, 0x6c48b4e2, 0x6c48b4e2, 0x6c48b0e2, 0x6c48b0e2, 0x6c48b0e2 },
+ { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
+ { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e },
+ { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
+ { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
+ { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
+ { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
+ { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
+ { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
+ { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
+ { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
+ { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
+ { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
+ { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce },
+ { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
+ { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
+ { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
+ { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
+ { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
+ { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
+ { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
+ { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
+ { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
+ { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
+ { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
+ { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
+ { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
+ { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
+ { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+};
+
+static const u32 ar5416Common_9160[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00004030, 0x00000002 },
+ { 0x0000403c, 0x00000002 },
+ { 0x00007010, 0x00000020 },
+ { 0x00007038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00000000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x000080c0, 0x2a82301a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008120, 0x08f04800 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0xffffffff },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c4, 0x00000000 },
+ { 0x000081d0, 0x00003210 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x00008300, 0x00000000 },
+ { 0x00008304, 0x00000000 },
+ { 0x00008308, 0x00000000 },
+ { 0x0000830c, 0x00000000 },
+ { 0x00008310, 0x00000000 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008318, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00ff0000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xad848e19 },
+ { 0x00009810, 0x7d14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x00009840, 0x206a01ae },
+ { 0x0000984c, 0x1284233c },
+ { 0x00009854, 0x00000859 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x05100000 },
+ { 0x0000a920, 0x05100000 },
+ { 0x0000b920, 0x05100000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280b212 },
+ { 0x0000994c, 0x00020028 },
+ { 0x00009954, 0x5f3ca3de },
+ { 0x00009958, 0x2108ecff },
+ { 0x00009940, 0x00750604 },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x00009970, 0x190fb515 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x201fff00 },
+ { 0x000099ac, 0x006f0000 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000200 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x0cc80caa },
+ { 0x000099fc, 0x00001042 },
+ { 0x00009b00, 0x00000000 },
+ { 0x00009b04, 0x00000001 },
+ { 0x00009b08, 0x00000002 },
+ { 0x00009b0c, 0x00000003 },
+ { 0x00009b10, 0x00000004 },
+ { 0x00009b14, 0x00000005 },
+ { 0x00009b18, 0x00000008 },
+ { 0x00009b1c, 0x00000009 },
+ { 0x00009b20, 0x0000000a },
+ { 0x00009b24, 0x0000000b },
+ { 0x00009b28, 0x0000000c },
+ { 0x00009b2c, 0x0000000d },
+ { 0x00009b30, 0x00000010 },
+ { 0x00009b34, 0x00000011 },
+ { 0x00009b38, 0x00000012 },
+ { 0x00009b3c, 0x00000013 },
+ { 0x00009b40, 0x00000014 },
+ { 0x00009b44, 0x00000015 },
+ { 0x00009b48, 0x00000018 },
+ { 0x00009b4c, 0x00000019 },
+ { 0x00009b50, 0x0000001a },
+ { 0x00009b54, 0x0000001b },
+ { 0x00009b58, 0x0000001c },
+ { 0x00009b5c, 0x0000001d },
+ { 0x00009b60, 0x00000020 },
+ { 0x00009b64, 0x00000021 },
+ { 0x00009b68, 0x00000022 },
+ { 0x00009b6c, 0x00000023 },
+ { 0x00009b70, 0x00000024 },
+ { 0x00009b74, 0x00000025 },
+ { 0x00009b78, 0x00000028 },
+ { 0x00009b7c, 0x00000029 },
+ { 0x00009b80, 0x0000002a },
+ { 0x00009b84, 0x0000002b },
+ { 0x00009b88, 0x0000002c },
+ { 0x00009b8c, 0x0000002d },
+ { 0x00009b90, 0x00000030 },
+ { 0x00009b94, 0x00000031 },
+ { 0x00009b98, 0x00000032 },
+ { 0x00009b9c, 0x00000033 },
+ { 0x00009ba0, 0x00000034 },
+ { 0x00009ba4, 0x00000035 },
+ { 0x00009ba8, 0x00000035 },
+ { 0x00009bac, 0x00000035 },
+ { 0x00009bb0, 0x00000035 },
+ { 0x00009bb4, 0x00000035 },
+ { 0x00009bb8, 0x00000035 },
+ { 0x00009bbc, 0x00000035 },
+ { 0x00009bc0, 0x00000035 },
+ { 0x00009bc4, 0x00000035 },
+ { 0x00009bc8, 0x00000035 },
+ { 0x00009bcc, 0x00000035 },
+ { 0x00009bd0, 0x00000035 },
+ { 0x00009bd4, 0x00000035 },
+ { 0x00009bd8, 0x00000035 },
+ { 0x00009bdc, 0x00000035 },
+ { 0x00009be0, 0x00000035 },
+ { 0x00009be4, 0x00000035 },
+ { 0x00009be8, 0x00000035 },
+ { 0x00009bec, 0x00000035 },
+ { 0x00009bf0, 0x00000035 },
+ { 0x00009bf4, 0x00000035 },
+ { 0x00009bf8, 0x00000010 },
+ { 0x00009bfc, 0x0000001a },
+ { 0x0000a210, 0x40806333 },
+ { 0x0000a214, 0x00106c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x018830c6 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x001a0bb5 },
+ { 0x0000a22c, 0x00000000 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c889af },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000001 },
+ { 0x0000a250, 0x0000e000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cc75380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000001 },
+ { 0x0000a26c, 0x0ebae9c6 },
+ { 0x0000b26c, 0x0ebae9c6 },
+ { 0x0000c26c, 0x0ebae9c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x050701ce },
+ { 0x0000a338, 0x00000000 },
+ { 0x0000a33c, 0x00000000 },
+ { 0x0000a340, 0x00000000 },
+ { 0x0000a344, 0x00000000 },
+ { 0x0000a348, 0x3fffffff },
+ { 0x0000a34c, 0x3fffffff },
+ { 0x0000a350, 0x3fffffff },
+ { 0x0000a354, 0x0003ffff },
+ { 0x0000a358, 0x79bfaa03 },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x0c000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+};
+
+static const u32 ar5416Bank0_9160[][2] = {
+ { 0x000098b0, 0x1e5795e5 },
+ { 0x000098e0, 0x02008020 },
+};
+
+static const u32 ar5416BB_RfGain_9160[][3] = {
+ { 0x00009a00, 0x00000000, 0x00000000 },
+ { 0x00009a04, 0x00000040, 0x00000040 },
+ { 0x00009a08, 0x00000080, 0x00000080 },
+ { 0x00009a0c, 0x000001a1, 0x00000141 },
+ { 0x00009a10, 0x000001e1, 0x00000181 },
+ { 0x00009a14, 0x00000021, 0x000001c1 },
+ { 0x00009a18, 0x00000061, 0x00000001 },
+ { 0x00009a1c, 0x00000168, 0x00000041 },
+ { 0x00009a20, 0x000001a8, 0x000001a8 },
+ { 0x00009a24, 0x000001e8, 0x000001e8 },
+ { 0x00009a28, 0x00000028, 0x00000028 },
+ { 0x00009a2c, 0x00000068, 0x00000068 },
+ { 0x00009a30, 0x00000189, 0x000000a8 },
+ { 0x00009a34, 0x000001c9, 0x00000169 },
+ { 0x00009a38, 0x00000009, 0x000001a9 },
+ { 0x00009a3c, 0x00000049, 0x000001e9 },
+ { 0x00009a40, 0x00000089, 0x00000029 },
+ { 0x00009a44, 0x00000170, 0x00000069 },
+ { 0x00009a48, 0x000001b0, 0x00000190 },
+ { 0x00009a4c, 0x000001f0, 0x000001d0 },
+ { 0x00009a50, 0x00000030, 0x00000010 },
+ { 0x00009a54, 0x00000070, 0x00000050 },
+ { 0x00009a58, 0x00000191, 0x00000090 },
+ { 0x00009a5c, 0x000001d1, 0x00000151 },
+ { 0x00009a60, 0x00000011, 0x00000191 },
+ { 0x00009a64, 0x00000051, 0x000001d1 },
+ { 0x00009a68, 0x00000091, 0x00000011 },
+ { 0x00009a6c, 0x000001b8, 0x00000051 },
+ { 0x00009a70, 0x000001f8, 0x00000198 },
+ { 0x00009a74, 0x00000038, 0x000001d8 },
+ { 0x00009a78, 0x00000078, 0x00000018 },
+ { 0x00009a7c, 0x00000199, 0x00000058 },
+ { 0x00009a80, 0x000001d9, 0x00000098 },
+ { 0x00009a84, 0x00000019, 0x00000159 },
+ { 0x00009a88, 0x00000059, 0x00000199 },
+ { 0x00009a8c, 0x00000099, 0x000001d9 },
+ { 0x00009a90, 0x000000d9, 0x00000019 },
+ { 0x00009a94, 0x000000f9, 0x00000059 },
+ { 0x00009a98, 0x000000f9, 0x00000099 },
+ { 0x00009a9c, 0x000000f9, 0x000000d9 },
+ { 0x00009aa0, 0x000000f9, 0x000000f9 },
+ { 0x00009aa4, 0x000000f9, 0x000000f9 },
+ { 0x00009aa8, 0x000000f9, 0x000000f9 },
+ { 0x00009aac, 0x000000f9, 0x000000f9 },
+ { 0x00009ab0, 0x000000f9, 0x000000f9 },
+ { 0x00009ab4, 0x000000f9, 0x000000f9 },
+ { 0x00009ab8, 0x000000f9, 0x000000f9 },
+ { 0x00009abc, 0x000000f9, 0x000000f9 },
+ { 0x00009ac0, 0x000000f9, 0x000000f9 },
+ { 0x00009ac4, 0x000000f9, 0x000000f9 },
+ { 0x00009ac8, 0x000000f9, 0x000000f9 },
+ { 0x00009acc, 0x000000f9, 0x000000f9 },
+ { 0x00009ad0, 0x000000f9, 0x000000f9 },
+ { 0x00009ad4, 0x000000f9, 0x000000f9 },
+ { 0x00009ad8, 0x000000f9, 0x000000f9 },
+ { 0x00009adc, 0x000000f9, 0x000000f9 },
+ { 0x00009ae0, 0x000000f9, 0x000000f9 },
+ { 0x00009ae4, 0x000000f9, 0x000000f9 },
+ { 0x00009ae8, 0x000000f9, 0x000000f9 },
+ { 0x00009aec, 0x000000f9, 0x000000f9 },
+ { 0x00009af0, 0x000000f9, 0x000000f9 },
+ { 0x00009af4, 0x000000f9, 0x000000f9 },
+ { 0x00009af8, 0x000000f9, 0x000000f9 },
+ { 0x00009afc, 0x000000f9, 0x000000f9 },
+};
+
+static const u32 ar5416Bank1_9160[][2] = {
+ { 0x000098b0, 0x02108421 },
+ { 0x000098ec, 0x00000008 },
+};
+
+static const u32 ar5416Bank2_9160[][2] = {
+ { 0x000098b0, 0x0e73ff17 },
+ { 0x000098e0, 0x00000420 },
+};
+
+static const u32 ar5416Bank3_9160[][3] = {
+ { 0x000098f0, 0x01400018, 0x01c00018 },
+};
+
+static const u32 ar5416Bank6_9160[][3] = {
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x004210a2, 0x004210a2 },
+ { 0x0000989c, 0x0014008f, 0x0014008f },
+ { 0x0000989c, 0x00c40003, 0x00c40003 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000f1, 0x000000f1 },
+ { 0x0000989c, 0x00002081, 0x00002081 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank6TPC_9160[][3] = {
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x00423022, 0x00423022 },
+ { 0x0000989c, 0x2014008f, 0x2014008f },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000e1, 0x000000e1 },
+ { 0x0000989c, 0x00007080, 0x00007080 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank7_9160[][2] = {
+ { 0x0000989c, 0x00000500 },
+ { 0x0000989c, 0x00000800 },
+ { 0x000098cc, 0x0000000e },
+};
+
+static const u32 ar5416Addac_9160[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000018 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000019 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000003 },
+ {0x0000989c, 0x00000008 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
+static const u32 ar5416Addac_91601_1[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000018 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000019 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
new file mode 100644
index 0000000..5fdbb53
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -0,0 +1,1000 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9002_phy.h"
+
+#define AR9285_CLCAL_REDO_THRESH 1
+
+static void ar9002_hw_setup_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
+ currCal->calData->calCountMax);
+
+ switch (currCal->calData->calType) {
+ case IQ_MISMATCH_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting IQ Mismatch Calibration\n");
+ break;
+ case ADC_GAIN_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting ADC Gain Calibration\n");
+ break;
+ case ADC_DC_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting ADC DC Calibration\n");
+ break;
+ case ADC_DC_INIT_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting Init ADC DC Calibration\n");
+ break;
+ case TEMP_COMP_CAL:
+ break; /* Not supported */
+ }
+
+ REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_DO_CAL);
+}
+
+static bool ar9002_hw_per_calibration(struct ath_hw *ah,
+ struct ath9k_channel *ichan,
+ u8 rxchainmask,
+ struct ath9k_cal_list *currCal)
+{
+ bool iscaldone = false;
+
+ if (currCal->calState == CAL_RUNNING) {
+ if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
+ AR_PHY_TIMING_CTRL4_DO_CAL)) {
+
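+ /*
+ * The DO_CAL bit has cleared, so the hardware has finished one
+ * measurement pass: accumulate it and either post-process the
+ * results or kick off the next pass.
+ */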
+ currCal->calData->calCollect(ah);
+ ah->cal_samples++;
+
+ if (ah->cal_samples >=
+ currCal->calData->calNumSamples) {
+ int i, numChains = 0;
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ if (rxchainmask & (1 << i))
+ numChains++;
+ }
+
+ currCal->calData->calPostProc(ah, numChains);
+ ichan->CalValid |= currCal->calData->calType;
+ currCal->calState = CAL_DONE;
+ iscaldone = true;
+ } else {
+ ar9002_hw_setup_calibration(ah, currCal);
+ }
+ }
+ } else if (!(ichan->CalValid & currCal->calData->calType)) {
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+
+ return iscaldone;
+}
+
+/* Assumes you are talking about the currently configured channel */
+static bool ar9002_hw_iscal_supported(struct ath_hw *ah,
+ enum ath9k_cal_types calType)
+{
+ struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
+
+ switch (calType & ah->supp_cals) {
+ case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
+ return true;
+ case ADC_GAIN_CAL:
+ case ADC_DC_CAL:
+ if (!(conf->channel->band == IEEE80211_BAND_2GHZ &&
+ conf_is_ht20(conf)))
+ return true;
+ break;
+ }
+ return false;
+}
+
+static void ar9002_hw_iqcal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalPowerMeasI[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalPowerMeasQ[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalIqCorrMeas[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
+ ah->cal_samples, i, ah->totalPowerMeasI[i],
+ ah->totalPowerMeasQ[i],
+ ah->totalIqCorrMeas[i]);
+ }
+}
+
+static void ar9002_hw_adc_gaincal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalAdcIOddPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalAdcIEvenPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalAdcQOddPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ah->totalAdcQEvenPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
+ "oddq=0x%08x; evenq=0x%08x;\n",
+ ah->cal_samples, i,
+ ah->totalAdcIOddPhase[i],
+ ah->totalAdcIEvenPhase[i],
+ ah->totalAdcQOddPhase[i],
+ ah->totalAdcQEvenPhase[i]);
+ }
+}
+
+static void ar9002_hw_adc_dccal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalAdcDcOffsetIOddPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalAdcDcOffsetIEvenPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalAdcDcOffsetQOddPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ah->totalAdcDcOffsetQEvenPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
+ "oddq=0x%08x; evenq=0x%08x;\n",
+ ah->cal_samples, i,
+ ah->totalAdcDcOffsetIOddPhase[i],
+ ah->totalAdcDcOffsetIEvenPhase[i],
+ ah->totalAdcDcOffsetQOddPhase[i],
+ ah->totalAdcDcOffsetQEvenPhase[i]);
+ }
+}
+
+static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 powerMeasQ, powerMeasI, iqCorrMeas;
+ u32 qCoffDenom, iCoffDenom;
+ int32_t qCoff, iCoff;
+ int iqCorrNeg, i;
+
+ for (i = 0; i < numChains; i++) {
+ powerMeasI = ah->totalPowerMeasI[i];
+ powerMeasQ = ah->totalPowerMeasQ[i];
+ iqCorrMeas = ah->totalIqCorrMeas[i];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Starting IQ Cal and Correction for Chain %d\n",
+ i);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+ i, ah->totalIqCorrMeas[i]);
+
+ iqCorrNeg = 0;
+
+ if (iqCorrMeas > 0x80000000) {
+ iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
+ iqCorrNeg = 1;
+ }
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
+ ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
+ iqCorrNeg);
+
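+ /*
+ * Derive the correction coefficients: iCoff scales the measured
+ * I/Q correlation against the average of the I and Q power,
+ * while qCoff compares the I power against the Q power; both
+ * are clamped below to fit the IQCORR register fields.
+ */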
+ iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
+ qCoffDenom = powerMeasQ / 64;
+
+ if ((powerMeasQ != 0) && (iCoffDenom != 0) &&
+ (qCoffDenom != 0)) {
+ iCoff = iqCorrMeas / iCoffDenom;
+ qCoff = powerMeasI / qCoffDenom - 64;
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d iCoff = 0x%08x\n", i, iCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d qCoff = 0x%08x\n", i, qCoff);
+
+ iCoff = iCoff & 0x3f;
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
+ if (iqCorrNeg == 0x0)
+ iCoff = 0x40 - iCoff;
+
+ if (qCoff > 15)
+ qCoff = 15;
+ else if (qCoff <= -16)
+ qCoff = -16;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
+ i, iCoff, qCoff);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
+ iCoff);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
+ qCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ Cal and Correction done for Chain %d\n",
+ i);
+ }
+ }
+
+ REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
+}
+
+static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
+ u32 qGainMismatch, iGainMismatch, val, i;
+
+ for (i = 0; i < numChains; i++) {
+ iOddMeasOffset = ah->totalAdcIOddPhase[i];
+ iEvenMeasOffset = ah->totalAdcIEvenPhase[i];
+ qOddMeasOffset = ah->totalAdcQOddPhase[i];
+ qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Starting ADC Gain Cal for Chain %d\n", i);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
+ iOddMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_i = 0x%08x\n", i,
+ iEvenMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
+ qOddMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_q = 0x%08x\n", i,
+ qEvenMeasOffset);
+
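+ /*
+ * Gain mismatch is the even/odd (for I) and odd/even (for Q)
+ * phase power ratio, scaled by 32 and truncated to the 6-bit
+ * correction fields of AR_PHY_NEW_ADC_DC_GAIN_CORR.
+ */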
+ if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
+ iGainMismatch =
+ ((iEvenMeasOffset * 32) /
+ iOddMeasOffset) & 0x3f;
+ qGainMismatch =
+ ((qOddMeasOffset * 32) /
+ qEvenMeasOffset) & 0x3f;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d gain_mismatch_i = 0x%08x\n", i,
+ iGainMismatch);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d gain_mismatch_q = 0x%08x\n", i,
+ qGainMismatch);
+
+ val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
+ val &= 0xfffff000;
+ val |= (qGainMismatch) | (iGainMismatch << 6);
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "ADC Gain Cal done for Chain %d\n", i);
+ }
+ }
+
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
+ REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
+ AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
+}
+
+static void ar9002_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 iOddMeasOffset, iEvenMeasOffset, val, i;
+ int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
+ const struct ath9k_percal_data *calData =
+ ah->cal_list_curr->calData;
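+ /*
+ * numSamples reflects 2^(calCountMax + 5) hardware measurements
+ * per collected sample, times the number of samples collected;
+ * the DC sums below are averaged over this total.
+ */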
+ u32 numSamples =
+ (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
+
+ for (i = 0; i < numChains; i++) {
+ iOddMeasOffset = ah->totalAdcDcOffsetIOddPhase[i];
+ iEvenMeasOffset = ah->totalAdcDcOffsetIEvenPhase[i];
+ qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
+ qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Starting ADC DC Offset Cal for Chain %d\n", i);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_i = %d\n", i,
+ iOddMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_i = %d\n", i,
+ iEvenMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_q = %d\n", i,
+ qOddMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_q = %d\n", i,
+ qEvenMeasOffset);
+
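+ /*
+ * The DC mismatch is the even/odd phase difference averaged
+ * over all samples (each phase accumulates half of numSamples,
+ * hence the factor of 2), masked to the 9-bit offset fields.
+ */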
+ iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
+ numSamples) & 0x1ff;
+ qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
+ numSamples) & 0x1ff;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
+ iDcMismatch);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
+ qDcMismatch);
+
+ val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
+ val &= 0xc0000fff;
+ val |= (qDcMismatch << 12) | (iDcMismatch << 21);
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "ADC DC Offset Cal done for Chain %d\n", i);
+ }
+
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
+ REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
+ AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
+}
+
+static void ar9287_hw_olc_temp_compensation(struct ath_hw *ah)
+{
+ u32 rddata;
+ int32_t delta, currPDADC, slope;
+
+ rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
+ currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
+
+ if (ah->initPDADC == 0 || currPDADC == 0) {
+ /*
+ * A zero value indicates that no frames have been transmitted
+ * yet; temperature compensation cannot be done until frames
+ * have been transmitted.
+ */
+ return;
+ } else {
+ slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
+
+ if (slope == 0) { /* to avoid divide by zero case */
+ delta = 0;
+ } else {
+ delta = ((currPDADC - ah->initPDADC)*4) / slope;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
+ AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
+ REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
+ AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
+ }
+}
+
+static void ar9280_hw_olc_temp_compensation(struct ath_hw *ah)
+{
+ u32 rddata, i;
+ int delta, currPDADC, regval;
+
+ rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
+ currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
+
+ if (ah->initPDADC == 0 || currPDADC == 0)
+ return;
+
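+ /*
+ * Convert the PDADC drift since initialization into a tx gain
+ * table delta; the divisor (8 or 10, with rounding to nearest)
+ * depends on the EEP_DAC_HPWR_5G EEPROM setting.
+ */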
+ if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
+ delta = (currPDADC - ah->initPDADC + 4) / 8;
+ else
+ delta = (currPDADC - ah->initPDADC + 5) / 10;
+
+ if (delta != ah->PDADCdelta) {
+ ah->PDADCdelta = delta;
+ for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
+ regval = ah->originalGain[i] - delta;
+ if (regval < 0)
+ regval = 0;
+
+ REG_RMW_FIELD(ah,
+ AR_PHY_TX_GAIN_TBL1 + i * 4,
+ AR_PHY_TX_GAIN, regval);
+ }
+ }
+}
+
+static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
+{
+ u32 regVal;
+ unsigned int i;
+ u32 regList[][2] = {
+ { 0x786c, 0 },
+ { 0x7854, 0 },
+ { 0x7820, 0 },
+ { 0x7824, 0 },
+ { 0x7868, 0 },
+ { 0x783c, 0 },
+ { 0x7838, 0 } ,
+ { 0x7828, 0 } ,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ regList[i][1] = REG_READ(ah, regList[i][0]);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal &= (~(0x1));
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal |= (0x1 << 27);
+ REG_WRITE(ah, 0x9808, regVal);
+
+ /* 786c,b23,1, pwddac=1 */
+ REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
+ /* 7854, b5,1, pdrxtxbb=1 */
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
+ /* 7854, b7,1, pdv2i=1 */
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
+ /* 7854, b8,1, pddacinterface=1 */
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
+ /* 7824,b12,0, offcal=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
+ /* 7838, b1,0, pwddb=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
+ /* 7820,b11,0, enpacal=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
+ /* 7820,b25,1, pdpadrv1=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
+ /* 7820,b24,0, pdpadrv2=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
+ /* 7820,b23,0, pdpaout=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
+ /* 783c,b14-16,7, padrvgn2tab_0=7 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
+ /*
+ * 7838,b29-31,0, padrvgn1tab_0=0
+ * does not matter since we turn it off
+ */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);
+
+ /* Set:
+ * localmode=1,bmode=1,bmoderxtx=1,synthon=1,
+ * txon=1,paon=1,oscon=1,synthon_force=1
+ */
+ REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
+ udelay(30);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0);
+
+ /* Find off_6_1: test each offset bit in turn and latch the SPARE9 readback into 0x7834 */
+ for (i = 6; i > 0; i--) {
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= (1 << (20 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ udelay(1);
+ /* regVal = REG_READ(ah, 0x7834); */
+ regVal &= (~(0x1 << (20 + i)));
+ regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9)
+ << (20 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ }
+
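+ /* Extract the 7-bit offset (bits 20-26 of 0x7834) determined above */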
+ regVal = (regVal >> 20) & 0x7f;
+
+ /* Update PA cal info */
+ if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
+ if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
+ ah->pacal_info.max_skipcount =
+ 2 * ah->pacal_info.max_skipcount;
+ ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
+ } else {
+ ah->pacal_info.max_skipcount = 1;
+ ah->pacal_info.skipcount = 0;
+ ah->pacal_info.prev_offset = regVal;
+ }
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= 0x1;
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal &= (~(0x1 << 27));
+ REG_WRITE(ah, 0x9808, regVal);
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ REG_WRITE(ah, regList[i][0], regList[i][1]);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
+
+static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 regVal;
+ int i, offset, offs_6_1, offs_0;
+ u32 ccomp_org, reg_field;
+ u32 regList[][2] = {
+ { 0x786c, 0 },
+ { 0x7854, 0 },
+ { 0x7820, 0 },
+ { 0x7824, 0 },
+ { 0x7868, 0 },
+ { 0x783c, 0 },
+ { 0x7838, 0 },
+ };
+
+ ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
+
+ /* PA CAL is not needed for high power solution */
+ if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
+ AR5416_EEP_TXGAIN_HIGH_POWER)
+ return;
+
+ if (AR_SREV_9285_11(ah)) {
+ REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
+ udelay(10);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ regList[i][1] = REG_READ(ah, regList[i][0]);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal &= (~(0x1));
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal |= (0x1 << 27);
+ REG_WRITE(ah, 0x9808, regVal);
+
+ REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
+ ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 0xf);
+
+ REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
+ udelay(30);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0);
+
+ for (i = 6; i > 0; i--) {
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= (1 << (19 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ udelay(1);
+ regVal = REG_READ(ah, 0x7834);
+ regVal &= (~(0x1 << (19 + i)));
+ reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
+ regVal |= (reg_field << (19 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ }
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1);
+ udelay(1);
+ reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9), AR9285_AN_RXTXBB1_SPARE9);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, reg_field);
+ offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS);
+ offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP);
+
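+ /*
+ * Combine the 6-bit OFFS field and the PDVCCOMP bit into a
+ * single 7-bit PA offset, then split it again for the final
+ * register writes below.
+ */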
+ offset = (offs_6_1<<1) | offs_0;
+ offset = offset - 0;
+ offs_6_1 = offset>>1;
+ offs_0 = offset & 1;
+
+ if ((!is_reset) && (ah->pacal_info.prev_offset == offset)) {
+ if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
+ ah->pacal_info.max_skipcount =
+ 2 * ah->pacal_info.max_skipcount;
+ ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
+ } else {
+ ah->pacal_info.max_skipcount = 1;
+ ah->pacal_info.skipcount = 0;
+ ah->pacal_info.prev_offset = offset;
+ }
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= 0x1;
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal &= (~(0x1 << 27));
+ REG_WRITE(ah, 0x9808, regVal);
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ REG_WRITE(ah, regList[i][0], regList[i][1]);
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);
+
+ if (AR_SREV_9285_11(ah))
+ REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
+
+}
+
+static void ar9002_hw_pa_cal(struct ath_hw *ah, bool is_reset)
+{
+ if (AR_SREV_9271(ah)) {
+ if (is_reset || !ah->pacal_info.skipcount)
+ ar9271_hw_pa_cal(ah, is_reset);
+ else
+ ah->pacal_info.skipcount--;
+ } else if (AR_SREV_9285_11_OR_LATER(ah)) {
+ if (is_reset || !ah->pacal_info.skipcount)
+ ar9285_hw_pa_cal(ah, is_reset);
+ else
+ ah->pacal_info.skipcount--;
+ }
+}
+
+static void ar9002_hw_olc_temp_compensation(struct ath_hw *ah)
+{
+ if (OLC_FOR_AR9287_10_LATER)
+ ar9287_hw_olc_temp_compensation(ah);
+ else if (OLC_FOR_AR9280_20_LATER)
+ ar9280_hw_olc_temp_compensation(ah);
+}
+
+static bool ar9002_hw_calibrate(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal)
+{
+ bool iscaldone = true;
+ struct ath9k_cal_list *currCal = ah->cal_list_curr;
+
+ if (currCal &&
+ (currCal->calState == CAL_RUNNING ||
+ currCal->calState == CAL_WAITING)) {
+ iscaldone = ar9002_hw_per_calibration(ah, chan,
+ rxchainmask, currCal);
+ if (iscaldone) {
+ ah->cal_list_curr = currCal = currCal->calNext;
+
+ if (currCal->calState == CAL_WAITING) {
+ iscaldone = false;
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+ }
+ }
+
+ /* Do NF cal only at longer intervals */
+ if (longcal) {
+ /* Do periodic PAOffset Cal */
+ ar9002_hw_pa_cal(ah, false);
+ ar9002_hw_olc_temp_compensation(ah);
+
+ /*
+ * Get the value from the previous NF cal and update
+ * history buffer.
+ */
+ ath9k_hw_getnf(ah, chan);
+
+ /*
+ * Load the NF from history buffer of the current channel.
+ * NF is slow time-variant, so it is OK to use a historical
+ * value.
+ */
+ ath9k_hw_loadnf(ah, ah->curchan);
+
+ ath9k_hw_start_nfcal(ah);
+ }
+
+ return iscaldone;
+}
+
+/* Carrier leakage Calibration fix */
+static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ if (IS_CHAN_HT20(chan)) {
+ REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE, "offset "
+ "calibration failed to complete in "
+ "1ms; noisy ??\n");
+ return false;
+ }
+ REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ }
+ REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE, "offset calibration "
+ "failed to complete in 1ms; noisy ??\n");
+ return false;
+ }
+
+ REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+
+ return true;
+}
+
+static bool ar9285_hw_clc(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ int i;
+ u_int32_t txgain_max;
+ u_int32_t clc_gain, gain_mask = 0, clc_num = 0;
+ u_int32_t reg_clc_I0, reg_clc_Q0;
+ u_int32_t i0_num = 0;
+ u_int32_t q0_num = 0;
+ u_int32_t total_num = 0;
+ u_int32_t reg_rf2g5_org;
+ bool retv = true;
+
+ if (!(ar9285_hw_cl_cal(ah, chan)))
+ return false;
+
+ txgain_max = MS(REG_READ(ah, AR_PHY_TX_PWRCTRL7),
+ AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX);
+
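+ /*
+ * Count the distinct carrier-leak gain indices referenced by
+ * the tx gain table; that many CLC table entries are then
+ * checked for zero I0/Q0 results below.
+ */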
+ for (i = 0; i < (txgain_max+1); i++) {
+ clc_gain = (REG_READ(ah, (AR_PHY_TX_GAIN_TBL1+(i<<2))) &
+ AR_PHY_TX_GAIN_CLC) >> AR_PHY_TX_GAIN_CLC_S;
+ if (!(gain_mask & (1 << clc_gain))) {
+ gain_mask |= (1 << clc_gain);
+ clc_num++;
+ }
+ }
+
+ for (i = 0; i < clc_num; i++) {
+ reg_clc_I0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
+ & AR_PHY_CLC_I0) >> AR_PHY_CLC_I0_S;
+ reg_clc_Q0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
+ & AR_PHY_CLC_Q0) >> AR_PHY_CLC_Q0_S;
+ if (reg_clc_I0 == 0)
+ i0_num++;
+
+ if (reg_clc_Q0 == 0)
+ q0_num++;
+ }
+ total_num = i0_num + q0_num;
+ if (total_num > AR9285_CLCAL_REDO_THRESH) {
+ reg_rf2g5_org = REG_READ(ah, AR9285_RF2G5);
+ if (AR_SREV_9285E_20(ah)) {
+ REG_WRITE(ah, AR9285_RF2G5,
+ (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
+ AR9285_RF2G5_IC50TX_XE_SET);
+ } else {
+ REG_WRITE(ah, AR9285_RF2G5,
+ (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
+ AR9285_RF2G5_IC50TX_SET);
+ }
+ retv = ar9285_hw_cl_cal(ah, chan);
+ REG_WRITE(ah, AR9285_RF2G5, reg_rf2g5_org);
+ }
+ return retv;
+}
+
+static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
+ if (!ar9285_hw_clc(ah, chan))
+ return false;
+ } else {
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (!AR_SREV_9287_10_OR_LATER(ah))
+ REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
+ AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ }
+
+ /* Calibrate the AGC */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ /* Poll for offset calibration complete */
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "offset calibration failed to "
+ "complete in 1ms; noisy environment?\n");
+ return false;
+ }
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (!AR_SREV_9287_10_OR_LATER(ah))
+ REG_SET_BIT(ah, AR_PHY_ADC_CTL,
+ AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ }
+ }
+
+ /* Do PA Calibration */
+ ar9002_hw_pa_cal(ah, true);
+
+ /* Do NF Calibration after DC offset and other calibrations */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF);
+
+ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+
+ /* Enable IQ, ADC Gain and ADC DC offset CALs */
+ if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
+ if (ar9002_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
+ INIT_CAL(&ah->adcgain_caldata);
+ INSERT_CAL(ah, &ah->adcgain_caldata);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling ADC Gain Calibration.\n");
+ }
+ if (ar9002_hw_iscal_supported(ah, ADC_DC_CAL)) {
+ INIT_CAL(&ah->adcdc_caldata);
+ INSERT_CAL(ah, &ah->adcdc_caldata);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling ADC DC Calibration.\n");
+ }
+ if (ar9002_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling IQ Calibration.\n");
+ }
+
+ ah->cal_list_curr = ah->cal_list;
+
+ if (ah->cal_list_curr)
+ ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
+ }
+
+ chan->CalValid = 0;
+
+ return true;
+}
+
+static const struct ath9k_percal_data iq_cal_multi_sample = {
+ IQ_MISMATCH_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ar9002_hw_iqcal_collect,
+ ar9002_hw_iqcalibrate
+};
+static const struct ath9k_percal_data iq_cal_single_sample = {
+ IQ_MISMATCH_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9002_hw_iqcal_collect,
+ ar9002_hw_iqcalibrate
+};
+static const struct ath9k_percal_data adc_gain_cal_multi_sample = {
+ ADC_GAIN_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ar9002_hw_adc_gaincal_collect,
+ ar9002_hw_adc_gaincal_calibrate
+};
+static const struct ath9k_percal_data adc_gain_cal_single_sample = {
+ ADC_GAIN_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9002_hw_adc_gaincal_collect,
+ ar9002_hw_adc_gaincal_calibrate
+};
+static const struct ath9k_percal_data adc_dc_cal_multi_sample = {
+ ADC_DC_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ar9002_hw_adc_dccal_collect,
+ ar9002_hw_adc_dccal_calibrate
+};
+static const struct ath9k_percal_data adc_dc_cal_single_sample = {
+ ADC_DC_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9002_hw_adc_dccal_collect,
+ ar9002_hw_adc_dccal_calibrate
+};
+static const struct ath9k_percal_data adc_init_dc_cal = {
+ ADC_DC_INIT_CAL,
+ MIN_CAL_SAMPLES,
+ INIT_LOG_COUNT,
+ ar9002_hw_adc_dccal_collect,
+ ar9002_hw_adc_dccal_calibrate
+};
+
+static void ar9002_hw_init_cal_settings(struct ath_hw *ah)
+{
+ if (AR_SREV_9100(ah)) {
+ ah->iq_caldata.calData = &iq_cal_multi_sample;
+ ah->supp_cals = IQ_MISMATCH_CAL;
+ return;
+ }
+
+ if (AR_SREV_9160_10_OR_LATER(ah)) {
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ ah->iq_caldata.calData = &iq_cal_single_sample;
+ ah->adcgain_caldata.calData =
+ &adc_gain_cal_single_sample;
+ ah->adcdc_caldata.calData =
+ &adc_dc_cal_single_sample;
+ ah->adcdc_calinitdata.calData =
+ &adc_init_dc_cal;
+ } else {
+ ah->iq_caldata.calData = &iq_cal_multi_sample;
+ ah->adcgain_caldata.calData =
+ &adc_gain_cal_multi_sample;
+ ah->adcdc_caldata.calData =
+ &adc_dc_cal_multi_sample;
+ ah->adcdc_calinitdata.calData =
+ &adc_init_dc_cal;
+ }
+ ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
+ }
+}
+
+void ar9002_hw_attach_calib_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_cal_settings = ar9002_hw_init_cal_settings;
+ priv_ops->init_cal = ar9002_hw_init_cal;
+ priv_ops->setup_calibration = ar9002_hw_setup_calibration;
+ priv_ops->iscal_supported = ar9002_hw_iscal_supported;
+
+ ops->calibrate = ar9002_hw_calibrate;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
new file mode 100644
index 0000000..a8a8cdc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -0,0 +1,598 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar5008_initvals.h"
+#include "ar9001_initvals.h"
+#include "ar9002_initvals.h"
+
+/* General hardware code for the AR5008/AR9001/AR9002 hardware families */
+
+static bool ar9002_hw_macversion_supported(u32 macversion)
+{
+ switch (macversion) {
+ case AR_SREV_VERSION_5416_PCI:
+ case AR_SREV_VERSION_5416_PCIE:
+ case AR_SREV_VERSION_9160:
+ case AR_SREV_VERSION_9100:
+ case AR_SREV_VERSION_9280:
+ case AR_SREV_VERSION_9285:
+ case AR_SREV_VERSION_9287:
+ case AR_SREV_VERSION_9271:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
+{
+ if (AR_SREV_9271(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
+ ARRAY_SIZE(ar9271Modes_9271), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
+ ARRAY_SIZE(ar9271Common_9271), 2);
+ INIT_INI_ARRAY(&ah->iniCommon_normal_cck_fir_coeff_9271,
+ ar9271Common_normal_cck_fir_coeff_9271,
+ ARRAY_SIZE(ar9271Common_normal_cck_fir_coeff_9271), 2);
+ INIT_INI_ARRAY(&ah->iniCommon_japan_2484_cck_fir_coeff_9271,
+ ar9271Common_japan_2484_cck_fir_coeff_9271,
+ ARRAY_SIZE(ar9271Common_japan_2484_cck_fir_coeff_9271), 2);
+ INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
+ ar9271Modes_9271_1_0_only,
+ ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
+ INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg,
+ ARRAY_SIZE(ar9271Modes_9271_ANI_reg), 6);
+ INIT_INI_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
+ ar9271Modes_high_power_tx_gain_9271,
+ ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 6);
+ INIT_INI_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
+ ar9271Modes_normal_power_tx_gain_9271,
+ ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 6);
+ return;
+ }
+
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
+ ARRAY_SIZE(ar9287Modes_9287_1_1), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
+ ARRAY_SIZE(ar9287Common_9287_1_1), 2);
+ if (ah->config.pcie_clock_req)
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9287PciePhy_clkreq_off_L1_9287_1_1,
+ ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2);
+ else
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9287PciePhy_clkreq_always_on_L1_9287_1_1,
+ ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1),
+ 2);
+ } else if (AR_SREV_9287_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0,
+ ARRAY_SIZE(ar9287Modes_9287_1_0), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0,
+ ARRAY_SIZE(ar9287Common_9287_1_0), 2);
+
+ if (ah->config.pcie_clock_req)
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9287PciePhy_clkreq_off_L1_9287_1_0,
+ ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2);
+ else
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9287PciePhy_clkreq_always_on_L1_9287_1_0,
+ ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0),
+ 2);
+ } else if (AR_SREV_9285_12_OR_LATER(ah)) {
+
+
+ INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
+ ARRAY_SIZE(ar9285Modes_9285_1_2), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
+ ARRAY_SIZE(ar9285Common_9285_1_2), 2);
+
+ if (ah->config.pcie_clock_req) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9285PciePhy_clkreq_off_L1_9285_1_2,
+ ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9285PciePhy_clkreq_always_on_L1_9285_1_2,
+ ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
+ 2);
+ }
+ } else if (AR_SREV_9285_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285,
+ ARRAY_SIZE(ar9285Modes_9285), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285,
+ ARRAY_SIZE(ar9285Common_9285), 2);
+
+ if (ah->config.pcie_clock_req) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9285PciePhy_clkreq_off_L1_9285,
+ ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9285PciePhy_clkreq_always_on_L1_9285,
+ ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2);
+ }
+ } else if (AR_SREV_9280_20_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
+ ARRAY_SIZE(ar9280Modes_9280_2), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
+ ARRAY_SIZE(ar9280Common_9280_2), 2);
+
+ if (ah->config.pcie_clock_req) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9280PciePhy_clkreq_off_L1_9280,
+ ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9280PciePhy_clkreq_always_on_L1_9280,
+ ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
+ }
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9280Modes_fast_clock_9280_2,
+ ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
+ } else if (AR_SREV_9280_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280,
+ ARRAY_SIZE(ar9280Modes_9280), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280,
+ ARRAY_SIZE(ar9280Common_9280), 2);
+ } else if (AR_SREV_9160_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
+ ARRAY_SIZE(ar5416Modes_9160), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
+ ARRAY_SIZE(ar5416Common_9160), 2);
+ INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160,
+ ARRAY_SIZE(ar5416Bank0_9160), 2);
+ INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160,
+ ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
+ INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160,
+ ARRAY_SIZE(ar5416Bank1_9160), 2);
+ INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160,
+ ARRAY_SIZE(ar5416Bank2_9160), 2);
+ INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160,
+ ARRAY_SIZE(ar5416Bank3_9160), 3);
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160,
+ ARRAY_SIZE(ar5416Bank6_9160), 3);
+ INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160,
+ ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
+ INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160,
+ ARRAY_SIZE(ar5416Bank7_9160), 2);
+ if (AR_SREV_9160_11(ah)) {
+ INIT_INI_ARRAY(&ah->iniAddac,
+ ar5416Addac_91601_1,
+ ARRAY_SIZE(ar5416Addac_91601_1), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
+ ARRAY_SIZE(ar5416Addac_9160), 2);
+ }
+ } else if (AR_SREV_9100_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
+ ARRAY_SIZE(ar5416Modes_9100), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
+ ARRAY_SIZE(ar5416Common_9100), 2);
+ INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100,
+ ARRAY_SIZE(ar5416Bank0_9100), 2);
+ INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100,
+ ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100,
+ ARRAY_SIZE(ar5416Bank1_9100), 2);
+ INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100,
+ ARRAY_SIZE(ar5416Bank2_9100), 2);
+ INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100,
+ ARRAY_SIZE(ar5416Bank3_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
+ ARRAY_SIZE(ar5416Bank6_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
+ ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100,
+ ARRAY_SIZE(ar5416Bank7_9100), 2);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
+ ARRAY_SIZE(ar5416Addac_9100), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
+ ARRAY_SIZE(ar5416Modes), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
+ ARRAY_SIZE(ar5416Common), 2);
+ INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
+ ARRAY_SIZE(ar5416Bank0), 2);
+ INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
+ ARRAY_SIZE(ar5416BB_RfGain), 3);
+ INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
+ ARRAY_SIZE(ar5416Bank1), 2);
+ INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
+ ARRAY_SIZE(ar5416Bank2), 2);
+ INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
+ ARRAY_SIZE(ar5416Bank3), 3);
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
+ ARRAY_SIZE(ar5416Bank6), 3);
+ INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
+ ARRAY_SIZE(ar5416Bank6TPC), 3);
+ INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
+ ARRAY_SIZE(ar5416Bank7), 2);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
+ ARRAY_SIZE(ar5416Addac), 2);
+ }
+}
+
+/* Support for Japan ch.14 (2484 MHz) spread */
+void ar9002_hw_cck_chan14_spread(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniCckfirNormal,
+ ar9287Common_normal_cck_fir_coeff_92871_1,
+ ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1),
+ 2);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9287Common_japan_2484_cck_fir_coeff_92871_1,
+ ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1),
+ 2);
+ }
+}
+
+static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
+{
+ u32 rxgain_type;
+
+ if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
+ AR5416_EEP_MINOR_VER_17) {
+ rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
+
+ if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_backoff_13db_rxgain_9280_2,
+ ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6);
+ else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_backoff_23db_rxgain_9280_2,
+ ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6);
+ else
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_original_rxgain_9280_2,
+ ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_original_rxgain_9280_2,
+ ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
+ }
+}
+
+static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah)
+{
+ u32 txgain_type;
+
+ if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
+ AR5416_EEP_MINOR_VER_19) {
+ txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
+
+ if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9280Modes_high_power_tx_gain_9280_2,
+ ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9280Modes_original_tx_gain_9280_2,
+ ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9280Modes_original_tx_gain_9280_2,
+ ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
+ }
+}
+
+static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_11_OR_LATER(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9287Modes_rx_gain_9287_1_1,
+ ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
+ else if (AR_SREV_9287_10(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9287Modes_rx_gain_9287_1_0,
+ ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6);
+ else if (AR_SREV_9280_20(ah))
+ ar9280_20_hw_init_rxgain_ini(ah);
+
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9287Modes_tx_gain_9287_1_1,
+ ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
+ } else if (AR_SREV_9287_10(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9287Modes_tx_gain_9287_1_0,
+ ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6);
+ } else if (AR_SREV_9280_20(ah)) {
+ ar9280_20_hw_init_txgain_ini(ah);
+ } else if (AR_SREV_9285_12_OR_LATER(ah)) {
+ u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
+
+ /* txgain table */
+ if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
+ if (AR_SREV_9285E_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_XE2_0_high_power,
+ ARRAY_SIZE(
+ ar9285Modes_XE2_0_high_power), 6);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_high_power_tx_gain_9285_1_2,
+ ARRAY_SIZE(
+ ar9285Modes_high_power_tx_gain_9285_1_2), 6);
+ }
+ } else {
+ if (AR_SREV_9285E_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_XE2_0_normal_power,
+ ARRAY_SIZE(
+ ar9285Modes_XE2_0_normal_power), 6);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_original_tx_gain_9285_1_2,
+ ARRAY_SIZE(
+ ar9285Modes_original_tx_gain_9285_1_2), 6);
+ }
+ }
+ }
+}
+
+/*
+ * Helper for ASPM support.
+ *
+ * Disable PLL when in L0s as well as receiver clock when in L1.
+ * This power saving option must be enabled through the SerDes.
+ *
+ * Programming the SerDes must go through the same 288-bit serial shift
+ * register as the other analog registers. Hence the 9 writes.
+ */
+static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
+ int restore,
+ int power_off)
+{
+ u8 i;
+ u32 val;
+
+ if (ah->is_pciexpress != true)
+ return;
+
+ /* Do not touch SerDes registers */
+ if (ah->config.pcie_powersave_enable == 2)
+ return;
+
+ /* Nothing to do on restore for 11N */
+ if (!restore) {
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ /*
+ * AR9280 2.0 or later chips use the SerDes values from
+ * initvals.h, initialized per chipset during
+ * __ath9k_hw_init().
+ */
+ for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
+ REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
+ INI_RA(&ah->iniPcieSerdes, i, 1));
+ }
+ } else if (AR_SREV_9280(ah) &&
+ (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+
+ /* RX shut off when elecidle is asserted */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
+
+ /* Shut off CLKREQ active in L1 */
+ if (ah->config.pcie_clock_req)
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
+ else
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
+
+ /* Load the new settings */
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+
+ } else {
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+
+ /* RX shut off when elecidle is asserted */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
+
+ /*
+ * Ignore ah->ah_config.pcie_clock_req setting for
+ * pre-AR9280 11n
+ */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
+
+ /* Load the new settings */
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+ }
+
+ udelay(1000);
+
+ /* set bit 19 to allow forcing of pcie core into L1 state */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+
+ /* Several PCIe workaround settings to ensure proper behaviour */
+ if (ah->config.pcie_waen) {
+ val = ah->config.pcie_waen;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else {
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
+ AR_SREV_9287(ah)) {
+ val = AR9285_WA_DEFAULT;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else if (AR_SREV_9280(ah)) {
+ /*
+ * On AR9280 chips, bit 22 of 0x4004 needs to be set,
+ * otherwise the card may disappear.
+ */
+ val = AR9280_WA_DEFAULT;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else
+ val = AR_WA_DEFAULT;
+ }
+
+ REG_WRITE(ah, AR_WA, val);
+ }
+
+ if (power_off) {
+ /*
+ * Set PCIe workaround bits
+ * bit 14 in WA register (disable L1) should only
+ * be set when device enters D3 and be cleared
+ * when device comes back to D0.
+ */
+ if (ah->config.pcie_waen) {
+ if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
+ REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
+ } else {
+ if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
+ AR_SREV_9287(ah)) &&
+ (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
+ (AR_SREV_9280(ah) &&
+ (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
+ REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
+ }
+ }
+ }
+}
+
+static int ar9002_hw_get_radiorev(struct ath_hw *ah)
+{
+ u32 val;
+ int i;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
+ for (i = 0; i < 8; i++)
+ REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
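+ /*
+ * Swap the nibbles of the raw value and bit-reverse it to
+ * recover the radio revision.
+ */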
+ val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
+ val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
+
+ return ath9k_hw_reverse_bits(val, 8);
+}
+
+int ar9002_hw_rf_claim(struct ath_hw *ah)
+{
+ u32 val;
+
+ REG_WRITE(ah, AR_PHY(0), 0x00000007);
+
+ val = ar9002_hw_get_radiorev(ah);
+ switch (val & AR_RADIO_SREV_MAJOR) {
+ case 0:
+ val = AR_RAD5133_SREV_MAJOR;
+ break;
+ case AR_RAD5133_SREV_MAJOR:
+ case AR_RAD5122_SREV_MAJOR:
+ case AR_RAD2133_SREV_MAJOR:
+ case AR_RAD2122_SREV_MAJOR:
+ break;
+ default:
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+ "Radio Chip Rev 0x%02X not supported\n",
+ val & AR_RADIO_SREV_MAJOR);
+ return -EOPNOTSUPP;
+ }
+
+ ah->hw_version.analog5GhzRev = val;
+
+ return 0;
+}
+
+/*
+ * Enable ASYNC FIFO
+ *
+ * If Async FIFO is enabled, the following counters change as the MAC now
+ * runs at 117 MHz instead of the 88/44 MHz used when Async FIFO is disabled.
+ *
+ * The values below were tested for HT40 with 2 chains.
+ * They overwrite the delays/timeouts initialized in process ini.
+ */
+void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_12_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
+ AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR);
+ REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
+ AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR);
+ REG_WRITE(ah, AR_D_GBL_IFS_EIFS,
+ AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR);
+
+ REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR);
+ REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR);
+
+ REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
+ AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
+ REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
+ AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
+ }
+}
+
+/*
+ * We don't enable WEP aggregation on mac80211 but we keep this
+ * around for HAL unification purposes.
+ */
+void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_12_OR_LATER(ah)) {
+ REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
+ AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
+ }
+}
+
+/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
+void ar9002_hw_attach_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_mode_regs = ar9002_hw_init_mode_regs;
+ priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs;
+ priv_ops->macversion_supported = ar9002_hw_macversion_supported;
+
+ ops->config_pci_powersave = ar9002_hw_configpcipowersave;
+
+ ar5008_hw_attach_phy_ops(ah);
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ ar9002_hw_attach_phy_ops(ah);
+
+ ar9002_hw_attach_calib_ops(ah);
+ ar9002_hw_attach_mac_ops(ah);
+}
diff --git a/drivers/net/wireless/ath/ath9k/initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index 8a3bf3a..dae7f33 100644
--- a/drivers/net/wireless/ath/ath9k/initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2010 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -14,1982 +14,9 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-static const u32 ar5416Modes[][6] = {
- { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
- { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
- { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
- { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
- { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
- { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
- { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
- { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a },
- { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
- { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
- { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
- { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
- { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
- { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de },
- { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
- { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
- { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
- { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
- { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
- { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
- { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
- { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
- { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
- { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
- { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
- { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
- { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
- { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
- { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
- { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
- { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
- { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c },
- { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
- { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
- { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
- { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
- { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
- { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
- { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
- { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
- { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
- { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
- { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
- { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
- { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
- { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
- { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
- { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
- { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
- { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
-};
-
-static const u32 ar5416Common[][2] = {
- { 0x0000000c, 0x00000000 },
- { 0x00000030, 0x00020015 },
- { 0x00000034, 0x00000005 },
- { 0x00000040, 0x00000000 },
- { 0x00000044, 0x00000008 },
- { 0x00000048, 0x00000008 },
- { 0x0000004c, 0x00000010 },
- { 0x00000050, 0x00000000 },
- { 0x00000054, 0x0000001f },
- { 0x00000800, 0x00000000 },
- { 0x00000804, 0x00000000 },
- { 0x00000808, 0x00000000 },
- { 0x0000080c, 0x00000000 },
- { 0x00000810, 0x00000000 },
- { 0x00000814, 0x00000000 },
- { 0x00000818, 0x00000000 },
- { 0x0000081c, 0x00000000 },
- { 0x00000820, 0x00000000 },
- { 0x00000824, 0x00000000 },
- { 0x00001040, 0x002ffc0f },
- { 0x00001044, 0x002ffc0f },
- { 0x00001048, 0x002ffc0f },
- { 0x0000104c, 0x002ffc0f },
- { 0x00001050, 0x002ffc0f },
- { 0x00001054, 0x002ffc0f },
- { 0x00001058, 0x002ffc0f },
- { 0x0000105c, 0x002ffc0f },
- { 0x00001060, 0x002ffc0f },
- { 0x00001064, 0x002ffc0f },
- { 0x00001230, 0x00000000 },
- { 0x00001270, 0x00000000 },
- { 0x00001038, 0x00000000 },
- { 0x00001078, 0x00000000 },
- { 0x000010b8, 0x00000000 },
- { 0x000010f8, 0x00000000 },
- { 0x00001138, 0x00000000 },
- { 0x00001178, 0x00000000 },
- { 0x000011b8, 0x00000000 },
- { 0x000011f8, 0x00000000 },
- { 0x00001238, 0x00000000 },
- { 0x00001278, 0x00000000 },
- { 0x000012b8, 0x00000000 },
- { 0x000012f8, 0x00000000 },
- { 0x00001338, 0x00000000 },
- { 0x00001378, 0x00000000 },
- { 0x000013b8, 0x00000000 },
- { 0x000013f8, 0x00000000 },
- { 0x00001438, 0x00000000 },
- { 0x00001478, 0x00000000 },
- { 0x000014b8, 0x00000000 },
- { 0x000014f8, 0x00000000 },
- { 0x00001538, 0x00000000 },
- { 0x00001578, 0x00000000 },
- { 0x000015b8, 0x00000000 },
- { 0x000015f8, 0x00000000 },
- { 0x00001638, 0x00000000 },
- { 0x00001678, 0x00000000 },
- { 0x000016b8, 0x00000000 },
- { 0x000016f8, 0x00000000 },
- { 0x00001738, 0x00000000 },
- { 0x00001778, 0x00000000 },
- { 0x000017b8, 0x00000000 },
- { 0x000017f8, 0x00000000 },
- { 0x0000103c, 0x00000000 },
- { 0x0000107c, 0x00000000 },
- { 0x000010bc, 0x00000000 },
- { 0x000010fc, 0x00000000 },
- { 0x0000113c, 0x00000000 },
- { 0x0000117c, 0x00000000 },
- { 0x000011bc, 0x00000000 },
- { 0x000011fc, 0x00000000 },
- { 0x0000123c, 0x00000000 },
- { 0x0000127c, 0x00000000 },
- { 0x000012bc, 0x00000000 },
- { 0x000012fc, 0x00000000 },
- { 0x0000133c, 0x00000000 },
- { 0x0000137c, 0x00000000 },
- { 0x000013bc, 0x00000000 },
- { 0x000013fc, 0x00000000 },
- { 0x0000143c, 0x00000000 },
- { 0x0000147c, 0x00000000 },
- { 0x00004030, 0x00000002 },
- { 0x0000403c, 0x00000002 },
- { 0x00007010, 0x00000000 },
- { 0x00007038, 0x000004c2 },
- { 0x00008004, 0x00000000 },
- { 0x00008008, 0x00000000 },
- { 0x0000800c, 0x00000000 },
- { 0x00008018, 0x00000700 },
- { 0x00008020, 0x00000000 },
- { 0x00008038, 0x00000000 },
- { 0x0000803c, 0x00000000 },
- { 0x00008048, 0x40000000 },
- { 0x00008054, 0x00000000 },
- { 0x00008058, 0x00000000 },
- { 0x0000805c, 0x000fc78f },
- { 0x00008060, 0x0000000f },
- { 0x00008064, 0x00000000 },
- { 0x000080c0, 0x2a82301a },
- { 0x000080c4, 0x05dc01e0 },
- { 0x000080c8, 0x1f402710 },
- { 0x000080cc, 0x01f40000 },
- { 0x000080d0, 0x00001e00 },
- { 0x000080d4, 0x00000000 },
- { 0x000080d8, 0x00400000 },
- { 0x000080e0, 0xffffffff },
- { 0x000080e4, 0x0000ffff },
- { 0x000080e8, 0x003f3f3f },
- { 0x000080ec, 0x00000000 },
- { 0x000080f0, 0x00000000 },
- { 0x000080f4, 0x00000000 },
- { 0x000080f8, 0x00000000 },
- { 0x000080fc, 0x00020000 },
- { 0x00008100, 0x00020000 },
- { 0x00008104, 0x00000001 },
- { 0x00008108, 0x00000052 },
- { 0x0000810c, 0x00000000 },
- { 0x00008110, 0x00000168 },
- { 0x00008118, 0x000100aa },
- { 0x0000811c, 0x00003210 },
- { 0x00008124, 0x00000000 },
- { 0x00008128, 0x00000000 },
- { 0x0000812c, 0x00000000 },
- { 0x00008130, 0x00000000 },
- { 0x00008134, 0x00000000 },
- { 0x00008138, 0x00000000 },
- { 0x0000813c, 0x00000000 },
- { 0x00008144, 0xffffffff },
- { 0x00008168, 0x00000000 },
- { 0x0000816c, 0x00000000 },
- { 0x00008170, 0x32143320 },
- { 0x00008174, 0xfaa4fa50 },
- { 0x00008178, 0x00000100 },
- { 0x0000817c, 0x00000000 },
- { 0x000081c4, 0x00000000 },
- { 0x000081ec, 0x00000000 },
- { 0x000081f0, 0x00000000 },
- { 0x000081f4, 0x00000000 },
- { 0x000081f8, 0x00000000 },
- { 0x000081fc, 0x00000000 },
- { 0x00008200, 0x00000000 },
- { 0x00008204, 0x00000000 },
- { 0x00008208, 0x00000000 },
- { 0x0000820c, 0x00000000 },
- { 0x00008210, 0x00000000 },
- { 0x00008214, 0x00000000 },
- { 0x00008218, 0x00000000 },
- { 0x0000821c, 0x00000000 },
- { 0x00008220, 0x00000000 },
- { 0x00008224, 0x00000000 },
- { 0x00008228, 0x00000000 },
- { 0x0000822c, 0x00000000 },
- { 0x00008230, 0x00000000 },
- { 0x00008234, 0x00000000 },
- { 0x00008238, 0x00000000 },
- { 0x0000823c, 0x00000000 },
- { 0x00008240, 0x00100000 },
- { 0x00008244, 0x0010f400 },
- { 0x00008248, 0x00000100 },
- { 0x0000824c, 0x0001e800 },
- { 0x00008250, 0x00000000 },
- { 0x00008254, 0x00000000 },
- { 0x00008258, 0x00000000 },
- { 0x0000825c, 0x400000ff },
- { 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8000010 },
- { 0x00008270, 0x00000000 },
- { 0x00008274, 0x40000000 },
- { 0x00008278, 0x003e4180 },
- { 0x0000827c, 0x00000000 },
- { 0x00008284, 0x0000002c },
- { 0x00008288, 0x0000002c },
- { 0x0000828c, 0x00000000 },
- { 0x00008294, 0x00000000 },
- { 0x00008298, 0x00000000 },
- { 0x00008300, 0x00000000 },
- { 0x00008304, 0x00000000 },
- { 0x00008308, 0x00000000 },
- { 0x0000830c, 0x00000000 },
- { 0x00008310, 0x00000000 },
- { 0x00008314, 0x00000000 },
- { 0x00008318, 0x00000000 },
- { 0x00008328, 0x00000000 },
- { 0x0000832c, 0x00000007 },
- { 0x00008330, 0x00000302 },
- { 0x00008334, 0x00000e00 },
- { 0x00008338, 0x00070000 },
- { 0x0000833c, 0x00000000 },
- { 0x00008340, 0x000107ff },
- { 0x00009808, 0x00000000 },
- { 0x0000980c, 0xad848e19 },
- { 0x00009810, 0x7d14e000 },
- { 0x00009814, 0x9c0a9f6b },
- { 0x0000981c, 0x00000000 },
- { 0x0000982c, 0x0000a000 },
- { 0x00009830, 0x00000000 },
- { 0x0000983c, 0x00200400 },
- { 0x00009840, 0x206a002e },
- { 0x0000984c, 0x1284233c },
- { 0x00009854, 0x00000859 },
- { 0x00009900, 0x00000000 },
- { 0x00009904, 0x00000000 },
- { 0x00009908, 0x00000000 },
- { 0x0000990c, 0x00000000 },
- { 0x0000991c, 0x10000fff },
- { 0x00009920, 0x05100000 },
- { 0x0000a920, 0x05100000 },
- { 0x0000b920, 0x05100000 },
- { 0x00009928, 0x00000001 },
- { 0x0000992c, 0x00000004 },
- { 0x00009934, 0x1e1f2022 },
- { 0x00009938, 0x0a0b0c0d },
- { 0x0000993c, 0x00000000 },
- { 0x00009948, 0x9280b212 },
- { 0x0000994c, 0x00020028 },
- { 0x00009954, 0x5d50e188 },
- { 0x00009958, 0x00081fff },
- { 0x0000c95c, 0x004b6a8e },
- { 0x0000c968, 0x000003ce },
- { 0x00009970, 0x190fb515 },
- { 0x00009974, 0x00000000 },
- { 0x00009978, 0x00000001 },
- { 0x0000997c, 0x00000000 },
- { 0x00009980, 0x00000000 },
- { 0x00009984, 0x00000000 },
- { 0x00009988, 0x00000000 },
- { 0x0000998c, 0x00000000 },
- { 0x00009990, 0x00000000 },
- { 0x00009994, 0x00000000 },
- { 0x00009998, 0x00000000 },
- { 0x0000999c, 0x00000000 },
- { 0x000099a0, 0x00000000 },
- { 0x000099a4, 0x00000001 },
- { 0x000099a8, 0x001fff00 },
- { 0x000099ac, 0x00000000 },
- { 0x000099b0, 0x03051000 },
- { 0x000099dc, 0x00000000 },
- { 0x000099e0, 0x00000200 },
- { 0x000099e4, 0xaaaaaaaa },
- { 0x000099e8, 0x3c466478 },
- { 0x000099ec, 0x000000aa },
- { 0x000099fc, 0x00001042 },
- { 0x00009b00, 0x00000000 },
- { 0x00009b04, 0x00000001 },
- { 0x00009b08, 0x00000002 },
- { 0x00009b0c, 0x00000003 },
- { 0x00009b10, 0x00000004 },
- { 0x00009b14, 0x00000005 },
- { 0x00009b18, 0x00000008 },
- { 0x00009b1c, 0x00000009 },
- { 0x00009b20, 0x0000000a },
- { 0x00009b24, 0x0000000b },
- { 0x00009b28, 0x0000000c },
- { 0x00009b2c, 0x0000000d },
- { 0x00009b30, 0x00000010 },
- { 0x00009b34, 0x00000011 },
- { 0x00009b38, 0x00000012 },
- { 0x00009b3c, 0x00000013 },
- { 0x00009b40, 0x00000014 },
- { 0x00009b44, 0x00000015 },
- { 0x00009b48, 0x00000018 },
- { 0x00009b4c, 0x00000019 },
- { 0x00009b50, 0x0000001a },
- { 0x00009b54, 0x0000001b },
- { 0x00009b58, 0x0000001c },
- { 0x00009b5c, 0x0000001d },
- { 0x00009b60, 0x00000020 },
- { 0x00009b64, 0x00000021 },
- { 0x00009b68, 0x00000022 },
- { 0x00009b6c, 0x00000023 },
- { 0x00009b70, 0x00000024 },
- { 0x00009b74, 0x00000025 },
- { 0x00009b78, 0x00000028 },
- { 0x00009b7c, 0x00000029 },
- { 0x00009b80, 0x0000002a },
- { 0x00009b84, 0x0000002b },
- { 0x00009b88, 0x0000002c },
- { 0x00009b8c, 0x0000002d },
- { 0x00009b90, 0x00000030 },
- { 0x00009b94, 0x00000031 },
- { 0x00009b98, 0x00000032 },
- { 0x00009b9c, 0x00000033 },
- { 0x00009ba0, 0x00000034 },
- { 0x00009ba4, 0x00000035 },
- { 0x00009ba8, 0x00000035 },
- { 0x00009bac, 0x00000035 },
- { 0x00009bb0, 0x00000035 },
- { 0x00009bb4, 0x00000035 },
- { 0x00009bb8, 0x00000035 },
- { 0x00009bbc, 0x00000035 },
- { 0x00009bc0, 0x00000035 },
- { 0x00009bc4, 0x00000035 },
- { 0x00009bc8, 0x00000035 },
- { 0x00009bcc, 0x00000035 },
- { 0x00009bd0, 0x00000035 },
- { 0x00009bd4, 0x00000035 },
- { 0x00009bd8, 0x00000035 },
- { 0x00009bdc, 0x00000035 },
- { 0x00009be0, 0x00000035 },
- { 0x00009be4, 0x00000035 },
- { 0x00009be8, 0x00000035 },
- { 0x00009bec, 0x00000035 },
- { 0x00009bf0, 0x00000035 },
- { 0x00009bf4, 0x00000035 },
- { 0x00009bf8, 0x00000010 },
- { 0x00009bfc, 0x0000001a },
- { 0x0000a210, 0x40806333 },
- { 0x0000a214, 0x00106c10 },
- { 0x0000a218, 0x009c4060 },
- { 0x0000a220, 0x018830c6 },
- { 0x0000a224, 0x00000400 },
- { 0x0000a228, 0x00000bb5 },
- { 0x0000a22c, 0x00000011 },
- { 0x0000a234, 0x20202020 },
- { 0x0000a238, 0x20202020 },
- { 0x0000a23c, 0x13c889af },
- { 0x0000a240, 0x38490a20 },
- { 0x0000a244, 0x00007bb6 },
- { 0x0000a248, 0x0fff3ffc },
- { 0x0000a24c, 0x00000001 },
- { 0x0000a250, 0x0000a000 },
- { 0x0000a254, 0x00000000 },
- { 0x0000a258, 0x0cc75380 },
- { 0x0000a25c, 0x0f0f0f01 },
- { 0x0000a260, 0xdfa91f01 },
- { 0x0000a268, 0x00000000 },
- { 0x0000a26c, 0x0e79e5c6 },
- { 0x0000b26c, 0x0e79e5c6 },
- { 0x0000c26c, 0x0e79e5c6 },
- { 0x0000d270, 0x00820820 },
- { 0x0000a278, 0x1ce739ce },
- { 0x0000a27c, 0x051701ce },
- { 0x0000a338, 0x00000000 },
- { 0x0000a33c, 0x00000000 },
- { 0x0000a340, 0x00000000 },
- { 0x0000a344, 0x00000000 },
- { 0x0000a348, 0x3fffffff },
- { 0x0000a34c, 0x3fffffff },
- { 0x0000a350, 0x3fffffff },
- { 0x0000a354, 0x0003ffff },
- { 0x0000a358, 0x79a8aa1f },
- { 0x0000d35c, 0x07ffffef },
- { 0x0000d360, 0x0fffffe7 },
- { 0x0000d364, 0x17ffffe5 },
- { 0x0000d368, 0x1fffffe4 },
- { 0x0000d36c, 0x37ffffe3 },
- { 0x0000d370, 0x3fffffe3 },
- { 0x0000d374, 0x57ffffe3 },
- { 0x0000d378, 0x5fffffe2 },
- { 0x0000d37c, 0x7fffffe2 },
- { 0x0000d380, 0x7f3c7bba },
- { 0x0000d384, 0xf3307ff0 },
- { 0x0000a388, 0x08000000 },
- { 0x0000a38c, 0x20202020 },
- { 0x0000a390, 0x20202020 },
- { 0x0000a394, 0x1ce739ce },
- { 0x0000a398, 0x000001ce },
- { 0x0000a39c, 0x00000001 },
- { 0x0000a3a0, 0x00000000 },
- { 0x0000a3a4, 0x00000000 },
- { 0x0000a3a8, 0x00000000 },
- { 0x0000a3ac, 0x00000000 },
- { 0x0000a3b0, 0x00000000 },
- { 0x0000a3b4, 0x00000000 },
- { 0x0000a3b8, 0x00000000 },
- { 0x0000a3bc, 0x00000000 },
- { 0x0000a3c0, 0x00000000 },
- { 0x0000a3c4, 0x00000000 },
- { 0x0000a3c8, 0x00000246 },
- { 0x0000a3cc, 0x20202020 },
- { 0x0000a3d0, 0x20202020 },
- { 0x0000a3d4, 0x20202020 },
- { 0x0000a3dc, 0x1ce739ce },
- { 0x0000a3e0, 0x000001ce },
-};
-
-static const u32 ar5416Bank0[][2] = {
- { 0x000098b0, 0x1e5795e5 },
- { 0x000098e0, 0x02008020 },
-};
-
-static const u32 ar5416BB_RfGain[][3] = {
- { 0x00009a00, 0x00000000, 0x00000000 },
- { 0x00009a04, 0x00000040, 0x00000040 },
- { 0x00009a08, 0x00000080, 0x00000080 },
- { 0x00009a0c, 0x000001a1, 0x00000141 },
- { 0x00009a10, 0x000001e1, 0x00000181 },
- { 0x00009a14, 0x00000021, 0x000001c1 },
- { 0x00009a18, 0x00000061, 0x00000001 },
- { 0x00009a1c, 0x00000168, 0x00000041 },
- { 0x00009a20, 0x000001a8, 0x000001a8 },
- { 0x00009a24, 0x000001e8, 0x000001e8 },
- { 0x00009a28, 0x00000028, 0x00000028 },
- { 0x00009a2c, 0x00000068, 0x00000068 },
- { 0x00009a30, 0x00000189, 0x000000a8 },
- { 0x00009a34, 0x000001c9, 0x00000169 },
- { 0x00009a38, 0x00000009, 0x000001a9 },
- { 0x00009a3c, 0x00000049, 0x000001e9 },
- { 0x00009a40, 0x00000089, 0x00000029 },
- { 0x00009a44, 0x00000170, 0x00000069 },
- { 0x00009a48, 0x000001b0, 0x00000190 },
- { 0x00009a4c, 0x000001f0, 0x000001d0 },
- { 0x00009a50, 0x00000030, 0x00000010 },
- { 0x00009a54, 0x00000070, 0x00000050 },
- { 0x00009a58, 0x00000191, 0x00000090 },
- { 0x00009a5c, 0x000001d1, 0x00000151 },
- { 0x00009a60, 0x00000011, 0x00000191 },
- { 0x00009a64, 0x00000051, 0x000001d1 },
- { 0x00009a68, 0x00000091, 0x00000011 },
- { 0x00009a6c, 0x000001b8, 0x00000051 },
- { 0x00009a70, 0x000001f8, 0x00000198 },
- { 0x00009a74, 0x00000038, 0x000001d8 },
- { 0x00009a78, 0x00000078, 0x00000018 },
- { 0x00009a7c, 0x00000199, 0x00000058 },
- { 0x00009a80, 0x000001d9, 0x00000098 },
- { 0x00009a84, 0x00000019, 0x00000159 },
- { 0x00009a88, 0x00000059, 0x00000199 },
- { 0x00009a8c, 0x00000099, 0x000001d9 },
- { 0x00009a90, 0x000000d9, 0x00000019 },
- { 0x00009a94, 0x000000f9, 0x00000059 },
- { 0x00009a98, 0x000000f9, 0x00000099 },
- { 0x00009a9c, 0x000000f9, 0x000000d9 },
- { 0x00009aa0, 0x000000f9, 0x000000f9 },
- { 0x00009aa4, 0x000000f9, 0x000000f9 },
- { 0x00009aa8, 0x000000f9, 0x000000f9 },
- { 0x00009aac, 0x000000f9, 0x000000f9 },
- { 0x00009ab0, 0x000000f9, 0x000000f9 },
- { 0x00009ab4, 0x000000f9, 0x000000f9 },
- { 0x00009ab8, 0x000000f9, 0x000000f9 },
- { 0x00009abc, 0x000000f9, 0x000000f9 },
- { 0x00009ac0, 0x000000f9, 0x000000f9 },
- { 0x00009ac4, 0x000000f9, 0x000000f9 },
- { 0x00009ac8, 0x000000f9, 0x000000f9 },
- { 0x00009acc, 0x000000f9, 0x000000f9 },
- { 0x00009ad0, 0x000000f9, 0x000000f9 },
- { 0x00009ad4, 0x000000f9, 0x000000f9 },
- { 0x00009ad8, 0x000000f9, 0x000000f9 },
- { 0x00009adc, 0x000000f9, 0x000000f9 },
- { 0x00009ae0, 0x000000f9, 0x000000f9 },
- { 0x00009ae4, 0x000000f9, 0x000000f9 },
- { 0x00009ae8, 0x000000f9, 0x000000f9 },
- { 0x00009aec, 0x000000f9, 0x000000f9 },
- { 0x00009af0, 0x000000f9, 0x000000f9 },
- { 0x00009af4, 0x000000f9, 0x000000f9 },
- { 0x00009af8, 0x000000f9, 0x000000f9 },
- { 0x00009afc, 0x000000f9, 0x000000f9 },
-};
-
-static const u32 ar5416Bank1[][2] = {
- { 0x000098b0, 0x02108421 },
- { 0x000098ec, 0x00000008 },
-};
-
-static const u32 ar5416Bank2[][2] = {
- { 0x000098b0, 0x0e73ff17 },
- { 0x000098e0, 0x00000420 },
-};
-
-static const u32 ar5416Bank3[][3] = {
- { 0x000098f0, 0x01400018, 0x01c00018 },
-};
-
-static const u32 ar5416Bank6[][3] = {
-
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x004210a2, 0x004210a2 },
- { 0x0000989c, 0x0014008f, 0x0014008f },
- { 0x0000989c, 0x00c40003, 0x00c40003 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000f1, 0x000000f1 },
- { 0x0000989c, 0x00002081, 0x00002081 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank6TPC[][3] = {
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x00423022, 0x00423022 },
- { 0x0000989c, 0x201400df, 0x201400df },
- { 0x0000989c, 0x00c40002, 0x00c40002 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000e1, 0x000000e1 },
- { 0x0000989c, 0x00007081, 0x00007081 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank7[][2] = {
- { 0x0000989c, 0x00000500 },
- { 0x0000989c, 0x00000800 },
- { 0x000098cc, 0x0000000e },
-};
-
-static const u32 ar5416Addac[][2] = {
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000003 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x0000000c },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000030 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000060 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000058 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x000098cc, 0x00000000 },
-};
-
-static const u32 ar5416Modes_9100[][6] = {
- { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
- { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
- { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
- { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
- { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
- { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
- { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
- { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
- { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
- { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
- { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
- { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
- { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
- { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
- { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
- { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
- { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
- { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
- { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
- { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
- { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
- { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
- { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
- { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
- { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
-#ifdef TB243
- { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
- { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
- { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
- { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
-#else
- { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
- { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
- { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
- { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
-#endif
- { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
- { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
- { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
- { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
- { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
- { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
- { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
- { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
- { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
- { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
- { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
- { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
- { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
- { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
- { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
- { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
- { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
- { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
- { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
- { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
- { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
- { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
-};
-
-static const u32 ar5416Common_9100[][2] = {
- { 0x0000000c, 0x00000000 },
- { 0x00000030, 0x00020015 },
- { 0x00000034, 0x00000005 },
- { 0x00000040, 0x00000000 },
- { 0x00000044, 0x00000008 },
- { 0x00000048, 0x00000008 },
- { 0x0000004c, 0x00000010 },
- { 0x00000050, 0x00000000 },
- { 0x00000054, 0x0000001f },
- { 0x00000800, 0x00000000 },
- { 0x00000804, 0x00000000 },
- { 0x00000808, 0x00000000 },
- { 0x0000080c, 0x00000000 },
- { 0x00000810, 0x00000000 },
- { 0x00000814, 0x00000000 },
- { 0x00000818, 0x00000000 },
- { 0x0000081c, 0x00000000 },
- { 0x00000820, 0x00000000 },
- { 0x00000824, 0x00000000 },
- { 0x00001040, 0x002ffc0f },
- { 0x00001044, 0x002ffc0f },
- { 0x00001048, 0x002ffc0f },
- { 0x0000104c, 0x002ffc0f },
- { 0x00001050, 0x002ffc0f },
- { 0x00001054, 0x002ffc0f },
- { 0x00001058, 0x002ffc0f },
- { 0x0000105c, 0x002ffc0f },
- { 0x00001060, 0x002ffc0f },
- { 0x00001064, 0x002ffc0f },
- { 0x00001230, 0x00000000 },
- { 0x00001270, 0x00000000 },
- { 0x00001038, 0x00000000 },
- { 0x00001078, 0x00000000 },
- { 0x000010b8, 0x00000000 },
- { 0x000010f8, 0x00000000 },
- { 0x00001138, 0x00000000 },
- { 0x00001178, 0x00000000 },
- { 0x000011b8, 0x00000000 },
- { 0x000011f8, 0x00000000 },
- { 0x00001238, 0x00000000 },
- { 0x00001278, 0x00000000 },
- { 0x000012b8, 0x00000000 },
- { 0x000012f8, 0x00000000 },
- { 0x00001338, 0x00000000 },
- { 0x00001378, 0x00000000 },
- { 0x000013b8, 0x00000000 },
- { 0x000013f8, 0x00000000 },
- { 0x00001438, 0x00000000 },
- { 0x00001478, 0x00000000 },
- { 0x000014b8, 0x00000000 },
- { 0x000014f8, 0x00000000 },
- { 0x00001538, 0x00000000 },
- { 0x00001578, 0x00000000 },
- { 0x000015b8, 0x00000000 },
- { 0x000015f8, 0x00000000 },
- { 0x00001638, 0x00000000 },
- { 0x00001678, 0x00000000 },
- { 0x000016b8, 0x00000000 },
- { 0x000016f8, 0x00000000 },
- { 0x00001738, 0x00000000 },
- { 0x00001778, 0x00000000 },
- { 0x000017b8, 0x00000000 },
- { 0x000017f8, 0x00000000 },
- { 0x0000103c, 0x00000000 },
- { 0x0000107c, 0x00000000 },
- { 0x000010bc, 0x00000000 },
- { 0x000010fc, 0x00000000 },
- { 0x0000113c, 0x00000000 },
- { 0x0000117c, 0x00000000 },
- { 0x000011bc, 0x00000000 },
- { 0x000011fc, 0x00000000 },
- { 0x0000123c, 0x00000000 },
- { 0x0000127c, 0x00000000 },
- { 0x000012bc, 0x00000000 },
- { 0x000012fc, 0x00000000 },
- { 0x0000133c, 0x00000000 },
- { 0x0000137c, 0x00000000 },
- { 0x000013bc, 0x00000000 },
- { 0x000013fc, 0x00000000 },
- { 0x0000143c, 0x00000000 },
- { 0x0000147c, 0x00000000 },
- { 0x00020010, 0x00000003 },
- { 0x00020038, 0x000004c2 },
- { 0x00008004, 0x00000000 },
- { 0x00008008, 0x00000000 },
- { 0x0000800c, 0x00000000 },
- { 0x00008018, 0x00000700 },
- { 0x00008020, 0x00000000 },
- { 0x00008038, 0x00000000 },
- { 0x0000803c, 0x00000000 },
- { 0x00008048, 0x40000000 },
- { 0x00008054, 0x00004000 },
- { 0x00008058, 0x00000000 },
- { 0x0000805c, 0x000fc78f },
- { 0x00008060, 0x0000000f },
- { 0x00008064, 0x00000000 },
- { 0x000080c0, 0x2a82301a },
- { 0x000080c4, 0x05dc01e0 },
- { 0x000080c8, 0x1f402710 },
- { 0x000080cc, 0x01f40000 },
- { 0x000080d0, 0x00001e00 },
- { 0x000080d4, 0x00000000 },
- { 0x000080d8, 0x00400000 },
- { 0x000080e0, 0xffffffff },
- { 0x000080e4, 0x0000ffff },
- { 0x000080e8, 0x003f3f3f },
- { 0x000080ec, 0x00000000 },
- { 0x000080f0, 0x00000000 },
- { 0x000080f4, 0x00000000 },
- { 0x000080f8, 0x00000000 },
- { 0x000080fc, 0x00020000 },
- { 0x00008100, 0x00020000 },
- { 0x00008104, 0x00000001 },
- { 0x00008108, 0x00000052 },
- { 0x0000810c, 0x00000000 },
- { 0x00008110, 0x00000168 },
- { 0x00008118, 0x000100aa },
- { 0x0000811c, 0x00003210 },
- { 0x00008120, 0x08f04800 },
- { 0x00008124, 0x00000000 },
- { 0x00008128, 0x00000000 },
- { 0x0000812c, 0x00000000 },
- { 0x00008130, 0x00000000 },
- { 0x00008134, 0x00000000 },
- { 0x00008138, 0x00000000 },
- { 0x0000813c, 0x00000000 },
- { 0x00008144, 0x00000000 },
- { 0x00008168, 0x00000000 },
- { 0x0000816c, 0x00000000 },
- { 0x00008170, 0x32143320 },
- { 0x00008174, 0xfaa4fa50 },
- { 0x00008178, 0x00000100 },
- { 0x0000817c, 0x00000000 },
- { 0x000081c4, 0x00000000 },
- { 0x000081d0, 0x00003210 },
- { 0x000081ec, 0x00000000 },
- { 0x000081f0, 0x00000000 },
- { 0x000081f4, 0x00000000 },
- { 0x000081f8, 0x00000000 },
- { 0x000081fc, 0x00000000 },
- { 0x00008200, 0x00000000 },
- { 0x00008204, 0x00000000 },
- { 0x00008208, 0x00000000 },
- { 0x0000820c, 0x00000000 },
- { 0x00008210, 0x00000000 },
- { 0x00008214, 0x00000000 },
- { 0x00008218, 0x00000000 },
- { 0x0000821c, 0x00000000 },
- { 0x00008220, 0x00000000 },
- { 0x00008224, 0x00000000 },
- { 0x00008228, 0x00000000 },
- { 0x0000822c, 0x00000000 },
- { 0x00008230, 0x00000000 },
- { 0x00008234, 0x00000000 },
- { 0x00008238, 0x00000000 },
- { 0x0000823c, 0x00000000 },
- { 0x00008240, 0x00100000 },
- { 0x00008244, 0x0010f400 },
- { 0x00008248, 0x00000100 },
- { 0x0000824c, 0x0001e800 },
- { 0x00008250, 0x00000000 },
- { 0x00008254, 0x00000000 },
- { 0x00008258, 0x00000000 },
- { 0x0000825c, 0x400000ff },
- { 0x00008260, 0x00080922 },
- { 0x00008270, 0x00000000 },
- { 0x00008274, 0x40000000 },
- { 0x00008278, 0x003e4180 },
- { 0x0000827c, 0x00000000 },
- { 0x00008284, 0x0000002c },
- { 0x00008288, 0x0000002c },
- { 0x0000828c, 0x00000000 },
- { 0x00008294, 0x00000000 },
- { 0x00008298, 0x00000000 },
- { 0x00008300, 0x00000000 },
- { 0x00008304, 0x00000000 },
- { 0x00008308, 0x00000000 },
- { 0x0000830c, 0x00000000 },
- { 0x00008310, 0x00000000 },
- { 0x00008314, 0x00000000 },
- { 0x00008318, 0x00000000 },
- { 0x00008328, 0x00000000 },
- { 0x0000832c, 0x00000007 },
- { 0x00008330, 0x00000302 },
- { 0x00008334, 0x00000e00 },
- { 0x00008338, 0x00000000 },
- { 0x0000833c, 0x00000000 },
- { 0x00008340, 0x000107ff },
- { 0x00009808, 0x00000000 },
- { 0x0000980c, 0xad848e19 },
- { 0x00009810, 0x7d14e000 },
- { 0x00009814, 0x9c0a9f6b },
- { 0x0000981c, 0x00000000 },
- { 0x0000982c, 0x0000a000 },
- { 0x00009830, 0x00000000 },
- { 0x0000983c, 0x00200400 },
- { 0x00009840, 0x206a01ae },
- { 0x0000984c, 0x1284233c },
- { 0x00009854, 0x00000859 },
- { 0x00009900, 0x00000000 },
- { 0x00009904, 0x00000000 },
- { 0x00009908, 0x00000000 },
- { 0x0000990c, 0x00000000 },
- { 0x0000991c, 0x10000fff },
- { 0x00009920, 0x05100000 },
- { 0x0000a920, 0x05100000 },
- { 0x0000b920, 0x05100000 },
- { 0x00009928, 0x00000001 },
- { 0x0000992c, 0x00000004 },
- { 0x00009934, 0x1e1f2022 },
- { 0x00009938, 0x0a0b0c0d },
- { 0x0000993c, 0x00000000 },
- { 0x00009948, 0x9280b212 },
- { 0x0000994c, 0x00020028 },
- { 0x0000c95c, 0x004b6a8e },
- { 0x0000c968, 0x000003ce },
- { 0x00009970, 0x190fb515 },
- { 0x00009974, 0x00000000 },
- { 0x00009978, 0x00000001 },
- { 0x0000997c, 0x00000000 },
- { 0x00009980, 0x00000000 },
- { 0x00009984, 0x00000000 },
- { 0x00009988, 0x00000000 },
- { 0x0000998c, 0x00000000 },
- { 0x00009990, 0x00000000 },
- { 0x00009994, 0x00000000 },
- { 0x00009998, 0x00000000 },
- { 0x0000999c, 0x00000000 },
- { 0x000099a0, 0x00000000 },
- { 0x000099a4, 0x00000001 },
- { 0x000099a8, 0x201fff00 },
- { 0x000099ac, 0x006f0000 },
- { 0x000099b0, 0x03051000 },
- { 0x000099dc, 0x00000000 },
- { 0x000099e0, 0x00000200 },
- { 0x000099e4, 0xaaaaaaaa },
- { 0x000099e8, 0x3c466478 },
- { 0x000099ec, 0x0cc80caa },
- { 0x000099fc, 0x00001042 },
- { 0x00009b00, 0x00000000 },
- { 0x00009b04, 0x00000001 },
- { 0x00009b08, 0x00000002 },
- { 0x00009b0c, 0x00000003 },
- { 0x00009b10, 0x00000004 },
- { 0x00009b14, 0x00000005 },
- { 0x00009b18, 0x00000008 },
- { 0x00009b1c, 0x00000009 },
- { 0x00009b20, 0x0000000a },
- { 0x00009b24, 0x0000000b },
- { 0x00009b28, 0x0000000c },
- { 0x00009b2c, 0x0000000d },
- { 0x00009b30, 0x00000010 },
- { 0x00009b34, 0x00000011 },
- { 0x00009b38, 0x00000012 },
- { 0x00009b3c, 0x00000013 },
- { 0x00009b40, 0x00000014 },
- { 0x00009b44, 0x00000015 },
- { 0x00009b48, 0x00000018 },
- { 0x00009b4c, 0x00000019 },
- { 0x00009b50, 0x0000001a },
- { 0x00009b54, 0x0000001b },
- { 0x00009b58, 0x0000001c },
- { 0x00009b5c, 0x0000001d },
- { 0x00009b60, 0x00000020 },
- { 0x00009b64, 0x00000021 },
- { 0x00009b68, 0x00000022 },
- { 0x00009b6c, 0x00000023 },
- { 0x00009b70, 0x00000024 },
- { 0x00009b74, 0x00000025 },
- { 0x00009b78, 0x00000028 },
- { 0x00009b7c, 0x00000029 },
- { 0x00009b80, 0x0000002a },
- { 0x00009b84, 0x0000002b },
- { 0x00009b88, 0x0000002c },
- { 0x00009b8c, 0x0000002d },
- { 0x00009b90, 0x00000030 },
- { 0x00009b94, 0x00000031 },
- { 0x00009b98, 0x00000032 },
- { 0x00009b9c, 0x00000033 },
- { 0x00009ba0, 0x00000034 },
- { 0x00009ba4, 0x00000035 },
- { 0x00009ba8, 0x00000035 },
- { 0x00009bac, 0x00000035 },
- { 0x00009bb0, 0x00000035 },
- { 0x00009bb4, 0x00000035 },
- { 0x00009bb8, 0x00000035 },
- { 0x00009bbc, 0x00000035 },
- { 0x00009bc0, 0x00000035 },
- { 0x00009bc4, 0x00000035 },
- { 0x00009bc8, 0x00000035 },
- { 0x00009bcc, 0x00000035 },
- { 0x00009bd0, 0x00000035 },
- { 0x00009bd4, 0x00000035 },
- { 0x00009bd8, 0x00000035 },
- { 0x00009bdc, 0x00000035 },
- { 0x00009be0, 0x00000035 },
- { 0x00009be4, 0x00000035 },
- { 0x00009be8, 0x00000035 },
- { 0x00009bec, 0x00000035 },
- { 0x00009bf0, 0x00000035 },
- { 0x00009bf4, 0x00000035 },
- { 0x00009bf8, 0x00000010 },
- { 0x00009bfc, 0x0000001a },
- { 0x0000a210, 0x40806333 },
- { 0x0000a214, 0x00106c10 },
- { 0x0000a218, 0x009c4060 },
- { 0x0000a220, 0x018830c6 },
- { 0x0000a224, 0x00000400 },
- { 0x0000a228, 0x001a0bb5 },
- { 0x0000a22c, 0x00000000 },
- { 0x0000a234, 0x20202020 },
- { 0x0000a238, 0x20202020 },
- { 0x0000a23c, 0x13c889ae },
- { 0x0000a240, 0x38490a20 },
- { 0x0000a244, 0x00007bb6 },
- { 0x0000a248, 0x0fff3ffc },
- { 0x0000a24c, 0x00000001 },
- { 0x0000a250, 0x0000a000 },
- { 0x0000a254, 0x00000000 },
- { 0x0000a258, 0x0cc75380 },
- { 0x0000a25c, 0x0f0f0f01 },
- { 0x0000a260, 0xdfa91f01 },
- { 0x0000a268, 0x00000001 },
- { 0x0000a26c, 0x0ebae9c6 },
- { 0x0000b26c, 0x0ebae9c6 },
- { 0x0000c26c, 0x0ebae9c6 },
- { 0x0000d270, 0x00820820 },
- { 0x0000a278, 0x1ce739ce },
- { 0x0000a27c, 0x050701ce },
- { 0x0000a338, 0x00000000 },
- { 0x0000a33c, 0x00000000 },
- { 0x0000a340, 0x00000000 },
- { 0x0000a344, 0x00000000 },
- { 0x0000a348, 0x3fffffff },
- { 0x0000a34c, 0x3fffffff },
- { 0x0000a350, 0x3fffffff },
- { 0x0000a354, 0x0003ffff },
- { 0x0000a358, 0x79a8aa33 },
- { 0x0000d35c, 0x07ffffef },
- { 0x0000d360, 0x0fffffe7 },
- { 0x0000d364, 0x17ffffe5 },
- { 0x0000d368, 0x1fffffe4 },
- { 0x0000d36c, 0x37ffffe3 },
- { 0x0000d370, 0x3fffffe3 },
- { 0x0000d374, 0x57ffffe3 },
- { 0x0000d378, 0x5fffffe2 },
- { 0x0000d37c, 0x7fffffe2 },
- { 0x0000d380, 0x7f3c7bba },
- { 0x0000d384, 0xf3307ff0 },
- { 0x0000a388, 0x0c000000 },
- { 0x0000a38c, 0x20202020 },
- { 0x0000a390, 0x20202020 },
- { 0x0000a394, 0x1ce739ce },
- { 0x0000a398, 0x000001ce },
- { 0x0000a39c, 0x00000001 },
- { 0x0000a3a0, 0x00000000 },
- { 0x0000a3a4, 0x00000000 },
- { 0x0000a3a8, 0x00000000 },
- { 0x0000a3ac, 0x00000000 },
- { 0x0000a3b0, 0x00000000 },
- { 0x0000a3b4, 0x00000000 },
- { 0x0000a3b8, 0x00000000 },
- { 0x0000a3bc, 0x00000000 },
- { 0x0000a3c0, 0x00000000 },
- { 0x0000a3c4, 0x00000000 },
- { 0x0000a3c8, 0x00000246 },
- { 0x0000a3cc, 0x20202020 },
- { 0x0000a3d0, 0x20202020 },
- { 0x0000a3d4, 0x20202020 },
- { 0x0000a3dc, 0x1ce739ce },
- { 0x0000a3e0, 0x000001ce },
-};
-
-static const u32 ar5416Bank0_9100[][2] = {
- { 0x000098b0, 0x1e5795e5 },
- { 0x000098e0, 0x02008020 },
-};
-
-static const u32 ar5416BB_RfGain_9100[][3] = {
- { 0x00009a00, 0x00000000, 0x00000000 },
- { 0x00009a04, 0x00000040, 0x00000040 },
- { 0x00009a08, 0x00000080, 0x00000080 },
- { 0x00009a0c, 0x000001a1, 0x00000141 },
- { 0x00009a10, 0x000001e1, 0x00000181 },
- { 0x00009a14, 0x00000021, 0x000001c1 },
- { 0x00009a18, 0x00000061, 0x00000001 },
- { 0x00009a1c, 0x00000168, 0x00000041 },
- { 0x00009a20, 0x000001a8, 0x000001a8 },
- { 0x00009a24, 0x000001e8, 0x000001e8 },
- { 0x00009a28, 0x00000028, 0x00000028 },
- { 0x00009a2c, 0x00000068, 0x00000068 },
- { 0x00009a30, 0x00000189, 0x000000a8 },
- { 0x00009a34, 0x000001c9, 0x00000169 },
- { 0x00009a38, 0x00000009, 0x000001a9 },
- { 0x00009a3c, 0x00000049, 0x000001e9 },
- { 0x00009a40, 0x00000089, 0x00000029 },
- { 0x00009a44, 0x00000170, 0x00000069 },
- { 0x00009a48, 0x000001b0, 0x00000190 },
- { 0x00009a4c, 0x000001f0, 0x000001d0 },
- { 0x00009a50, 0x00000030, 0x00000010 },
- { 0x00009a54, 0x00000070, 0x00000050 },
- { 0x00009a58, 0x00000191, 0x00000090 },
- { 0x00009a5c, 0x000001d1, 0x00000151 },
- { 0x00009a60, 0x00000011, 0x00000191 },
- { 0x00009a64, 0x00000051, 0x000001d1 },
- { 0x00009a68, 0x00000091, 0x00000011 },
- { 0x00009a6c, 0x000001b8, 0x00000051 },
- { 0x00009a70, 0x000001f8, 0x00000198 },
- { 0x00009a74, 0x00000038, 0x000001d8 },
- { 0x00009a78, 0x00000078, 0x00000018 },
- { 0x00009a7c, 0x00000199, 0x00000058 },
- { 0x00009a80, 0x000001d9, 0x00000098 },
- { 0x00009a84, 0x00000019, 0x00000159 },
- { 0x00009a88, 0x00000059, 0x00000199 },
- { 0x00009a8c, 0x00000099, 0x000001d9 },
- { 0x00009a90, 0x000000d9, 0x00000019 },
- { 0x00009a94, 0x000000f9, 0x00000059 },
- { 0x00009a98, 0x000000f9, 0x00000099 },
- { 0x00009a9c, 0x000000f9, 0x000000d9 },
- { 0x00009aa0, 0x000000f9, 0x000000f9 },
- { 0x00009aa4, 0x000000f9, 0x000000f9 },
- { 0x00009aa8, 0x000000f9, 0x000000f9 },
- { 0x00009aac, 0x000000f9, 0x000000f9 },
- { 0x00009ab0, 0x000000f9, 0x000000f9 },
- { 0x00009ab4, 0x000000f9, 0x000000f9 },
- { 0x00009ab8, 0x000000f9, 0x000000f9 },
- { 0x00009abc, 0x000000f9, 0x000000f9 },
- { 0x00009ac0, 0x000000f9, 0x000000f9 },
- { 0x00009ac4, 0x000000f9, 0x000000f9 },
- { 0x00009ac8, 0x000000f9, 0x000000f9 },
- { 0x00009acc, 0x000000f9, 0x000000f9 },
- { 0x00009ad0, 0x000000f9, 0x000000f9 },
- { 0x00009ad4, 0x000000f9, 0x000000f9 },
- { 0x00009ad8, 0x000000f9, 0x000000f9 },
- { 0x00009adc, 0x000000f9, 0x000000f9 },
- { 0x00009ae0, 0x000000f9, 0x000000f9 },
- { 0x00009ae4, 0x000000f9, 0x000000f9 },
- { 0x00009ae8, 0x000000f9, 0x000000f9 },
- { 0x00009aec, 0x000000f9, 0x000000f9 },
- { 0x00009af0, 0x000000f9, 0x000000f9 },
- { 0x00009af4, 0x000000f9, 0x000000f9 },
- { 0x00009af8, 0x000000f9, 0x000000f9 },
- { 0x00009afc, 0x000000f9, 0x000000f9 },
-};
-
-static const u32 ar5416Bank1_9100[][2] = {
- { 0x000098b0, 0x02108421},
- { 0x000098ec, 0x00000008},
-};
-
-static const u32 ar5416Bank2_9100[][2] = {
- { 0x000098b0, 0x0e73ff17},
- { 0x000098e0, 0x00000420},
-};
-
-static const u32 ar5416Bank3_9100[][3] = {
- { 0x000098f0, 0x01400018, 0x01c00018 },
-};
-
-static const u32 ar5416Bank6_9100[][3] = {
-
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x004210a2, 0x004210a2 },
- { 0x0000989c, 0x0014000f, 0x0014000f },
- { 0x0000989c, 0x00c40002, 0x00c40002 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x000180d6, 0x000180d6 },
- { 0x0000989c, 0x0000c0aa, 0x0000c0aa },
- { 0x0000989c, 0x000000b1, 0x000000b1 },
- { 0x0000989c, 0x00002000, 0x00002000 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-
-static const u32 ar5416Bank6TPC_9100[][3] = {
-
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x00423022, 0x00423022 },
- { 0x0000989c, 0x2014008f, 0x2014008f },
- { 0x0000989c, 0x00c40002, 0x00c40002 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000e1, 0x000000e1 },
- { 0x0000989c, 0x00007080, 0x00007080 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank7_9100[][2] = {
- { 0x0000989c, 0x00000500 },
- { 0x0000989c, 0x00000800 },
- { 0x000098cc, 0x0000000e },
-};
-
-static const u32 ar5416Addac_9100[][2] = {
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000010 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000015 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x000098cc, 0x00000000 },
-};
-
-static const u32 ar5416Modes_9160[][6] = {
- { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
- { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
- { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
- { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
- { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
- { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
- { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
- { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
- { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
- { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
- { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
- { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x00009850, 0x6c48b4e2, 0x6c48b4e2, 0x6c48b0e2, 0x6c48b0e2, 0x6c48b0e2 },
- { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
- { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e },
- { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
- { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
- { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
- { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
- { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
- { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
- { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
- { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
- { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
- { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
- { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
- { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
- { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce },
- { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
- { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
- { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
- { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
- { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
- { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
- { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
- { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
- { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
- { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
- { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
- { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
- { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
- { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
- { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
- { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
- { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
- { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
- { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
- { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
- { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
- { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
-};
-
-static const u32 ar5416Common_9160[][2] = {
- { 0x0000000c, 0x00000000 },
- { 0x00000030, 0x00020015 },
- { 0x00000034, 0x00000005 },
- { 0x00000040, 0x00000000 },
- { 0x00000044, 0x00000008 },
- { 0x00000048, 0x00000008 },
- { 0x0000004c, 0x00000010 },
- { 0x00000050, 0x00000000 },
- { 0x00000054, 0x0000001f },
- { 0x00000800, 0x00000000 },
- { 0x00000804, 0x00000000 },
- { 0x00000808, 0x00000000 },
- { 0x0000080c, 0x00000000 },
- { 0x00000810, 0x00000000 },
- { 0x00000814, 0x00000000 },
- { 0x00000818, 0x00000000 },
- { 0x0000081c, 0x00000000 },
- { 0x00000820, 0x00000000 },
- { 0x00000824, 0x00000000 },
- { 0x00001040, 0x002ffc0f },
- { 0x00001044, 0x002ffc0f },
- { 0x00001048, 0x002ffc0f },
- { 0x0000104c, 0x002ffc0f },
- { 0x00001050, 0x002ffc0f },
- { 0x00001054, 0x002ffc0f },
- { 0x00001058, 0x002ffc0f },
- { 0x0000105c, 0x002ffc0f },
- { 0x00001060, 0x002ffc0f },
- { 0x00001064, 0x002ffc0f },
- { 0x00001230, 0x00000000 },
- { 0x00001270, 0x00000000 },
- { 0x00001038, 0x00000000 },
- { 0x00001078, 0x00000000 },
- { 0x000010b8, 0x00000000 },
- { 0x000010f8, 0x00000000 },
- { 0x00001138, 0x00000000 },
- { 0x00001178, 0x00000000 },
- { 0x000011b8, 0x00000000 },
- { 0x000011f8, 0x00000000 },
- { 0x00001238, 0x00000000 },
- { 0x00001278, 0x00000000 },
- { 0x000012b8, 0x00000000 },
- { 0x000012f8, 0x00000000 },
- { 0x00001338, 0x00000000 },
- { 0x00001378, 0x00000000 },
- { 0x000013b8, 0x00000000 },
- { 0x000013f8, 0x00000000 },
- { 0x00001438, 0x00000000 },
- { 0x00001478, 0x00000000 },
- { 0x000014b8, 0x00000000 },
- { 0x000014f8, 0x00000000 },
- { 0x00001538, 0x00000000 },
- { 0x00001578, 0x00000000 },
- { 0x000015b8, 0x00000000 },
- { 0x000015f8, 0x00000000 },
- { 0x00001638, 0x00000000 },
- { 0x00001678, 0x00000000 },
- { 0x000016b8, 0x00000000 },
- { 0x000016f8, 0x00000000 },
- { 0x00001738, 0x00000000 },
- { 0x00001778, 0x00000000 },
- { 0x000017b8, 0x00000000 },
- { 0x000017f8, 0x00000000 },
- { 0x0000103c, 0x00000000 },
- { 0x0000107c, 0x00000000 },
- { 0x000010bc, 0x00000000 },
- { 0x000010fc, 0x00000000 },
- { 0x0000113c, 0x00000000 },
- { 0x0000117c, 0x00000000 },
- { 0x000011bc, 0x00000000 },
- { 0x000011fc, 0x00000000 },
- { 0x0000123c, 0x00000000 },
- { 0x0000127c, 0x00000000 },
- { 0x000012bc, 0x00000000 },
- { 0x000012fc, 0x00000000 },
- { 0x0000133c, 0x00000000 },
- { 0x0000137c, 0x00000000 },
- { 0x000013bc, 0x00000000 },
- { 0x000013fc, 0x00000000 },
- { 0x0000143c, 0x00000000 },
- { 0x0000147c, 0x00000000 },
- { 0x00004030, 0x00000002 },
- { 0x0000403c, 0x00000002 },
- { 0x00007010, 0x00000020 },
- { 0x00007038, 0x000004c2 },
- { 0x00008004, 0x00000000 },
- { 0x00008008, 0x00000000 },
- { 0x0000800c, 0x00000000 },
- { 0x00008018, 0x00000700 },
- { 0x00008020, 0x00000000 },
- { 0x00008038, 0x00000000 },
- { 0x0000803c, 0x00000000 },
- { 0x00008048, 0x40000000 },
- { 0x00008054, 0x00000000 },
- { 0x00008058, 0x00000000 },
- { 0x0000805c, 0x000fc78f },
- { 0x00008060, 0x0000000f },
- { 0x00008064, 0x00000000 },
- { 0x000080c0, 0x2a82301a },
- { 0x000080c4, 0x05dc01e0 },
- { 0x000080c8, 0x1f402710 },
- { 0x000080cc, 0x01f40000 },
- { 0x000080d0, 0x00001e00 },
- { 0x000080d4, 0x00000000 },
- { 0x000080d8, 0x00400000 },
- { 0x000080e0, 0xffffffff },
- { 0x000080e4, 0x0000ffff },
- { 0x000080e8, 0x003f3f3f },
- { 0x000080ec, 0x00000000 },
- { 0x000080f0, 0x00000000 },
- { 0x000080f4, 0x00000000 },
- { 0x000080f8, 0x00000000 },
- { 0x000080fc, 0x00020000 },
- { 0x00008100, 0x00020000 },
- { 0x00008104, 0x00000001 },
- { 0x00008108, 0x00000052 },
- { 0x0000810c, 0x00000000 },
- { 0x00008110, 0x00000168 },
- { 0x00008118, 0x000100aa },
- { 0x0000811c, 0x00003210 },
- { 0x00008120, 0x08f04800 },
- { 0x00008124, 0x00000000 },
- { 0x00008128, 0x00000000 },
- { 0x0000812c, 0x00000000 },
- { 0x00008130, 0x00000000 },
- { 0x00008134, 0x00000000 },
- { 0x00008138, 0x00000000 },
- { 0x0000813c, 0x00000000 },
- { 0x00008144, 0xffffffff },
- { 0x00008168, 0x00000000 },
- { 0x0000816c, 0x00000000 },
- { 0x00008170, 0x32143320 },
- { 0x00008174, 0xfaa4fa50 },
- { 0x00008178, 0x00000100 },
- { 0x0000817c, 0x00000000 },
- { 0x000081c4, 0x00000000 },
- { 0x000081d0, 0x00003210 },
- { 0x000081ec, 0x00000000 },
- { 0x000081f0, 0x00000000 },
- { 0x000081f4, 0x00000000 },
- { 0x000081f8, 0x00000000 },
- { 0x000081fc, 0x00000000 },
- { 0x00008200, 0x00000000 },
- { 0x00008204, 0x00000000 },
- { 0x00008208, 0x00000000 },
- { 0x0000820c, 0x00000000 },
- { 0x00008210, 0x00000000 },
- { 0x00008214, 0x00000000 },
- { 0x00008218, 0x00000000 },
- { 0x0000821c, 0x00000000 },
- { 0x00008220, 0x00000000 },
- { 0x00008224, 0x00000000 },
- { 0x00008228, 0x00000000 },
- { 0x0000822c, 0x00000000 },
- { 0x00008230, 0x00000000 },
- { 0x00008234, 0x00000000 },
- { 0x00008238, 0x00000000 },
- { 0x0000823c, 0x00000000 },
- { 0x00008240, 0x00100000 },
- { 0x00008244, 0x0010f400 },
- { 0x00008248, 0x00000100 },
- { 0x0000824c, 0x0001e800 },
- { 0x00008250, 0x00000000 },
- { 0x00008254, 0x00000000 },
- { 0x00008258, 0x00000000 },
- { 0x0000825c, 0x400000ff },
- { 0x00008260, 0x00080922 },
- { 0x00008270, 0x00000000 },
- { 0x00008274, 0x40000000 },
- { 0x00008278, 0x003e4180 },
- { 0x0000827c, 0x00000000 },
- { 0x00008284, 0x0000002c },
- { 0x00008288, 0x0000002c },
- { 0x0000828c, 0x00000000 },
- { 0x00008294, 0x00000000 },
- { 0x00008298, 0x00000000 },
- { 0x00008300, 0x00000000 },
- { 0x00008304, 0x00000000 },
- { 0x00008308, 0x00000000 },
- { 0x0000830c, 0x00000000 },
- { 0x00008310, 0x00000000 },
- { 0x00008314, 0x00000000 },
- { 0x00008318, 0x00000000 },
- { 0x00008328, 0x00000000 },
- { 0x0000832c, 0x00000007 },
- { 0x00008330, 0x00000302 },
- { 0x00008334, 0x00000e00 },
- { 0x00008338, 0x00ff0000 },
- { 0x0000833c, 0x00000000 },
- { 0x00008340, 0x000107ff },
- { 0x00009808, 0x00000000 },
- { 0x0000980c, 0xad848e19 },
- { 0x00009810, 0x7d14e000 },
- { 0x00009814, 0x9c0a9f6b },
- { 0x0000981c, 0x00000000 },
- { 0x0000982c, 0x0000a000 },
- { 0x00009830, 0x00000000 },
- { 0x0000983c, 0x00200400 },
- { 0x00009840, 0x206a01ae },
- { 0x0000984c, 0x1284233c },
- { 0x00009854, 0x00000859 },
- { 0x00009900, 0x00000000 },
- { 0x00009904, 0x00000000 },
- { 0x00009908, 0x00000000 },
- { 0x0000990c, 0x00000000 },
- { 0x0000991c, 0x10000fff },
- { 0x00009920, 0x05100000 },
- { 0x0000a920, 0x05100000 },
- { 0x0000b920, 0x05100000 },
- { 0x00009928, 0x00000001 },
- { 0x0000992c, 0x00000004 },
- { 0x00009934, 0x1e1f2022 },
- { 0x00009938, 0x0a0b0c0d },
- { 0x0000993c, 0x00000000 },
- { 0x00009948, 0x9280b212 },
- { 0x0000994c, 0x00020028 },
- { 0x00009954, 0x5f3ca3de },
- { 0x00009958, 0x2108ecff },
- { 0x00009940, 0x00750604 },
- { 0x0000c95c, 0x004b6a8e },
- { 0x00009970, 0x190fb515 },
- { 0x00009974, 0x00000000 },
- { 0x00009978, 0x00000001 },
- { 0x0000997c, 0x00000000 },
- { 0x00009980, 0x00000000 },
- { 0x00009984, 0x00000000 },
- { 0x00009988, 0x00000000 },
- { 0x0000998c, 0x00000000 },
- { 0x00009990, 0x00000000 },
- { 0x00009994, 0x00000000 },
- { 0x00009998, 0x00000000 },
- { 0x0000999c, 0x00000000 },
- { 0x000099a0, 0x00000000 },
- { 0x000099a4, 0x00000001 },
- { 0x000099a8, 0x201fff00 },
- { 0x000099ac, 0x006f0000 },
- { 0x000099b0, 0x03051000 },
- { 0x000099dc, 0x00000000 },
- { 0x000099e0, 0x00000200 },
- { 0x000099e4, 0xaaaaaaaa },
- { 0x000099e8, 0x3c466478 },
- { 0x000099ec, 0x0cc80caa },
- { 0x000099fc, 0x00001042 },
- { 0x00009b00, 0x00000000 },
- { 0x00009b04, 0x00000001 },
- { 0x00009b08, 0x00000002 },
- { 0x00009b0c, 0x00000003 },
- { 0x00009b10, 0x00000004 },
- { 0x00009b14, 0x00000005 },
- { 0x00009b18, 0x00000008 },
- { 0x00009b1c, 0x00000009 },
- { 0x00009b20, 0x0000000a },
- { 0x00009b24, 0x0000000b },
- { 0x00009b28, 0x0000000c },
- { 0x00009b2c, 0x0000000d },
- { 0x00009b30, 0x00000010 },
- { 0x00009b34, 0x00000011 },
- { 0x00009b38, 0x00000012 },
- { 0x00009b3c, 0x00000013 },
- { 0x00009b40, 0x00000014 },
- { 0x00009b44, 0x00000015 },
- { 0x00009b48, 0x00000018 },
- { 0x00009b4c, 0x00000019 },
- { 0x00009b50, 0x0000001a },
- { 0x00009b54, 0x0000001b },
- { 0x00009b58, 0x0000001c },
- { 0x00009b5c, 0x0000001d },
- { 0x00009b60, 0x00000020 },
- { 0x00009b64, 0x00000021 },
- { 0x00009b68, 0x00000022 },
- { 0x00009b6c, 0x00000023 },
- { 0x00009b70, 0x00000024 },
- { 0x00009b74, 0x00000025 },
- { 0x00009b78, 0x00000028 },
- { 0x00009b7c, 0x00000029 },
- { 0x00009b80, 0x0000002a },
- { 0x00009b84, 0x0000002b },
- { 0x00009b88, 0x0000002c },
- { 0x00009b8c, 0x0000002d },
- { 0x00009b90, 0x00000030 },
- { 0x00009b94, 0x00000031 },
- { 0x00009b98, 0x00000032 },
- { 0x00009b9c, 0x00000033 },
- { 0x00009ba0, 0x00000034 },
- { 0x00009ba4, 0x00000035 },
- { 0x00009ba8, 0x00000035 },
- { 0x00009bac, 0x00000035 },
- { 0x00009bb0, 0x00000035 },
- { 0x00009bb4, 0x00000035 },
- { 0x00009bb8, 0x00000035 },
- { 0x00009bbc, 0x00000035 },
- { 0x00009bc0, 0x00000035 },
- { 0x00009bc4, 0x00000035 },
- { 0x00009bc8, 0x00000035 },
- { 0x00009bcc, 0x00000035 },
- { 0x00009bd0, 0x00000035 },
- { 0x00009bd4, 0x00000035 },
- { 0x00009bd8, 0x00000035 },
- { 0x00009bdc, 0x00000035 },
- { 0x00009be0, 0x00000035 },
- { 0x00009be4, 0x00000035 },
- { 0x00009be8, 0x00000035 },
- { 0x00009bec, 0x00000035 },
- { 0x00009bf0, 0x00000035 },
- { 0x00009bf4, 0x00000035 },
- { 0x00009bf8, 0x00000010 },
- { 0x00009bfc, 0x0000001a },
- { 0x0000a210, 0x40806333 },
- { 0x0000a214, 0x00106c10 },
- { 0x0000a218, 0x009c4060 },
- { 0x0000a220, 0x018830c6 },
- { 0x0000a224, 0x00000400 },
- { 0x0000a228, 0x001a0bb5 },
- { 0x0000a22c, 0x00000000 },
- { 0x0000a234, 0x20202020 },
- { 0x0000a238, 0x20202020 },
- { 0x0000a23c, 0x13c889af },
- { 0x0000a240, 0x38490a20 },
- { 0x0000a244, 0x00007bb6 },
- { 0x0000a248, 0x0fff3ffc },
- { 0x0000a24c, 0x00000001 },
- { 0x0000a250, 0x0000e000 },
- { 0x0000a254, 0x00000000 },
- { 0x0000a258, 0x0cc75380 },
- { 0x0000a25c, 0x0f0f0f01 },
- { 0x0000a260, 0xdfa91f01 },
- { 0x0000a268, 0x00000001 },
- { 0x0000a26c, 0x0ebae9c6 },
- { 0x0000b26c, 0x0ebae9c6 },
- { 0x0000c26c, 0x0ebae9c6 },
- { 0x0000d270, 0x00820820 },
- { 0x0000a278, 0x1ce739ce },
- { 0x0000a27c, 0x050701ce },
- { 0x0000a338, 0x00000000 },
- { 0x0000a33c, 0x00000000 },
- { 0x0000a340, 0x00000000 },
- { 0x0000a344, 0x00000000 },
- { 0x0000a348, 0x3fffffff },
- { 0x0000a34c, 0x3fffffff },
- { 0x0000a350, 0x3fffffff },
- { 0x0000a354, 0x0003ffff },
- { 0x0000a358, 0x79bfaa03 },
- { 0x0000d35c, 0x07ffffef },
- { 0x0000d360, 0x0fffffe7 },
- { 0x0000d364, 0x17ffffe5 },
- { 0x0000d368, 0x1fffffe4 },
- { 0x0000d36c, 0x37ffffe3 },
- { 0x0000d370, 0x3fffffe3 },
- { 0x0000d374, 0x57ffffe3 },
- { 0x0000d378, 0x5fffffe2 },
- { 0x0000d37c, 0x7fffffe2 },
- { 0x0000d380, 0x7f3c7bba },
- { 0x0000d384, 0xf3307ff0 },
- { 0x0000a388, 0x0c000000 },
- { 0x0000a38c, 0x20202020 },
- { 0x0000a390, 0x20202020 },
- { 0x0000a394, 0x1ce739ce },
- { 0x0000a398, 0x000001ce },
- { 0x0000a39c, 0x00000001 },
- { 0x0000a3a0, 0x00000000 },
- { 0x0000a3a4, 0x00000000 },
- { 0x0000a3a8, 0x00000000 },
- { 0x0000a3ac, 0x00000000 },
- { 0x0000a3b0, 0x00000000 },
- { 0x0000a3b4, 0x00000000 },
- { 0x0000a3b8, 0x00000000 },
- { 0x0000a3bc, 0x00000000 },
- { 0x0000a3c0, 0x00000000 },
- { 0x0000a3c4, 0x00000000 },
- { 0x0000a3c8, 0x00000246 },
- { 0x0000a3cc, 0x20202020 },
- { 0x0000a3d0, 0x20202020 },
- { 0x0000a3d4, 0x20202020 },
- { 0x0000a3dc, 0x1ce739ce },
- { 0x0000a3e0, 0x000001ce },
-};
-
-static const u32 ar5416Bank0_9160[][2] = {
- { 0x000098b0, 0x1e5795e5 },
- { 0x000098e0, 0x02008020 },
-};
-
-static const u32 ar5416BB_RfGain_9160[][3] = {
- { 0x00009a00, 0x00000000, 0x00000000 },
- { 0x00009a04, 0x00000040, 0x00000040 },
- { 0x00009a08, 0x00000080, 0x00000080 },
- { 0x00009a0c, 0x000001a1, 0x00000141 },
- { 0x00009a10, 0x000001e1, 0x00000181 },
- { 0x00009a14, 0x00000021, 0x000001c1 },
- { 0x00009a18, 0x00000061, 0x00000001 },
- { 0x00009a1c, 0x00000168, 0x00000041 },
- { 0x00009a20, 0x000001a8, 0x000001a8 },
- { 0x00009a24, 0x000001e8, 0x000001e8 },
- { 0x00009a28, 0x00000028, 0x00000028 },
- { 0x00009a2c, 0x00000068, 0x00000068 },
- { 0x00009a30, 0x00000189, 0x000000a8 },
- { 0x00009a34, 0x000001c9, 0x00000169 },
- { 0x00009a38, 0x00000009, 0x000001a9 },
- { 0x00009a3c, 0x00000049, 0x000001e9 },
- { 0x00009a40, 0x00000089, 0x00000029 },
- { 0x00009a44, 0x00000170, 0x00000069 },
- { 0x00009a48, 0x000001b0, 0x00000190 },
- { 0x00009a4c, 0x000001f0, 0x000001d0 },
- { 0x00009a50, 0x00000030, 0x00000010 },
- { 0x00009a54, 0x00000070, 0x00000050 },
- { 0x00009a58, 0x00000191, 0x00000090 },
- { 0x00009a5c, 0x000001d1, 0x00000151 },
- { 0x00009a60, 0x00000011, 0x00000191 },
- { 0x00009a64, 0x00000051, 0x000001d1 },
- { 0x00009a68, 0x00000091, 0x00000011 },
- { 0x00009a6c, 0x000001b8, 0x00000051 },
- { 0x00009a70, 0x000001f8, 0x00000198 },
- { 0x00009a74, 0x00000038, 0x000001d8 },
- { 0x00009a78, 0x00000078, 0x00000018 },
- { 0x00009a7c, 0x00000199, 0x00000058 },
- { 0x00009a80, 0x000001d9, 0x00000098 },
- { 0x00009a84, 0x00000019, 0x00000159 },
- { 0x00009a88, 0x00000059, 0x00000199 },
- { 0x00009a8c, 0x00000099, 0x000001d9 },
- { 0x00009a90, 0x000000d9, 0x00000019 },
- { 0x00009a94, 0x000000f9, 0x00000059 },
- { 0x00009a98, 0x000000f9, 0x00000099 },
- { 0x00009a9c, 0x000000f9, 0x000000d9 },
- { 0x00009aa0, 0x000000f9, 0x000000f9 },
- { 0x00009aa4, 0x000000f9, 0x000000f9 },
- { 0x00009aa8, 0x000000f9, 0x000000f9 },
- { 0x00009aac, 0x000000f9, 0x000000f9 },
- { 0x00009ab0, 0x000000f9, 0x000000f9 },
- { 0x00009ab4, 0x000000f9, 0x000000f9 },
- { 0x00009ab8, 0x000000f9, 0x000000f9 },
- { 0x00009abc, 0x000000f9, 0x000000f9 },
- { 0x00009ac0, 0x000000f9, 0x000000f9 },
- { 0x00009ac4, 0x000000f9, 0x000000f9 },
- { 0x00009ac8, 0x000000f9, 0x000000f9 },
- { 0x00009acc, 0x000000f9, 0x000000f9 },
- { 0x00009ad0, 0x000000f9, 0x000000f9 },
- { 0x00009ad4, 0x000000f9, 0x000000f9 },
- { 0x00009ad8, 0x000000f9, 0x000000f9 },
- { 0x00009adc, 0x000000f9, 0x000000f9 },
- { 0x00009ae0, 0x000000f9, 0x000000f9 },
- { 0x00009ae4, 0x000000f9, 0x000000f9 },
- { 0x00009ae8, 0x000000f9, 0x000000f9 },
- { 0x00009aec, 0x000000f9, 0x000000f9 },
- { 0x00009af0, 0x000000f9, 0x000000f9 },
- { 0x00009af4, 0x000000f9, 0x000000f9 },
- { 0x00009af8, 0x000000f9, 0x000000f9 },
- { 0x00009afc, 0x000000f9, 0x000000f9 },
-};
-
-static const u32 ar5416Bank1_9160[][2] = {
- { 0x000098b0, 0x02108421 },
- { 0x000098ec, 0x00000008 },
-};
-
-static const u32 ar5416Bank2_9160[][2] = {
- { 0x000098b0, 0x0e73ff17 },
- { 0x000098e0, 0x00000420 },
-};
-
-static const u32 ar5416Bank3_9160[][3] = {
- { 0x000098f0, 0x01400018, 0x01c00018 },
-};
-
-static const u32 ar5416Bank6_9160[][3] = {
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x004210a2, 0x004210a2 },
- { 0x0000989c, 0x0014008f, 0x0014008f },
- { 0x0000989c, 0x00c40003, 0x00c40003 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000f1, 0x000000f1 },
- { 0x0000989c, 0x00002081, 0x00002081 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank6TPC_9160[][3] = {
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x00423022, 0x00423022 },
- { 0x0000989c, 0x2014008f, 0x2014008f },
- { 0x0000989c, 0x00c40002, 0x00c40002 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000e1, 0x000000e1 },
- { 0x0000989c, 0x00007080, 0x00007080 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank7_9160[][2] = {
- { 0x0000989c, 0x00000500 },
- { 0x0000989c, 0x00000800 },
- { 0x000098cc, 0x0000000e },
-};
+#ifndef INITVALS_9002_10_H
+#define INITVALS_9002_10_H
-static u32 ar5416Addac_9160[][2] = {
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000018 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000019 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000003 },
- {0x0000989c, 0x00000008 },
- {0x0000989c, 0x00000000 },
- {0x000098cc, 0x00000000 },
-};
-
-static u32 ar5416Addac_91601_1[][2] = {
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000018 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000019 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x000098cc, 0x00000000 },
-};
-
-/* XXX 9280 1 */
static const u32 ar9280Modes_9280[][6] = {
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -2766,7 +793,7 @@ static const u32 ar9280Common_9280_2[][2] = {
{ 0x00008258, 0x00000000 },
{ 0x0000825c, 0x400000ff },
{ 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8a00010 },
+ { 0x00008264, 0x88a00010 },
{ 0x00008270, 0x00000000 },
{ 0x00008274, 0x40000000 },
{ 0x00008278, 0x003e4180 },
@@ -3441,7 +1468,7 @@ static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
};
/* AR9285 Revision 10 */
-static const u_int32_t ar9285Modes_9285[][6] = {
+static const u32 ar9285Modes_9285[][6] = {
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
{ 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
@@ -3763,7 +1790,7 @@ static const u_int32_t ar9285Modes_9285[][6] = {
{ 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
};
-static const u_int32_t ar9285Common_9285[][2] = {
+static const u32 ar9285Common_9285[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020045 },
{ 0x00000034, 0x00000005 },
@@ -3936,7 +1963,7 @@ static const u_int32_t ar9285Common_9285[][2] = {
{ 0x00008258, 0x00000000 },
{ 0x0000825c, 0x400000ff },
{ 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8a00010 },
+ { 0x00008264, 0x88a00010 },
{ 0x00008270, 0x00000000 },
{ 0x00008274, 0x40000000 },
{ 0x00008278, 0x003e4180 },
@@ -4096,7 +2123,7 @@ static const u_int32_t ar9285Common_9285[][2] = {
{ 0x00007870, 0x10142c00 },
};
-static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
+static const u32 ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -4109,7 +2136,7 @@ static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
{0x00004044, 0x00000000 },
};
-static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285[][2] = {
+static const u32 ar9285PciePhy_clkreq_off_L1_9285[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -4123,7 +2150,7 @@ static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285[][2] = {
};
/* AR9285 v1_2 PCI Register Writes. Created: 04/13/09 */
-static const u_int32_t ar9285Modes_9285_1_2[][6] = {
+static const u32 ar9285Modes_9285_1_2[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -4184,7 +2211,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
- { 0x00009a50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
+ { 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -4198,8 +2225,8 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
- { 0x00009a88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
- { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
+ { 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
+ { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -4312,7 +2339,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
- { 0x0000aa50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
+ { 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -4326,8 +2353,8 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
- { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
- { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
+ { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
+ { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -4429,7 +2456,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
};
-static const u_int32_t ar9285Common_9285_1_2[][2] = {
+static const u32 ar9285Common_9285_1_2[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020045 },
{ 0x00000034, 0x00000005 },
@@ -4731,17 +2758,12 @@ static const u_int32_t ar9285Common_9285_1_2[][2] = {
{ 0x00007808, 0x54214514 },
{ 0x0000780c, 0x02025830 },
{ 0x00007810, 0x71c0d388 },
- { 0x00007814, 0x924934a8 },
{ 0x0000781c, 0x00000000 },
{ 0x00007824, 0x00d86fff },
- { 0x00007828, 0x26d2491b },
{ 0x0000782c, 0x6e36d97b },
- { 0x00007830, 0xedb6d96e },
{ 0x00007834, 0x71400087 },
- { 0x0000783c, 0x0001fffe },
- { 0x00007840, 0xffeb1a20 },
{ 0x00007844, 0x000c0db6 },
- { 0x00007848, 0x6db61b6f },
+ { 0x00007848, 0x6db6246f },
{ 0x0000784c, 0x6d9b66db },
{ 0x00007850, 0x6d8c6dba },
{ 0x00007854, 0x00040000 },
@@ -4753,7 +2775,7 @@ static const u_int32_t ar9285Common_9285_1_2[][2] = {
{ 0x00007870, 0x10142c00 },
};
-static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
+static const u32 ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 },
@@ -4777,7 +2799,12 @@ static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
{ 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 },
+ { 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b },
+ { 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e },
{ 0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803 },
+ { 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe },
+ { 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 },
{ 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe },
{ 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
{ 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 },
@@ -4789,7 +2816,7 @@ static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
{ 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
};
-static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
+static const u32 ar9285Modes_original_tx_gain_9285_1_2[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
@@ -4813,7 +2840,52 @@ static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
{ 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 },
+ { 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b },
+ { 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e },
{ 0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801 },
+ { 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe },
+ { 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 },
+ { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
+ { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
+ { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 },
+ { 0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
+ { 0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c },
+ { 0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
+ { 0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
+ { 0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
+ { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
+};
+
+static const u32 ar9285Modes_XE2_0_normal_power[][6] = {
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000 },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000 },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000 },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000 },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000 },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000 },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000 },
+ { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
+ { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
+ { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 },
+ { 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b },
+ { 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6dbae },
+ { 0x00007838, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441 },
+ { 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe },
+ { 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c },
{ 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
{ 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
{ 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 },
@@ -4825,7 +2897,47 @@ static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
{ 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
};
-static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
+static const u32 ar9285Modes_XE2_0_high_power[][6] = {
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000 },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000 },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000 },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000 },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000 },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000 },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000 },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000 },
+ { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
+ { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
+ { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 },
+ { 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b },
+ { 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e },
+ { 0x00007838, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443 },
+ { 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe },
+ { 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c },
+ { 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe },
+ { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
+ { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 },
+ { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
+ { 0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7 },
+ { 0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
+ { 0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
+ { 0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
+ { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
+};
+
+static const u32 ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -4838,7 +2950,7 @@ static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
{0x00004044, 0x00000000 },
};
-static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
+static const u32 ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -4852,7 +2964,7 @@ static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
};
/* AR9287 Revision 10 */
-static const u_int32_t ar9287Modes_9287_1_0[][6] = {
+static const u32 ar9287Modes_9287_1_0[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -4899,7 +3011,7 @@ static const u_int32_t ar9287Modes_9287_1_0[][6] = {
{ 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
};
-static const u_int32_t ar9287Common_9287_1_0[][2] = {
+static const u32 ar9287Common_9287_1_0[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020015 },
{ 0x00000034, 0x00000005 },
@@ -5073,7 +3185,7 @@ static const u_int32_t ar9287Common_9287_1_0[][2] = {
{ 0x00008258, 0x00000000 },
{ 0x0000825c, 0x400000ff },
{ 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8a00010 },
+ { 0x00008264, 0x88a00010 },
{ 0x00008270, 0x00000000 },
{ 0x00008274, 0x40000000 },
{ 0x00008278, 0x003e4180 },
@@ -5270,7 +3382,7 @@ static const u_int32_t ar9287Common_9287_1_0[][2] = {
{ 0x000078b8, 0x2a850160 },
};
-static const u_int32_t ar9287Modes_tx_gain_9287_1_0[][6] = {
+static const u32 ar9287Modes_tx_gain_9287_1_0[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 },
@@ -5320,7 +3432,7 @@ static const u_int32_t ar9287Modes_tx_gain_9287_1_0[][6] = {
};
-static const u_int32_t ar9287Modes_rx_gain_9287_1_0[][6] = {
+static const u32 ar9287Modes_rx_gain_9287_1_0[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 },
{ 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 },
@@ -5582,7 +3694,7 @@ static const u_int32_t ar9287Modes_rx_gain_9287_1_0[][6] = {
{ 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
};
-static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
+static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -5595,7 +3707,7 @@ static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
{0x00004044, 0x00000000 },
};
-static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
+static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -5610,7 +3722,7 @@ static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
/* AR9287 Revision 11 */
-static const u_int32_t ar9287Modes_9287_1_1[][6] = {
+static const u32 ar9287Modes_9287_1_1[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -5657,7 +3769,7 @@ static const u_int32_t ar9287Modes_9287_1_1[][6] = {
{ 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
};
-static const u_int32_t ar9287Common_9287_1_1[][2] = {
+static const u32 ar9287Common_9287_1_1[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020015 },
{ 0x00000034, 0x00000005 },
@@ -6027,21 +4139,22 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
/*
* For Japanese regulatory requirements, 2484 MHz requires the following three
- * registers be programmed differently from the channel between 2412 and 2472 MHz.
+ * registers be programmed differently from the channels between 2412 and
+ * 2472 MHz.
*/
-static const u_int32_t ar9287Common_normal_cck_fir_coeff_92871_1[][2] = {
+static const u32 ar9287Common_normal_cck_fir_coeff_92871_1[][2] = {
{ 0x0000a1f4, 0x00fffeff },
{ 0x0000a1f8, 0x00f5f9ff },
{ 0x0000a1fc, 0xb79f6427 },
};
-static const u_int32_t ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = {
+static const u32 ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = {
{ 0x0000a1f4, 0x00000000 },
{ 0x0000a1f8, 0xefff0301 },
{ 0x0000a1fc, 0xca9228ee },
};
-static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = {
+static const u32 ar9287Modes_tx_gain_9287_1_1[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 },
@@ -6090,7 +4203,7 @@ static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = {
{ 0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000 },
};
-static const u_int32_t ar9287Modes_rx_gain_9287_1_1[][6] = {
+static const u32 ar9287Modes_rx_gain_9287_1_1[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 },
{ 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 },
@@ -6352,7 +4465,7 @@ static const u_int32_t ar9287Modes_rx_gain_9287_1_1[][6] = {
{ 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
};
-static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
+static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -6365,7 +4478,7 @@ static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
{0x00004044, 0x00000000 },
};
-static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
+static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -6380,7 +4493,7 @@ static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
/* AR9271 initialization values automatically created: 06/04/09 */
-static const u_int32_t ar9271Modes_9271[][6] = {
+static const u32 ar9271Modes_9271[][6] = {
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
{ 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
@@ -6441,7 +4554,7 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
- { 0x00009a50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
+ { 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -6455,8 +4568,8 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
- { 0x00009a88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
- { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
+ { 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
+ { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -6569,7 +4682,7 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
- { 0x0000aa50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
+ { 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -6583,8 +4696,8 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
- { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
- { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
+ { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
+ { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -6683,29 +4796,10 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
{ 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
{ 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 },
- { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652 },
- { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
- { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
- { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
- { 0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000 },
- { 0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000 },
- { 0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000 },
- { 0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000 },
- { 0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000 },
- { 0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000 },
- { 0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000 },
- { 0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000 },
- { 0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000 },
- { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
- { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
- { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
- { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
- { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
};
-static const u_int32_t ar9271Common_9271[][2] = {
+static const u32 ar9271Common_9271[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020045 },
{ 0x00000034, 0x00000005 },
@@ -6910,13 +5004,10 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x00007810, 0x71c0d388 },
{ 0x00007814, 0x924934a8 },
{ 0x0000781c, 0x00000000 },
- { 0x00007820, 0x00000c04 },
- { 0x00007824, 0x00d8abff },
{ 0x00007828, 0x66964300 },
{ 0x0000782c, 0x8db6d961 },
{ 0x00007830, 0x8db6d96c },
{ 0x00007834, 0x6140008b },
- { 0x00007838, 0x00000029 },
{ 0x0000783c, 0x72ee0a72 },
{ 0x00007840, 0xbbfffffc },
{ 0x00007844, 0x000c0db6 },
@@ -6929,7 +5020,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x00007860, 0x21084210 },
{ 0x00007864, 0xf7d7ffde },
{ 0x00007868, 0xc2034080 },
- { 0x0000786c, 0x48609eb4 },
{ 0x00007870, 0x10142c00 },
{ 0x00009808, 0x00000000 },
{ 0x0000980c, 0xafe68e30 },
@@ -6982,9 +5072,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x000099e8, 0x3c466478 },
{ 0x000099ec, 0x0cc80caa },
{ 0x000099f0, 0x00000000 },
- { 0x0000a1f4, 0x00000000 },
- { 0x0000a1f8, 0x71733d01 },
- { 0x0000a1fc, 0xd0ad5c12 },
{ 0x0000a208, 0x803e68c8 },
{ 0x0000a210, 0x4080a333 },
{ 0x0000a214, 0x00206c10 },
@@ -7004,13 +5091,9 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x0000a260, 0xdfa90f01 },
{ 0x0000a268, 0x00000000 },
{ 0x0000a26c, 0x0ebae9e6 },
- { 0x0000a278, 0x3bdef7bd },
- { 0x0000a27c, 0x050e83bd },
{ 0x0000a388, 0x0c000000 },
{ 0x0000a38c, 0x20202020 },
{ 0x0000a390, 0x20202020 },
- { 0x0000a394, 0x3bdef7bd },
- { 0x0000a398, 0x000003bd },
{ 0x0000a39c, 0x00000001 },
{ 0x0000a3a0, 0x00000000 },
{ 0x0000a3a4, 0x00000000 },
@@ -7025,8 +5108,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x0000a3cc, 0x20202020 },
{ 0x0000a3d0, 0x20202020 },
{ 0x0000a3d4, 0x20202020 },
- { 0x0000a3dc, 0x3bdef7bd },
- { 0x0000a3e0, 0x000003bd },
{ 0x0000a3e4, 0x00000000 },
{ 0x0000a3e8, 0x18c43433 },
{ 0x0000a3ec, 0x00f70081 },
@@ -7046,7 +5127,104 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x0000d384, 0xf3307ff0 },
};
-static const u_int32_t ar9271Modes_9271_1_0_only[][6] = {
+static const u32 ar9271Common_normal_cck_fir_coeff_9271[][2] = {
+ { 0x0000a1f4, 0x00fffeff },
+ { 0x0000a1f8, 0x00f5f9ff },
+ { 0x0000a1fc, 0xb79f6427 },
+};
+
+static const u32 ar9271Common_japan_2484_cck_fir_coeff_9271[][2] = {
+ { 0x0000a1f4, 0x00000000 },
+ { 0x0000a1f8, 0xefff0301 },
+ { 0x0000a1fc, 0xca9228ee },
+};
+
+static const u32 ar9271Modes_9271_1_0_only[][6] = {
{ 0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311 },
{ 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
};
+
+static const u32 ar9271Modes_9271_ANI_reg[][6] = {
+ { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
+ { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e },
+ { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
+ { 0x0000986c, 0x06903881, 0x06903881, 0x06903881, 0x06903881, 0x06903881 },
+ { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
+ { 0x0000a208, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8 },
+ { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d },
+ { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
+};
+
+static const u32 ar9271Modes_normal_power_tx_gain_9271[][6] = {
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000 },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000 },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000 },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000 },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000 },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000 },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000 },
+ { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
+ { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
+ { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
+ { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007838, 0x00000029, 0x00000029, 0x00000029, 0x00000029, 0x00000029 },
+ { 0x00007824, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff },
+ { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
+ { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
+ { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652 },
+ { 0x0000a278, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
+ { 0x0000a27c, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd },
+ { 0x0000a394, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
+ { 0x0000a398, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd },
+ { 0x0000a3dc, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
+ { 0x0000a3e0, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd },
+};
+
+static const u32 ar9271Modes_high_power_tx_gain_9271[][6] = {
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00010000, 0x00010000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00016200, 0x00016200, 0x00000000 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00018201, 0x00018201, 0x00000000 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x0001b240, 0x0001b240, 0x00000000 },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x0001d241, 0x0001d241, 0x00000000 },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0001f600, 0x0001f600, 0x00000000 },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x00022800, 0x00022800, 0x00000000 },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x00026802, 0x00026802, 0x00000000 },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x0002b805, 0x0002b805, 0x00000000 },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x0002ea41, 0x0002ea41, 0x00000000 },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x00038b00, 0x00038b00, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0003ab40, 0x0003ab40, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x0003cd80, 0x0003cd80, 0x00000000 },
+ { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
+ { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
+ { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
+ { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007838, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b },
+ { 0x00007824, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff },
+ { 0x0000786c, 0x08609eb6, 0x08609eb6, 0x08609eba, 0x08609eba, 0x08609eb6 },
+ { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
+ { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a212652, 0x0a212652, 0x0a22a652 },
+ { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
+ { 0x0000a27c, 0x05018063, 0x05038063, 0x05018063, 0x05018063, 0x05018063 },
+ { 0x0000a394, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 },
+ { 0x0000a398, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 },
+ { 0x0000a3dc, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 },
+ { 0x0000a3e0, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 },
+};
+
+#endif /* INITVALS_9002_10_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
new file mode 100644
index 0000000..2be20d2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+
+#define AR_BufLen 0x00000fff
+
+static void ar9002_hw_rx_enable(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_CR, AR_CR_RXE);
+}
+
+static void ar9002_hw_set_desc_link(void *ds, u32 ds_link)
+{
+ ((struct ath_desc*) ds)->ds_link = ds_link;
+}
+
+static void ar9002_hw_get_desc_link(void *ds, u32 **ds_link)
+{
+ *ds_link = &((struct ath_desc *)ds)->ds_link;
+}
+
+static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+{
+ u32 isr = 0;
+ u32 mask2 = 0;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ u32 sync_cause = 0;
+ bool fatal_int = false;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!AR_SREV_9100(ah)) {
+ if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+ if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
+ == AR_RTC_STATUS_ON) {
+ isr = REG_READ(ah, AR_ISR);
+ }
+ }
+
+ sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
+ AR_INTR_SYNC_DEFAULT;
+
+ *masked = 0;
+
+ if (!isr && !sync_cause)
+ return false;
+ } else {
+ *masked = 0;
+ isr = REG_READ(ah, AR_ISR);
+ }
+
+ if (isr) {
+ if (isr & AR_ISR_BCNMISC) {
+ u32 isr2;
+ isr2 = REG_READ(ah, AR_ISR_S2);
+ if (isr2 & AR_ISR_S2_TIM)
+ mask2 |= ATH9K_INT_TIM;
+ if (isr2 & AR_ISR_S2_DTIM)
+ mask2 |= ATH9K_INT_DTIM;
+ if (isr2 & AR_ISR_S2_DTIMSYNC)
+ mask2 |= ATH9K_INT_DTIMSYNC;
+ if (isr2 & (AR_ISR_S2_CABEND))
+ mask2 |= ATH9K_INT_CABEND;
+ if (isr2 & AR_ISR_S2_GTT)
+ mask2 |= ATH9K_INT_GTT;
+ if (isr2 & AR_ISR_S2_CST)
+ mask2 |= ATH9K_INT_CST;
+ if (isr2 & AR_ISR_S2_TSFOOR)
+ mask2 |= ATH9K_INT_TSFOOR;
+ }
+
+ isr = REG_READ(ah, AR_ISR_RAC);
+ if (isr == 0xffffffff) {
+ *masked = 0;
+ return false;
+ }
+
+ *masked = isr & ATH9K_INT_COMMON;
+
+ if (ah->config.rx_intr_mitigation) {
+ if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
+ *masked |= ATH9K_INT_RX;
+ }
+
+ if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
+ *masked |= ATH9K_INT_RX;
+ if (isr &
+ (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
+ AR_ISR_TXEOL)) {
+ u32 s0_s, s1_s;
+
+ *masked |= ATH9K_INT_TX;
+
+ s0_s = REG_READ(ah, AR_ISR_S0_S);
+ ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
+ ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
+
+ s1_s = REG_READ(ah, AR_ISR_S1_S);
+ ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
+ ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
+ }
+
+ if (isr & AR_ISR_RXORN) {
+ ath_print(common, ATH_DBG_INTERRUPT,
+ "receive FIFO overrun interrupt\n");
+ }
+
+ if (!AR_SREV_9100(ah)) {
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
+ if (isr5 & AR_ISR_S5_TIM_TIMER)
+ *masked |= ATH9K_INT_TIM_TIMER;
+ }
+ }
+
+ *masked |= mask2;
+ }
+
+ if (AR_SREV_9100(ah))
+ return true;
+
+ if (isr & AR_ISR_GENTMR) {
+ u32 s5_s;
+
+ s5_s = REG_READ(ah, AR_ISR_S5_S);
+ if (isr & AR_ISR_GENTMR) {
+ ah->intr_gen_timer_trigger =
+ MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
+
+ ah->intr_gen_timer_thresh =
+ MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
+
+ if (ah->intr_gen_timer_trigger)
+ *masked |= ATH9K_INT_GENTIMER;
+
+ }
+ }
+
+ if (sync_cause) {
+ fatal_int =
+ (sync_cause &
+ (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
+ ? true : false;
+
+ if (fatal_int) {
+ if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
+ ath_print(common, ATH_DBG_ANY,
+ "received PCI FATAL interrupt\n");
+ }
+ if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
+ ath_print(common, ATH_DBG_ANY,
+ "received PCI PERR interrupt\n");
+ }
+ *masked |= ATH9K_INT_FATAL;
+ }
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
+ ath_print(common, ATH_DBG_INTERRUPT,
+ "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
+ REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
+ REG_WRITE(ah, AR_RC, 0);
+ *masked |= ATH9K_INT_FATAL;
+ }
+ if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
+ ath_print(common, ATH_DBG_INTERRUPT,
+ "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
+ }
+
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
+ (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
+ }
+
+ return true;
+}
+
+static void ar9002_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
+ bool is_firstseg, bool is_lastseg,
+ const void *ds0, dma_addr_t buf_addr,
+ unsigned int qcu)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_data = buf_addr;
+
+ if (is_firstseg) {
+ ads->ds_ctl1 |= seglen | (is_lastseg ? 0 : AR_TxMore);
+ } else if (is_lastseg) {
+ ads->ds_ctl0 = 0;
+ ads->ds_ctl1 = seglen;
+ ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
+ ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
+ } else {
+ ads->ds_ctl0 = 0;
+ ads->ds_ctl1 = seglen | AR_TxMore;
+ ads->ds_ctl2 = 0;
+ ads->ds_ctl3 = 0;
+ }
+ ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
+ ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
+ ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
+ ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
+ ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+}
+
+static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ if ((ads->ds_txstatus9 & AR_TxDone) == 0)
+ return -EINPROGRESS;
+
+ ts->ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
+ ts->ts_tstamp = ads->AR_SendTimestamp;
+ ts->ts_status = 0;
+ ts->ts_flags = 0;
+
+ if (ads->ds_txstatus1 & AR_FrmXmitOK)
+ ts->ts_status |= ATH9K_TX_ACKED;
+ if (ads->ds_txstatus1 & AR_ExcessiveRetries)
+ ts->ts_status |= ATH9K_TXERR_XRETRY;
+ if (ads->ds_txstatus1 & AR_Filtered)
+ ts->ts_status |= ATH9K_TXERR_FILT;
+ if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
+ ts->ts_status |= ATH9K_TXERR_FIFO;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->ds_txstatus9 & AR_TxOpExceeded)
+ ts->ts_status |= ATH9K_TXERR_XTXOP;
+ if (ads->ds_txstatus1 & AR_TxTimerExpired)
+ ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
+
+ if (ads->ds_txstatus1 & AR_DescCfgErr)
+ ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
+ if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->ds_txstatus0 & AR_TxBaStatus) {
+ ts->ts_flags |= ATH9K_TX_BA;
+ ts->ba_low = ads->AR_BaBitmapLow;
+ ts->ba_high = ads->AR_BaBitmapHigh;
+ }
+
+ ts->ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
+ switch (ts->ts_rateindex) {
+ case 0:
+ ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
+ break;
+ case 1:
+ ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
+ break;
+ case 2:
+ ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
+ break;
+ case 3:
+ ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
+ break;
+ }
+
+ ts->ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
+ ts->ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
+ ts->ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
+ ts->ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
+ ts->ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
+ ts->ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
+ ts->ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
+ ts->evm0 = ads->AR_TxEVM0;
+ ts->evm1 = ads->AR_TxEVM1;
+ ts->evm2 = ads->AR_TxEVM2;
+ ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
+ ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
+ ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
+ ts->ts_antenna = 0;
+
+ return 0;
+}
+
+static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
+ u32 pktLen, enum ath9k_pkt_type type,
+ u32 txPower, u32 keyIx,
+ enum ath9k_key_type keyType, u32 flags)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ txPower += ah->txpower_indexoffset;
+ if (txPower > 63)
+ txPower = 63;
+
+ ads->ds_ctl0 = (pktLen & AR_FrameLen)
+ | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(txPower, AR_XmitPower)
+ | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+ | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
+ | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
+ | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
+
+ ads->ds_ctl1 =
+ (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
+ | SM(type, AR_FrameType)
+ | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
+ | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
+ | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
+
+ ads->ds_ctl6 = SM(keyType, AR_EncrType);
+
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
+ ads->ds_ctl8 = 0;
+ ads->ds_ctl9 = 0;
+ ads->ds_ctl10 = 0;
+ ads->ds_ctl11 = 0;
+ }
+}
+
+static void ar9002_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
+ void *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ struct ar5416_desc *last_ads = AR5416DESC(lastds);
+ u32 ds_ctl0;
+
+ if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
+ ds_ctl0 = ads->ds_ctl0;
+
+ if (flags & ATH9K_TXDESC_RTSENA) {
+ ds_ctl0 &= ~AR_CTSEnable;
+ ds_ctl0 |= AR_RTSEnable;
+ } else {
+ ds_ctl0 &= ~AR_RTSEnable;
+ ds_ctl0 |= AR_CTSEnable;
+ }
+
+ ads->ds_ctl0 = ds_ctl0;
+ } else {
+ ads->ds_ctl0 =
+ (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
+ }
+
+ ads->ds_ctl2 = set11nTries(series, 0)
+ | set11nTries(series, 1)
+ | set11nTries(series, 2)
+ | set11nTries(series, 3)
+ | (durUpdateEn ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+ ads->ds_ctl3 = set11nRate(series, 0)
+ | set11nRate(series, 1)
+ | set11nRate(series, 2)
+ | set11nRate(series, 3);
+
+ ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
+ | set11nPktDurRTSCTS(series, 1);
+
+ ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
+ | set11nPktDurRTSCTS(series, 3);
+
+ ads->ds_ctl7 = set11nRateFlags(series, 0)
+ | set11nRateFlags(series, 1)
+ | set11nRateFlags(series, 2)
+ | set11nRateFlags(series, 3)
+ | SM(rtsctsRate, AR_RTSCTSRate);
+ last_ads->ds_ctl2 = ads->ds_ctl2;
+ last_ads->ds_ctl3 = ads->ds_ctl3;
+}
+
+static void ar9002_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
+ u32 aggrLen)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
+ ads->ds_ctl6 &= ~AR_AggrLen;
+ ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
+}
+
+static void ar9002_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
+ u32 numDelims)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ unsigned int ctl6;
+
+ ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
+
+ ctl6 = ads->ds_ctl6;
+ ctl6 &= ~AR_PadDelim;
+ ctl6 |= SM(numDelims, AR_PadDelim);
+ ads->ds_ctl6 = ctl6;
+}
+
+static void ar9002_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl1 |= AR_IsAggr;
+ ads->ds_ctl1 &= ~AR_MoreAggr;
+ ads->ds_ctl6 &= ~AR_PadDelim;
+}
+
+static void ar9002_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
+}
+
+static void ar9002_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
+ u32 burstDuration)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl2 &= ~AR_BurstDur;
+ ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
+}
+
+static void ar9002_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
+ u32 vmf)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ if (vmf)
+ ads->ds_ctl0 |= AR_VirtMoreFrag;
+ else
+ ads->ds_ctl0 &= ~AR_VirtMoreFrag;
+}
+
+void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
+ u32 size, u32 flags)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+
+ ads->ds_ctl1 = size & AR_BufLen;
+ if (flags & ATH9K_RXDESC_INTREQ)
+ ads->ds_ctl1 |= AR_RxIntrReq;
+
+ ads->ds_rxstatus8 &= ~AR_RxDone;
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ memset(&(ads->u), 0, sizeof(ads->u));
+}
+EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
+
+void ar9002_hw_attach_mac_ops(struct ath_hw *ah)
+{
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ ops->rx_enable = ar9002_hw_rx_enable;
+ ops->set_desc_link = ar9002_hw_set_desc_link;
+ ops->get_desc_link = ar9002_hw_get_desc_link;
+ ops->get_isr = ar9002_hw_get_isr;
+ ops->fill_txdesc = ar9002_hw_fill_txdesc;
+ ops->proc_txdesc = ar9002_hw_proc_txdesc;
+ ops->set11n_txdesc = ar9002_hw_set11n_txdesc;
+ ops->set11n_ratescenario = ar9002_hw_set11n_ratescenario;
+ ops->set11n_aggr_first = ar9002_hw_set11n_aggr_first;
+ ops->set11n_aggr_middle = ar9002_hw_set11n_aggr_middle;
+ ops->set11n_aggr_last = ar9002_hw_set11n_aggr_last;
+ ops->clr11n_aggr = ar9002_hw_clr11n_aggr;
+ ops->set11n_burstduration = ar9002_hw_set11n_burstduration;
+ ops->set11n_virtualmorefrag = ar9002_hw_set11n_virtualmorefrag;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
new file mode 100644
index 0000000..ed314e8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -0,0 +1,535 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: Programming Atheros 802.11n analog front end radios
+ *
+ * AR5416 MAC based PCI devices and AR5418 MAC based PCI-Express
+ * devices have either an external AR2133 analog front end radio for single
+ * band 2.4 GHz communication or an AR5133 analog front end radio for dual
+ * band 2.4 GHz / 5 GHz communication.
+ *
+ * All devices after the AR5416 and AR5418 family starting with the AR9280
+ * have their analog front end radios, MAC/BB and host PCIe/USB interface
+ * embedded into a single chip and require less programming.
+ *
+ * The following single-chips exist with a respective embedded radio:
+ *
+ * AR9280 - 11n dual-band 2x2 MIMO for PCIe
+ * AR9281 - 11n single-band 1x2 MIMO for PCIe
+ * AR9285 - 11n single-band 1x1 for PCIe
+ * AR9287 - 11n single-band 2x2 MIMO for PCIe
+ *
+ * AR9220 - 11n dual-band 2x2 MIMO for PCI
+ * AR9223 - 11n single-band 2x2 MIMO for PCI
+ *
+ * AR9287 - 11n single-band 1x1 MIMO for USB
+ */
+
+#include "hw.h"
+#include "ar9002_phy.h"
+
+/**
+ * ar9002_hw_set_channel - set channel on single-chip device
+ * @ah: atheros hardware structure
+ * @chan: the channel to set
+ *
+ * This is the function to change the channel on single-chip devices, that
+ * is, all devices after the AR9280.
+ *
+ * This function takes the channel value in MHz and sets the
+ * hardware channel value. Assumes writes have been enabled to the analog bus.
+ *
+ * Actual Expression,
+ *
+ * For 2GHz channel,
+ * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ *
+ * For 5GHz channel,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
+ * (freq_ref = 40MHz/(24>>amodeRefSel))
+ */
+static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u16 bMode, fracMode, aModeRefSel = 0;
+ u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
+ struct chan_centers centers;
+ u32 refDivA = 24;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
+ reg32 &= 0xc0000000;
+
+ if (freq < 4800) { /* 2 GHz, fractional mode */
+ u32 txctl;
+ int regWrites = 0;
+
+ bMode = 1;
+ fracMode = 1;
+ aModeRefSel = 0;
+ channelSel = CHANSEL_2G(freq);
+
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ if (freq == 2484) {
+ /* Enable channel spreading for channel 14 */
+ REG_WRITE_ARRAY(&ah->iniCckfirJapan2484,
+ 1, regWrites);
+ } else {
+ REG_WRITE_ARRAY(&ah->iniCckfirNormal,
+ 1, regWrites);
+ }
+ } else {
+ txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+ if (freq == 2484) {
+ /* Enable channel spreading for channel 14 */
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+ } else {
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
+ }
+ }
+ } else {
+ bMode = 0;
+ fracMode = 0;
+
+ switch (ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) {
+ case 0:
+ if ((freq % 20) == 0)
+ aModeRefSel = 3;
+ else if ((freq % 10) == 0)
+ aModeRefSel = 2;
+ if (aModeRefSel)
+ break;
+ case 1:
+ default:
+ aModeRefSel = 0;
+ /*
+ * Enable 2G (fractional) mode for channels
+ * which are 5MHz spaced.
+ */
+ fracMode = 1;
+ refDivA = 1;
+ channelSel = CHANSEL_5G(freq);
+
+ /* RefDivA setting */
+ REG_RMW_FIELD(ah, AR_AN_SYNTH9,
+ AR_AN_SYNTH9_REFDIVA, refDivA);
+
+ }
+
+ if (!fracMode) {
+ ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
+ channelSel = ndiv & 0x1ff;
+ channelFrac = (ndiv & 0xfffffe00) * 2;
+ channelSel = (channelSel << 17) | channelFrac;
+ }
+ }
+
+ reg32 = reg32 |
+ (bMode << 29) |
+ (fracMode << 28) | (aModeRefSel << 26) | (channelSel);
+
+ REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
+
+ ah->curchan = chan;
+ ah->curchan_rad_index = -1;
+
+ return 0;
+}
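The 2 GHz branch of the function above programs AR_PHY_SYNTH_CONTROL with a value derived from the formula quoted in the comment, f = (3/4) * 40 MHz * (chansel + chanfrac / 2^17), so the packed word is freq * 2^17 / 30 when chansel sits in the upper bits and chanfrac in the low 17 bits, as the non-fractional path packs them explicitly. The small standalone check below only exercises that arithmetic; it does not use the driver's CHANSEL_2G() macro, whose exact rounding may differ.

#include <stdio.h>
#include <stdint.h>

/*
 * Numerical check of the 2 GHz formula above:
 *   f = (3/4) * 40 MHz * (value / 2^17)   =>   value = f * 2^17 / 30
 */
static uint32_t synth_word_2g(unsigned int freq_mhz)
{
	return (uint32_t)(((uint64_t)freq_mhz << 17) / 30);
}

int main(void)
{
	unsigned int channels[] = { 2412, 2437, 2462, 2484 };
	unsigned int i;

	for (i = 0; i < sizeof(channels) / sizeof(channels[0]); i++) {
		uint32_t v = synth_word_2g(channels[i]);
		double back = 30.0 * v / 131072.0;	/* invert the formula */

		printf("%u MHz -> 0x%06x (chansel %u), back to %.3f MHz\n",
		       channels[i], (unsigned)v, (unsigned)(v >> 17), back);
	}
	return 0;
}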
+
+/**
+ * ar9002_hw_spur_mitigate - convert baseband spur frequency
+ * @ah: atheros hardware structure
+ * @chan: the channel being configured
+ *
+ * For single-chip solutions. Converts the input channel frequency to a
+ * baseband spur frequency and computes the register settings below.
+ */
+static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int bb_spur = AR_NO_SPUR;
+ int freq;
+ int bin, cur_bin;
+ int bb_spur_off, spur_subchannel_sd;
+ int spur_freq_sd;
+ int spur_delta_phase;
+ int denominator;
+ int upper, lower, cur_vit_mask;
+ int tmp, newVal;
+ int i;
+ int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
+ AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+ };
+ int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
+ AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+ };
+ int inc[4] = { 0, 100, 0, 0 };
+ struct chan_centers centers;
+
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int8_t mask_amt;
+ int tmp_mask;
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ ah->config.spurmode = SPUR_ENABLE_EEPROM;
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+
+ if (is2GHz)
+ cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
+ else
+ cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
+
+ if (AR_NO_SPUR == cur_bb_spur)
+ break;
+ cur_bb_spur = cur_bb_spur - freq;
+
+ if (IS_CHAN_HT40(chan)) {
+ if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
+ (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
+ (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ }
+
+ if (AR_NO_SPUR == bb_spur) {
+ REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
+ AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
+ return;
+ } else {
+ REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
+ AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
+ }
+
+ bin = bb_spur * 320;
+
+ tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+ AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+ AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+ AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
+
+ newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+ AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+ AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+ SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+ REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
+
+ if (IS_CHAN_HT40(chan)) {
+ if (bb_spur < 0) {
+ spur_subchannel_sd = 1;
+ bb_spur_off = bb_spur + 10;
+ } else {
+ spur_subchannel_sd = 0;
+ bb_spur_off = bb_spur - 10;
+ }
+ } else {
+ spur_subchannel_sd = 0;
+ bb_spur_off = bb_spur;
+ }
+
+ if (IS_CHAN_HT40(chan))
+ spur_delta_phase =
+ ((bb_spur * 262144) /
+ 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+ else
+ spur_delta_phase =
+ ((bb_spur * 524288) /
+ 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+ denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
+ spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
+
+ newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+ SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+ SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+ REG_WRITE(ah, AR_PHY_TIMING11, newVal);
+
+ newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
+ REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
+
+ cur_bin = -6000;
+ upper = bin + 100;
+ lower = bin - 100;
+
+ for (i = 0; i < 4; i++) {
+ int pilot_mask = 0;
+ int chan_mask = 0;
+ int bp = 0;
+ for (bp = 0; bp < 30; bp++) {
+ if ((cur_bin > lower) && (cur_bin < upper)) {
+ pilot_mask = pilot_mask | 0x1 << bp;
+ chan_mask = chan_mask | 0x1 << bp;
+ }
+ cur_bin += 100;
+ }
+ cur_bin += inc[i];
+ REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
+ REG_WRITE(ah, chan_mask_reg[i], chan_mask);
+ }
+
+ cur_vit_mask = 6100;
+ upper = bin + 120;
+ lower = bin - 120;
+
+ for (i = 0; i < 123; i++) {
+ if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
+
+ /* workaround for gcc bug #37014 */
+ volatile int tmp_v = abs(cur_vit_mask - bin);
+
+ if (tmp_v < 75)
+ mask_amt = 1;
+ else
+ mask_amt = 0;
+ if (cur_vit_mask < 0)
+ mask_m[abs(cur_vit_mask / 100)] = mask_amt;
+ else
+ mask_p[cur_vit_mask / 100] = mask_amt;
+ }
+ cur_vit_mask -= 100;
+ }
+
+ tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
+ | (mask_m[48] << 26) | (mask_m[49] << 24)
+ | (mask_m[50] << 22) | (mask_m[51] << 20)
+ | (mask_m[52] << 18) | (mask_m[53] << 16)
+ | (mask_m[54] << 14) | (mask_m[55] << 12)
+ | (mask_m[56] << 10) | (mask_m[57] << 8)
+ | (mask_m[58] << 6) | (mask_m[59] << 4)
+ | (mask_m[60] << 2) | (mask_m[61] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
+
+ tmp_mask = (mask_m[31] << 28)
+ | (mask_m[32] << 26) | (mask_m[33] << 24)
+ | (mask_m[34] << 22) | (mask_m[35] << 20)
+ | (mask_m[36] << 18) | (mask_m[37] << 16)
+		| (mask_m[38] << 14) | (mask_m[39] << 12)
+ | (mask_m[40] << 10) | (mask_m[41] << 8)
+ | (mask_m[42] << 6) | (mask_m[43] << 4)
+ | (mask_m[44] << 2) | (mask_m[45] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
+
+ tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
+ | (mask_m[18] << 26) | (mask_m[18] << 24)
+ | (mask_m[20] << 22) | (mask_m[20] << 20)
+ | (mask_m[22] << 18) | (mask_m[22] << 16)
+ | (mask_m[24] << 14) | (mask_m[24] << 12)
+ | (mask_m[25] << 10) | (mask_m[26] << 8)
+ | (mask_m[27] << 6) | (mask_m[28] << 4)
+ | (mask_m[29] << 2) | (mask_m[30] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
+
+ tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
+ | (mask_m[2] << 26) | (mask_m[3] << 24)
+ | (mask_m[4] << 22) | (mask_m[5] << 20)
+ | (mask_m[6] << 18) | (mask_m[7] << 16)
+ | (mask_m[8] << 14) | (mask_m[9] << 12)
+ | (mask_m[10] << 10) | (mask_m[11] << 8)
+ | (mask_m[12] << 6) | (mask_m[13] << 4)
+ | (mask_m[14] << 2) | (mask_m[15] << 0);
+ REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
+
+ tmp_mask = (mask_p[15] << 28)
+ | (mask_p[14] << 26) | (mask_p[13] << 24)
+ | (mask_p[12] << 22) | (mask_p[11] << 20)
+ | (mask_p[10] << 18) | (mask_p[9] << 16)
+ | (mask_p[8] << 14) | (mask_p[7] << 12)
+ | (mask_p[6] << 10) | (mask_p[5] << 8)
+ | (mask_p[4] << 6) | (mask_p[3] << 4)
+ | (mask_p[2] << 2) | (mask_p[1] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
+
+ tmp_mask = (mask_p[30] << 28)
+ | (mask_p[29] << 26) | (mask_p[28] << 24)
+ | (mask_p[27] << 22) | (mask_p[26] << 20)
+ | (mask_p[25] << 18) | (mask_p[24] << 16)
+ | (mask_p[23] << 14) | (mask_p[22] << 12)
+ | (mask_p[21] << 10) | (mask_p[20] << 8)
+ | (mask_p[19] << 6) | (mask_p[18] << 4)
+ | (mask_p[17] << 2) | (mask_p[16] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
+
+ tmp_mask = (mask_p[45] << 28)
+ | (mask_p[44] << 26) | (mask_p[43] << 24)
+ | (mask_p[42] << 22) | (mask_p[41] << 20)
+ | (mask_p[40] << 18) | (mask_p[39] << 16)
+ | (mask_p[38] << 14) | (mask_p[37] << 12)
+ | (mask_p[36] << 10) | (mask_p[35] << 8)
+ | (mask_p[34] << 6) | (mask_p[33] << 4)
+ | (mask_p[32] << 2) | (mask_p[31] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
+
+ tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
+ | (mask_p[59] << 26) | (mask_p[58] << 24)
+ | (mask_p[57] << 22) | (mask_p[56] << 20)
+ | (mask_p[55] << 18) | (mask_p[54] << 16)
+ | (mask_p[53] << 14) | (mask_p[52] << 12)
+ | (mask_p[51] << 10) | (mask_p[50] << 8)
+ | (mask_p[49] << 6) | (mask_p[48] << 4)
+ | (mask_p[47] << 2) | (mask_p[46] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
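The pilot/channel mask loop in the function above visits 4 x 30 bins in steps of 100, starting at -6000, and sets a bit for every bin that falls strictly inside spur_bin +/- 100. The standalone sketch below reproduces only that windowing; the spur bin is an arbitrary example value and the register writes are replaced by printf().

#include <stdio.h>

int main(void)
{
	int spur_bin = 1600;		/* arbitrary example spur position */
	int upper = spur_bin + 100;
	int lower = spur_bin - 100;
	int inc[4] = { 0, 100, 0, 0 };	/* same per-register skip as above */
	int cur_bin = -6000;
	int i, bp;

	for (i = 0; i < 4; i++) {
		unsigned int mask = 0;

		for (bp = 0; bp < 30; bp++) {
			if (cur_bin > lower && cur_bin < upper)
				mask |= 1u << bp;
			cur_bin += 100;
		}
		cur_bin += inc[i];
		printf("mask reg %d = 0x%08x\n", i, mask);
	}
	return 0;
}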
+
+static void ar9002_olc_init(struct ath_hw *ah)
+{
+ u32 i;
+
+ if (!OLC_FOR_AR9280_20_LATER)
+ return;
+
+ if (OLC_FOR_AR9287_10_LATER) {
+ REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9,
+ AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL);
+ ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0,
+ AR9287_AN_TXPC0_TXPCMODE,
+ AR9287_AN_TXPC0_TXPCMODE_S,
+ AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE);
+ udelay(100);
+ } else {
+ for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++)
+ ah->originalGain[i] =
+ MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4),
+ AR_PHY_TX_GAIN);
+ ah->PDADCdelta = 0;
+ }
+}
+
+static u32 ar9002_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan)) {
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ pll = 0x142c;
+ else if (AR_SREV_9280_20(ah))
+ pll = 0x2850;
+ else
+ pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
+ } else {
+ pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
+ }
+
+ return pll;
+}
+
+static void ar9002_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int16_t nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 0] is %d\n", nf);
+
+ if (AR_SREV_9271(ah) && (nf >= -114))
+ nf = -116;
+
+ nfarray[0] = nf;
+
+ if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
+ nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
+ AR9280_PHY_CH1_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 1] is %d\n", nf);
+ nfarray[1] = nf;
+ }
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 0] is %d\n", nf);
+
+ if (AR_SREV_9271(ah) && (nf >= -114))
+ nf = -116;
+
+ nfarray[3] = nf;
+
+ if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
+ nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
+ AR9280_PHY_CH1_EXT_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 1] is %d\n", nf);
+ nfarray[4] = nf;
+ }
+}
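The noise-floor fields read in ar9002_hw_do_getnf() above are 9-bit two's complement quantities, hence the "if (nf & 0x100)" conversion. A tiny standalone version of that sign-extension idiom, with the function name and test values chosen only for illustration:

#include <stdio.h>
#include <stdint.h>

static int16_t sign_extend_9bit(uint16_t raw)
{
	int16_t nf = raw & 0x1ff;

	/* If bit 8 is set, invert the low 9 bits, add one and negate. */
	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);
	return nf;
}

int main(void)
{
	/* 0x1a6 has bit 8 set, so it decodes to a negative reading */
	printf("0x1a6 -> %d\n", sign_extend_9bit(0x1a6));	/* -90 */
	printf("0x05a -> %d\n", sign_extend_9bit(0x05a));	/*  90 */
	return 0;
}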
+
+void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ priv_ops->set_rf_regs = NULL;
+ priv_ops->rf_alloc_ext_banks = NULL;
+ priv_ops->rf_free_ext_banks = NULL;
+ priv_ops->rf_set_freq = ar9002_hw_set_channel;
+ priv_ops->spur_mitigate_freq = ar9002_hw_spur_mitigate;
+ priv_ops->olc_init = ar9002_olc_init;
+ priv_ops->compute_pll_control = ar9002_hw_compute_pll_control;
+ priv_ops->do_getnf = ar9002_hw_do_getnf;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
new file mode 100644
index 0000000..81bf6e5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -0,0 +1,572 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef AR9002_PHY_H
+#define AR9002_PHY_H
+
+#define AR_PHY_TEST 0x9800
+#define PHY_AGC_CLR 0x10000000
+#define RFSILENT_BB 0x00002000
+
+#define AR_PHY_TURBO 0x9804
+#define AR_PHY_FC_TURBO_MODE 0x00000001
+#define AR_PHY_FC_TURBO_SHORT 0x00000002
+#define AR_PHY_FC_DYN2040_EN 0x00000004
+#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
+#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
+/* For 25 MHz channel spacing -- not used but supported by hw */
+#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
+#define AR_PHY_FC_HT_EN 0x00000040
+#define AR_PHY_FC_SHORT_GI_40 0x00000080
+#define AR_PHY_FC_WALSH 0x00000100
+#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
+#define AR_PHY_FC_ENABLE_DAC_FIFO 0x00000800
+
+#define AR_PHY_TEST2 0x9808
+
+#define AR_PHY_TIMING2 0x9810
+#define AR_PHY_TIMING3 0x9814
+#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
+#define AR_PHY_TIMING3_DSC_MAN_S 17
+#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
+#define AR_PHY_TIMING3_DSC_EXP_S 13
+
+#define AR_PHY_CHIP_ID_REV_0 0x80
+#define AR_PHY_CHIP_ID_REV_1 0x81
+#define AR_PHY_CHIP_ID_9160_REV_0 0xb0
+
+#define AR_PHY_ACTIVE 0x981C
+#define AR_PHY_ACTIVE_EN 0x00000001
+#define AR_PHY_ACTIVE_DIS 0x00000000
+
+#define AR_PHY_RF_CTL2 0x9824
+#define AR_PHY_TX_END_DATA_START 0x000000FF
+#define AR_PHY_TX_END_DATA_START_S 0
+#define AR_PHY_TX_END_PA_ON 0x0000FF00
+#define AR_PHY_TX_END_PA_ON_S 8
+
+#define AR_PHY_RF_CTL3 0x9828
+#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
+#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
+
+#define AR_PHY_ADC_CTL 0x982C
+#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
+#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
+#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
+#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
+#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000
+#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
+#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16
+
+#define AR_PHY_ADC_SERIAL_CTL 0x9830
+#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000
+#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001
+
+#define AR_PHY_RF_CTL4 0x9834
+#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000
+#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
+#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000
+#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
+#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00
+#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
+#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
+#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
+
+#define AR_PHY_TSTDAC_CONST 0x983c
+
+#define AR_PHY_SETTLING 0x9844
+#define AR_PHY_SETTLING_SWITCH 0x00003F80
+#define AR_PHY_SETTLING_SWITCH_S 7
+
+#define AR_PHY_RXGAIN 0x9848
+#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
+#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
+#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
+#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
+
+#define AR_PHY_DESIRED_SZ 0x9850
+#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
+#define AR_PHY_DESIRED_SZ_ADC_S 0
+#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
+#define AR_PHY_DESIRED_SZ_PGA_S 8
+#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
+#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
+
+#define AR_PHY_FIND_SIG 0x9858
+#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
+#define AR_PHY_FIND_SIG_FIRSTEP_S 12
+#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
+#define AR_PHY_FIND_SIG_FIRPWR_S 18
+
+#define AR_PHY_AGC_CTL1 0x985C
+#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
+#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
+#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000
+#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15
+
+#define AR_PHY_CCA 0x9864
+#define AR_PHY_MINCCA_PWR 0x0FF80000
+#define AR_PHY_MINCCA_PWR_S 19
+#define AR_PHY_CCA_THRESH62 0x0007F000
+#define AR_PHY_CCA_THRESH62_S 12
+#define AR9280_PHY_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_MINCCA_PWR_S 20
+#define AR9280_PHY_CCA_THRESH62 0x000FF000
+#define AR9280_PHY_CCA_THRESH62_S 12
+
+#define AR_PHY_SFCORR_LOW 0x986C
+#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
+
+#define AR_PHY_SFCORR 0x9868
+#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
+#define AR_PHY_SFCORR_M2COUNT_THR_S 0
+#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
+#define AR_PHY_SFCORR_M1_THRESH_S 17
+#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
+#define AR_PHY_SFCORR_M2_THRESH_S 24
+
+#define AR_PHY_SLEEP_CTR_CONTROL 0x9870
+#define AR_PHY_SLEEP_CTR_LIMIT 0x9874
+#define AR_PHY_SYNTH_CONTROL 0x9874
+#define AR_PHY_SLEEP_SCAL 0x9878
+
+#define AR_PHY_PLL_CTL 0x987c
+#define AR_PHY_PLL_CTL_40 0xaa
+#define AR_PHY_PLL_CTL_40_5413 0x04
+#define AR_PHY_PLL_CTL_44 0xab
+#define AR_PHY_PLL_CTL_44_2133 0xeb
+#define AR_PHY_PLL_CTL_40_2133 0xea
+
+#define AR_PHY_SPECTRAL_SCAN 0x9910 /* AR9280 spectral scan configuration register */
+#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1
+#define AR_PHY_SPECTRAL_SCAN_ENA 0x00000001 /* Enable spectral scan, reg 68, bit 0 */
+#define AR_PHY_SPECTRAL_SCAN_ENA_S 0 /* Enable spectral scan, reg 68, bit 0 */
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 /* Activate spectral scan reg 68, bit 1*/
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 /* Activate spectral scan reg 68, bit 1*/
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 /* Interval for FFT reports, reg 68, bits 4-7*/
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
+#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 /* Interval for FFT reports, reg 68, bits 8-15*/
+#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
+#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/
+#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 /* Short repeat, reg 68, bit 24*/
+
+#define AR_PHY_RX_DELAY 0x9914
+#define AR_PHY_SEARCH_START_DELAY 0x9918
+#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
+
+#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12))
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
+#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
+#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000
+#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
+#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000
+
+#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
+
+#define AR_PHY_TIMING5 0x9924
+#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
+#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
+
+#define AR_PHY_POWER_TX_RATE1 0x9934
+#define AR_PHY_POWER_TX_RATE2 0x9938
+#define AR_PHY_POWER_TX_RATE_MAX 0x993c
+#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
+
+#define AR_PHY_FRAME_CTL 0x9944
+#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038
+#define AR_PHY_FRAME_CTL_TX_CLIP_S 3
+
+#define AR_PHY_TXPWRADJ 0x994C
+#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0
+#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6
+#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000
+#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18
+
+#define AR_PHY_RADAR_EXT 0x9940
+#define AR_PHY_RADAR_EXT_ENA 0x00004000
+
+#define AR_PHY_RADAR_0 0x9954
+#define AR_PHY_RADAR_0_ENA 0x00000001
+#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
+#define AR_PHY_RADAR_0_INBAND 0x0000003e
+#define AR_PHY_RADAR_0_INBAND_S 1
+#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
+#define AR_PHY_RADAR_0_PRSSI_S 6
+#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
+#define AR_PHY_RADAR_0_HEIGHT_S 12
+#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
+#define AR_PHY_RADAR_0_RRSSI_S 18
+#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
+#define AR_PHY_RADAR_0_FIRPWR_S 24
+
+#define AR_PHY_RADAR_1 0x9958
+#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
+#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
+#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
+#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
+#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
+#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
+#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
+#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
+#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
+#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
+#define AR_PHY_RADAR_1_MAXLEN_S 0
+
+#define AR_PHY_SWITCH_CHAIN_0 0x9960
+#define AR_PHY_SWITCH_COM 0x9964
+
+#define AR_PHY_SIGMA_DELTA 0x996C
+#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
+#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0
+#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8
+#define AR_PHY_SIGMA_DELTA_FILT2_S 3
+#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00
+#define AR_PHY_SIGMA_DELTA_FILT1_S 8
+#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000
+#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13
+
+#define AR_PHY_RESTART 0x9970
+#define AR_PHY_RESTART_DIV_GC 0x001C0000
+#define AR_PHY_RESTART_DIV_GC_S 18
+
+#define AR_PHY_RFBUS_REQ 0x997C
+#define AR_PHY_RFBUS_REQ_EN 0x00000001
+
+#define AR_PHY_TIMING7 0x9980
+#define AR_PHY_TIMING8 0x9984
+#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF
+#define AR_PHY_TIMING8_PILOT_MASK_2_S 0
+
+#define AR_PHY_BIN_MASK2_1 0x9988
+#define AR_PHY_BIN_MASK2_2 0x998c
+#define AR_PHY_BIN_MASK2_3 0x9990
+#define AR_PHY_BIN_MASK2_4 0x9994
+
+#define AR_PHY_BIN_MASK_1 0x9900
+#define AR_PHY_BIN_MASK_2 0x9904
+#define AR_PHY_BIN_MASK_3 0x9908
+
+#define AR_PHY_MASK_CTL 0x990c
+
+#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF
+#define AR_PHY_BIN_MASK2_4_MASK_4_S 0
+
+#define AR_PHY_TIMING9 0x9998
+#define AR_PHY_TIMING10 0x999c
+#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF
+#define AR_PHY_TIMING10_PILOT_MASK_2_S 0
+
+#define AR_PHY_TIMING11 0x99a0
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
+#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
+#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
+
+#define AR_PHY_RX_CHAINMASK 0x99a4
+#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12))
+#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
+#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
+
+#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
+#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
+#define AR_PHY_9285_ANT_DIV_CTL 0x01000000
+#define AR_PHY_9285_ANT_DIV_CTL_S 24
+#define AR_PHY_9285_ANT_DIV_ALT_LNACONF 0x06000000
+#define AR_PHY_9285_ANT_DIV_ALT_LNACONF_S 25
+#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF 0x18000000
+#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S 27
+#define AR_PHY_9285_ANT_DIV_ALT_GAINTB 0x20000000
+#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
+#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
+#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
+#define AR_PHY_9285_ANT_DIV_LNA1 2
+#define AR_PHY_9285_ANT_DIV_LNA2 1
+#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
+#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
+#define AR_PHY_9285_ANT_DIV_GAINTB_0 0
+#define AR_PHY_9285_ANT_DIV_GAINTB_1 1
+
+#define AR_PHY_EXT_CCA0 0x99b8
+#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
+#define AR_PHY_EXT_CCA0_THRESH62_S 0
+
+#define AR_PHY_EXT_CCA 0x99bc
+#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00
+#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
+#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
+#define AR_PHY_EXT_CCA_THRESH62_S 16
+#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_EXT_MINCCA_PWR_S 23
+#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
+#define AR9280_PHY_EXT_MINCCA_PWR_S 16
+
+#define AR_PHY_SFCORR_EXT 0x99c0
+#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
+#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
+#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
+#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
+#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
+
+#define AR_PHY_HALFGI 0x99D0
+#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0
+#define AR_PHY_HALFGI_DSC_MAN_S 4
+#define AR_PHY_HALFGI_DSC_EXP 0x0000000F
+#define AR_PHY_HALFGI_DSC_EXP_S 0
+
+#define AR_PHY_CHAN_INFO_MEMORY 0x99DC
+#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
+
+#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
+
+#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC
+#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000
+
+#define AR_PHY_M_SLEEP 0x99f0
+#define AR_PHY_REFCLKDLY 0x99f4
+#define AR_PHY_REFCLKPD 0x99f8
+
+#define AR_PHY_CALMODE 0x99f0
+
+#define AR_PHY_CALMODE_IQ 0x00000000
+#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
+#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
+#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
+
+#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12))
+
+#define AR_PHY_CURRENT_RSSI 0x9c1c
+#define AR9280_PHY_CURRENT_RSSI 0x9c3c
+
+#define AR_PHY_RFBUS_GRANT 0x9C20
+#define AR_PHY_RFBUS_GRANT_EN 0x00000001
+
+#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
+
+#define AR_PHY_CHAN_INFO_GAIN 0x9CFC
+
+#define AR_PHY_MODE 0xA200
+#define AR_PHY_MODE_ASYNCFIFO 0x80
+#define AR_PHY_MODE_AR2133 0x08
+#define AR_PHY_MODE_AR5111 0x00
+#define AR_PHY_MODE_AR5112 0x08
+#define AR_PHY_MODE_DYNAMIC 0x04
+#define AR_PHY_MODE_RF2GHZ 0x02
+#define AR_PHY_MODE_RF5GHZ 0x00
+#define AR_PHY_MODE_CCK 0x01
+#define AR_PHY_MODE_OFDM 0x00
+#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100
+
+#define AR_PHY_CCK_TX_CTRL 0xA204
+#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
+#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK 0x0000000C
+#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK_S 2
+
+#define AR_PHY_CCK_DETECT 0xA208
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
+/* [12:6] settling time for antenna switch */
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
+#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
+#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV_S 13
+
+#define AR_PHY_GAIN_2GHZ 0xA20C
+#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000
+#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
+#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00
+#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
+#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F
+#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
+
+#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000
+#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
+#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000
+#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
+#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0
+#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
+#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F
+#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
+
+#define AR_PHY_CCK_RXCTRL4 0xA21C
+#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000
+#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
+
+#define AR_PHY_DAG_CTRLCCK 0xA228
+#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
+
+#define AR_PHY_FORCE_CLKEN_CCK 0xA22C
+#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
+
+#define AR_PHY_POWER_TX_RATE3 0xA234
+#define AR_PHY_POWER_TX_RATE4 0xA238
+
+#define AR_PHY_SCRM_SEQ_XR 0xA23C
+#define AR_PHY_HEADER_DETECT_XR 0xA240
+#define AR_PHY_CHIRP_DETECTED_XR 0xA244
+#define AR_PHY_BLUETOOTH 0xA254
+
+#define AR_PHY_TPCRG1 0xA258
+#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
+#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
+
+#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
+#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
+#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
+#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
+#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
+#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
+
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
+
+#define AR_PHY_TX_PWRCTRL4 0xa264
+#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID 0x00000001
+#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID_S 0
+#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT 0x000001FE
+#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT_S 1
+
+#define AR_PHY_TX_PWRCTRL6_0 0xa270
+#define AR_PHY_TX_PWRCTRL6_1 0xb270
+#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE 0x03000000
+#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
+
+#define AR_PHY_TX_PWRCTRL7 0xa274
+#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000
+#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
+
+#define AR_PHY_TX_PWRCTRL9 0xa27C
+#define AR_PHY_TX_DESIRED_SCALE_CCK 0x00007C00
+#define AR_PHY_TX_DESIRED_SCALE_CCK_S 10
+#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
+#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
+
+#define AR_PHY_TX_GAIN_TBL1 0xa300
+#define AR_PHY_TX_GAIN 0x0007F000
+#define AR_PHY_TX_GAIN_S 12
+
+#define AR_PHY_CH0_TX_PWRCTRL11 0xa398
+#define AR_PHY_CH1_TX_PWRCTRL11 0xb398
+#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP 0x0000FC00
+#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP_S 10
+
+#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0
+#define AR_PHY_MASK2_M_31_45 0xa3a4
+#define AR_PHY_MASK2_M_16_30 0xa3a8
+#define AR_PHY_MASK2_M_00_15 0xa3ac
+#define AR_PHY_MASK2_P_15_01 0xa3b8
+#define AR_PHY_MASK2_P_30_16 0xa3bc
+#define AR_PHY_MASK2_P_45_31 0xa3c0
+#define AR_PHY_MASK2_P_61_45 0xa3c4
+#define AR_PHY_SPUR_REG 0x994c
+
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18)
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
+
+#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
+#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9)
+#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
+#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
+
+#define AR_PHY_PILOT_MASK_01_30 0xa3b0
+#define AR_PHY_PILOT_MASK_31_60 0xa3b4
+
+#define AR_PHY_CHANNEL_MASK_01_30 0x99d4
+#define AR_PHY_CHANNEL_MASK_31_60 0x99d8
+
+#define AR_PHY_ANALOG_SWAP 0xa268
+#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
+
+#define AR_PHY_TPCRG5 0xA26C
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
+
+/* Carrier leak calibration control, do it after AGC calibration */
+#define AR_PHY_CL_CAL_CTL 0xA358
+#define AR_PHY_CL_CAL_ENABLE 0x00000002
+#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
+
+#define AR_PHY_POWER_TX_RATE5 0xA38C
+#define AR_PHY_POWER_TX_RATE6 0xA390
+
+#define AR_PHY_CAL_CHAINMASK 0xA39C
+
+#define AR_PHY_POWER_TX_SUB 0xA3C8
+#define AR_PHY_POWER_TX_RATE7 0xA3CC
+#define AR_PHY_POWER_TX_RATE8 0xA3D0
+#define AR_PHY_POWER_TX_RATE9 0xA3D4
+
+#define AR_PHY_XPA_CFG 0xA3D8
+#define AR_PHY_FORCE_XPA_CFG 0x000000001
+#define AR_PHY_FORCE_XPA_CFG_S 0
+
+#define AR_PHY_CH1_CCA 0xa864
+#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000
+#define AR_PHY_CH1_MINCCA_PWR_S 19
+#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_CH1_MINCCA_PWR_S 20
+
+#define AR_PHY_CH2_CCA 0xb864
+#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000
+#define AR_PHY_CH2_MINCCA_PWR_S 19
+
+#define AR_PHY_CH1_EXT_CCA 0xa9bc
+#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23
+#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
+#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16
+
+#define AR_PHY_CH2_EXT_CCA 0xb9bc
+#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
+
+#endif
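Every field in this header is described by a mask plus a matching _S shift, for example AR_PHY_TIMING3_DSC_MAN and AR_PHY_TIMING3_DSC_MAN_S. The sketch below shows how such a pair is used to insert and extract a field; DEMO_FIELD and the FIELD_SET/FIELD_GET macros are local illustrations only, not the kernel's SM()/MS() helpers.

#include <stdio.h>
#include <stdint.h>

#define DEMO_FIELD	0xFFFE0000u	/* mask (compare AR_PHY_TIMING3_DSC_MAN)    */
#define DEMO_FIELD_S	17		/* shift (compare AR_PHY_TIMING3_DSC_MAN_S) */

#define FIELD_SET(val)	(((uint32_t)(val) << DEMO_FIELD_S) & DEMO_FIELD)
#define FIELD_GET(reg)	(((uint32_t)(reg) & DEMO_FIELD) >> DEMO_FIELD_S)

int main(void)
{
	uint32_t reg = 0x0001abcd;	/* unrelated bits outside the field */

	/* Clear the field, then insert a new value into it. */
	reg = (reg & ~DEMO_FIELD) | FIELD_SET(0x1234);
	printf("reg = 0x%08x, field = 0x%x\n",
	       (unsigned)reg, (unsigned)FIELD_GET(reg));
	return 0;
}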
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
new file mode 100644
index 0000000..56a9e5f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -0,0 +1,802 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9003_phy.h"
+
+static void ar9003_hw_setup_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* Select calibration to run */
+ switch (currCal->calData->calType) {
+ case IQ_MISMATCH_CAL:
+ /*
+ * Start calibration with
+ * 2^(INIT_IQCAL_LOG_COUNT_MAX+1) samples
+ */
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX,
+ currCal->calData->calCountMax);
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting IQ Mismatch Calibration\n");
+
+ /* Kick-off cal */
+ REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
+ break;
+ case TEMP_COMP_CAL:
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
+ AR_PHY_65NM_CH0_THERM_LOCAL, 1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
+ AR_PHY_65NM_CH0_THERM_START, 1);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting Temperature Compensation Calibration\n");
+ break;
+ case ADC_DC_INIT_CAL:
+ case ADC_GAIN_CAL:
+ case ADC_DC_CAL:
+ /* Not yet */
+ break;
+ }
+}
+
+/*
+ * Generic calibration routine.
+ * Recalibrate the lower PHY chips to account for temperature/environment
+ * changes.
+ */
+static bool ar9003_hw_per_calibration(struct ath_hw *ah,
+ struct ath9k_channel *ichan,
+ u8 rxchainmask,
+ struct ath9k_cal_list *currCal)
+{
+ /* Cal is assumed not done until explicitly set below */
+ bool iscaldone = false;
+
+ /* Calibration in progress. */
+ if (currCal->calState == CAL_RUNNING) {
+ /* Check to see if it has finished. */
+ if (!(REG_READ(ah, AR_PHY_TIMING4) & AR_PHY_TIMING4_DO_CAL)) {
+ /*
+ * Accumulate cal measures for active chains
+ */
+ currCal->calData->calCollect(ah);
+ ah->cal_samples++;
+
+ if (ah->cal_samples >=
+ currCal->calData->calNumSamples) {
+ unsigned int i, numChains = 0;
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (rxchainmask & (1 << i))
+ numChains++;
+ }
+
+ /*
+ * Process accumulated data
+ */
+ currCal->calData->calPostProc(ah, numChains);
+
+ /* Calibration has finished. */
+ ichan->CalValid |= currCal->calData->calType;
+ currCal->calState = CAL_DONE;
+ iscaldone = true;
+ } else {
+ /*
+				 * Set up collection of another sub-sample until we
+				 * get the desired number
+ */
+ ar9003_hw_setup_calibration(ah, currCal);
+ }
+ }
+ } else if (!(ichan->CalValid & currCal->calData->calType)) {
+ /* If current cal is marked invalid in channel, kick it off */
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+
+ return iscaldone;
+}
+
+static bool ar9003_hw_calibrate(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal)
+{
+ bool iscaldone = true;
+ struct ath9k_cal_list *currCal = ah->cal_list_curr;
+
+ /*
+ * For given calibration:
+ * 1. Call generic cal routine
+	 * 2. When this cal is done (isCalDone), if we have more cals waiting
+	 *    (e.g. after reset), mask this to upper layers by not propagating
+ * isCalDone if it is set to TRUE.
+ * Instead, change isCalDone to FALSE and setup the waiting cal(s)
+ * to be run.
+ */
+ if (currCal &&
+ (currCal->calState == CAL_RUNNING ||
+ currCal->calState == CAL_WAITING)) {
+ iscaldone = ar9003_hw_per_calibration(ah, chan,
+ rxchainmask, currCal);
+ if (iscaldone) {
+ ah->cal_list_curr = currCal = currCal->calNext;
+
+ if (currCal->calState == CAL_WAITING) {
+ iscaldone = false;
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+ }
+ }
+
+ /* Do NF cal only at longer intervals */
+ if (longcal) {
+ /*
+ * Load the NF from history buffer of the current channel.
+ * NF is slow time-variant, so it is OK to use a historical
+ * value.
+ */
+ ath9k_hw_loadnf(ah, ah->curchan);
+
+ /* start NF calibration, without updating BB NF register */
+ ath9k_hw_start_nfcal(ah);
+ }
+
+ return iscaldone;
+}
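The comment in ar9003_hw_calibrate() above describes the bookkeeping: run the current calibration, and once it completes, advance cal_list_curr to the next waiting entry while reporting "not done" so the remaining cals still get scheduled. The toy standalone model below mirrors that flow; the demo_* names stand in for the driver's structures and are purely illustrative.

#include <stdio.h>
#include <stdbool.h>

enum demo_cal_state { DEMO_CAL_WAITING, DEMO_CAL_RUNNING, DEMO_CAL_DONE };

struct demo_cal {
	const char *name;
	enum demo_cal_state state;
	struct demo_cal *next;
};

static bool demo_calibrate(struct demo_cal **curr)
{
	bool done = true;

	if (*curr && (*curr)->state != DEMO_CAL_DONE) {
		(*curr)->state = DEMO_CAL_DONE;	/* pretend this cal finished */
		printf("finished %s\n", (*curr)->name);
		*curr = (*curr)->next;
		if (*curr && (*curr)->state == DEMO_CAL_WAITING) {
			(*curr)->state = DEMO_CAL_RUNNING;
			done = false;	/* more cals pending: hide completion */
		}
	}
	return done;
}

int main(void)
{
	struct demo_cal temp = { "temp-comp", DEMO_CAL_WAITING, NULL };
	struct demo_cal iq = { "iq-mismatch", DEMO_CAL_RUNNING, &temp };
	struct demo_cal *curr = &iq;

	while (!demo_calibrate(&curr))
		;
	return 0;
}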
+
+static void ar9003_hw_iqcal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ /* Accumulate IQ cal measures for active chains */
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalPowerMeasI[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalPowerMeasQ[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalIqCorrMeas[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
+ ah->cal_samples, i, ah->totalPowerMeasI[i],
+ ah->totalPowerMeasQ[i],
+ ah->totalIqCorrMeas[i]);
+ }
+}
+
+static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 powerMeasQ, powerMeasI, iqCorrMeas;
+ u32 qCoffDenom, iCoffDenom;
+ int32_t qCoff, iCoff;
+ int iqCorrNeg, i;
+ const u_int32_t offset_array[3] = {
+ AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B1,
+ AR_PHY_RX_IQCAL_CORR_B2,
+ };
+
+ for (i = 0; i < numChains; i++) {
+ powerMeasI = ah->totalPowerMeasI[i];
+ powerMeasQ = ah->totalPowerMeasQ[i];
+ iqCorrMeas = ah->totalIqCorrMeas[i];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Starting IQ Cal and Correction for Chain %d\n",
+ i);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+			  "Original: Chn %d iq_corr_meas = 0x%08x\n",
+ i, ah->totalIqCorrMeas[i]);
+
+ iqCorrNeg = 0;
+
+ if (iqCorrMeas > 0x80000000) {
+ iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
+ iqCorrNeg = 1;
+ }
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
+ ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
+ iqCorrNeg);
+
+ iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256;
+ qCoffDenom = powerMeasQ / 64;
+
+ if ((iCoffDenom != 0) && (qCoffDenom != 0)) {
+ iCoff = iqCorrMeas / iCoffDenom;
+ qCoff = powerMeasI / qCoffDenom - 64;
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d iCoff = 0x%08x\n", i, iCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d qCoff = 0x%08x\n", i, qCoff);
+
+ /* Force bounds on iCoff */
+ if (iCoff >= 63)
+ iCoff = 63;
+ else if (iCoff <= -63)
+ iCoff = -63;
+
+ /* Negate iCoff if iqCorrNeg == 0 */
+ if (iqCorrNeg == 0x0)
+ iCoff = -iCoff;
+
+ /* Force bounds on qCoff */
+ if (qCoff >= 63)
+ qCoff = 63;
+ else if (qCoff <= -63)
+ qCoff = -63;
+
+ iCoff = iCoff & 0x7f;
+ qCoff = qCoff & 0x7f;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
+ i, iCoff, qCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Register offset (0x%04x) "
+ "before update = 0x%x\n",
+ offset_array[i],
+ REG_READ(ah, offset_array[i]));
+
+ REG_RMW_FIELD(ah, offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
+ iCoff);
+ REG_RMW_FIELD(ah, offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
+ qCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Register offset (0x%04x) QI COFF "
+ "(bitfields 0x%08x) after update = 0x%x\n",
+ offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
+ REG_READ(ah, offset_array[i]));
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Register offset (0x%04x) QQ COFF "
+ "(bitfields 0x%08x) after update = 0x%x\n",
+ offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
+ REG_READ(ah, offset_array[i]));
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ Cal and Correction done for Chain %d\n",
+ i);
+ }
+ }
+
+ REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ Cal and Correction (offset 0x%04x) enabled "
+ "(bit position 0x%08x). New Value 0x%08x\n",
+ (unsigned) (AR_PHY_RX_IQCAL_CORR_B0),
+ AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE,
+ REG_READ(ah, AR_PHY_RX_IQCAL_CORR_B0));
+}
+
+static const struct ath9k_percal_data iq_cal_single_sample = {
+ IQ_MISMATCH_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9003_hw_iqcal_collect,
+ ar9003_hw_iqcalibrate
+};
+
+static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
+{
+ ah->iq_caldata.calData = &iq_cal_single_sample;
+ ah->supp_cals = IQ_MISMATCH_CAL;
+}
+
+static bool ar9003_hw_iscal_supported(struct ath_hw *ah,
+ enum ath9k_cal_types calType)
+{
+ switch (calType & ah->supp_cals) {
+ case IQ_MISMATCH_CAL:
+ /*
+ * XXX: Run IQ Mismatch for non-CCK only
+ * Note that CHANNEL_B is never set though.
+ */
+ return true;
+ case ADC_GAIN_CAL:
+ case ADC_DC_CAL:
+ return false;
+ case TEMP_COMP_CAL:
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * solve 4x4 linear equation used in loopback iq cal.
+ */
+static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah,
+ s32 sin_2phi_1,
+ s32 cos_2phi_1,
+ s32 sin_2phi_2,
+ s32 cos_2phi_2,
+ s32 mag_a0_d0,
+ s32 phs_a0_d0,
+ s32 mag_a1_d0,
+ s32 phs_a1_d0,
+ s32 solved_eq[])
+{
+ s32 f1 = cos_2phi_1 - cos_2phi_2,
+ f3 = sin_2phi_1 - sin_2phi_2,
+ f2;
+ s32 mag_tx, phs_tx, mag_rx, phs_rx;
+ const s32 result_shift = 1 << 15;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ f2 = (f1 * f1 + f3 * f3) / result_shift;
+
+ if (!f2) {
+ ath_print(common, ATH_DBG_CALIBRATE, "Divide by 0\n");
+ return false;
+ }
+
+ /* mag mismatch, tx */
+ mag_tx = f1 * (mag_a0_d0 - mag_a1_d0) + f3 * (phs_a0_d0 - phs_a1_d0);
+ /* phs mismatch, tx */
+ phs_tx = f3 * (-mag_a0_d0 + mag_a1_d0) + f1 * (phs_a0_d0 - phs_a1_d0);
+
+ mag_tx = (mag_tx / f2);
+ phs_tx = (phs_tx / f2);
+
+ /* mag mismatch, rx */
+ mag_rx = mag_a0_d0 - (cos_2phi_1 * mag_tx + sin_2phi_1 * phs_tx) /
+ result_shift;
+ /* phs mismatch, rx */
+ phs_rx = phs_a0_d0 + (sin_2phi_1 * mag_tx - cos_2phi_1 * phs_tx) /
+ result_shift;
+
+ solved_eq[0] = mag_tx;
+ solved_eq[1] = phs_tx;
+ solved_eq[2] = mag_rx;
+ solved_eq[3] = phs_rx;
+
+ return true;
+}
+
+static s32 ar9003_hw_find_mag_approx(struct ath_hw *ah, s32 in_re, s32 in_im)
+{
+ s32 abs_i = abs(in_re),
+ abs_q = abs(in_im),
+ max_abs, min_abs;
+
+ if (abs_i > abs_q) {
+ max_abs = abs_i;
+ min_abs = abs_q;
+ } else {
+ max_abs = abs_q;
+ min_abs = abs_i;
+ }
+
+ return max_abs - (max_abs / 32) + (min_abs / 8) + (min_abs / 4);
+}
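ar9003_hw_find_mag_approx() above estimates sqrt(re^2 + im^2) with shifts and adds only, as max - max/32 + min/8 + min/4. The standalone comparison below uses the same arithmetic (the unused ath_hw argument is dropped) and checks it against the exact magnitude from hypot(); the test values are arbitrary.

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

static int mag_approx(int re, int im)
{
	int abs_i = abs(re), abs_q = abs(im);
	int max_abs = abs_i > abs_q ? abs_i : abs_q;
	int min_abs = abs_i > abs_q ? abs_q : abs_i;

	/* Same shift-and-add estimate as the driver function above. */
	return max_abs - (max_abs / 32) + (min_abs / 8) + (min_abs / 4);
}

int main(void)
{
	/* 3-4-5 triangle, scaled so the integer divisions are exact */
	printf("approx=%d exact=%.1f\n", mag_approx(300, 400),
	       hypot(300.0, 400.0));	/* 500 vs 500.0 */
	printf("approx=%d exact=%.1f\n", mag_approx(-700, 100),
	       hypot(700.0, 100.0));	/* 716 vs 707.1 */
	return 0;
}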
+
+#define DELPT 32
+
+static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
+ s32 chain_idx,
+ const s32 iq_res[],
+ s32 iqc_coeff[])
+{
+ s32 i2_m_q2_a0_d0, i2_p_q2_a0_d0, iq_corr_a0_d0,
+ i2_m_q2_a0_d1, i2_p_q2_a0_d1, iq_corr_a0_d1,
+ i2_m_q2_a1_d0, i2_p_q2_a1_d0, iq_corr_a1_d0,
+ i2_m_q2_a1_d1, i2_p_q2_a1_d1, iq_corr_a1_d1;
+ s32 mag_a0_d0, mag_a1_d0, mag_a0_d1, mag_a1_d1,
+ phs_a0_d0, phs_a1_d0, phs_a0_d1, phs_a1_d1,
+ sin_2phi_1, cos_2phi_1,
+ sin_2phi_2, cos_2phi_2;
+ s32 mag_tx, phs_tx, mag_rx, phs_rx;
+ s32 solved_eq[4], mag_corr_tx, phs_corr_tx, mag_corr_rx, phs_corr_rx,
+ q_q_coff, q_i_coff;
+ const s32 res_scale = 1 << 15;
+ const s32 delpt_shift = 1 << 8;
+ s32 mag1, mag2;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ i2_m_q2_a0_d0 = iq_res[0] & 0xfff;
+ i2_p_q2_a0_d0 = (iq_res[0] >> 12) & 0xfff;
+ iq_corr_a0_d0 = ((iq_res[0] >> 24) & 0xff) + ((iq_res[1] & 0xf) << 8);
+
+ if (i2_m_q2_a0_d0 > 0x800)
+ i2_m_q2_a0_d0 = -((0xfff - i2_m_q2_a0_d0) + 1);
+
+ if (i2_p_q2_a0_d0 > 0x800)
+ i2_p_q2_a0_d0 = -((0xfff - i2_p_q2_a0_d0) + 1);
+
+ if (iq_corr_a0_d0 > 0x800)
+ iq_corr_a0_d0 = -((0xfff - iq_corr_a0_d0) + 1);
+
+ i2_m_q2_a0_d1 = (iq_res[1] >> 4) & 0xfff;
+ i2_p_q2_a0_d1 = (iq_res[2] & 0xfff);
+ iq_corr_a0_d1 = (iq_res[2] >> 12) & 0xfff;
+
+ if (i2_m_q2_a0_d1 > 0x800)
+ i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
+
+ if (i2_p_q2_a0_d1 > 0x800)
+ i2_p_q2_a0_d1 = -((0xfff - i2_p_q2_a0_d1) + 1);
+
+ if (iq_corr_a0_d1 > 0x800)
+ iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
+
+ i2_m_q2_a1_d0 = ((iq_res[2] >> 24) & 0xff) + ((iq_res[3] & 0xf) << 8);
+ i2_p_q2_a1_d0 = (iq_res[3] >> 4) & 0xfff;
+ iq_corr_a1_d0 = iq_res[4] & 0xfff;
+
+ if (i2_m_q2_a1_d0 > 0x800)
+ i2_m_q2_a1_d0 = -((0xfff - i2_m_q2_a1_d0) + 1);
+
+ if (i2_p_q2_a1_d0 > 0x800)
+ i2_p_q2_a1_d0 = -((0xfff - i2_p_q2_a1_d0) + 1);
+
+ if (iq_corr_a1_d0 > 0x800)
+ iq_corr_a1_d0 = -((0xfff - iq_corr_a1_d0) + 1);
+
+ i2_m_q2_a1_d1 = (iq_res[4] >> 12) & 0xfff;
+ i2_p_q2_a1_d1 = ((iq_res[4] >> 24) & 0xff) + ((iq_res[5] & 0xf) << 8);
+ iq_corr_a1_d1 = (iq_res[5] >> 4) & 0xfff;
+
+ if (i2_m_q2_a1_d1 > 0x800)
+ i2_m_q2_a1_d1 = -((0xfff - i2_m_q2_a1_d1) + 1);
+
+ if (i2_p_q2_a1_d1 > 0x800)
+ i2_p_q2_a1_d1 = -((0xfff - i2_p_q2_a1_d1) + 1);
+
+ if (iq_corr_a1_d1 > 0x800)
+ iq_corr_a1_d1 = -((0xfff - iq_corr_a1_d1) + 1);
+
+ if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) ||
+ (i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Divide by 0:\na0_d0=%d\n"
+			  "a0_d1=%d\na1_d0=%d\na1_d1=%d\n",
+ i2_p_q2_a0_d0, i2_p_q2_a0_d1,
+ i2_p_q2_a1_d0, i2_p_q2_a1_d1);
+ return false;
+ }
+
+ mag_a0_d0 = (i2_m_q2_a0_d0 * res_scale) / i2_p_q2_a0_d0;
+ phs_a0_d0 = (iq_corr_a0_d0 * res_scale) / i2_p_q2_a0_d0;
+
+ mag_a0_d1 = (i2_m_q2_a0_d1 * res_scale) / i2_p_q2_a0_d1;
+ phs_a0_d1 = (iq_corr_a0_d1 * res_scale) / i2_p_q2_a0_d1;
+
+ mag_a1_d0 = (i2_m_q2_a1_d0 * res_scale) / i2_p_q2_a1_d0;
+ phs_a1_d0 = (iq_corr_a1_d0 * res_scale) / i2_p_q2_a1_d0;
+
+ mag_a1_d1 = (i2_m_q2_a1_d1 * res_scale) / i2_p_q2_a1_d1;
+ phs_a1_d1 = (iq_corr_a1_d1 * res_scale) / i2_p_q2_a1_d1;
+
+ /* w/o analog phase shift */
+ sin_2phi_1 = (((mag_a0_d0 - mag_a0_d1) * delpt_shift) / DELPT);
+ /* w/o analog phase shift */
+ cos_2phi_1 = (((phs_a0_d1 - phs_a0_d0) * delpt_shift) / DELPT);
+ /* w/ analog phase shift */
+ sin_2phi_2 = (((mag_a1_d0 - mag_a1_d1) * delpt_shift) / DELPT);
+ /* w/ analog phase shift */
+ cos_2phi_2 = (((phs_a1_d1 - phs_a1_d0) * delpt_shift) / DELPT);
+
+ /*
+ * force sin^2 + cos^2 = 1;
+ * find magnitude by approximation
+ */
+ mag1 = ar9003_hw_find_mag_approx(ah, cos_2phi_1, sin_2phi_1);
+ mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2);
+
+ if ((mag1 == 0) || (mag2 == 0)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Divide by 0: mag1=%d, mag2=%d\n",
+ mag1, mag2);
+ return false;
+ }
+
+	/* normalize sin and cos by mag */
+ sin_2phi_1 = (sin_2phi_1 * res_scale / mag1);
+ cos_2phi_1 = (cos_2phi_1 * res_scale / mag1);
+ sin_2phi_2 = (sin_2phi_2 * res_scale / mag2);
+ cos_2phi_2 = (cos_2phi_2 * res_scale / mag2);
+
+ /* calculate IQ mismatch */
+ if (!ar9003_hw_solve_iq_cal(ah,
+ sin_2phi_1, cos_2phi_1,
+ sin_2phi_2, cos_2phi_2,
+ mag_a0_d0, phs_a0_d0,
+ mag_a1_d0,
+ phs_a1_d0, solved_eq)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Call to ar9003_hw_solve_iq_cal() failed.\n");
+ return false;
+ }
+
+ mag_tx = solved_eq[0];
+ phs_tx = solved_eq[1];
+ mag_rx = solved_eq[2];
+ phs_rx = solved_eq[3];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "chain %d: mag mismatch=%d phase mismatch=%d\n",
+ chain_idx, mag_tx/res_scale, phs_tx/res_scale);
+
+ if (res_scale == mag_tx) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Divide by 0: mag_tx=%d, res_scale=%d\n",
+ mag_tx, res_scale);
+ return false;
+ }
+
+ /* calculate and quantize Tx IQ correction factor */
+ mag_corr_tx = (mag_tx * res_scale) / (res_scale - mag_tx);
+ phs_corr_tx = -phs_tx;
+
+ q_q_coff = (mag_corr_tx * 128 / res_scale);
+ q_i_coff = (phs_corr_tx * 256 / res_scale);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "tx chain %d: mag corr=%d phase corr=%d\n",
+ chain_idx, q_q_coff, q_i_coff);
+
+ if (q_i_coff < -63)
+ q_i_coff = -63;
+ if (q_i_coff > 63)
+ q_i_coff = 63;
+ if (q_q_coff < -63)
+ q_q_coff = -63;
+ if (q_q_coff > 63)
+ q_q_coff = 63;
+
+ iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "tx chain %d: iq corr coeff=%x\n",
+ chain_idx, iqc_coeff[0]);
+
+ if (-mag_rx == res_scale) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Divide by 0: mag_rx=%d, res_scale=%d\n",
+ mag_rx, res_scale);
+ return false;
+ }
+
+ /* calculate and quantize Rx IQ correction factors */
+ mag_corr_rx = (-mag_rx * res_scale) / (res_scale + mag_rx);
+ phs_corr_rx = -phs_rx;
+
+ q_q_coff = (mag_corr_rx * 128 / res_scale);
+ q_i_coff = (phs_corr_rx * 256 / res_scale);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "rx chain %d: mag corr=%d phase corr=%d\n",
+ chain_idx, q_q_coff, q_i_coff);
+
+ if (q_i_coff < -63)
+ q_i_coff = -63;
+ if (q_i_coff > 63)
+ q_i_coff = 63;
+ if (q_q_coff < -63)
+ q_q_coff = -63;
+ if (q_q_coff > 63)
+ q_q_coff = 63;
+
+ iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "rx chain %d: iq corr coeff=%x\n",
+ chain_idx, iqc_coeff[1]);
+
+ return true;
+}
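Both the Tx and Rx paths in the function above clamp their correction factors to [-63, 63] and then combine them as q_q_coff * 128 + q_i_coff before the result is written out. A minimal standalone version of that clamp-and-combine step, with arbitrary input values:

#include <stdio.h>

/* Clamp to the signed range used above. */
static int clamp63(int v)
{
	if (v < -63)
		return -63;
	if (v > 63)
		return 63;
	return v;
}

int main(void)
{
	int q_q_coff = clamp63(75);	/* clamped down to 63 */
	int q_i_coff = clamp63(-12);

	/* Combine the two factors the same way iqc_coeff[] is built. */
	printf("coeff = %d\n", q_q_coff * 128 + q_i_coff);	/* 8052 */
	return 0;
}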
+
+static void ar9003_hw_tx_iq_cal(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
+ AR_PHY_TX_IQCAL_STATUS_B0,
+ AR_PHY_TX_IQCAL_STATUS_B1,
+ AR_PHY_TX_IQCAL_STATUS_B2,
+ };
+ const u32 tx_corr_coeff[AR9300_MAX_CHAINS] = {
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_B0,
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_B1,
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_B2,
+ };
+ const u32 rx_corr[AR9300_MAX_CHAINS] = {
+ AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B1,
+ AR_PHY_RX_IQCAL_CORR_B2,
+ };
+ const u_int32_t chan_info_tab[] = {
+ AR_PHY_CHAN_INFO_TAB_0,
+ AR_PHY_CHAN_INFO_TAB_1,
+ AR_PHY_CHAN_INFO_TAB_2,
+ };
+ s32 iq_res[6];
+ s32 iqc_coeff[2];
+ s32 i, j;
+ u32 num_chains = 0;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (ah->txchainmask & (1 << i))
+ num_chains++;
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+ AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
+ DELPT);
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START,
+ AR_PHY_TX_IQCAL_START_DO_CAL,
+ AR_PHY_TX_IQCAL_START_DO_CAL);
+
+ if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
+ AR_PHY_TX_IQCAL_START_DO_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Tx IQ Cal not complete.\n");
+ goto TX_IQ_CAL_FAILED;
+ }
+
+ for (i = 0; i < num_chains; i++) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Doing Tx IQ Cal for chain %d.\n", i);
+
+ if (REG_READ(ah, txiqcal_status[i]) &
+ AR_PHY_TX_IQCAL_STATUS_FAILED) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Tx IQ Cal failed for chain %d.\n", i);
+ goto TX_IQ_CAL_FAILED;
+ }
+
+ for (j = 0; j < 3; j++) {
+ u_int8_t idx = 2 * j,
+ offset = 4 * j;
+
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_TAB_S2_READ, 0);
+
+ /* 32 bits */
+ iq_res[idx] = REG_READ(ah, chan_info_tab[i] + offset);
+
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_TAB_S2_READ, 1);
+
+ /* 16 bits */
+ iq_res[idx+1] = 0xffff & REG_READ(ah,
+ chan_info_tab[i] +
+ offset);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
+ idx, iq_res[idx], idx+1, iq_res[idx+1]);
+ }
+
+ if (!ar9003_hw_calc_iq_corr(ah, i, iq_res, iqc_coeff)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Failed in calculation of IQ correction.\n");
+ goto TX_IQ_CAL_FAILED;
+ }
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ_COEFF[0] = 0x%x IQ_COEFF[1] = 0x%x\n",
+ iqc_coeff[0], iqc_coeff[1]);
+
+ REG_RMW_FIELD(ah, tx_corr_coeff[i],
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE,
+ iqc_coeff[0]);
+ REG_RMW_FIELD(ah, rx_corr[i],
+ AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF,
+ iqc_coeff[1] >> 7);
+ REG_RMW_FIELD(ah, rx_corr[i],
+ AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF,
+ iqc_coeff[1]);
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
+ AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
+
+ return;
+
+TX_IQ_CAL_FAILED:
+ ath_print(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
+}
+
+static bool ar9003_hw_init_cal(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /*
+	 * 0x7 = 0b111; the AR9003 needs to be configured for 3-chain mode
+	 * before running AGC/TxIQ cals
+ */
+ ar9003_hw_set_chain_masks(ah, 0x7, 0x7);
+
+ /* Calibrate the AGC */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ /* Poll for offset calibration complete */
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "offset calibration failed to "
+ "complete in 1ms; noisy environment?\n");
+ return false;
+ }
+
+ /* Do Tx IQ Calibration */
+ if (ah->config.tx_iq_calibration)
+ ar9003_hw_tx_iq_cal(ah);
+
+ /* Revert chainmasks to their original values before NF cal */
+ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+
+ /* Initialize list pointers */
+ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+
+ if (ar9003_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling IQ Calibration.\n");
+ }
+
+ if (ar9003_hw_iscal_supported(ah, TEMP_COMP_CAL)) {
+ INIT_CAL(&ah->tempCompCalData);
+ INSERT_CAL(ah, &ah->tempCompCalData);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling Temperature Compensation Calibration.\n");
+ }
+
+ /* Initialize current pointer to first element in list */
+ ah->cal_list_curr = ah->cal_list;
+
+ if (ah->cal_list_curr)
+ ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
+
+ chan->CalValid = 0;
+
+ return true;
+}
+
+void ar9003_hw_attach_calib_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_cal_settings = ar9003_hw_init_cal_settings;
+ priv_ops->init_cal = ar9003_hw_init_cal;
+ priv_ops->setup_calibration = ar9003_hw_setup_calibration;
+ priv_ops->iscal_supported = ar9003_hw_iscal_supported;
+
+ ops->calibrate = ar9003_hw_calibrate;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
new file mode 100644
index 0000000..23eb60e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -0,0 +1,1838 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_phy.h"
+#include "ar9003_eeprom.h"
+
+#define COMP_HDR_LEN 4
+#define COMP_CKSUM_LEN 2
+
+#define AR_CH0_TOP (0x00016288)
+#define AR_CH0_TOP_XPABIASLVL (0x3)
+#define AR_CH0_TOP_XPABIASLVL_S (8)
+
+#define AR_CH0_THERM (0x00016290)
+#define AR_CH0_THERM_SPARE (0x3f)
+#define AR_CH0_THERM_SPARE_S (0)
+
+#define AR_SWITCH_TABLE_COM_ALL (0xffff)
+#define AR_SWITCH_TABLE_COM_ALL_S (0)
+
+#define AR_SWITCH_TABLE_COM2_ALL (0xffffff)
+#define AR_SWITCH_TABLE_COM2_ALL_S (0)
+
+#define AR_SWITCH_TABLE_ALL (0xfff)
+#define AR_SWITCH_TABLE_ALL_S (0)
+
+#define LE16(x) __constant_cpu_to_le16(x)
+#define LE32(x) __constant_cpu_to_le32(x)
+
+static const struct ar9300_eeprom ar9300_default = {
+ .eepromVersion = 2,
+ .templateVersion = 2,
+ .macAddr = {1, 2, 3, 4, 5, 6},
+ .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .baseEepHeader = {
+ .regDmn = { LE16(0), LE16(0x1f) },
+ .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
+ .opCapFlags = {
+ .opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
+ .eepMisc = 0,
+ },
+ .rfSilent = 0,
+ .blueToothOptions = 0,
+ .deviceCap = 0,
+ .deviceType = 5, /* takes lower byte in eeprom location */
+ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
+ .params_for_tuning_caps = {0, 0},
+ .featureEnable = 0x0c,
+ /*
+ * bit0 - enable tx temp comp - disabled
+ * bit1 - enable tx volt comp - disabled
+ * bit2 - enable fastClock - enabled
+ * bit3 - enable doubling - enabled
+ * bit4 - enable internal regulator - disabled
+ */
+ .miscConfiguration = 0, /* bit0 - turn down drivestrength */
+ .eepromWriteEnableGpio = 3,
+ .wlanDisableGpio = 0,
+ .wlanLedGpio = 8,
+ .rxBandSelectGpio = 0xff,
+ .txrxgain = 0,
+ .swreg = 0,
+ },
+ .modalHeader2G = {
+ /* ar9300_modal_eep_header 2g */
+ /* 4 idle, t1, t2, b (4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ .antCtrlCommon2 = LE32(0x22222),
+
+ /*
+ * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
+ * rx1, rx12, b (2 bits each)
+ */
+ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
+
+ /*
+ * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
+ * for ar9280 (0xa20c/b20c 5:0)
+ */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+ * for ar9280 (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 36,
+ .voltSlope = 0,
+
+ /*
+ * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
+ * channels in usual fbin coding format
+ */
+ .spurChans = {0, 0, 0, 0, 0},
+
+ /*
+ * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
+ * if the register is per chain
+ */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .ob = {1, 1, 1},/* 3 chain */
+ .db_stage2 = {1, 1, 1}, /* 3 chain */
+ .db_stage3 = {0, 0, 0},
+ .db_stage4 = {0, 0, 0},
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2c,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .futureModal = { /* [32] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ },
+ },
+ .calFreqPier2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ /* ar9300_cal_data_per_freq_op_loop 2g */
+ .calPierData2G = {
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ },
+ .calTarget_freqbin_Cck = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2484, 1),
+ },
+ .calTarget_freqbin_2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT20 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT40 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTargetPowerCck = {
+ /* 1L-5L,5S,11L,11S */
+ { {36, 36, 36, 36} },
+ { {36, 36, 36, 36} },
+ },
+ .calTargetPower2G = {
+ /* 6-24,36,48,54 */
+ { {32, 32, 28, 24} },
+ { {32, 32, 28, 24} },
+ { {32, 32, 28, 24} },
+ },
+ .calTargetPower2GHT20 = {
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ },
+ .calTargetPower2GHT40 = {
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ },
+ .ctlIndex_2G = {
+ 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
+ 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
+ },
+ .ctl_freqbin_2G = {
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2457, 1),
+ FREQ2FBIN(2462, 1)
+ },
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+ {
+ FREQ2FBIN(2422, 1),
+ FREQ2FBIN(2427, 1),
+ FREQ2FBIN(2447, 1),
+ FREQ2FBIN(2452, 1)
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ },
+
+ {
+ /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[11].ctlEdges[3].bChannel */
+ FREQ2FBIN(2462, 1),
+ }
+ },
+ .ctlPowerData_2G = {
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } },
+
+ { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+
+ { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
+ },
+ .modalHeader5G = {
+ /* 4 idle,t1,t2,b (4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
+ .antCtrlCommon2 = LE32(0x22222),
+ /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
+ .antCtrlChain = {
+ LE16(0x000), LE16(0x000), LE16(0x000),
+ },
+ /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+ * for merlin (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 68,
+ .voltSlope = 0,
+ /* spurChans spur channels in usual fbin coding format */
+ .spurChans = {0, 0, 0, 0, 0},
+ /* noiseFloorThreshCh Check if the register is per chain */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .ob = {3, 3, 3}, /* 3 chain */
+ .db_stage2 = {3, 3, 3}, /* 3 chain */
+ .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
+ .db_stage4 = {3, 3, 3}, /* doesn't exist for 2G */
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2d,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ },
+ },
+ .calFreqPier5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calPierData5G = {
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+
+ },
+ .calTarget_freqbin_5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT20 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT40 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTargetPower5G = {
+ /* 6-24,36,48,54 */
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ },
+ .calTargetPower5GHT20 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ },
+ .calTargetPower5GHT40 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ },
+ .ctlIndex_5G = {
+ 0x10, 0x16, 0x18, 0x40, 0x46,
+ 0x48, 0x30, 0x36, 0x38
+ },
+ .ctl_freqbin_5G = {
+ {
+ /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+ {
+ /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
+ /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
+ },
+
+ {
+ /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[3].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[3].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[4].ctlEdges[4].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[5].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
+ /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[5].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[5].ctlEdges[7].bChannel */ 0xFF
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
+ /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
+ /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
+ /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
+ }
+ },
+ .ctlPowerData_5G = {
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 0}, {60, 1}, {60, 0}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ }
+ },
+ {
+ {
+ {60, 0}, {60, 1}, {60, 1}, {60, 0},
+ {60, 1}, {60, 0}, {60, 0}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ {60, 0}, {60, 0}, {60, 0}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ {60, 1}, {60, 0}, {60, 0}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 0}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 0}, {60, 1}, {60, 1},
+ {60, 1}, {60, 1}, {60, 0}, {60, 1},
+ }
+ },
+ }
+};
+
+static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
+{
+ return 0;
+}
+
+static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
+ enum eeprom_param param)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
+
+ switch (param) {
+ case EEP_MAC_LSW:
+ return eep->macAddr[0] << 8 | eep->macAddr[1];
+ case EEP_MAC_MID:
+ return eep->macAddr[2] << 8 | eep->macAddr[3];
+ case EEP_MAC_MSW:
+ return eep->macAddr[4] << 8 | eep->macAddr[5];
+ case EEP_REG_0:
+ return le16_to_cpu(pBase->regDmn[0]);
+ case EEP_REG_1:
+ return le16_to_cpu(pBase->regDmn[1]);
+ case EEP_OP_CAP:
+ return pBase->deviceCap;
+ case EEP_OP_MODE:
+ return pBase->opCapFlags.opFlags;
+ case EEP_RF_SILENT:
+ return pBase->rfSilent;
+ case EEP_TX_MASK:
+ return (pBase->txrxMask >> 4) & 0xf;
+ case EEP_RX_MASK:
+ return pBase->txrxMask & 0xf;
+ case EEP_DRIVE_STRENGTH:
+#define AR9300_EEP_BASE_DRIV_STRENGTH 0x1
+ return pBase->miscConfiguration & AR9300_EEP_BASE_DRIV_STRENGTH;
+ case EEP_INTERNAL_REGULATOR:
+ /* Bit 4 is internal regulator flag */
+ return (pBase->featureEnable & 0x10) >> 4;
+ case EEP_SWREG:
+ return le32_to_cpu(pBase->swreg);
+ default:
+ return 0;
+ }
+}
+
+static bool ar9300_eeprom_read_byte(struct ath_common *common, int address,
+ u8 *buffer)
+{
+ u16 val;
+
+ if (unlikely(!ath9k_hw_nvram_read(common, address / 2, &val)))
+ return false;
+
+ *buffer = (val >> (8 * (address % 2))) & 0xff;
+ return true;
+}
+
+static bool ar9300_eeprom_read_word(struct ath_common *common, int address,
+ u8 *buffer)
+{
+ u16 val;
+
+ if (unlikely(!ath9k_hw_nvram_read(common, address / 2, &val)))
+ return false;
+
+ buffer[0] = val >> 8;
+ buffer[1] = val & 0xff;
+
+ return true;
+}
+
+static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
+ int count)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int i;
+
+ if ((address < 0) || ((address + count) / 2 > AR9300_EEPROM_SIZE - 1)) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "eeprom address not in range\n");
+ return false;
+ }
+
+ /*
+ * Since we're reading the bytes in reverse order from a little-endian
+ * word stream, an even address means we only use the lower half of
+ * the 16-bit word at that address
+ */
+ if (address % 2 == 0) {
+ if (!ar9300_eeprom_read_byte(common, address--, buffer++))
+ goto error;
+
+ count--;
+ }
+
+ for (i = 0; i < count / 2; i++) {
+ if (!ar9300_eeprom_read_word(common, address, buffer))
+ goto error;
+
+ address -= 2;
+ buffer += 2;
+ }
+
+ if (count % 2)
+ if (!ar9300_eeprom_read_byte(common, address, buffer))
+ goto error;
+
+ return true;
+
+error:
+ ath_print(common, ATH_DBG_EEPROM,
+ "unable to read eeprom region at offset %d\n", address);
+ return false;
+}
+
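+/*
+ * The four compression header bytes appear to be packed as:
+ *   byte 0: bits [7:5] code, bits [4:0] reference bits [4:0]
+ *   byte 1: bit 7 reference bit 5, bits [6:0] length bits [10:4]
+ *   byte 2: bits [7:4] length bits [3:0], bits [3:0] major version
+ *   byte 3: minor version
+ */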
+static void ar9300_comp_hdr_unpack(u8 *best, int *code, int *reference,
+ int *length, int *major, int *minor)
+{
+ unsigned long value[4];
+
+ value[0] = best[0];
+ value[1] = best[1];
+ value[2] = best[2];
+ value[3] = best[3];
+ *code = ((value[0] >> 5) & 0x0007);
+ *reference = (value[0] & 0x001f) | ((value[1] >> 2) & 0x0020);
+ *length = ((value[1] << 4) & 0x07f0) | ((value[2] >> 4) & 0x000f);
+ *major = (value[2] & 0x000f);
+ *minor = (value[3] & 0x00ff);
+}
+
+static u16 ar9300_comp_cksum(u8 *data, int dsize)
+{
+ int it, checksum = 0;
+
+ for (it = 0; it < dsize; it++) {
+ checksum += data[it];
+ checksum &= 0xffff;
+ }
+
+ return checksum;
+}
+
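+/*
+ * A compressed block is a sequence of (offset, length, data[length])
+ * records: each record advances the write position by 'offset' and
+ * then overwrites 'length' bytes of the destination structure.
+ */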
+static bool ar9300_uncompress_block(struct ath_hw *ah,
+ u8 *mptr,
+ int mdataSize,
+ u8 *block,
+ int size)
+{
+ int it;
+ int spot;
+ int offset;
+ int length;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ spot = 0;
+
+ for (it = 0; it < size; it += (length+2)) {
+ offset = block[it];
+ offset &= 0xff;
+ spot += offset;
+ length = block[it+1];
+ length &= 0xff;
+
+ if (length > 0 && spot >= 0 && spot+length < mdataSize) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Restore at %d: spot=%d "
+ "offset=%d length=%d\n",
+ it, spot, offset, length);
+ memcpy(&mptr[spot], &block[it+2], length);
+ spot += length;
+ } else if (length > 0) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Bad restore at %d: spot=%d "
+ "offset=%d length=%d\n",
+ it, spot, offset, length);
+ return false;
+ }
+ }
+ return true;
+}
+
+static int ar9300_compress_decision(struct ath_hw *ah,
+ int it,
+ int code,
+ int reference,
+ u8 *mptr,
+ u8 *word, int length, int mdata_size)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u8 *dptr;
+
+ switch (code) {
+ case _CompressNone:
+ if (length != mdata_size) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "EEPROM structure size mismatch"
+ "memory=%d eeprom=%d\n", mdata_size, length);
+ return -1;
+ }
+ memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length);
+ ath_print(common, ATH_DBG_EEPROM, "restored eeprom %d:"
+ " uncompressed, length %d\n", it, length);
+ break;
+ case _CompressBlock:
+ if (reference == 0) {
+ dptr = mptr;
+ } else {
+ if (reference != 2) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "cant find reference eeprom"
+ "struct %d\n", reference);
+ return -1;
+ }
+ memcpy(mptr, &ar9300_default, mdata_size);
+ }
+ ath_print(common, ATH_DBG_EEPROM,
+ "restore eeprom %d: block, reference %d,"
+ " length %d\n", it, reference, length);
+ ar9300_uncompress_block(ah, mptr, mdata_size,
+ (u8 *) (word + COMP_HDR_LEN), length);
+ break;
+ default:
+ ath_print(common, ATH_DBG_EEPROM, "unknown compression"
+ " code %d\n", code);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Read the configuration data from the eeprom.
+ * The data can be put in any specified memory buffer.
+ *
+ * Returns -1 on error.
+ * Returns address of next memory location on success.
+ */
+static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
+ u8 *mptr, int mdata_size)
+{
+#define MDEFAULT 15
+#define MSTATE 100
+ int cptr;
+ u8 *word;
+ int code;
+ int reference, length, major, minor;
+ int osize;
+ int it;
+ u16 checksum, mchecksum;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ word = kzalloc(2048, GFP_KERNEL);
+ if (!word)
+ return -1;
+
+ memcpy(mptr, &ar9300_default, mdata_size);
+
+ cptr = AR9300_BASE_ADDR;
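+ /*
+ * Calibration blocks are stored from AR9300_BASE_ADDR downwards;
+ * each pass steps cptr back past the header, payload and checksum
+ * that were just consumed.
+ */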
+ for (it = 0; it < MSTATE; it++) {
+ if (!ar9300_read_eeprom(ah, cptr, word, COMP_HDR_LEN))
+ goto fail;
+
+ if ((word[0] == 0 && word[1] == 0 && word[2] == 0 &&
+ word[3] == 0) || (word[0] == 0xff && word[1] == 0xff
+ && word[2] == 0xff && word[3] == 0xff))
+ break;
+
+ ar9300_comp_hdr_unpack(word, &code, &reference,
+ &length, &major, &minor);
+ ath_print(common, ATH_DBG_EEPROM,
+ "Found block at %x: code=%d ref=%d"
+ "length=%d major=%d minor=%d\n", cptr, code,
+ reference, length, major, minor);
+ if (length >= 1024) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Skipping bad header\n");
+ cptr -= COMP_HDR_LEN;
+ continue;
+ }
+
+ osize = length;
+ ar9300_read_eeprom(ah, cptr, word,
+ COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
+ checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
+ mchecksum = word[COMP_HDR_LEN + osize] |
+ (word[COMP_HDR_LEN + osize + 1] << 8);
+ ath_print(common, ATH_DBG_EEPROM,
+ "checksum %x %x\n", checksum, mchecksum);
+ if (checksum == mchecksum) {
+ ar9300_compress_decision(ah, it, code, reference, mptr,
+ word, length, mdata_size);
+ } else {
+ ath_print(common, ATH_DBG_EEPROM,
+ "skipping block with bad checksum\n");
+ }
+ cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
+ }
+
+ kfree(word);
+ return cptr;
+
+fail:
+ kfree(word);
+ return -1;
+}
+
+/*
+ * Restore the configuration structure by reading the eeprom.
+ * This function destroys any existing in-memory structure
+ * content.
+ */
+static bool ath9k_hw_ar9300_fill_eeprom(struct ath_hw *ah)
+{
+ u8 *mptr = (u8 *) &ah->eeprom.ar9300_eep;
+
+ if (ar9300_eeprom_restore_internal(ah, mptr,
+ sizeof(struct ar9300_eeprom)) < 0)
+ return false;
+
+ return true;
+}
+
+/* XXX: review hardware docs */
+static int ath9k_hw_ar9300_get_eeprom_ver(struct ath_hw *ah)
+{
+ return ah->eeprom.ar9300_eep.eepromVersion;
+}
+
+/* XXX: could be read from the eepromVersion, not sure yet */
+static int ath9k_hw_ar9300_get_eeprom_rev(struct ath_hw *ah)
+{
+ return 0;
+}
+
+static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah,
+ enum ieee80211_band freq_band)
+{
+ return 1;
+}
+
+static u16 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return -EINVAL;
+}
+
+static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (is2ghz)
+ return eep->modalHeader2G.xpaBiasLvl;
+ else
+ return eep->modalHeader5G.xpaBiasLvl;
+}
+
+static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
+{
+ int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
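+
+ /*
+ * The bias level appears to be split across two fields: the low
+ * two bits go into CH0_TOP, the next two into the CH0_THERM
+ * spare bits.
+ */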
+ REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, (bias & 0x3));
+ REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_SPARE,
+ ((bias >> 2) & 0x3));
+}
+
+static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ __le32 val;
+
+ if (is2ghz)
+ val = eep->modalHeader2G.antCtrlCommon;
+ else
+ val = eep->modalHeader5G.antCtrlCommon;
+ return le32_to_cpu(val);
+}
+
+static u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ __le32 val;
+
+ if (is2ghz)
+ val = eep->modalHeader2G.antCtrlCommon2;
+ else
+ val = eep->modalHeader5G.antCtrlCommon2;
+ return le32_to_cpu(val);
+}
+
+static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah,
+ int chain,
+ bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ __le16 val = 0;
+
+ if (chain >= 0 && chain < AR9300_MAX_CHAINS) {
+ if (is2ghz)
+ val = eep->modalHeader2G.antCtrlChain[chain];
+ else
+ val = eep->modalHeader5G.antCtrlChain[chain];
+ }
+
+ return le16_to_cpu(val);
+}
+
+static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
+{
+ u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, AR_SWITCH_TABLE_COM_ALL, value);
+
+ value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
+
+ value = ar9003_hw_ant_ctrl_chain_get(ah, 0, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_0, AR_SWITCH_TABLE_ALL, value);
+
+ value = ar9003_hw_ant_ctrl_chain_get(ah, 1, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_1, AR_SWITCH_TABLE_ALL, value);
+
+ value = ar9003_hw_ant_ctrl_chain_get(ah, 2, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_2, AR_SWITCH_TABLE_ALL, value);
+}
+
+static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
+{
+ int drive_strength;
+ unsigned long reg;
+
+ drive_strength = ath9k_hw_ar9300_get_eeprom(ah, EEP_DRIVE_STRENGTH);
+
+ if (!drive_strength)
+ return;
+
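+ /*
+ * With the drive strength option set, force every 3-bit bias
+ * field in the CH0 BIAS1/2/4 registers to 5, presumably a
+ * reduced-drive setting.
+ */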
+ reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS1);
+ reg &= ~0x00ffffc0;
+ reg |= 0x5 << 21;
+ reg |= 0x5 << 18;
+ reg |= 0x5 << 15;
+ reg |= 0x5 << 12;
+ reg |= 0x5 << 9;
+ reg |= 0x5 << 6;
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS1, reg);
+
+ reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS2);
+ reg &= ~0xffffffe0;
+ reg |= 0x5 << 29;
+ reg |= 0x5 << 26;
+ reg |= 0x5 << 23;
+ reg |= 0x5 << 20;
+ reg |= 0x5 << 17;
+ reg |= 0x5 << 14;
+ reg |= 0x5 << 11;
+ reg |= 0x5 << 8;
+ reg |= 0x5 << 5;
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS2, reg);
+
+ reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS4);
+ reg &= ~0xff800000;
+ reg |= 0x5 << 29;
+ reg |= 0x5 << 26;
+ reg |= 0x5 << 23;
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS4, reg);
+}
+
+static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
+{
+ int internal_regulator =
+ ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
+
+ if (internal_regulator) {
+ /* Internal regulator is ON. Write swreg register. */
+ int swreg = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
+ REG_WRITE(ah, AR_RTC_REG_CONTROL1,
+ REG_READ(ah, AR_RTC_REG_CONTROL1) &
+ (~AR_RTC_REG_CONTROL1_SWREG_PROGRAM));
+ REG_WRITE(ah, AR_RTC_REG_CONTROL0, swreg);
+ /* Set REG_CONTROL1.SWREG_PROGRAM */
+ REG_WRITE(ah, AR_RTC_REG_CONTROL1,
+ REG_READ(ah,
+ AR_RTC_REG_CONTROL1) |
+ AR_RTC_REG_CONTROL1_SWREG_PROGRAM);
+ } else {
+ REG_WRITE(ah, AR_RTC_SLEEP_CLK,
+ (REG_READ(ah,
+ AR_RTC_SLEEP_CLK) |
+ AR_RTC_FORCE_SWREG_PRD));
+ }
+}
+
+static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ar9003_hw_xpa_bias_level_apply(ah, IS_CHAN_2GHZ(chan));
+ ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
+ ar9003_hw_drive_strength_apply(ah);
+ ar9003_hw_internal_regulator_apply(ah);
+}
+
+static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+}
+
+/*
+ * Returns the interpolated y value corresponding to the specified x value
+ * from the np ordered pairs of data (px,py).
+ * The pairs do not have to be in any order.
+ * If the specified x value is less than any of the px,
+ * the returned y value is equal to the py for the lowest px.
+ * If the specified x value is greater than any of the px,
+ * the returned y value is equal to the py for the highest px.
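+ * For example, with px = {5180, 5320} and py = {10, 20}, x = 5250
+ * interpolates to y = 15.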
+ */
+static int ar9003_hw_power_interpolate(int32_t x,
+ int32_t *px, int32_t *py, u_int16_t np)
+{
+ int ip = 0;
+ int lx = 0, ly = 0, lhave = 0;
+ int hx = 0, hy = 0, hhave = 0;
+ int dx = 0;
+ int y = 0;
+
+ lhave = 0;
+ hhave = 0;
+
+ /* identify best lower and higher x calibration measurement */
+ for (ip = 0; ip < np; ip++) {
+ dx = x - px[ip];
+
+ /* this measurement is higher than our desired x */
+ if (dx <= 0) {
+ if (!hhave || dx > (x - hx)) {
+ /* new best higher x measurement */
+ hx = px[ip];
+ hy = py[ip];
+ hhave = 1;
+ }
+ }
+ /* this measurement is lower than our desired x */
+ if (dx >= 0) {
+ if (!lhave || dx < (x - lx)) {
+ /* new best lower x measurement */
+ lx = px[ip];
+ ly = py[ip];
+ lhave = 1;
+ }
+ }
+ }
+
+ /* the low x is good */
+ if (lhave) {
+ /* so is the high x */
+ if (hhave) {
+ /* they're the same, so just pick one */
+ if (hx == lx)
+ y = ly;
+ else /* interpolate */
+ y = ly + (((x - lx) * (hy - ly)) / (hx - lx));
+ } else /* only low is good, use it */
+ y = ly;
+ } else if (hhave) /* only high is good, use it */
+ y = hy;
+ else /* nothing is good; this should never happen unless np = 0 */
+ y = -(1 << 30);
+ return y;
+}
+
+static u8 ar9003_hw_eeprom_get_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex, u16 freq, bool is2GHz)
+{
+ u16 numPiers, i;
+ s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_legacy *pEepromTargetPwr;
+ u8 *pFreqBin;
+
+ if (is2GHz) {
+ numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower2G;
+ pFreqBin = eep->calTarget_freqbin_2G;
+ } else {
+ numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower5G;
+ pFreqBin = eep->calTarget_freqbin_5G;
+ }
+
+ /*
+ * create array of channels and targetpower from
+ * targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+static u8 ar9003_hw_eeprom_get_ht20_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex,
+ u16 freq, bool is2GHz)
+{
+ u16 numPiers, i;
+ s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_ht *pEepromTargetPwr;
+ u8 *pFreqBin;
+
+ if (is2GHz) {
+ numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower2GHT20;
+ pFreqBin = eep->calTarget_freqbin_2GHT20;
+ } else {
+ numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower5GHT20;
+ pFreqBin = eep->calTarget_freqbin_5GHT20;
+ }
+
+ /*
+ * create array of channels and targetpower
+ * from targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+static u8 ar9003_hw_eeprom_get_ht40_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex,
+ u16 freq, bool is2GHz)
+{
+ u16 numPiers, i;
+ s32 targetPowerArray[AR9300_NUM_5G_40_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_5G_40_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_ht *pEepromTargetPwr;
+ u8 *pFreqBin;
+
+ if (is2GHz) {
+ numPiers = AR9300_NUM_2G_40_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower2GHT40;
+ pFreqBin = eep->calTarget_freqbin_2GHT40;
+ } else {
+ numPiers = AR9300_NUM_5G_40_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower5GHT40;
+ pFreqBin = eep->calTarget_freqbin_5GHT40;
+ }
+
+ /*
+ * create array of channels and targetpower from
+ * targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+static u8 ar9003_hw_eeprom_get_cck_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex, u16 freq)
+{
+ u16 numPiers = AR9300_NUM_2G_CCK_TARGET_POWERS, i;
+ s32 targetPowerArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_legacy *pEepromTargetPwr = eep->calTargetPowerCck;
+ u8 *pFreqBin = eep->calTarget_freqbin_Cck;
+
+ /*
+ * create array of channels and targetpower from
+ * targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = FBIN2FREQ(pFreqBin[i], 1);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+/* Set tx power registers to array of values passed in */
+static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 *pPwrArray)
+{
+#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
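+ /*
+ * POW_SM packs a 6-bit target power at the given bit offset, so
+ * each register below carries four rate entries; the tPow2x values
+ * feeding it are apparently in half-dB steps.
+ */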
+ /* make sure forced gain is not set */
+ REG_WRITE(ah, 0xa458, 0);
+
+ /* Write the OFDM power per rate set */
+
+ /* 6 (LSB), 9, 12, 18 (MSB) */
+ REG_WRITE(ah, 0xa3c0,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
+
+ /* 24 (LSB), 36, 48, 54 (MSB) */
+ REG_WRITE(ah, 0xa3c4,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
+
+ /* Write the CCK power per rate set */
+
+ /* 1L (LSB), reserved, 2L, 2S (MSB) */
+ REG_WRITE(ah, 0xa3c8,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
+ /* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0));
+
+ /* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */
+ REG_WRITE(ah, 0xa3cc,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
+ );
+
+ /* Write the HT20 power per rate set */
+
+ /* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
+ REG_WRITE(ah, 0xa3d0,
+ POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_0_8_16], 0)
+ );
+
+ /* 6 (LSB), 7, 12, 13 (MSB) */
+ REG_WRITE(ah, 0xa3d4,
+ POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_6], 0)
+ );
+
+ /* 14 (LSB), 15, 20, 21 */
+ REG_WRITE(ah, 0xa3e4,
+ POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_14], 0)
+ );
+
+ /* Mixed HT20 and HT40 rates */
+
+ /* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */
+ REG_WRITE(ah, 0xa3e8,
+ POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_22], 0)
+ );
+
+ /*
+ * Write the HT40 power per rate set
+ * correct PAR difference between HT40 and HT20/LEGACY
+ * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB)
+ */
+ REG_WRITE(ah, 0xa3d8,
+ POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_0_8_16], 0)
+ );
+
+ /* 6 (LSB), 7, 12, 13 (MSB) */
+ REG_WRITE(ah, 0xa3dc,
+ POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_6], 0)
+ );
+
+ /* 14 (LSB), 15, 20, 21 */
+ REG_WRITE(ah, 0xa3ec,
+ POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_14], 0)
+ );
+
+ return 0;
+#undef POW_SM
+}
+
+static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq)
+{
+ u8 targetPowerValT2[ar9300RateSize];
+ /* XXX: hard code for now, need to get from eeprom struct */
+ u8 ht40PowerIncForPdadc = 0;
+ bool is2GHz = false;
+ unsigned int i = 0;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (freq < 4000)
+ is2GHz = true;
+
+ targetPowerValT2[ALL_TARGET_LEGACY_6_24] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_6_24, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_36] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_36, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_48] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_48, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_54] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_54, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_1L_5L] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_1L_5L,
+ freq);
+ targetPowerValT2[ALL_TARGET_LEGACY_5S] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_5S, freq);
+ targetPowerValT2[ALL_TARGET_LEGACY_11L] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11L, freq);
+ targetPowerValT2[ALL_TARGET_LEGACY_11S] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11S, freq);
+ targetPowerValT2[ALL_TARGET_HT20_0_8_16] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_1_3_9_11_17_19] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_1_3_9_11_17_19,
+ freq, is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_4] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_5] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_6] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_7] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_12] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_13] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_14] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_15] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_20] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_21] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_22] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_23] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT40_0_8_16] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_1_3_9_11_17_19] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_1_3_9_11_17_19,
+ freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_4] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_5] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_6] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_7] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_12] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_13] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_14] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_15] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_20] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_21] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_22] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_23] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+
+ while (i < ar9300RateSize) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
+ i++;
+
+ ath_print(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
+ i++;
+
+ ath_print(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
+ i++;
+
+ ath_print(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
+ i++;
+ }
+
+ /* Write target power array to registers */
+ ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
+}
+
+static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
+ int mode,
+ int ipier,
+ int ichain,
+ int *pfrequency,
+ int *pcorrection,
+ int *ptemperature, int *pvoltage)
+{
+ u8 *pCalPier;
+ struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct;
+ int is2GHz;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (ichain >= AR9300_MAX_CHAINS) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Invalid chain index, must be less than %d\n",
+ AR9300_MAX_CHAINS);
+ return -1;
+ }
+
+ if (mode) { /* 5GHz */
+ if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Invalid 5GHz cal pier index, must "
+ "be less than %d\n",
+ AR9300_NUM_5G_CAL_PIERS);
+ return -1;
+ }
+ pCalPier = &(eep->calFreqPier5G[ipier]);
+ pCalPierStruct = &(eep->calPierData5G[ichain][ipier]);
+ is2GHz = 0;
+ } else {
+ if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Invalid 2GHz cal pier index, must "
+ "be less than %d\n", AR9300_NUM_2G_CAL_PIERS);
+ return -1;
+ }
+
+ pCalPier = &(eep->calFreqPier2G[ipier]);
+ pCalPierStruct = &(eep->calPierData2G[ichain][ipier]);
+ is2GHz = 1;
+ }
+
+ *pfrequency = FBIN2FREQ(*pCalPier, is2GHz);
+ *pcorrection = pCalPierStruct->refPower;
+ *ptemperature = pCalPierStruct->tempMeas;
+ *pvoltage = pCalPierStruct->voltMeas;
+
+ return 0;
+}
+
+static int ar9003_hw_power_control_override(struct ath_hw *ah,
+ int frequency,
+ int *correction,
+ int *voltage, int *temperature)
+{
+ int tempSlope = 0;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ REG_RMW(ah, AR_PHY_TPC_11_B0,
+ (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
+ REG_RMW(ah, AR_PHY_TPC_11_B1,
+ (correction[1] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
+ REG_RMW(ah, AR_PHY_TPC_11_B2,
+ (correction[2] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
+
+ /* enable open loop power control on chip */
+ REG_RMW(ah, AR_PHY_TPC_6_B0,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
+ REG_RMW(ah, AR_PHY_TPC_6_B1,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
+ REG_RMW(ah, AR_PHY_TPC_6_B2,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
+
+ /*
+ * enable temperature compensation
+ * Need to use register names
+ */
+ if (frequency < 4000)
+ tempSlope = eep->modalHeader2G.tempSlope;
+ else
+ tempSlope = eep->modalHeader5G.tempSlope;
+
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_18, AR_PHY_TPC_18_THERM_CAL_VALUE,
+ temperature[0]);
+
+ return 0;
+}
+
+/* Apply the recorded correction values. */
+static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
+{
+ int ichain, ipier, npier;
+ int mode;
+ int lfrequency[AR9300_MAX_CHAINS],
+ lcorrection[AR9300_MAX_CHAINS],
+ ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS];
+ int hfrequency[AR9300_MAX_CHAINS],
+ hcorrection[AR9300_MAX_CHAINS],
+ htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS];
+ int fdiff;
+ int correction[AR9300_MAX_CHAINS],
+ voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS];
+ int pfrequency, pcorrection, ptemperature, pvoltage;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ mode = (frequency >= 4000);
+ if (mode)
+ npier = AR9300_NUM_5G_CAL_PIERS;
+ else
+ npier = AR9300_NUM_2G_CAL_PIERS;
+
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
+ lfrequency[ichain] = 0;
+ hfrequency[ichain] = 100000;
+ }
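+ /*
+ * lfrequency == 0 and hfrequency == 100000 act as "no pier found
+ * yet" sentinels for the search below.
+ */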
+ /* identify best lower and higher frequency calibration measurement */
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
+ for (ipier = 0; ipier < npier; ipier++) {
+ if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain,
+ &pfrequency, &pcorrection,
+ &ptemperature, &pvoltage)) {
+ fdiff = frequency - pfrequency;
+
+ /*
+ * this measurement is higher than
+ * our desired frequency
+ */
+ if (fdiff <= 0) {
+ if (hfrequency[ichain] <= 0 ||
+ hfrequency[ichain] >= 100000 ||
+ fdiff >
+ (frequency - hfrequency[ichain])) {
+ /*
+ * new best higher
+ * frequency measurement
+ */
+ hfrequency[ichain] = pfrequency;
+ hcorrection[ichain] =
+ pcorrection;
+ htemperature[ichain] =
+ ptemperature;
+ hvoltage[ichain] = pvoltage;
+ }
+ }
+ if (fdiff >= 0) {
+ if (lfrequency[ichain] <= 0
+ || fdiff <
+ (frequency - lfrequency[ichain])) {
+ /*
+ * new best lower
+ * frequency measurement
+ */
+ lfrequency[ichain] = pfrequency;
+ lcorrection[ichain] =
+ pcorrection;
+ ltemperature[ichain] =
+ ptemperature;
+ lvoltage[ichain] = pvoltage;
+ }
+ }
+ }
+ }
+ }
+
+ /* interpolate */
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "ch=%d f=%d low=%d %d h=%d %d\n",
+ ichain, frequency, lfrequency[ichain],
+ lcorrection[ichain], hfrequency[ichain],
+ hcorrection[ichain]);
+ /* they're the same, so just pick one */
+ if (hfrequency[ichain] == lfrequency[ichain]) {
+ correction[ichain] = lcorrection[ichain];
+ voltage[ichain] = lvoltage[ichain];
+ temperature[ichain] = ltemperature[ichain];
+ }
+ /* the low frequency is good */
+ else if (frequency - lfrequency[ichain] < 1000) {
+ /* so is the high frequency, interpolate */
+ if (hfrequency[ichain] - frequency < 1000) {
+
+ correction[ichain] = lcorrection[ichain] +
+ (((frequency - lfrequency[ichain]) *
+ (hcorrection[ichain] -
+ lcorrection[ichain])) /
+ (hfrequency[ichain] - lfrequency[ichain]));
+
+ temperature[ichain] = ltemperature[ichain] +
+ (((frequency - lfrequency[ichain]) *
+ (htemperature[ichain] -
+ ltemperature[ichain])) /
+ (hfrequency[ichain] - lfrequency[ichain]));
+
+ voltage[ichain] =
+ lvoltage[ichain] +
+ (((frequency -
+ lfrequency[ichain]) * (hvoltage[ichain] -
+ lvoltage[ichain]))
+ / (hfrequency[ichain] -
+ lfrequency[ichain]));
+ }
+ /* only low is good, use it */
+ else {
+ correction[ichain] = lcorrection[ichain];
+ temperature[ichain] = ltemperature[ichain];
+ voltage[ichain] = lvoltage[ichain];
+ }
+ }
+ /* only high is good, use it */
+ else if (hfrequency[ichain] - frequency < 1000) {
+ correction[ichain] = hcorrection[ichain];
+ temperature[ichain] = htemperature[ichain];
+ voltage[ichain] = hvoltage[ichain];
+ } else { /* nothing is good, presume 0 */
+ correction[ichain] = 0;
+ temperature[ichain] = 0;
+ voltage[ichain] = 0;
+ }
+ }
+
+ ar9003_hw_power_control_override(ah, frequency, correction, voltage,
+ temperature);
+
+ ath_print(common, ATH_DBG_EEPROM,
+ "for frequency=%d, calibration correction = %d %d %d\n",
+ frequency, correction[0], correction[1], correction[2]);
+
+ return 0;
+}
+
+static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
+ struct ath9k_channel *chan, u16 cfgCtl,
+ u8 twiceAntennaReduction,
+ u8 twiceMaxRegulatoryPower,
+ u8 powerLimit)
+{
+ ah->txpower_limit = powerLimit;
+ ar9003_hw_set_target_power_eeprom(ah, chan->channel);
+ ar9003_hw_calibration_apply(ah, chan->channel);
+}
+
+static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah,
+ u16 i, bool is2GHz)
+{
+ return AR_NO_SPUR;
+}
+
+s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ return (eep->baseEepHeader.txrxgain >> 4) & 0xf; /* bits 7:4 */
+}
+
+s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ return (eep->baseEepHeader.txrxgain) & 0xf; /* bits 3:0 */
+}
+
+const struct eeprom_ops eep_ar9300_ops = {
+ .check_eeprom = ath9k_hw_ar9300_check_eeprom,
+ .get_eeprom = ath9k_hw_ar9300_get_eeprom,
+ .fill_eeprom = ath9k_hw_ar9300_fill_eeprom,
+ .get_eeprom_ver = ath9k_hw_ar9300_get_eeprom_ver,
+ .get_eeprom_rev = ath9k_hw_ar9300_get_eeprom_rev,
+ .get_num_ant_config = ath9k_hw_ar9300_get_num_ant_config,
+ .get_eeprom_antenna_cfg = ath9k_hw_ar9300_get_eeprom_antenna_cfg,
+ .set_board_values = ath9k_hw_ar9300_set_board_values,
+ .set_addac = ath9k_hw_ar9300_set_addac,
+ .set_txpower = ath9k_hw_ar9300_set_txpower,
+ .get_spur_channel = ath9k_hw_ar9300_get_spur_channel
+};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
new file mode 100644
index 0000000..23fb353
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -0,0 +1,323 @@
+#ifndef AR9003_EEPROM_H
+#define AR9003_EEPROM_H
+
+#include <linux/types.h>
+
+#define AR9300_EEP_VER 0xD000
+#define AR9300_EEP_VER_MINOR_MASK 0xFFF
+#define AR9300_EEP_MINOR_VER_1 0x1
+#define AR9300_EEP_MINOR_VER AR9300_EEP_MINOR_VER_1
+
+/* 16-bit offset location start of calibration struct */
+#define AR9300_EEP_START_LOC 256
+#define AR9300_NUM_5G_CAL_PIERS 8
+#define AR9300_NUM_2G_CAL_PIERS 3
+#define AR9300_NUM_5G_20_TARGET_POWERS 8
+#define AR9300_NUM_5G_40_TARGET_POWERS 8
+#define AR9300_NUM_2G_CCK_TARGET_POWERS 2
+#define AR9300_NUM_2G_20_TARGET_POWERS 3
+#define AR9300_NUM_2G_40_TARGET_POWERS 3
+/* #define AR9300_NUM_CTLS 21 */
+#define AR9300_NUM_CTLS_5G 9
+#define AR9300_NUM_CTLS_2G 12
+#define AR9300_CTL_MODE_M 0xF
+#define AR9300_NUM_BAND_EDGES_5G 8
+#define AR9300_NUM_BAND_EDGES_2G 4
+#define AR9300_NUM_PD_GAINS 4
+#define AR9300_PD_GAINS_IN_MASK 4
+#define AR9300_PD_GAIN_ICEPTS 5
+#define AR9300_EEPROM_MODAL_SPURS 5
+#define AR9300_MAX_RATE_POWER 63
+#define AR9300_NUM_PDADC_VALUES 128
+#define AR9300_NUM_RATES 16
+#define AR9300_BCHAN_UNUSED 0xFF
+#define AR9300_MAX_PWR_RANGE_IN_HALF_DB 64
+#define AR9300_OPFLAGS_11A 0x01
+#define AR9300_OPFLAGS_11G 0x02
+#define AR9300_OPFLAGS_5G_HT40 0x04
+#define AR9300_OPFLAGS_2G_HT40 0x08
+#define AR9300_OPFLAGS_5G_HT20 0x10
+#define AR9300_OPFLAGS_2G_HT20 0x20
+#define AR9300_EEPMISC_BIG_ENDIAN 0x01
+#define AR9300_EEPMISC_WOW 0x02
+#define AR9300_CUSTOMER_DATA_SIZE 20
+
+#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
+#define FBIN2FREQ(x, y) ((y) ? (2300 + (x)) : (4800 + 5 * (x)))
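+/*
+ * Example: FREQ2FBIN(2412, 1) == 112 and FREQ2FBIN(5180, 0) == 76;
+ * 2 GHz bins are MHz offsets from 2300, 5 GHz bins count 5 MHz steps
+ * from 4800.
+ */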
+#define AR9300_MAX_CHAINS 3
+#define AR9300_ANT_16S 25
+#define AR9300_FUTURE_MODAL_SZ 6
+
+#define AR9300_NUM_ANT_CHAIN_FIELDS 7
+#define AR9300_NUM_ANT_COMMON_FIELDS 4
+#define AR9300_SIZE_ANT_CHAIN_FIELD 3
+#define AR9300_SIZE_ANT_COMMON_FIELD 4
+#define AR9300_ANT_CHAIN_MASK 0x7
+#define AR9300_ANT_COMMON_MASK 0xf
+#define AR9300_CHAIN_0_IDX 0
+#define AR9300_CHAIN_1_IDX 1
+#define AR9300_CHAIN_2_IDX 2
+
+#define AR928X_NUM_ANT_CHAIN_FIELDS 6
+#define AR928X_SIZE_ANT_CHAIN_FIELD 2
+#define AR928X_ANT_CHAIN_MASK 0x3
+
+/* Delta from which to start power to pdadc table */
+/* This offset is used in both open loop and closed loop power control
+ * schemes. In open loop power control, it is not really needed, but for
+ * the "sake of consistency" it was kept. For certain AP designs, this
+ * value is overwritten by the value in the flag "pwrTableOffset" just
+ * before writing the pdadc vs pwr into the chip registers.
+ */
+#define AR9300_PWR_TABLE_OFFSET 0
+
+/* enable flags for voltage and temp compensation */
+#define ENABLE_TEMP_COMPENSATION 0x01
+#define ENABLE_VOLT_COMPENSATION 0x02
+/* byte addressable */
+#define AR9300_EEPROM_SIZE (16*1024)
+#define FIXED_CCA_THRESHOLD 15
+
+#define AR9300_BASE_ADDR 0x3ff
+
+enum targetPowerHTRates {
+ HT_TARGET_RATE_0_8_16,
+ HT_TARGET_RATE_1_3_9_11_17_19,
+ HT_TARGET_RATE_4,
+ HT_TARGET_RATE_5,
+ HT_TARGET_RATE_6,
+ HT_TARGET_RATE_7,
+ HT_TARGET_RATE_12,
+ HT_TARGET_RATE_13,
+ HT_TARGET_RATE_14,
+ HT_TARGET_RATE_15,
+ HT_TARGET_RATE_20,
+ HT_TARGET_RATE_21,
+ HT_TARGET_RATE_22,
+ HT_TARGET_RATE_23
+};
+
+enum targetPowerLegacyRates {
+ LEGACY_TARGET_RATE_6_24,
+ LEGACY_TARGET_RATE_36,
+ LEGACY_TARGET_RATE_48,
+ LEGACY_TARGET_RATE_54
+};
+
+enum targetPowerCckRates {
+ LEGACY_TARGET_RATE_1L_5L,
+ LEGACY_TARGET_RATE_5S,
+ LEGACY_TARGET_RATE_11L,
+ LEGACY_TARGET_RATE_11S
+};
+
+enum ar9300_Rates {
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_36,
+ ALL_TARGET_LEGACY_48,
+ ALL_TARGET_LEGACY_54,
+ ALL_TARGET_LEGACY_1L_5L,
+ ALL_TARGET_LEGACY_5S,
+ ALL_TARGET_LEGACY_11L,
+ ALL_TARGET_LEGACY_11S,
+ ALL_TARGET_HT20_0_8_16,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_4,
+ ALL_TARGET_HT20_5,
+ ALL_TARGET_HT20_6,
+ ALL_TARGET_HT20_7,
+ ALL_TARGET_HT20_12,
+ ALL_TARGET_HT20_13,
+ ALL_TARGET_HT20_14,
+ ALL_TARGET_HT20_15,
+ ALL_TARGET_HT20_20,
+ ALL_TARGET_HT20_21,
+ ALL_TARGET_HT20_22,
+ ALL_TARGET_HT20_23,
+ ALL_TARGET_HT40_0_8_16,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_4,
+ ALL_TARGET_HT40_5,
+ ALL_TARGET_HT40_6,
+ ALL_TARGET_HT40_7,
+ ALL_TARGET_HT40_12,
+ ALL_TARGET_HT40_13,
+ ALL_TARGET_HT40_14,
+ ALL_TARGET_HT40_15,
+ ALL_TARGET_HT40_20,
+ ALL_TARGET_HT40_21,
+ ALL_TARGET_HT40_22,
+ ALL_TARGET_HT40_23,
+ ar9300RateSize,
+};
+
+
+struct eepFlags {
+ u8 opFlags;
+ u8 eepMisc;
+} __packed;
+
+enum CompressAlgorithm {
+ _CompressNone = 0,
+ _CompressLzma,
+ _CompressPairs,
+ _CompressBlock,
+ _Compress4,
+ _Compress5,
+ _Compress6,
+ _Compress7,
+};
+
+struct ar9300_base_eep_hdr {
+ __le16 regDmn[2];
+ /* 4 bits tx and 4 bits rx */
+ u8 txrxMask;
+ struct eepFlags opCapFlags;
+ u8 rfSilent;
+ u8 blueToothOptions;
+ u8 deviceCap;
+ /* takes lower byte in eeprom location */
+ u8 deviceType;
+ /* offset in dB to be added to beginning
+ * of pdadc table in calibration
+ */
+ int8_t pwrTableOffset;
+ u8 params_for_tuning_caps[2];
+ /*
+ * bit0 - enable tx temp comp
+ * bit1 - enable tx volt comp
+ * bit2 - enable fastClock - default to 1
+ * bit3 - enable doubling - default to 1
+ * bit4 - enable internal regulator - default to 1
+ */
+ u8 featureEnable;
+ /* misc flags: bit0 - turn down drivestrength */
+ u8 miscConfiguration;
+ u8 eepromWriteEnableGpio;
+ u8 wlanDisableGpio;
+ u8 wlanLedGpio;
+ u8 rxBandSelectGpio;
+ u8 txrxgain;
+ /* SW controlled internal regulator fields */
+ __le32 swreg;
+} __packed;
+
+struct ar9300_modal_eep_header {
+ /* 4 idle, t1, t2, b (4 bits per setting) */
+ __le32 antCtrlCommon;
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ __le32 antCtrlCommon2;
+ /* 6 idle, t, r, rx1, rx12, b (2 bits each) */
+ __le16 antCtrlChain[AR9300_MAX_CHAINS];
+ /* 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
+ u8 xatten1DB[AR9300_MAX_CHAINS];
+ /* 3 xatten1_margin for merlin (0xa20c/b20c 16:12) */
+ u8 xatten1Margin[AR9300_MAX_CHAINS];
+ int8_t tempSlope;
+ int8_t voltSlope;
+ /* spur channels in usual fbin coding format */
+ u8 spurChans[AR9300_EEPROM_MODAL_SPURS];
+ /* 3 Check if the register is per chain */
+ int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS];
+ u8 ob[AR9300_MAX_CHAINS];
+ u8 db_stage2[AR9300_MAX_CHAINS];
+ u8 db_stage3[AR9300_MAX_CHAINS];
+ u8 db_stage4[AR9300_MAX_CHAINS];
+ u8 xpaBiasLvl;
+ u8 txFrameToDataStart;
+ u8 txFrameToPaOn;
+ u8 txClip;
+ int8_t antennaGain;
+ u8 switchSettling;
+ int8_t adcDesiredSize;
+ u8 txEndToXpaOff;
+ u8 txEndToRxOn;
+ u8 txFrameToXpaOn;
+ u8 thresh62;
+ u8 futureModal[32];
+} __packed;
+
+struct ar9300_cal_data_per_freq_op_loop {
+ int8_t refPower;
+ /* pdadc voltage at power measurement */
+ u8 voltMeas;
+ /* pcdac used for power measurement */
+ u8 tempMeas;
+ /* range is -60 to -127; create a mapping equation with 1 dB resolution */
+ int8_t rxNoisefloorCal;
+ /* range is the same as noisefloor */
+ int8_t rxNoisefloorPower;
+ /* temp measured when noisefloor cal was performed */
+ u8 rxTempMeas;
+} __packed;
+
+struct cal_tgt_pow_legacy {
+ u8 tPow2x[4];
+} __packed;
+
+struct cal_tgt_pow_ht {
+ u8 tPow2x[14];
+} __packed;
+
+struct cal_ctl_edge_pwr {
+ u8 tPower:6,
+ flag:2;
+} __packed;
+
+struct cal_ctl_data_2g {
+ struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_2G];
+} __packed;
+
+struct cal_ctl_data_5g {
+ struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_5G];
+} __packed;
+
+struct ar9300_eeprom {
+ u8 eepromVersion;
+ u8 templateVersion;
+ u8 macAddr[6];
+ u8 custData[AR9300_CUSTOMER_DATA_SIZE];
+
+ struct ar9300_base_eep_hdr baseEepHeader;
+
+ struct ar9300_modal_eep_header modalHeader2G;
+ u8 calFreqPier2G[AR9300_NUM_2G_CAL_PIERS];
+ struct ar9300_cal_data_per_freq_op_loop
+ calPierData2G[AR9300_MAX_CHAINS][AR9300_NUM_2G_CAL_PIERS];
+ u8 calTarget_freqbin_Cck[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ u8 calTarget_freqbin_2G[AR9300_NUM_2G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_2GHT20[AR9300_NUM_2G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_2GHT40[AR9300_NUM_2G_40_TARGET_POWERS];
+ struct cal_tgt_pow_legacy
+ calTargetPowerCck[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ struct cal_tgt_pow_legacy
+ calTargetPower2G[AR9300_NUM_2G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower2GHT20[AR9300_NUM_2G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower2GHT40[AR9300_NUM_2G_40_TARGET_POWERS];
+ u8 ctlIndex_2G[AR9300_NUM_CTLS_2G];
+ u8 ctl_freqbin_2G[AR9300_NUM_CTLS_2G][AR9300_NUM_BAND_EDGES_2G];
+ struct cal_ctl_data_2g ctlPowerData_2G[AR9300_NUM_CTLS_2G];
+ struct ar9300_modal_eep_header modalHeader5G;
+ u8 calFreqPier5G[AR9300_NUM_5G_CAL_PIERS];
+ struct ar9300_cal_data_per_freq_op_loop
+ calPierData5G[AR9300_MAX_CHAINS][AR9300_NUM_5G_CAL_PIERS];
+ u8 calTarget_freqbin_5G[AR9300_NUM_5G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_5GHT20[AR9300_NUM_5G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_5GHT40[AR9300_NUM_5G_40_TARGET_POWERS];
+ struct cal_tgt_pow_legacy
+ calTargetPower5G[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower5GHT20[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower5GHT40[AR9300_NUM_5G_40_TARGET_POWERS];
+ u8 ctlIndex_5G[AR9300_NUM_CTLS_5G];
+ u8 ctl_freqbin_5G[AR9300_NUM_CTLS_5G][AR9300_NUM_BAND_EDGES_5G];
+ struct cal_ctl_data_5g ctlPowerData_5G[AR9300_NUM_CTLS_5G];
+} __packed;
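+/* The struct above mirrors the on-EEPROM byte layout: it is __packed and
+ * all multi-byte fields are stored little-endian (__le16/__le32).
+ */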
+
+s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah);
+s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
new file mode 100644
index 0000000..b15309c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_mac.h"
+#include "ar9003_initvals.h"
+
+/* General hardware code for the AR9003 hardware family */
+
+static bool ar9003_hw_macversion_supported(u32 macversion)
+{
+ switch (macversion) {
+ case AR_SREV_VERSION_9300:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+/* AR9003 2.0 - new INI format (pre, core, post arrays per subsystem) */
+/*
+ * XXX: move TX/RX gain INI to its own init_mode_gain_regs after
+ * ensuring it does not affect hardware bring up
+ */
+static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
+{
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9300_2p0_mac_core,
+ ARRAY_SIZE(ar9300_2p0_mac_core), 2);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9300_2p0_mac_postamble,
+ ARRAY_SIZE(ar9300_2p0_mac_postamble), 5);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9300_2p0_baseband_core,
+ ARRAY_SIZE(ar9300_2p0_baseband_core), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9300_2p0_baseband_postamble,
+ ARRAY_SIZE(ar9300_2p0_baseband_postamble), 5);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9300_2p0_radio_core,
+ ARRAY_SIZE(ar9300_2p0_radio_core), 2);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9300_2p0_radio_postamble,
+ ARRAY_SIZE(ar9300_2p0_radio_postamble), 5);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9300_2p0_soc_preamble,
+ ARRAY_SIZE(ar9300_2p0_soc_preamble), 2);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9300_2p0_soc_postamble,
+ ARRAY_SIZE(ar9300_2p0_soc_postamble), 5);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Common_rx_gain_table_2p0), 2);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
+ 5);
+
+ /* Load PCIE SERDES settings from INI */
+
+ /* Awake Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9300PciePhy_pll_on_clkreq_disable_L1_2p0,
+ ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p0),
+ 2);
+
+ /* Sleep Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9300PciePhy_clkreq_enable_L1_2p0,
+ ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p0),
+ 2);
+
+ /* Fast clock modal settings */
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9300Modes_fast_clock_2p0,
+ ARRAY_SIZE(ar9300Modes_fast_clock_2p0),
+ 3);
+}
+
+static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
+{
+ switch (ar9003_hw_get_tx_gain_idx(ah)) {
+ case 0:
+ default:
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
+ 5);
+ break;
+ case 1:
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p0),
+ 5);
+ break;
+ case 2:
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_low_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p0),
+ 5);
+ break;
+ }
+}
+
+static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
+{
+ switch (ar9003_hw_get_rx_gain_idx(ah)) {
+ case 0:
+ default:
+ INIT_INI_ARRAY(&ah->iniModesRxGain, ar9300Common_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Common_rx_gain_table_2p0),
+ 2);
+ break;
+ case 1:
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_wo_xlna_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p0),
+ 2);
+ break;
+ }
+}
+
+/* set gain table pointers according to values read from the eeprom */
+static void ar9003_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ ar9003_tx_gain_table_apply(ah);
+ ar9003_rx_gain_table_apply(ah);
+}
+
+/*
+ * Helper for ASPM support.
+ *
+ * Disable PLL when in L0s as well as receiver clock when in L1.
+ * This power saving option must be enabled through the SerDes.
+ *
+ * Programming the SerDes must go through the same 288 bit serial shift
+ * register as the other analog registers. Hence the 9 writes.
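+ * (9 writes of 32 bits each cover the 288-bit shift register.)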
+ */
+static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
+ int restore,
+ int power_off)
+{
+ if (!ah->is_pciexpress)
+ return;
+
+ /* Do not touch SerDes registers */
+ if (ah->config.pcie_powersave_enable == 2)
+ return;
+
+ /* Nothing to do on restore for 11N */
+ if (!restore) {
+ /* set bit 19 to allow forcing of pcie core into L1 state */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+
+ /* Several PCIe messages to ensure proper behaviour */
+ if (ah->config.pcie_waen)
+ REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
+ }
+}
+
+/* Sets up the AR9003 hardware family callbacks */
+void ar9003_hw_attach_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_mode_regs = ar9003_hw_init_mode_regs;
+ priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
+ priv_ops->macversion_supported = ar9003_hw_macversion_supported;
+
+ ops->config_pci_powersave = ar9003_hw_configpcipowersave;
+
+ ar9003_hw_attach_phy_ops(ah);
+ ar9003_hw_attach_calib_ops(ah);
+ ar9003_hw_attach_mac_ops(ah);
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_initvals.h
new file mode 100644
index 0000000..db019dd
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_initvals.h
@@ -0,0 +1,1784 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9003_H
+#define INITVALS_9003_H
+
+/* AR9003 2.0 */
+
+static const u32 ar9300_2p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
+ {0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
+ {0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20},
+ {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001690c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Modes_fast_clock_2p0[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009e00, 0x03721821, 0x03721821},
+ {0x0000a230, 0x0000000b, 0x00000016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9300_2p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x76d005b5},
+ {0x00016050, 0x556cf031},
+ {0x00016054, 0x13449440},
+ {0x00016058, 0x0c51c92c},
+ {0x0001605c, 0x3db7fffc},
+ {0x00016060, 0xfffffffc},
+ {0x00016064, 0x000f0278},
+ {0x0001606c, 0x6db60000},
+ {0x00016080, 0x00000000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x54214514},
+ {0x0001608c, 0x119f481e},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd2888888},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480080},
+ {0x000160c0, 0x00adb6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x01e6c000},
+ {0x00016100, 0x3fffbe01},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x00000000},
+ {0x00016280, 0x058a0001},
+ {0x00016284, 0x3d840208},
+ {0x00016288, 0x05a20408},
+ {0x0001628c, 0x00038c07},
+ {0x00016290, 0x40000004},
+ {0x00016294, 0x458aa14f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x76d005b5},
+ {0x00016450, 0x556cf031},
+ {0x00016454, 0x13449440},
+ {0x00016458, 0x0c51c92c},
+ {0x0001645c, 0x3db7fffc},
+ {0x00016460, 0xfffffffc},
+ {0x00016464, 0x000f0278},
+ {0x0001646c, 0x6db60000},
+ {0x00016500, 0x3fffbe01},
+ {0x00016504, 0xfff80000},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x00000000},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00800700},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+ {0x00016800, 0x36db6db6},
+ {0x00016804, 0x6db6db40},
+ {0x00016808, 0x73f00000},
+ {0x0001680c, 0x00000000},
+ {0x00016840, 0x7f80fff8},
+ {0x0001684c, 0x76d005b5},
+ {0x00016850, 0x556cf031},
+ {0x00016854, 0x13449440},
+ {0x00016858, 0x0c51c92c},
+ {0x0001685c, 0x3db7fffc},
+ {0x00016860, 0xfffffffc},
+ {0x00016864, 0x000f0278},
+ {0x0001686c, 0x6db60000},
+ {0x00016900, 0x3fffbe01},
+ {0x00016904, 0xfff80000},
+ {0x00016908, 0x00080010},
+ {0x00016944, 0x02084080},
+ {0x00016948, 0x00000000},
+ {0x00016b80, 0x00000000},
+ {0x00016b84, 0x00000000},
+ {0x00016b88, 0x00800700},
+ {0x00016b8c, 0x00800700},
+ {0x00016b90, 0x00800700},
+ {0x00016b94, 0x00000000},
+ {0x00016b98, 0x00000000},
+ {0x00016b9c, 0x00000000},
+ {0x00016ba0, 0x00000001},
+ {0x00016ba4, 0x00000001},
+ {0x00016ba8, 0x00000000},
+ {0x00016bac, 0x00000000},
+ {0x00016bb0, 0x00000000},
+ {0x00016bb4, 0x00000000},
+ {0x00016bb8, 0x00000000},
+ {0x00016bbc, 0x00000000},
+ {0x00016bc0, 0x000000a0},
+ {0x00016bc4, 0x000c0000},
+ {0x00016bc8, 0x14021402},
+ {0x00016bcc, 0x00001402},
+ {0x00016bd0, 0x00000000},
+ {0x00016bd4, 0x00000000},
+};
+
+static const u32 ar9300Common_rx_gain_table_merlin_2p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x02000101},
+ {0x0000a004, 0x02000102},
+ {0x0000a008, 0x02000103},
+ {0x0000a00c, 0x02000104},
+ {0x0000a010, 0x02000200},
+ {0x0000a014, 0x02000201},
+ {0x0000a018, 0x02000202},
+ {0x0000a01c, 0x02000203},
+ {0x0000a020, 0x02000204},
+ {0x0000a024, 0x02000205},
+ {0x0000a028, 0x02000208},
+ {0x0000a02c, 0x02000302},
+ {0x0000a030, 0x02000303},
+ {0x0000a034, 0x02000304},
+ {0x0000a038, 0x02000400},
+ {0x0000a03c, 0x02010300},
+ {0x0000a040, 0x02010301},
+ {0x0000a044, 0x02010302},
+ {0x0000a048, 0x02000500},
+ {0x0000a04c, 0x02010400},
+ {0x0000a050, 0x02020300},
+ {0x0000a054, 0x02020301},
+ {0x0000a058, 0x02020302},
+ {0x0000a05c, 0x02020303},
+ {0x0000a060, 0x02020400},
+ {0x0000a064, 0x02030300},
+ {0x0000a068, 0x02030301},
+ {0x0000a06c, 0x02030302},
+ {0x0000a070, 0x02030303},
+ {0x0000a074, 0x02030400},
+ {0x0000a078, 0x02040300},
+ {0x0000a07c, 0x02040301},
+ {0x0000a080, 0x02040302},
+ {0x0000a084, 0x02040303},
+ {0x0000a088, 0x02030500},
+ {0x0000a08c, 0x02040400},
+ {0x0000a090, 0x02050203},
+ {0x0000a094, 0x02050204},
+ {0x0000a098, 0x02050205},
+ {0x0000a09c, 0x02040500},
+ {0x0000a0a0, 0x02050301},
+ {0x0000a0a4, 0x02050302},
+ {0x0000a0a8, 0x02050303},
+ {0x0000a0ac, 0x02050400},
+ {0x0000a0b0, 0x02050401},
+ {0x0000a0b4, 0x02050402},
+ {0x0000a0b8, 0x02050403},
+ {0x0000a0bc, 0x02050500},
+ {0x0000a0c0, 0x02050501},
+ {0x0000a0c4, 0x02050502},
+ {0x0000a0c8, 0x02050503},
+ {0x0000a0cc, 0x02050504},
+ {0x0000a0d0, 0x02050600},
+ {0x0000a0d4, 0x02050601},
+ {0x0000a0d8, 0x02050602},
+ {0x0000a0dc, 0x02050603},
+ {0x0000a0e0, 0x02050604},
+ {0x0000a0e4, 0x02050700},
+ {0x0000a0e8, 0x02050701},
+ {0x0000a0ec, 0x02050702},
+ {0x0000a0f0, 0x02050703},
+ {0x0000a0f4, 0x02050704},
+ {0x0000a0f8, 0x02050705},
+ {0x0000a0fc, 0x02050708},
+ {0x0000a100, 0x02050709},
+ {0x0000a104, 0x0205070a},
+ {0x0000a108, 0x0205070b},
+ {0x0000a10c, 0x0205070c},
+ {0x0000a110, 0x0205070d},
+ {0x0000a114, 0x02050710},
+ {0x0000a118, 0x02050711},
+ {0x0000a11c, 0x02050712},
+ {0x0000a120, 0x02050713},
+ {0x0000a124, 0x02050714},
+ {0x0000a128, 0x02050715},
+ {0x0000a12c, 0x02050730},
+ {0x0000a130, 0x02050731},
+ {0x0000a134, 0x02050732},
+ {0x0000a138, 0x02050733},
+ {0x0000a13c, 0x02050734},
+ {0x0000a140, 0x02050735},
+ {0x0000a144, 0x02050750},
+ {0x0000a148, 0x02050751},
+ {0x0000a14c, 0x02050752},
+ {0x0000a150, 0x02050753},
+ {0x0000a154, 0x02050754},
+ {0x0000a158, 0x02050755},
+ {0x0000a15c, 0x02050770},
+ {0x0000a160, 0x02050771},
+ {0x0000a164, 0x02050772},
+ {0x0000a168, 0x02050773},
+ {0x0000a16c, 0x02050774},
+ {0x0000a170, 0x02050775},
+ {0x0000a174, 0x00000776},
+ {0x0000a178, 0x00000776},
+ {0x0000a17c, 0x00000776},
+ {0x0000a180, 0x00000776},
+ {0x0000a184, 0x00000776},
+ {0x0000a188, 0x00000776},
+ {0x0000a18c, 0x00000776},
+ {0x0000a190, 0x00000776},
+ {0x0000a194, 0x00000776},
+ {0x0000a198, 0x00000776},
+ {0x0000a19c, 0x00000776},
+ {0x0000a1a0, 0x00000776},
+ {0x0000a1a4, 0x00000776},
+ {0x0000a1a8, 0x00000776},
+ {0x0000a1ac, 0x00000776},
+ {0x0000a1b0, 0x00000776},
+ {0x0000a1b4, 0x00000776},
+ {0x0000a1b8, 0x00000776},
+ {0x0000a1bc, 0x00000776},
+ {0x0000a1c0, 0x00000776},
+ {0x0000a1c4, 0x00000776},
+ {0x0000a1c8, 0x00000776},
+ {0x0000a1cc, 0x00000776},
+ {0x0000a1d0, 0x00000776},
+ {0x0000a1d4, 0x00000776},
+ {0x0000a1d8, 0x00000776},
+ {0x0000a1dc, 0x00000776},
+ {0x0000a1e0, 0x00000776},
+ {0x0000a1e4, 0x00000776},
+ {0x0000a1e8, 0x00000776},
+ {0x0000a1ec, 0x00000776},
+ {0x0000a1f0, 0x00000776},
+ {0x0000a1f4, 0x00000776},
+ {0x0000a1f8, 0x00000776},
+ {0x0000a1fc, 0x00000776},
+ {0x0000b000, 0x02000101},
+ {0x0000b004, 0x02000102},
+ {0x0000b008, 0x02000103},
+ {0x0000b00c, 0x02000104},
+ {0x0000b010, 0x02000200},
+ {0x0000b014, 0x02000201},
+ {0x0000b018, 0x02000202},
+ {0x0000b01c, 0x02000203},
+ {0x0000b020, 0x02000204},
+ {0x0000b024, 0x02000205},
+ {0x0000b028, 0x02000208},
+ {0x0000b02c, 0x02000302},
+ {0x0000b030, 0x02000303},
+ {0x0000b034, 0x02000304},
+ {0x0000b038, 0x02000400},
+ {0x0000b03c, 0x02010300},
+ {0x0000b040, 0x02010301},
+ {0x0000b044, 0x02010302},
+ {0x0000b048, 0x02000500},
+ {0x0000b04c, 0x02010400},
+ {0x0000b050, 0x02020300},
+ {0x0000b054, 0x02020301},
+ {0x0000b058, 0x02020302},
+ {0x0000b05c, 0x02020303},
+ {0x0000b060, 0x02020400},
+ {0x0000b064, 0x02030300},
+ {0x0000b068, 0x02030301},
+ {0x0000b06c, 0x02030302},
+ {0x0000b070, 0x02030303},
+ {0x0000b074, 0x02030400},
+ {0x0000b078, 0x02040300},
+ {0x0000b07c, 0x02040301},
+ {0x0000b080, 0x02040302},
+ {0x0000b084, 0x02040303},
+ {0x0000b088, 0x02030500},
+ {0x0000b08c, 0x02040400},
+ {0x0000b090, 0x02050203},
+ {0x0000b094, 0x02050204},
+ {0x0000b098, 0x02050205},
+ {0x0000b09c, 0x02040500},
+ {0x0000b0a0, 0x02050301},
+ {0x0000b0a4, 0x02050302},
+ {0x0000b0a8, 0x02050303},
+ {0x0000b0ac, 0x02050400},
+ {0x0000b0b0, 0x02050401},
+ {0x0000b0b4, 0x02050402},
+ {0x0000b0b8, 0x02050403},
+ {0x0000b0bc, 0x02050500},
+ {0x0000b0c0, 0x02050501},
+ {0x0000b0c4, 0x02050502},
+ {0x0000b0c8, 0x02050503},
+ {0x0000b0cc, 0x02050504},
+ {0x0000b0d0, 0x02050600},
+ {0x0000b0d4, 0x02050601},
+ {0x0000b0d8, 0x02050602},
+ {0x0000b0dc, 0x02050603},
+ {0x0000b0e0, 0x02050604},
+ {0x0000b0e4, 0x02050700},
+ {0x0000b0e8, 0x02050701},
+ {0x0000b0ec, 0x02050702},
+ {0x0000b0f0, 0x02050703},
+ {0x0000b0f4, 0x02050704},
+ {0x0000b0f8, 0x02050705},
+ {0x0000b0fc, 0x02050708},
+ {0x0000b100, 0x02050709},
+ {0x0000b104, 0x0205070a},
+ {0x0000b108, 0x0205070b},
+ {0x0000b10c, 0x0205070c},
+ {0x0000b110, 0x0205070d},
+ {0x0000b114, 0x02050710},
+ {0x0000b118, 0x02050711},
+ {0x0000b11c, 0x02050712},
+ {0x0000b120, 0x02050713},
+ {0x0000b124, 0x02050714},
+ {0x0000b128, 0x02050715},
+ {0x0000b12c, 0x02050730},
+ {0x0000b130, 0x02050731},
+ {0x0000b134, 0x02050732},
+ {0x0000b138, 0x02050733},
+ {0x0000b13c, 0x02050734},
+ {0x0000b140, 0x02050735},
+ {0x0000b144, 0x02050750},
+ {0x0000b148, 0x02050751},
+ {0x0000b14c, 0x02050752},
+ {0x0000b150, 0x02050753},
+ {0x0000b154, 0x02050754},
+ {0x0000b158, 0x02050755},
+ {0x0000b15c, 0x02050770},
+ {0x0000b160, 0x02050771},
+ {0x0000b164, 0x02050772},
+ {0x0000b168, 0x02050773},
+ {0x0000b16c, 0x02050774},
+ {0x0000b170, 0x02050775},
+ {0x0000b174, 0x00000776},
+ {0x0000b178, 0x00000776},
+ {0x0000b17c, 0x00000776},
+ {0x0000b180, 0x00000776},
+ {0x0000b184, 0x00000776},
+ {0x0000b188, 0x00000776},
+ {0x0000b18c, 0x00000776},
+ {0x0000b190, 0x00000776},
+ {0x0000b194, 0x00000776},
+ {0x0000b198, 0x00000776},
+ {0x0000b19c, 0x00000776},
+ {0x0000b1a0, 0x00000776},
+ {0x0000b1a4, 0x00000776},
+ {0x0000b1a8, 0x00000776},
+ {0x0000b1ac, 0x00000776},
+ {0x0000b1b0, 0x00000776},
+ {0x0000b1b4, 0x00000776},
+ {0x0000b1b8, 0x00000776},
+ {0x0000b1bc, 0x00000776},
+ {0x0000b1c0, 0x00000776},
+ {0x0000b1c4, 0x00000776},
+ {0x0000b1c8, 0x00000776},
+ {0x0000b1cc, 0x00000776},
+ {0x0000b1d0, 0x00000776},
+ {0x0000b1d4, 0x00000776},
+ {0x0000b1d8, 0x00000776},
+ {0x0000b1dc, 0x00000776},
+ {0x0000b1e0, 0x00000776},
+ {0x0000b1e4, 0x00000776},
+ {0x0000b1e8, 0x00000776},
+ {0x0000b1ec, 0x00000776},
+ {0x0000b1f0, 0x00000776},
+ {0x0000b1f4, 0x00000776},
+ {0x0000b1f8, 0x00000776},
+ {0x0000b1fc, 0x00000776},
+};
+
+static const u32 ar9300_2p0_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9300_2p0_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
+};
+
+static const u32 ar9200_merlin_2p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00007800, 0x00040000},
+ {0x00007804, 0xdb005012},
+ {0x00007808, 0x04924914},
+ {0x0000780c, 0x21084210},
+ {0x00007810, 0x6d801300},
+ {0x00007814, 0x0019beff},
+ {0x00007818, 0x07e41000},
+ {0x0000781c, 0x00392000},
+ {0x00007820, 0x92592480},
+ {0x00007824, 0x00040000},
+ {0x00007828, 0xdb005012},
+ {0x0000782c, 0x04924914},
+ {0x00007830, 0x21084210},
+ {0x00007834, 0x6d801300},
+ {0x00007838, 0x0019beff},
+ {0x0000783c, 0x07e40000},
+ {0x00007840, 0x00392000},
+ {0x00007844, 0x92592480},
+ {0x00007848, 0x00100000},
+ {0x0000784c, 0x773f0567},
+ {0x00007850, 0x54214514},
+ {0x00007854, 0x12035828},
+ {0x00007858, 0x92592692},
+ {0x0000785c, 0x00000000},
+ {0x00007860, 0x56400000},
+ {0x00007864, 0x0a8e370e},
+ {0x00007868, 0xc0102850},
+ {0x0000786c, 0x812d4000},
+ {0x00007870, 0x807ec400},
+ {0x00007874, 0x001b6db0},
+ {0x00007878, 0x00376b63},
+ {0x0000787c, 0x06db6db6},
+ {0x00007880, 0x006d8000},
+ {0x00007884, 0xffeffffe},
+ {0x00007888, 0xffeffffe},
+ {0x0000788c, 0x00010000},
+ {0x00007890, 0x02060aeb},
+ {0x00007894, 0x5a108000},
+};
+
+static const u32 ar9300_2p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x00000044, 0x000000c4, 0x000000c4, 0x00000044},
+ {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x000037c0, 0x000037c4, 0x000037c4, 0x000037c0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+};
+
+static const u32 ar9300_2p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x52440bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e3c, 0xcf946222},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009e54, 0x00000000},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a22c, 0x01036a1e},
+ {0x0000a234, 0x10000fff},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000246},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x04000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x00000000},
+ {0x0000a610, 0x00000000},
+ {0x0000a614, 0x00000000},
+ {0x0000a618, 0x00000000},
+ {0x0000a61c, 0x00000000},
+ {0x0000a620, 0x00000000},
+ {0x0000a624, 0x00000000},
+ {0x0000a628, 0x00000000},
+ {0x0000a62c, 0x00000000},
+ {0x0000a630, 0x00000000},
+ {0x0000a634, 0x00000000},
+ {0x0000a638, 0x00000000},
+ {0x0000a63c, 0x00000000},
+ {0x0000a640, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00000637},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2dc, 0x00000000},
+ {0x0000b2e0, 0x00000000},
+ {0x0000b2e4, 0x00000000},
+ {0x0000b2e8, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b8d0, 0x004b6a8e},
+ {0x0000b8d4, 0x00000820},
+ {0x0000b8dc, 0x00000000},
+ {0x0000b8f0, 0x00000000},
+ {0x0000b8f4, 0x00000000},
+ {0x0000c2d0, 0x00000080},
+ {0x0000c2d4, 0x00000000},
+ {0x0000c2dc, 0x00000000},
+ {0x0000c2e0, 0x00000000},
+ {0x0000c2e4, 0x00000000},
+ {0x0000c2e8, 0x00000000},
+ {0x0000c2ec, 0x00000000},
+ {0x0000c2f0, 0x00000000},
+ {0x0000c2f4, 0x00000000},
+ {0x0000c2f8, 0x00000000},
+ {0x0000c408, 0x0e79e5c0},
+ {0x0000c40c, 0x00820820},
+ {0x0000c420, 0x00000000},
+};
+
+static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
+ {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
+ {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
+ {0x00016444, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
+ {0x00016448, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
+ {0x00016468, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
+ {0x00016844, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
+ {0x00016848, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
+ {0x00016868, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
+};
+
+static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016448, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016848, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Common_rx_gain_table_2p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300_2p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f424},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e848},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x98a00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9300Common_wo_xlna_rx_gain_table_2p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9300_2p0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x000040a4, 0x00a0c1c9},
+ {0x00007008, 0x00000000},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p0[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x08212e5e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9300PciePhy_clkreq_enable_L1_2p0[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x08253e5e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9300PciePhy_clkreq_disable_L1_2p0[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x08213e5e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+#endif /* INITVALS_9003_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
new file mode 100644
index 0000000..37ba374
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -0,0 +1,614 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "hw.h"
+#include "ar9003_mac.h"
+
+static void ar9003_hw_rx_enable(struct ath_hw *hw)
+{
+ REG_WRITE(hw, AR_CR, 0);
+}
+
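+/*
+ * Sum the descriptor info, link, buffer and control words, then fold the
+ * 32-bit sum into 16 bits to form the descriptor pointer checksum.
+ */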
+static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
+{
+ int checksum;
+
+ checksum = ads->info + ads->link
+ + ads->data0 + ads->ctl3
+ + ads->data1 + ads->ctl5
+ + ads->data2 + ads->ctl7
+ + ads->data3 + ads->ctl9;
+
+ return ((checksum & 0xffff) + (checksum >> 16)) & AR_TxPtrChkSum;
+}
+
+static void ar9003_hw_set_desc_link(void *ds, u32 ds_link)
+{
+ struct ar9003_txc *ads = ds;
+
+ ads->link = ds_link;
+ ads->ctl10 &= ~AR_TxPtrChkSum;
+ ads->ctl10 |= ar9003_calc_ptr_chksum(ads);
+}
+
+static void ar9003_hw_get_desc_link(void *ds, u32 **ds_link)
+{
+ struct ar9003_txc *ads = ds;
+
+ *ds_link = &ads->link;
+}
+
+static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+{
+ u32 isr = 0;
+ u32 mask2 = 0;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ u32 sync_cause = 0;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+ if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
+ == AR_RTC_STATUS_ON)
+ isr = REG_READ(ah, AR_ISR);
+ }
+
+ sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
+
+ *masked = 0;
+
+ if (!isr && !sync_cause)
+ return false;
+
+ if (isr) {
+ if (isr & AR_ISR_BCNMISC) {
+ u32 isr2;
+ isr2 = REG_READ(ah, AR_ISR_S2);
+
+ mask2 |= ((isr2 & AR_ISR_S2_TIM) >>
+ MAP_ISR_S2_TIM);
+ mask2 |= ((isr2 & AR_ISR_S2_DTIM) >>
+ MAP_ISR_S2_DTIM);
+ mask2 |= ((isr2 & AR_ISR_S2_DTIMSYNC) >>
+ MAP_ISR_S2_DTIMSYNC);
+ mask2 |= ((isr2 & AR_ISR_S2_CABEND) >>
+ MAP_ISR_S2_CABEND);
+ mask2 |= ((isr2 & AR_ISR_S2_GTT) <<
+ MAP_ISR_S2_GTT);
+ mask2 |= ((isr2 & AR_ISR_S2_CST) <<
+ MAP_ISR_S2_CST);
+ mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >>
+ MAP_ISR_S2_TSFOOR);
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S2, isr2);
+ isr &= ~AR_ISR_BCNMISC;
+ }
+ }
+
+ if ((pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED))
+ isr = REG_READ(ah, AR_ISR_RAC);
+
+ if (isr == 0xffffffff) {
+ *masked = 0;
+ return false;
+ }
+
+ *masked = isr & ATH9K_INT_COMMON;
+
+ if (ah->config.rx_intr_mitigation)
+ if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
+ *masked |= ATH9K_INT_RXLP;
+
+ if (ah->config.tx_intr_mitigation)
+ if (isr & (AR_ISR_TXMINTR | AR_ISR_TXINTM))
+ *masked |= ATH9K_INT_TX;
+
+ if (isr & (AR_ISR_LP_RXOK | AR_ISR_RXERR))
+ *masked |= ATH9K_INT_RXLP;
+
+ if (isr & AR_ISR_HP_RXOK)
+ *masked |= ATH9K_INT_RXHP;
+
+ if (isr & (AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL)) {
+ *masked |= ATH9K_INT_TX;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ u32 s0, s1;
+ s0 = REG_READ(ah, AR_ISR_S0);
+ REG_WRITE(ah, AR_ISR_S0, s0);
+ s1 = REG_READ(ah, AR_ISR_S1);
+ REG_WRITE(ah, AR_ISR_S1, s1);
+
+ isr &= ~(AR_ISR_TXOK | AR_ISR_TXERR |
+ AR_ISR_TXEOL);
+ }
+ }
+
+ if (isr & AR_ISR_GENTMR) {
+ u32 s5;
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
+ s5 = REG_READ(ah, AR_ISR_S5_S);
+ else
+ s5 = REG_READ(ah, AR_ISR_S5);
+
+ ah->intr_gen_timer_trigger =
+ MS(s5, AR_ISR_S5_GENTIMER_TRIG);
+
+ ah->intr_gen_timer_thresh =
+ MS(s5, AR_ISR_S5_GENTIMER_THRESH);
+
+ if (ah->intr_gen_timer_trigger)
+ *masked |= ATH9K_INT_GENTIMER;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S5, s5);
+ isr &= ~AR_ISR_GENTMR;
+ }
+
+ }
+
+ *masked |= mask2;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR, isr);
+
+ (void) REG_READ(ah, AR_ISR);
+ }
+ }
+
+ if (sync_cause) {
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
+ REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
+ REG_WRITE(ah, AR_RC, 0);
+ *masked |= ATH9K_INT_FATAL;
+ }
+
+ if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
+ ath_print(common, ATH_DBG_INTERRUPT,
+ "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
+
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
+ (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
+
+ }
+ return true;
+}
+
+static void ar9003_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
+ bool is_firstseg, bool is_lastseg,
+ const void *ds0, dma_addr_t buf_addr,
+ unsigned int qcu)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+ unsigned int descid = 0;
+
+ ads->info = (ATHEROS_VENDOR_ID << AR_DescId_S) |
+ (1 << AR_TxRxDesc_S) |
+ (1 << AR_CtrlStat_S) |
+ (qcu << AR_TxQcuNum_S) | 0x17;
+
+ ads->data0 = buf_addr;
+ ads->data1 = 0;
+ ads->data2 = 0;
+ ads->data3 = 0;
+
+ ads->ctl3 = (seglen << AR_BufLen_S);
+ ads->ctl3 &= AR_BufLen;
+
+ /* Fill in pointer checksum and descriptor id */
+ ads->ctl10 = ar9003_calc_ptr_chksum(ads);
+ ads->ctl10 |= (descid << AR_TxDescId_S);
+
+ if (is_firstseg) {
+ ads->ctl12 |= (is_lastseg ? 0 : AR_TxMore);
+ } else if (is_lastseg) {
+ ads->ctl11 = 0;
+ ads->ctl12 = 0;
+ ads->ctl13 = AR9003TXC_CONST(ds0)->ctl13;
+ ads->ctl14 = AR9003TXC_CONST(ds0)->ctl14;
+ } else {
+ /* XXX Intermediate descriptor in a multi-descriptor frame. */
+ ads->ctl11 = 0;
+ ads->ctl12 = AR_TxMore;
+ ads->ctl13 = 0;
+ ads->ctl14 = 0;
+ }
+}
+
+static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts)
+{
+ struct ar9003_txs *ads;
+
+ ads = &ah->ts_ring[ah->ts_tail];
+
+ if ((ads->status8 & AR_TxDone) == 0)
+ return -EINPROGRESS;
+
+ ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;
+
+ if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
+ (MS(ads->ds_info, AR_TxRxDesc) != 1)) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ "Tx Descriptor error %x\n", ads->ds_info);
+ memset(ads, 0, sizeof(*ads));
+ return -EIO;
+ }
+
+ ts->qid = MS(ads->ds_info, AR_TxQcuNum);
+ ts->desc_id = MS(ads->status1, AR_TxDescId);
+ ts->ts_seqnum = MS(ads->status8, AR_SeqNum);
+ ts->ts_tstamp = ads->status4;
+ ts->ts_status = 0;
+ ts->ts_flags = 0;
+
+ if (ads->status3 & AR_ExcessiveRetries)
+ ts->ts_status |= ATH9K_TXERR_XRETRY;
+ if (ads->status3 & AR_Filtered)
+ ts->ts_status |= ATH9K_TXERR_FILT;
+ if (ads->status3 & AR_FIFOUnderrun) {
+ ts->ts_status |= ATH9K_TXERR_FIFO;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->status8 & AR_TxOpExceeded)
+ ts->ts_status |= ATH9K_TXERR_XTXOP;
+ if (ads->status3 & AR_TxTimerExpired)
+ ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
+
+ if (ads->status3 & AR_DescCfgErr)
+ ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
+ if (ads->status3 & AR_TxDataUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->status3 & AR_TxDelimUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->status2 & AR_TxBaStatus) {
+ ts->ts_flags |= ATH9K_TX_BA;
+ ts->ba_low = ads->status5;
+ ts->ba_high = ads->status6;
+ }
+
+ ts->ts_rateindex = MS(ads->status8, AR_FinalTxIdx);
+
+ ts->ts_rssi = MS(ads->status7, AR_TxRSSICombined);
+ ts->ts_rssi_ctl0 = MS(ads->status2, AR_TxRSSIAnt00);
+ ts->ts_rssi_ctl1 = MS(ads->status2, AR_TxRSSIAnt01);
+ ts->ts_rssi_ctl2 = MS(ads->status2, AR_TxRSSIAnt02);
+ ts->ts_rssi_ext0 = MS(ads->status7, AR_TxRSSIAnt10);
+ ts->ts_rssi_ext1 = MS(ads->status7, AR_TxRSSIAnt11);
+ ts->ts_rssi_ext2 = MS(ads->status7, AR_TxRSSIAnt12);
+ ts->ts_shortretry = MS(ads->status3, AR_RTSFailCnt);
+ ts->ts_longretry = MS(ads->status3, AR_DataFailCnt);
+ ts->ts_virtcol = MS(ads->status3, AR_VirtRetryCnt);
+ ts->ts_antenna = 0;
+
+ ts->tid = MS(ads->status8, AR_TxTid);
+
+ memset(ads, 0, sizeof(*ads));
+
+ return 0;
+}
+
+static void ar9003_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
+ u32 pktlen, enum ath9k_pkt_type type, u32 txpower,
+ u32 keyIx, enum ath9k_key_type keyType, u32 flags)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ if (txpower > ah->txpower_limit)
+ txpower = ah->txpower_limit;
+
+ txpower += ah->txpower_indexoffset;
+ if (txpower > 63)
+ txpower = 63;
+
+ ads->ctl11 = (pktlen & AR_FrameLen)
+ | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(txpower, AR_XmitPower)
+ | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+ | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
+ | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
+ | (flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0);
+
+ ads->ctl12 =
+ (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
+ | SM(type, AR_FrameType)
+ | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
+ | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
+ | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
+
+ ads->ctl17 = SM(keyType, AR_EncrType) |
+ (flags & ATH9K_TXDESC_LDPC ? AR_LDPC : 0);
+ ads->ctl18 = 0;
+ ads->ctl19 = AR_Not_Sounding;
+
+ ads->ctl20 = 0;
+ ads->ctl21 = 0;
+ ads->ctl22 = 0;
+}
+
+static void ar9003_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
+ void *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+ struct ar9003_txc *last_ads = (struct ar9003_txc *) lastds;
+ u32 ctl11;
+
+ if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
+ ctl11 = ads->ctl11;
+
+ if (flags & ATH9K_TXDESC_RTSENA) {
+ ctl11 &= ~AR_CTSEnable;
+ ctl11 |= AR_RTSEnable;
+ } else {
+ ctl11 &= ~AR_RTSEnable;
+ ctl11 |= AR_CTSEnable;
+ }
+
+ ads->ctl11 = ctl11;
+ } else {
+ ads->ctl11 = (ads->ctl11 & ~(AR_RTSEnable | AR_CTSEnable));
+ }
+
+ ads->ctl13 = set11nTries(series, 0)
+ | set11nTries(series, 1)
+ | set11nTries(series, 2)
+ | set11nTries(series, 3)
+ | (durUpdateEn ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+ ads->ctl14 = set11nRate(series, 0)
+ | set11nRate(series, 1)
+ | set11nRate(series, 2)
+ | set11nRate(series, 3);
+
+ ads->ctl15 = set11nPktDurRTSCTS(series, 0)
+ | set11nPktDurRTSCTS(series, 1);
+
+ ads->ctl16 = set11nPktDurRTSCTS(series, 2)
+ | set11nPktDurRTSCTS(series, 3);
+
+ ads->ctl18 = set11nRateFlags(series, 0)
+ | set11nRateFlags(series, 1)
+ | set11nRateFlags(series, 2)
+ | set11nRateFlags(series, 3)
+ | SM(rtsctsRate, AR_RTSCTSRate);
+ ads->ctl19 = AR_Not_Sounding;
+
+ last_ads->ctl13 = ads->ctl13;
+ last_ads->ctl14 = ads->ctl14;
+}
+
+static void ar9003_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
+ u32 aggrLen)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);
+
+ ads->ctl17 &= ~AR_AggrLen;
+ ads->ctl17 |= SM(aggrLen, AR_AggrLen);
+}
+
+static void ar9003_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
+ u32 numDelims)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+ unsigned int ctl17;
+
+ ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);
+
+ /*
+ * We use a stack variable to manipulate ctl17 to reduce uncached
+ * read, modify, write cycles.
+ */
+ ctl17 = ads->ctl17;
+ ctl17 &= ~AR_PadDelim;
+ ctl17 |= SM(numDelims, AR_PadDelim);
+ ads->ctl17 = ctl17;
+}
+
+static void ar9003_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ ads->ctl12 |= AR_IsAggr;
+ ads->ctl12 &= ~AR_MoreAggr;
+ ads->ctl17 &= ~AR_PadDelim;
+}
+
+static void ar9003_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ ads->ctl12 &= (~AR_IsAggr & ~AR_MoreAggr);
+}
+
+static void ar9003_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
+ u32 burstDuration)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ ads->ctl13 &= ~AR_BurstDur;
+ ads->ctl13 |= SM(burstDuration, AR_BurstDur);
+
+}
+
+static void ar9003_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
+ u32 vmf)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ if (vmf)
+ ads->ctl11 |= AR_VirtMoreFrag;
+ else
+ ads->ctl11 &= ~AR_VirtMoreFrag;
+}
+
+void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
+{
+ struct ath_hw_ops *ops = ath9k_hw_ops(hw);
+
+ ops->rx_enable = ar9003_hw_rx_enable;
+ ops->set_desc_link = ar9003_hw_set_desc_link;
+ ops->get_desc_link = ar9003_hw_get_desc_link;
+ ops->get_isr = ar9003_hw_get_isr;
+ ops->fill_txdesc = ar9003_hw_fill_txdesc;
+ ops->proc_txdesc = ar9003_hw_proc_txdesc;
+ ops->set11n_txdesc = ar9003_hw_set11n_txdesc;
+ ops->set11n_ratescenario = ar9003_hw_set11n_ratescenario;
+ ops->set11n_aggr_first = ar9003_hw_set11n_aggr_first;
+ ops->set11n_aggr_middle = ar9003_hw_set11n_aggr_middle;
+ ops->set11n_aggr_last = ar9003_hw_set11n_aggr_last;
+ ops->clr11n_aggr = ar9003_hw_clr11n_aggr;
+ ops->set11n_burstduration = ar9003_hw_set11n_burstduration;
+ ops->set11n_virtualmorefrag = ar9003_hw_set11n_virtualmorefrag;
+}
+
+void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size)
+{
+ REG_WRITE(ah, AR_DATABUF_SIZE, buf_size & AR_DATABUF_SIZE_MASK);
+}
+EXPORT_SYMBOL(ath9k_hw_set_rx_bufsize);
+
+void ath9k_hw_addrxbuf_edma(struct ath_hw *ah, u32 rxdp,
+ enum ath9k_rx_qtype qtype)
+{
+ if (qtype == ATH9K_RX_QUEUE_HP)
+ REG_WRITE(ah, AR_HP_RXDP, rxdp);
+ else
+ REG_WRITE(ah, AR_LP_RXDP, rxdp);
+}
+EXPORT_SYMBOL(ath9k_hw_addrxbuf_edma);
+
+int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
+ void *buf_addr)
+{
+ struct ar9003_rxs *rxsp = (struct ar9003_rxs *) buf_addr;
+ unsigned int phyerr;
+
+ /* TODO: byte swap on big endian for ar9300_10 */
+
+ if ((rxsp->status11 & AR_RxDone) == 0)
+ return -EINPROGRESS;
+
+ if (MS(rxsp->ds_info, AR_DescId) != 0x168c)
+ return -EINVAL;
+
+ if ((rxsp->ds_info & (AR_TxRxDesc | AR_CtrlStat)) != 0)
+ return -EINPROGRESS;
+
+ if (!rxs)
+ return 0;
+
+ rxs->rs_status = 0;
+ rxs->rs_flags = 0;
+
+ rxs->rs_datalen = rxsp->status2 & AR_DataLen;
+ rxs->rs_tstamp = rxsp->status3;
+
+ /* XXX: Keycache */
+ rxs->rs_rssi = MS(rxsp->status5, AR_RxRSSICombined);
+ rxs->rs_rssi_ctl0 = MS(rxsp->status1, AR_RxRSSIAnt00);
+ rxs->rs_rssi_ctl1 = MS(rxsp->status1, AR_RxRSSIAnt01);
+ rxs->rs_rssi_ctl2 = MS(rxsp->status1, AR_RxRSSIAnt02);
+ rxs->rs_rssi_ext0 = MS(rxsp->status5, AR_RxRSSIAnt10);
+ rxs->rs_rssi_ext1 = MS(rxsp->status5, AR_RxRSSIAnt11);
+ rxs->rs_rssi_ext2 = MS(rxsp->status5, AR_RxRSSIAnt12);
+
+ if (rxsp->status11 & AR_RxKeyIdxValid)
+ rxs->rs_keyix = MS(rxsp->status11, AR_KeyIdx);
+ else
+ rxs->rs_keyix = ATH9K_RXKEYIX_INVALID;
+
+ rxs->rs_rate = MS(rxsp->status1, AR_RxRate);
+ rxs->rs_more = (rxsp->status2 & AR_RxMore) ? 1 : 0;
+
+ rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
+ rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
+ rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
+ rxs->rs_flags = (rxsp->status4 & AR_GI) ? ATH9K_RX_GI : 0;
+ rxs->rs_flags |= (rxsp->status4 & AR_2040) ? ATH9K_RX_2040 : 0;
+
+ rxs->evm0 = rxsp->status6;
+ rxs->evm1 = rxsp->status7;
+ rxs->evm2 = rxsp->status8;
+ rxs->evm3 = rxsp->status9;
+ rxs->evm4 = (rxsp->status10 & 0xffff);
+
+ if (rxsp->status11 & AR_PreDelimCRCErr)
+ rxs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
+
+ if (rxsp->status11 & AR_PostDelimCRCErr)
+ rxs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
+
+ if (rxsp->status11 & AR_DecryptBusyErr)
+ rxs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
+
+ if ((rxsp->status11 & AR_RxFrameOK) == 0) {
+ if (rxsp->status11 & AR_CRCErr) {
+ rxs->rs_status |= ATH9K_RXERR_CRC;
+ } else if (rxsp->status11 & AR_PHYErr) {
+ rxs->rs_status |= ATH9K_RXERR_PHY;
+ phyerr = MS(rxsp->status11, AR_PHYErrCode);
+ rxs->rs_phyerr = phyerr;
+ } else if (rxsp->status11 & AR_DecryptCRCErr) {
+ rxs->rs_status |= ATH9K_RXERR_DECRYPT;
+ } else if (rxsp->status11 & AR_MichaelErr) {
+ rxs->rs_status |= ATH9K_RXERR_MIC;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
+
+void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
+{
+ ah->ts_tail = 0;
+
+ memset((void *) ah->ts_ring, 0,
+ ah->ts_size * sizeof(struct ar9003_txs));
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ "TS Start 0x%x End 0x%x Virt %p, Size %d\n",
+ ah->ts_paddr_start, ah->ts_paddr_end,
+ ah->ts_ring, ah->ts_size);
+
+ REG_WRITE(ah, AR_Q_STATUS_RING_START, ah->ts_paddr_start);
+ REG_WRITE(ah, AR_Q_STATUS_RING_END, ah->ts_paddr_end);
+}
+
+void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
+ u32 ts_paddr_start,
+ u8 size)
+{
+
+ ah->ts_paddr_start = ts_paddr_start;
+ ah->ts_paddr_end = ts_paddr_start + (size * sizeof(struct ar9003_txs));
+ ah->ts_size = size;
+ ah->ts_ring = (struct ar9003_txs *) ts_start;
+
+ ath9k_hw_reset_txstatus_ring(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_setup_statusring);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
new file mode 100644
index 0000000..f17558b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_MAC_H
+#define AR9003_MAC_H
+
+#define AR_DescId 0xffff0000
+#define AR_DescId_S 16
+#define AR_CtrlStat 0x00004000
+#define AR_CtrlStat_S 14
+#define AR_TxRxDesc 0x00008000
+#define AR_TxRxDesc_S 15
+#define AR_TxQcuNum 0x00000f00
+#define AR_TxQcuNum_S 8
+
+#define AR_BufLen 0x0fff0000
+#define AR_BufLen_S 16
+
+#define AR_TxDescId 0xffff0000
+#define AR_TxDescId_S 16
+#define AR_TxPtrChkSum 0x0000ffff
+
+#define AR_TxTid 0xf0000000
+#define AR_TxTid_S 28
+
+#define AR_LowRxChain 0x00004000
+
+#define AR_Not_Sounding 0x20000000
+
+#define MAP_ISR_S2_CST 6
+#define MAP_ISR_S2_GTT 6
+#define MAP_ISR_S2_TIM 3
+#define MAP_ISR_S2_CABEND 0
+#define MAP_ISR_S2_DTIMSYNC 7
+#define MAP_ISR_S2_DTIM 7
+#define MAP_ISR_S2_TSFOOR 4
+
+#define AR9003TXC_CONST(_ds) ((const struct ar9003_txc *) _ds)
+
+struct ar9003_rxs {
+ u32 ds_info;
+ u32 status1;
+ u32 status2;
+ u32 status3;
+ u32 status4;
+ u32 status5;
+ u32 status6;
+ u32 status7;
+ u32 status8;
+ u32 status9;
+ u32 status10;
+ u32 status11;
+} __packed;
+
+/* Transmit Control Descriptor */
+struct ar9003_txc {
+ u32 info; /* descriptor information */
+ u32 link; /* link pointer */
+ u32 data0; /* data pointer to 1st buffer */
+ u32 ctl3; /* DMA control 3 */
+ u32 data1; /* data pointer to 2nd buffer */
+ u32 ctl5; /* DMA control 5 */
+ u32 data2; /* data pointer to 3rd buffer */
+ u32 ctl7; /* DMA control 7 */
+ u32 data3; /* data pointer to 4th buffer */
+ u32 ctl9; /* DMA control 9 */
+ u32 ctl10; /* DMA control 10 */
+ u32 ctl11; /* DMA control 11 */
+ u32 ctl12; /* DMA control 12 */
+ u32 ctl13; /* DMA control 13 */
+ u32 ctl14; /* DMA control 14 */
+ u32 ctl15; /* DMA control 15 */
+ u32 ctl16; /* DMA control 16 */
+ u32 ctl17; /* DMA control 17 */
+ u32 ctl18; /* DMA control 18 */
+ u32 ctl19; /* DMA control 19 */
+ u32 ctl20; /* DMA control 20 */
+ u32 ctl21; /* DMA control 21 */
+ u32 ctl22; /* DMA control 22 */
+ u32 pad[9]; /* pad to cache line (128 bytes/32 dwords) */
+} __packed;
+
+struct ar9003_txs {
+ u32 ds_info;
+ u32 status1;
+ u32 status2;
+ u32 status3;
+ u32 status4;
+ u32 status5;
+ u32 status6;
+ u32 status7;
+ u32 status8;
+} __packed;
+
+void ar9003_hw_attach_mac_ops(struct ath_hw *hw);
+void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size);
+void ath9k_hw_addrxbuf_edma(struct ath_hw *ah, u32 rxdp,
+ enum ath9k_rx_qtype qtype);
+
+int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah,
+ struct ath_rx_status *rxs,
+ void *buf_addr);
+void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah);
+void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
+ u32 ts_paddr_start,
+ u8 size);
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
new file mode 100644
index 0000000..80431a2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -0,0 +1,1134 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_phy.h"
+
+/**
+ * ar9003_hw_set_channel - set channel on single-chip device
+ * @ah: atheros hardware structure
+ * @chan: channel to tune to
+ *
+ * This is the function to change channel on single-chip devices, that is,
+ * all devices after ar9280.
+ *
+ * This function takes the channel value in MHz and sets the hardware
+ * channel value. Assumes writes have been enabled to the analog bus.
+ *
+ * Actual Expression,
+ *
+ * For 2GHz channel,
+ * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ *
+ * For 5GHz channel,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
+ * (freq_ref = 40MHz/(24>>amodeRefSel))
+ *
+ * For 5GHz channels which are 5MHz spaced,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
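+ *
+ * Worked example for the 2 GHz case (illustrative, derived from the formula
+ * above): at 2412 MHz with freq_ref = 40 MHz,
+ * chansel + chanfrac/2^17 = 2412 / ((3/4) * 40) = 80.4, so chansel = 80 and
+ * chanfrac = round(0.4 * 2^17) = 52429.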
+ */
+static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u16 bMode, fracMode = 0, aModeRefSel = 0;
+ u32 freq, channelSel = 0, reg32 = 0;
+ struct chan_centers centers;
+ int loadSynthChannel;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ if (freq < 4800) { /* 2 GHz, fractional mode */
+ channelSel = CHANSEL_2G(freq);
+ /* Set to 2G mode */
+ bMode = 1;
+ } else {
+ channelSel = CHANSEL_5G(freq);
+ /* Doubler is ON, so, divide channelSel by 2. */
+ channelSel >>= 1;
+ /* Set to 5G mode */
+ bMode = 0;
+ }
+
+ /* Enable fractional mode for all channels */
+ fracMode = 1;
+ aModeRefSel = 0;
+ loadSynthChannel = 0;
+
+ reg32 = (bMode << 29);
+ REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
+
+ /* Enable Long shift Select for Synthesizer */
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_SYNTH4,
+ AR_PHY_SYNTH4_LONG_SHIFT_SELECT, 1);
+
+ /* Program Synth. setting */
+ reg32 = (channelSel << 2) | (fracMode << 30) |
+ (aModeRefSel << 28) | (loadSynthChannel << 31);
+ REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
+
+ /* Toggle Load Synth channel bit */
+ loadSynthChannel = 1;
+ reg32 = (channelSel << 2) | (fracMode << 30) |
+ (aModeRefSel << 28) | (loadSynthChannel << 31);
+ REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
+
+ ah->curchan = chan;
+ ah->curchan_rad_index = -1;
+
+ return 0;
+}
+
+/**
+ * ar9003_hw_spur_mitigate_mrc_cck - convert baseband spur frequency
+ * @ah: atheros hardware structure
+ * @chan: channel being configured
+ *
+ * For single-chip solutions. Converts to baseband spur frequency given the
+ * input channel frequency and computes register settings below.
+ *
+ * Spur mitigation for MRC CCK.
+ */
+static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 spur_freq[4] = { 2420, 2440, 2464, 2480 };
+ int cur_bb_spur, negative = 0, cck_spur_freq;
+ int i;
+
+ /*
+ * Need to verify range +/- 10 MHz in control channel, otherwise spur
+ * is out-of-band and can be ignored.
+ */
+
+ for (i = 0; i < 4; i++) {
+ negative = 0;
+ cur_bb_spur = spur_freq[i] - chan->channel;
+
+ if (cur_bb_spur < 0) {
+ negative = 1;
+ cur_bb_spur = -cur_bb_spur;
+ }
+ if (cur_bb_spur < 10) {
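+ /*
+ * The divisor below appears to be the 11 MHz CCK chip rate, so the
+ * shift by 19 expresses the spur offset as a Q19 fraction of that
+ * rate (interpretive note; the chip-rate reading is an assumption).
+ */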
+ cck_spur_freq = (int)((cur_bb_spur << 19) / 11);
+
+ if (negative == 1)
+ cck_spur_freq = -cck_spur_freq;
+
+ cck_spur_freq = cck_spur_freq & 0xfffff;
+
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_YCOK_MAX, 0x7);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR, 0x7f);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE,
+ 0x2);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT,
+ 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ,
+ cck_spur_freq);
+
+ return;
+ }
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_YCOK_MAX, 0x5);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT, 0x0);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ, 0x0);
+}
+
+/* Clean all spur register fields */
+static void ar9003_hw_spur_ofdm_clear(struct ath_hw *ah)
+{
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_FREQ_SD, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 0);
+
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0);
+}
+
+static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
+ int freq_offset,
+ int spur_freq_sd,
+ int spur_delta_phase,
+ int spur_subchannel_sd)
+{
+ int mask_index = 0;
+
+ /* OFDM Spur mitigation */
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_FREQ_SD, spur_freq_sd);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE, spur_delta_phase);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, spur_subchannel_sd);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_SPUR_RSSI_THRESH, 34);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 1);
+
+ if (REG_READ_FIELD(ah, AR_PHY_MODE,
+ AR_PHY_MODE_DYNAMIC) == 0x1)
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 1);
+
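+ /*
+ * The expression below maps the spur offset in MHz to bin units of
+ * 16/5 per MHz, which matches the 312.5 kHz OFDM subcarrier spacing
+ * (interpretive note).
+ */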
+ mask_index = (freq_offset << 4) / 5;
+ if (mask_index < 0)
+ mask_index = mask_index - 1;
+
+ mask_index = mask_index & 0x7f;
+
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0xc);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0xc);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff);
+}
+
+static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ int freq_offset)
+{
+ int spur_freq_sd = 0;
+ int spur_subchannel_sd = 0;
+ int spur_delta_phase = 0;
+
+ if (IS_CHAN_HT40(chan)) {
+ if (freq_offset < 0) {
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
+ spur_subchannel_sd = 1;
+ else
+ spur_subchannel_sd = 0;
+
+ spur_freq_sd = ((freq_offset + 10) << 9) / 11;
+
+ } else {
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
+ spur_subchannel_sd = 0;
+ else
+ spur_subchannel_sd = 1;
+
+ spur_freq_sd = ((freq_offset - 10) << 9) / 11;
+
+ }
+
+ spur_delta_phase = (freq_offset << 17) / 5;
+
+ } else {
+ spur_subchannel_sd = 0;
+ spur_freq_sd = (freq_offset << 9) / 11;
+ spur_delta_phase = (freq_offset << 18) / 5;
+ }
+
+ spur_freq_sd = spur_freq_sd & 0x3ff;
+ spur_delta_phase = spur_delta_phase & 0xfffff;
+
+ ar9003_hw_spur_ofdm(ah,
+ freq_offset,
+ spur_freq_sd,
+ spur_delta_phase,
+ spur_subchannel_sd);
+}
+
+/* Spur mitigation for OFDM */
+static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int synth_freq;
+ int range = 10;
+ int freq_offset = 0;
+ int mode;
+ u8 *spurChansPtr;
+ unsigned int i;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (IS_CHAN_5GHZ(chan)) {
+ spurChansPtr = &(eep->modalHeader5G.spurChans[0]);
+ mode = 0;
+ } else {
+ spurChansPtr = &(eep->modalHeader2G.spurChans[0]);
+ mode = 1;
+ }
+
+ if (spurChansPtr[0] == 0)
+ return; /* No spur in the mode */
+
+ if (IS_CHAN_HT40(chan)) {
+ range = 19;
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
+ synth_freq = chan->channel - 10;
+ else
+ synth_freq = chan->channel + 10;
+ } else {
+ range = 10;
+ synth_freq = chan->channel;
+ }
+
+ ar9003_hw_spur_ofdm_clear(ah);
+
+ for (i = 0; i < 5 && spurChansPtr[i]; i++) {
+ freq_offset = FBIN2FREQ(spurChansPtr[i], mode) - synth_freq;
+ if (abs(freq_offset) < range) {
+ ar9003_hw_spur_ofdm_work(ah, chan, freq_offset);
+ break;
+ }
+ }
+}
+
+static void ar9003_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ar9003_hw_spur_mitigate_mrc_cck(ah, chan);
+ ar9003_hw_spur_mitigate_ofdm(ah, chan);
+}
+
+static u32 ar9003_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = SM(0x5, AR_RTC_9300_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9300_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9300_PLL_CLKSEL);
+
+ pll |= SM(0x2c, AR_RTC_9300_PLL_DIV);
+
+ return pll;
+}
+
+static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 phymode;
+ u32 enableDacFifo = 0;
+
+ enableDacFifo =
+ (REG_READ(ah, AR_PHY_GEN_CTRL) & AR_PHY_GC_ENABLE_DAC_FIFO);
+
+ /* Enable 11n HT, 20 MHz */
+ phymode = AR_PHY_GC_HT_EN | AR_PHY_GC_SINGLE_HT_LTF1 | AR_PHY_GC_WALSH |
+ AR_PHY_GC_SHORT_GI_40 | enableDacFifo;
+
+ /* Configure baseband for dynamic 20/40 operation */
+ if (IS_CHAN_HT40(chan)) {
+ phymode |= AR_PHY_GC_DYN2040_EN;
+ /* Configure control (primary) channel at +-10MHz */
+ if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
+ (chan->chanmode == CHANNEL_G_HT40PLUS))
+ phymode |= AR_PHY_GC_DYN2040_PRI_CH;
+
+ }
+
+ /* make sure we preserve INI settings */
+ phymode |= REG_READ(ah, AR_PHY_GEN_CTRL);
+ /* turn off Green Field detection for STA for now */
+ phymode &= ~AR_PHY_GC_GF_DETECT_EN;
+
+ REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode);
+
+ /* Configure MAC for 20/40 operation */
+ ath9k_hw_set11nmac2040(ah);
+
+ /* global transmit timeout (25 TUs default)*/
+ REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
+ /* carrier sense timeout */
+ REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
+}
+
+static void ar9003_hw_init_bb(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 synthDelay;
+
+ /*
+ * Wait for the frequency synth to settle (synth goes on
+ * via AR_PHY_ACTIVE_EN). Read the phy active delay register.
+ * Value is in 100ns increments.
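+ * Dividing by 10 below converts those 100ns units to microseconds
+ * for udelay(); the CCK path uses a different scale factor.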
+ */
+ synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_B(chan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ /* Activate the PHY (includes baseband activate + synthesizer on) */
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ /*
+ * There is an issue if the AP starts the calibration before
+ * the baseband timeout completes. This could result in
+ * rx_clear falsely triggering. As a workaround we delay an
+ * extra BASE_ACTIVATE_DELAY usecs to ensure this condition
+ * does not happen.
+ */
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+}
+
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
+{
+ switch (rx) {
+ case 0x5:
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
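+ /* fall through */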
+ case 0x3:
+ case 0x1:
+ case 0x2:
+ case 0x7:
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
+ break;
+ default:
+ break;
+ }
+
+ REG_WRITE(ah, AR_SELFGEN_MASK, tx);
+ if (tx == 0x5) {
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+ }
+}
+
+/*
+ * Override INI values with chip specific configuration.
+ */
+static void ar9003_hw_override_ini(struct ath_hw *ah)
+{
+ u32 val;
+
+ /*
+ * Set the RX_ABORT and RX_DIS bits and clear them only after
+ * RXE is set for MAC. This prevents frames with
+ * corrupted descriptor status.
+ */
+ REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+
+ /*
+ * For AR9280 and above, there is a new feature that allows
+ * Multicast search based on both MAC Address and Key ID. By default,
+ * this feature is enabled. But since the driver is not using this
+ * feature, we switch it off; otherwise multicast search based on
+ * MAC addr only will fail.
+ */
+ val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
+ REG_WRITE(ah, AR_PCU_MISC_MODE2,
+ val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE);
+}
+
+static void ar9003_hw_prog_ini(struct ath_hw *ah,
+ struct ar5416IniArray *iniArr,
+ int column)
+{
+ unsigned int i, regWrites = 0;
+
+ /* New INI format: Array may be undefined (pre, core, post arrays) */
+ if (!iniArr->ia_array)
+ return;
+
+ /*
+ * New INI format: Pre, core, and post arrays for a given subsystem
+ * may be modal (> 2 columns) or non-modal (2 columns). Determine if
+ * the array is non-modal and force the column to 1.
+ */
+ if (column >= iniArr->ia_columns)
+ column = 1;
+
+ for (i = 0; i < iniArr->ia_rows; i++) {
+ u32 reg = INI_RA(iniArr, i, 0);
+ u32 val = INI_RA(iniArr, i, column);
+
+ REG_WRITE(ah, reg, val);
+
+ /*
+ * Determine if this is a shift register value, and insert the
+ * configured delay if so.
+ */
+ if (reg >= 0x16000 && reg < 0x17000
+ && ah->config.analog_shiftreg)
+ udelay(100);
+
+ DO_DELAY(regWrites);
+ }
+}
+
+static int ar9003_hw_process_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+ unsigned int regWrites = 0, i;
+ struct ieee80211_channel *channel = chan->chan;
+ u32 modesIndex, freqIndex;
+
+ switch (chan->chanmode) {
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ modesIndex = 1;
+ freqIndex = 1;
+ break;
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ modesIndex = 2;
+ freqIndex = 1;
+ break;
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_B:
+ modesIndex = 4;
+ freqIndex = 2;
+ break;
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ modesIndex = 3;
+ freqIndex = 2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ATH_INI_NUM_SPLIT; i++) {
+ ar9003_hw_prog_ini(ah, &ah->iniSOC[i], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniBB[i], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniRadio[i], modesIndex);
+ }
+
+ REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+
+ /*
+ * For 5GHz channels requiring Fast Clock, apply
+ * different modal values.
+ */
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ REG_WRITE_ARRAY(&ah->iniModesAdditional,
+ modesIndex, regWrites);
+
+ ar9003_hw_override_ini(ah);
+ ar9003_hw_set_channel_regs(ah, chan);
+ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+
+ /* Set TX power */
+ ah->eep_ops->set_txpower(ah, chan,
+ ath9k_regd_get_ctl(regulatory, chan),
+ channel->max_antenna_gain * 2,
+ channel->max_power * 2,
+ min((u32) MAX_RATE_POWER,
+ (u32) regulatory->power_limit));
+
+ return 0;
+}
+
+static void ar9003_hw_set_rfmode(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 rfMode = 0;
+
+ if (chan == NULL)
+ return;
+
+ rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
+ ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
+
+ REG_WRITE(ah, AR_PHY_MODE, rfMode);
+}
+
+static void ar9003_hw_mark_phy_inactive(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+}
+
+static void ar9003_hw_set_delta_slope(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 coef_scaled, ds_coef_exp, ds_coef_man;
+ u32 clockMhzScaled = 0x64000000;
+ struct chan_centers centers;
+
+ /*
+ * Half and quarter rate channels divide the scaled clock by 2 or 4
+ * to scale for the selected channel bandwidth.
+ */
+ if (IS_CHAN_HALF_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 1;
+ else if (IS_CHAN_QUARTER_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 2;
+
+ /*
+ * ALGO -> coef = 1e8/fcarrier*fclock/40;
+ * scaled coef to provide precision for this floating calculation
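+ * (0x64000000 is 100 << 24, so with the default 40 MHz clock the scaled
+ * coefficient works out to 100/f_MHz in 24-bit fixed point.)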
+ */
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ coef_scaled = clockMhzScaled / centers.synth_center;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
+
+ /*
+ * For Short GI,
+ * scaled coeff is 9/10 that of normal coeff
+ */
+ coef_scaled = (9 * coef_scaled) / 10;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ /* for short gi */
+ REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA,
+ AR_PHY_SGI_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA,
+ AR_PHY_SGI_DSC_EXP, ds_coef_exp);
+}
+
+static bool ar9003_hw_rfbus_req(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
+ return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
+ AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
+}
+
+/*
+ * Wait for the frequency synth to settle (synth goes on via PHY_ACTIVE_EN).
+ * Read the phy active delay register. Value is in 100ns increments.
+ */
+static void ar9003_hw_rfbus_done(struct ath_hw *ah)
+{
+ u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_B(ah->curchan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
+}
+
+/*
+ * Set the interrupt and GPIO values so the ISR can disable RF
+ * on a switch signal. Assumes GPIO port and interrupt polarity
+ * are set prior to call.
+ */
+static void ar9003_hw_enable_rfkill(struct ath_hw *ah)
+{
+ /* Connect rfsilent_bb_l to baseband */
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
+ /* Set input mux for rfsilent_bb_l to GPIO #0 */
+ REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
+ AR_GPIO_INPUT_MUX2_RFSILENT);
+
+ /*
+ * Configure the desired GPIO port for input and
+ * enable baseband rf silence.
+ */
+ ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
+ REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
+}
+
+static void ar9003_hw_set_diversity(struct ath_hw *ah, bool value)
+{
+ u32 v = REG_READ(ah, AR_PHY_CCK_DETECT);
+ if (value)
+ v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ else
+ v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
+}
+
+static bool ar9003_hw_ani_control(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd, int param)
+{
+ struct ar5416AniState *aniState = ah->curani;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ switch (cmd & ah->ani_function) {
+ case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
+ return false;
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
+ AR_PHY_DESIRED_SZ_TOT_DES,
+ ah->totalSizeDesired[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC,
+ AR_PHY_AGC_COARSE_LOW,
+ ah->coarse_low[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC,
+ AR_PHY_AGC_COARSE_HIGH,
+ ah->coarse_high[level]);
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRPWR, ah->firpwr[level]);
+
+ if (level > aniState->noiseImmunityLevel)
+ ah->stats.ast_ani_niup++;
+ else if (level < aniState->noiseImmunityLevel)
+ ah->stats.ast_ani_nidown++;
+ aniState->noiseImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
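+		/*
+		 * Each pair below holds the threshold used with weak signal
+		 * detection disabled (index 0) and enabled (index 1).
+		 */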
+ const int m1ThreshLow[] = { 127, 50 };
+ const int m2ThreshLow[] = { 127, 40 };
+ const int m1Thresh[] = { 127, 0x4d };
+ const int m2Thresh[] = { 127, 0x40 };
+ const int m2CountThr[] = { 31, 16 };
+ const int m2CountThrLow[] = { 63, 48 };
+ u32 on = param ? 1 : 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
+ m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
+ m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M1_THRESH, m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2_THRESH, m2Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2COUNT_THR, m2CountThr[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
+ m2CountThrLow[on]);
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH, m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH, m2Thresh[on]);
+
+ if (on)
+ REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+
+ if (!on != aniState->ofdmWeakSigDetectOff) {
+ if (on)
+ ah->stats.ast_ani_ofdmon++;
+ else
+ ah->stats.ast_ani_ofdmoff++;
+ aniState->ofdmWeakSigDetectOff = !on;
+ }
+ break;
+ }
+ case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
+ const int weakSigThrCck[] = { 8, 6 };
+ u32 high = param ? 1 : 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
+ AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
+ weakSigThrCck[high]);
+ if (high != aniState->cckWeakSigThreshold) {
+ if (high)
+ ah->stats.ast_ani_cckhigh++;
+ else
+ ah->stats.ast_ani_ccklow++;
+ aniState->cckWeakSigThreshold = high;
+ }
+ break;
+ }
+ case ATH9K_ANI_FIRSTEP_LEVEL:{
+ const int firstep[] = { 0, 4, 8 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(firstep)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(firstep));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP,
+ firstep[level]);
+ if (level > aniState->firstepLevel)
+ ah->stats.ast_ani_stepup++;
+ else if (level < aniState->firstepLevel)
+ ah->stats.ast_ani_stepdown++;
+ aniState->firstepLevel = level;
+ break;
+ }
+ case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
+ const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(cycpwrThr1)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(cycpwrThr1));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1,
+ cycpwrThr1[level]);
+ if (level > aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurup++;
+ else if (level < aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurdown++;
+ aniState->spurImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_PRESENT:
+ break;
+ default:
+ ath_print(common, ATH_DBG_ANI,
+ "invalid cmd %u\n", cmd);
+ return false;
+ }
+
+ ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
+ ath_print(common, ATH_DBG_ANI,
+ "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
+ "ofdmWeakSigDetectOff=%d\n",
+ aniState->noiseImmunityLevel,
+ aniState->spurImmunityLevel,
+ !aniState->ofdmWeakSigDetectOff);
+ ath_print(common, ATH_DBG_ANI,
+ "cckWeakSigThreshold=%d, "
+ "firstepLevel=%d, listenTime=%d\n",
+ aniState->cckWeakSigThreshold,
+ aniState->firstepLevel,
+ aniState->listenTime);
+ ath_print(common, ATH_DBG_ANI,
+ "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
+ aniState->cycleCount,
+ aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
+
+ return true;
+}
+
+static void ar9003_hw_nf_sanitize_2g(struct ath_hw *ah, s16 *nf)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (*nf > ah->nf_2g_max) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "2 GHz NF (%d) > MAX (%d), "
+ "correcting to MAX",
+ *nf, ah->nf_2g_max);
+ *nf = ah->nf_2g_max;
+ } else if (*nf < ah->nf_2g_min) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "2 GHz NF (%d) < MIN (%d), "
+ "correcting to MIN",
+ *nf, ah->nf_2g_min);
+ *nf = ah->nf_2g_min;
+ }
+}
+
+static void ar9003_hw_nf_sanitize_5g(struct ath_hw *ah, s16 *nf)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (*nf > ah->nf_5g_max) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "5 GHz NF (%d) > MAX (%d), "
+ "correcting to MAX",
+ *nf, ah->nf_5g_max);
+ *nf = ah->nf_5g_max;
+ } else if (*nf < ah->nf_5g_min) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "5 GHz NF (%d) < MIN (%d), "
+ "correcting to MIN",
+ *nf, ah->nf_5g_min);
+ *nf = ah->nf_5g_min;
+ }
+}
+
+static void ar9003_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
+{
+ if (IS_CHAN_2GHZ(ah->curchan))
+ ar9003_hw_nf_sanitize_2g(ah, nf);
+ else
+ ar9003_hw_nf_sanitize_5g(ah, nf);
+}
+
+static void ar9003_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int16_t nf;
+
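+	/*
+	 * Each minCCApwr field is a 9-bit two's complement value; sign
+	 * extend it manually before sanitizing and recording the reading.
+	 */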
+ nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 0] is %d\n", nf);
+ nfarray[0] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 1] is %d\n", nf);
+ nfarray[1] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 2] is %d\n", nf);
+ nfarray[2] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 0] is %d\n", nf);
+ nfarray[3] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 1] is %d\n", nf);
+ nfarray[4] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 2] is %d\n", nf);
+ nfarray[5] = nf;
+}
+
+void ar9003_hw_set_nf_limits(struct ath_hw *ah)
+{
+ ah->nf_2g_max = AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ;
+ ah->nf_2g_min = AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ;
+ ah->nf_5g_max = AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ;
+ ah->nf_5g_min = AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ;
+}
+
+/*
+ * Find out which of the RX chains are enabled
+ */
+static u32 ar9003_hw_get_rx_chainmask(struct ath_hw *ah)
+{
+ u32 chain = REG_READ(ah, AR_PHY_RX_CHAINMASK);
+	/*
+	 * The bits [2:0] indicate the rx chain mask and are to be
+	 * interpreted as follows:
+	 * 00x => only chain 0 is enabled
+	 * 01x => chains 1 and 0 are enabled
+	 * 1xx => chains 2, 1 and 0 are enabled
+	 */
+ return chain & 0x7;
+}
+
+static void ar9003_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath9k_nfcal_hist *h;
+ unsigned i, j;
+ int32_t val;
+ const u32 ar9300_cca_regs[6] = {
+ AR_PHY_CCA_0,
+ AR_PHY_CCA_1,
+ AR_PHY_CCA_2,
+ AR_PHY_EXT_CCA,
+ AR_PHY_EXT_CCA_1,
+ AR_PHY_EXT_CCA_2,
+ };
+ u8 chainmask, rx_chain_status;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ rx_chain_status = ar9003_hw_get_rx_chainmask(ah);
+
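+	/*
+	 * 0x3F selects all six NF entries: the control-channel CCA
+	 * registers for chains 0-2 plus their extension-channel
+	 * counterparts.
+	 */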
+ chainmask = 0x3F;
+ h = ah->nfCalHist;
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar9300_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
+ REG_WRITE(ah, ar9300_cca_regs[i], val);
+ }
+ }
+
+ /*
+ * Load software filtered NF value into baseband internal minCCApwr
+ * variable.
+ */
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_ENABLE_NF);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+
+	/*
+	 * Wait for the load to complete; it should be fast, a few tens of
+	 * microseconds. The maximum delay was raised from the original
+	 * 250us to 10000us, since 250us often resulted in an NF load
+	 * timeout and caused a deaf condition during stress testing
+	 * (12/12/2009).
+	 */
+ for (j = 0; j < 1000; j++) {
+ if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ AR_PHY_AGC_CONTROL_NF) == 0)
+ break;
+ udelay(10);
+ }
+
+	/*
+	 * If we timed out waiting for the noise floor to load, it is probably
+	 * due to an in-progress rx. Simply return here and allow the load
+	 * plenty of time to complete before the next calibration interval.
+	 * We need to avoid trying to load -50 (which happens below) while the
+	 * previous load is still in progress, as this can cause rx deafness.
+	 * Instead, by returning here the baseband NF cal will simply be capped
+	 * by our present noise floor until the next calibration timer fires.
+	 */
+ if (j == 1000) {
+ ath_print(common, ATH_DBG_ANY, "Timeout while waiting for nf "
+ "to load: AR_PHY_AGC_CONTROL=0x%x\n",
+ REG_READ(ah, AR_PHY_AGC_CONTROL));
+ return;
+ }
+
+	/*
+	 * Restore the maxCCAPower register parameter again so that we're not
+	 * capped by the median we just loaded. This will be the initial (and
+	 * maximum) value for the next noise floor calibration the baseband
+	 * performs.
+	 */
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar9300_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (-50) << 1) & 0x1ff);
+ REG_WRITE(ah, ar9300_cca_regs[i], val);
+ }
+ }
+}
+
+void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ priv_ops->rf_set_freq = ar9003_hw_set_channel;
+ priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate;
+ priv_ops->compute_pll_control = ar9003_hw_compute_pll_control;
+ priv_ops->set_channel_regs = ar9003_hw_set_channel_regs;
+ priv_ops->init_bb = ar9003_hw_init_bb;
+ priv_ops->process_ini = ar9003_hw_process_ini;
+ priv_ops->set_rfmode = ar9003_hw_set_rfmode;
+ priv_ops->mark_phy_inactive = ar9003_hw_mark_phy_inactive;
+ priv_ops->set_delta_slope = ar9003_hw_set_delta_slope;
+ priv_ops->rfbus_req = ar9003_hw_rfbus_req;
+ priv_ops->rfbus_done = ar9003_hw_rfbus_done;
+ priv_ops->enable_rfkill = ar9003_hw_enable_rfkill;
+ priv_ops->set_diversity = ar9003_hw_set_diversity;
+ priv_ops->ani_control = ar9003_hw_ani_control;
+ priv_ops->do_getnf = ar9003_hw_do_getnf;
+ priv_ops->loadnf = ar9003_hw_loadnf;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
new file mode 100644
index 0000000..f08cc8b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -0,0 +1,847 @@
+/*
+ * Copyright (c) 2002-2010 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_PHY_H
+#define AR9003_PHY_H
+
+/*
+ * Channel Register Map
+ */
+#define AR_CHAN_BASE 0x9800
+
+#define AR_PHY_TIMING1 (AR_CHAN_BASE + 0x0)
+#define AR_PHY_TIMING2 (AR_CHAN_BASE + 0x4)
+#define AR_PHY_TIMING3 (AR_CHAN_BASE + 0x8)
+#define AR_PHY_TIMING4 (AR_CHAN_BASE + 0xc)
+#define AR_PHY_TIMING5 (AR_CHAN_BASE + 0x10)
+#define AR_PHY_TIMING6 (AR_CHAN_BASE + 0x14)
+#define AR_PHY_TIMING11 (AR_CHAN_BASE + 0x18)
+#define AR_PHY_SPUR_REG (AR_CHAN_BASE + 0x1c)
+#define AR_PHY_RX_IQCAL_CORR_B0 (AR_CHAN_BASE + 0xdc)
+#define AR_PHY_TX_IQCAL_CONTROL_3 (AR_CHAN_BASE + 0xb0)
+
+#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
+#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
+
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
+
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC 0x40000000
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC_S 30
+
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR 0x80000000
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR_S 31
+
+#define AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT 0x4000000
+#define AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT_S 26
+
+#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000 /* bins move with freq offset */
+#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM_S 17
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x000000FF
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
+#define AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI 0x00000100
+#define AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI_S 8
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL 0x03FC0000
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
+
+#define AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN 0x20000000
+#define AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN_S 29
+
+#define AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN 0x80000000
+#define AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN_S 31
+
+#define AR_PHY_FIND_SIG_LOW (AR_CHAN_BASE + 0x20)
+
+#define AR_PHY_SFCORR (AR_CHAN_BASE + 0x24)
+#define AR_PHY_SFCORR_LOW (AR_CHAN_BASE + 0x28)
+#define AR_PHY_SFCORR_EXT (AR_CHAN_BASE + 0x2c)
+
+#define AR_PHY_EXT_CCA (AR_CHAN_BASE + 0x30)
+#define AR_PHY_RADAR_0 (AR_CHAN_BASE + 0x34)
+#define AR_PHY_RADAR_1 (AR_CHAN_BASE + 0x38)
+#define AR_PHY_RADAR_EXT (AR_CHAN_BASE + 0x3c)
+#define AR_PHY_MULTICHAIN_CTRL (AR_CHAN_BASE + 0x80)
+#define AR_PHY_PERCHAIN_CSD (AR_CHAN_BASE + 0x84)
+
+#define AR_PHY_TX_PHASE_RAMP_0 (AR_CHAN_BASE + 0xd0)
+#define AR_PHY_ADC_GAIN_DC_CORR_0 (AR_CHAN_BASE + 0xd4)
+#define AR_PHY_IQ_ADC_MEAS_0_B0 (AR_CHAN_BASE + 0xc0)
+#define AR_PHY_IQ_ADC_MEAS_1_B0 (AR_CHAN_BASE + 0xc4)
+#define AR_PHY_IQ_ADC_MEAS_2_B0 (AR_CHAN_BASE + 0xc8)
+#define AR_PHY_IQ_ADC_MEAS_3_B0 (AR_CHAN_BASE + 0xcc)
+
+/* The following registers changed position from AR9300 1.0 to AR9300 2.0 */
+#define AR_PHY_TX_PHASE_RAMP_0_9300_10 (AR_CHAN_BASE + 0xd0 - 0x10)
+#define AR_PHY_ADC_GAIN_DC_CORR_0_9300_10 (AR_CHAN_BASE + 0xd4 - 0x10)
+#define AR_PHY_IQ_ADC_MEAS_0_B0_9300_10 (AR_CHAN_BASE + 0xc0 + 0x8)
+#define AR_PHY_IQ_ADC_MEAS_1_B0_9300_10 (AR_CHAN_BASE + 0xc4 + 0x8)
+#define AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 (AR_CHAN_BASE + 0xc8 + 0x8)
+#define AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 (AR_CHAN_BASE + 0xcc + 0x8)
+
+#define AR_PHY_TX_CRC (AR_CHAN_BASE + 0xa0)
+#define AR_PHY_TST_DAC_CONST (AR_CHAN_BASE + 0xa4)
+#define AR_PHY_SPUR_REPORT_0 (AR_CHAN_BASE + 0xa8)
+#define AR_PHY_CHAN_INFO_TAB_0 (AR_CHAN_BASE + 0x300)
+
+/*
+ * Channel Field Definitions
+ */
+#define AR_PHY_TIMING2_USE_FORCE_PPM 0x00001000
+#define AR_PHY_TIMING2_FORCE_PPM_VAL 0x00000fff
+#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
+#define AR_PHY_TIMING3_DSC_MAN_S 17
+#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
+#define AR_PHY_TIMING3_DSC_EXP_S 13
+#define AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX 0xF000
+#define AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX_S 12
+#define AR_PHY_TIMING4_DO_CAL 0x10000
+
+#define AR_PHY_TIMING4_ENABLE_PILOT_MASK 0x10000000
+#define AR_PHY_TIMING4_ENABLE_PILOT_MASK_S 28
+#define AR_PHY_TIMING4_ENABLE_CHAN_MASK 0x20000000
+#define AR_PHY_TIMING4_ENABLE_CHAN_MASK_S 29
+
+#define AR_PHY_TIMING4_ENABLE_SPUR_FILTER 0x40000000
+#define AR_PHY_TIMING4_ENABLE_SPUR_FILTER_S 30
+#define AR_PHY_TIMING4_ENABLE_SPUR_RSSI 0x80000000
+#define AR_PHY_TIMING4_ENABLE_SPUR_RSSI_S 31
+
+#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
+#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
+#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
+#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
+#define AR_PHY_SFCORR_M2COUNT_THR_S 0
+#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
+#define AR_PHY_SFCORR_M1_THRESH_S 17
+#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
+#define AR_PHY_SFCORR_M2_THRESH_S 24
+#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
+#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
+#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
+#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
+#define AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD 0x10000000
+#define AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD_S 28
+#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
+#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
+#define AR_PHY_EXT_CCA_THRESH62_S 16
+#define AR_PHY_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_EXT_MINCCA_PWR_S 16
+#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
+#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
+#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE 0x00000001
+#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE_S 0
+#define AR_PHY_TIMING5_CYCPWR_THR1A 0x007F0000
+#define AR_PHY_TIMING5_CYCPWR_THR1A_S 16
+#define AR_PHY_TIMING5_RSSI_THR1A (0x7F << 16)
+#define AR_PHY_TIMING5_RSSI_THR1A_S 16
+#define AR_PHY_TIMING5_RSSI_THR1A_ENA (0x1 << 15)
+#define AR_PHY_RADAR_0_ENA 0x00000001
+#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
+#define AR_PHY_RADAR_0_INBAND 0x0000003e
+#define AR_PHY_RADAR_0_INBAND_S 1
+#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
+#define AR_PHY_RADAR_0_PRSSI_S 6
+#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
+#define AR_PHY_RADAR_0_HEIGHT_S 12
+#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
+#define AR_PHY_RADAR_0_RRSSI_S 18
+#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
+#define AR_PHY_RADAR_0_FIRPWR_S 24
+#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
+#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
+#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
+#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
+#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
+#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
+#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
+#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
+#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
+#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
+#define AR_PHY_RADAR_1_MAXLEN_S 0
+#define AR_PHY_RADAR_EXT_ENA 0x00004000
+#define AR_PHY_RADAR_DC_PWR_THRESH 0x007f8000
+#define AR_PHY_RADAR_DC_PWR_THRESH_S 15
+#define AR_PHY_RADAR_LB_DC_CAP 0x7f800000
+#define AR_PHY_RADAR_LB_DC_CAP_S 23
+#define AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW (0x3f << 6)
+#define AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW_S 6
+#define AR_PHY_FIND_SIG_LOW_FIRPWR (0x7f << 12)
+#define AR_PHY_FIND_SIG_LOW_FIRPWR_S 12
+#define AR_PHY_FIND_SIG_LOW_FIRPWR_SIGN_BIT 19
+#define AR_PHY_FIND_SIG_LOW_RELSTEP 0x1f
+#define AR_PHY_FIND_SIG_LOW_RELSTEP_S 0
+#define AR_PHY_FIND_SIG_LOW_RELSTEP_SIGN_BIT 5
+#define AR_PHY_CHAN_INFO_TAB_S2_READ 0x00000008
+#define AR_PHY_CHAN_INFO_TAB_S2_READ_S 3
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF 0x0000007F
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF_S 0
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF 0x00003F80
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF_S 7
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE 0x00004000
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF 0x003f8000
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF_S 15
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF 0x1fc00000
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF_S 22
+
+/*
+ * MRC Register Map
+ */
+#define AR_MRC_BASE 0x9c00
+
+#define AR_PHY_TIMING_3A (AR_MRC_BASE + 0x0)
+#define AR_PHY_LDPC_CNTL1 (AR_MRC_BASE + 0x4)
+#define AR_PHY_LDPC_CNTL2 (AR_MRC_BASE + 0x8)
+#define AR_PHY_PILOT_SPUR_MASK (AR_MRC_BASE + 0xc)
+#define AR_PHY_CHAN_SPUR_MASK (AR_MRC_BASE + 0x10)
+#define AR_PHY_SGI_DELTA (AR_MRC_BASE + 0x14)
+#define AR_PHY_ML_CNTL_1 (AR_MRC_BASE + 0x18)
+#define AR_PHY_ML_CNTL_2 (AR_MRC_BASE + 0x1c)
+#define AR_PHY_TST_ADC (AR_MRC_BASE + 0x20)
+
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A 0x00000FE0
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_S 5
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A 0x1F
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S 0
+
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A 0x00000FE0
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_S 5
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A 0x1F
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A_S 0
+
+/*
+ * MRC Field Definitions
+ */
+#define AR_PHY_SGI_DSC_MAN 0x0007FFF0
+#define AR_PHY_SGI_DSC_MAN_S 4
+#define AR_PHY_SGI_DSC_EXP 0x0000000F
+#define AR_PHY_SGI_DSC_EXP_S 0
+/*
+ * BBB Register Map
+ */
+#define AR_BBB_BASE 0x9d00
+
+/*
+ * AGC Register Map
+ */
+#define AR_AGC_BASE 0x9e00
+
+#define AR_PHY_SETTLING (AR_AGC_BASE + 0x0)
+#define AR_PHY_FORCEMAX_GAINS_0 (AR_AGC_BASE + 0x4)
+#define AR_PHY_GAINS_MINOFF0 (AR_AGC_BASE + 0x8)
+#define AR_PHY_DESIRED_SZ (AR_AGC_BASE + 0xc)
+#define AR_PHY_FIND_SIG (AR_AGC_BASE + 0x10)
+#define AR_PHY_AGC (AR_AGC_BASE + 0x14)
+#define AR_PHY_EXT_ATTEN_CTL_0 (AR_AGC_BASE + 0x18)
+#define AR_PHY_CCA_0 (AR_AGC_BASE + 0x1c)
+#define AR_PHY_EXT_CCA0 (AR_AGC_BASE + 0x20)
+#define AR_PHY_RESTART (AR_AGC_BASE + 0x24)
+#define AR_PHY_MC_GAIN_CTRL (AR_AGC_BASE + 0x28)
+#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
+#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
+#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34)
+#define AR_PHY_RIFS_SRCH (AR_AGC_BASE + 0x38)
+#define AR_PHY_PEAK_DET_CTRL_1 (AR_AGC_BASE + 0x3c)
+#define AR_PHY_PEAK_DET_CTRL_2 (AR_AGC_BASE + 0x40)
+#define AR_PHY_RX_GAIN_BOUNDS_1 (AR_AGC_BASE + 0x44)
+#define AR_PHY_RX_GAIN_BOUNDS_2 (AR_AGC_BASE + 0x48)
+#define AR_PHY_RSSI_0 (AR_AGC_BASE + 0x180)
+#define AR_PHY_SPUR_CCK_REP0 (AR_AGC_BASE + 0x184)
+#define AR_PHY_CCK_DETECT (AR_AGC_BASE + 0x1c0)
+#define AR_PHY_DAG_CTRLCCK (AR_AGC_BASE + 0x1c4)
+#define AR_PHY_IQCORR_CTRL_CCK (AR_AGC_BASE + 0x1c8)
+
+#define AR_PHY_CCK_SPUR_MIT (AR_AGC_BASE + 0x1cc)
+#define AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR 0x000001fe
+#define AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR_S 1
+#define AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE 0x60000000
+#define AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE_S 29
+#define AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT 0x00000001
+#define AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT_S 0
+#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ 0x1ffffe00
+#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ_S 9
+
+#define AR_PHY_RX_OCGAIN (AR_AGC_BASE + 0x200)
+
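+/*
+ * Nominal noise floor values and the min/max bounds (in dBm) used to
+ * sanity-check per-chain NF readings on AR9300.
+ */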
+#define AR_PHY_CCA_NOM_VAL_9300_2GHZ -110
+#define AR_PHY_CCA_NOM_VAL_9300_5GHZ -115
+#define AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ -125
+#define AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ -125
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ -95
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ -100
+
+/*
+ * AGC Field Definitions
+ */
+#define AR_PHY_EXT_ATTEN_CTL_RXTX_MARGIN 0x00FC0000
+#define AR_PHY_EXT_ATTEN_CTL_RXTX_MARGIN_S 18
+#define AR_PHY_EXT_ATTEN_CTL_BSW_MARGIN 0x00003C00
+#define AR_PHY_EXT_ATTEN_CTL_BSW_MARGIN_S 10
+#define AR_PHY_EXT_ATTEN_CTL_BSW_ATTEN 0x0000001F
+#define AR_PHY_EXT_ATTEN_CTL_BSW_ATTEN_S 0
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_MARGIN 0x003E0000
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_MARGIN_S 17
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN 0x0001F000
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN_S 12
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_DB 0x00000FC0
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_DB_S 6
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB 0x0000003F
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB_S 0
+#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
+#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
+#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
+#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
+#define AR_PHY_SETTLING_SWITCH 0x00003F80
+#define AR_PHY_SETTLING_SWITCH_S 7
+#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
+#define AR_PHY_DESIRED_SZ_ADC_S 0
+#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
+#define AR_PHY_DESIRED_SZ_PGA_S 8
+#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
+#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
+#define AR_PHY_MINCCA_PWR 0x1FF00000
+#define AR_PHY_MINCCA_PWR_S 20
+#define AR_PHY_CCA_THRESH62 0x0007F000
+#define AR_PHY_CCA_THRESH62_S 12
+#define AR9280_PHY_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_MINCCA_PWR_S 20
+#define AR9280_PHY_CCA_THRESH62 0x000FF000
+#define AR9280_PHY_CCA_THRESH62_S 12
+#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
+#define AR_PHY_EXT_CCA0_THRESH62_S 0
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
+#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
+
+#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
+#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR_S 9
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
+
+#define AR_PHY_RIFS_INIT_DELAY 0x3ff0000
+#define AR_PHY_AGC_COARSE_LOW 0x00007F80
+#define AR_PHY_AGC_COARSE_LOW_S 7
+#define AR_PHY_AGC_COARSE_HIGH 0x003F8000
+#define AR_PHY_AGC_COARSE_HIGH_S 15
+#define AR_PHY_AGC_COARSE_PWR_CONST 0x0000007F
+#define AR_PHY_AGC_COARSE_PWR_CONST_S 0
+#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
+#define AR_PHY_FIND_SIG_FIRSTEP_S 12
+#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
+#define AR_PHY_FIND_SIG_FIRPWR_S 18
+#define AR_PHY_FIND_SIG_FIRPWR_SIGN_BIT 25
+#define AR_PHY_FIND_SIG_RELPWR (0x1f << 6)
+#define AR_PHY_FIND_SIG_RELPWR_S 6
+#define AR_PHY_FIND_SIG_RELPWR_SIGN_BIT 11
+#define AR_PHY_FIND_SIG_RELSTEP 0x1f
+#define AR_PHY_FIND_SIG_RELSTEP_S 0
+#define AR_PHY_FIND_SIG_RELSTEP_SIGN_BIT 5
+#define AR_PHY_RESTART_DIV_GC 0x001C0000
+#define AR_PHY_RESTART_DIV_GC_S 18
+#define AR_PHY_RESTART_ENA 0x01
+#define AR_PHY_DC_RESTART_DIS 0x40000000
+
+#define AR_PHY_TPC_OLPC_GAIN_DELTA_PAL_ON 0xFF000000
+#define AR_PHY_TPC_OLPC_GAIN_DELTA_PAL_ON_S 24
+#define AR_PHY_TPC_OLPC_GAIN_DELTA 0x00FF0000
+#define AR_PHY_TPC_OLPC_GAIN_DELTA_S 16
+
+#define AR_PHY_TPC_6_ERROR_EST_MODE 0x03000000
+#define AR_PHY_TPC_6_ERROR_EST_MODE_S 24
+
+/*
+ * SM Register Map
+ */
+#define AR_SM_BASE 0xa200
+
+#define AR_PHY_D2_CHIP_ID (AR_SM_BASE + 0x0)
+#define AR_PHY_GEN_CTRL (AR_SM_BASE + 0x4)
+#define AR_PHY_MODE (AR_SM_BASE + 0x8)
+#define AR_PHY_ACTIVE (AR_SM_BASE + 0xc)
+#define AR_PHY_SPUR_MASK_A (AR_SM_BASE + 0x20)
+#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + 0x24)
+#define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28)
+#define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c)
+#define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30)
+#define AR_PHY_MAX_RX_LEN (AR_SM_BASE + 0x34)
+#define AR_PHY_FRAME_CTL (AR_SM_BASE + 0x38)
+#define AR_PHY_RFBUS_REQ (AR_SM_BASE + 0x3c)
+#define AR_PHY_RFBUS_GRANT (AR_SM_BASE + 0x40)
+#define AR_PHY_RIFS (AR_SM_BASE + 0x44)
+#define AR_PHY_RX_CLR_DELAY (AR_SM_BASE + 0x50)
+#define AR_PHY_RX_DELAY (AR_SM_BASE + 0x54)
+
+#define AR_PHY_XPA_TIMING_CTL (AR_SM_BASE + 0x64)
+#define AR_PHY_MISC_PA_CTL (AR_SM_BASE + 0x80)
+#define AR_PHY_SWITCH_CHAIN_0 (AR_SM_BASE + 0x84)
+#define AR_PHY_SWITCH_COM (AR_SM_BASE + 0x88)
+#define AR_PHY_SWITCH_COM_2 (AR_SM_BASE + 0x8c)
+#define AR_PHY_RX_CHAINMASK (AR_SM_BASE + 0xa0)
+#define AR_PHY_CAL_CHAINMASK (AR_SM_BASE + 0xc0)
+#define AR_PHY_CALMODE (AR_SM_BASE + 0xc8)
+#define AR_PHY_FCAL_1 (AR_SM_BASE + 0xcc)
+#define AR_PHY_FCAL_2_0 (AR_SM_BASE + 0xd0)
+#define AR_PHY_DFT_TONE_CTL_0 (AR_SM_BASE + 0xd4)
+#define AR_PHY_CL_CAL_CTL (AR_SM_BASE + 0xd8)
+#define AR_PHY_CL_TAB_0 (AR_SM_BASE + 0x100)
+#define AR_PHY_SYNTH_CONTROL (AR_SM_BASE + 0x140)
+#define AR_PHY_ADDAC_CLK_SEL (AR_SM_BASE + 0x144)
+#define AR_PHY_PLL_CTL (AR_SM_BASE + 0x148)
+#define AR_PHY_ANALOG_SWAP (AR_SM_BASE + 0x14c)
+#define AR_PHY_ADDAC_PARA_CTL (AR_SM_BASE + 0x150)
+#define AR_PHY_XPA_CFG (AR_SM_BASE + 0x158)
+
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A 0x0001FC00
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_S 10
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S 0
+
+#define AR_PHY_TEST (AR_SM_BASE + 0x160)
+
+#define AR_PHY_TEST_BBB_OBS_SEL 0x780000
+#define AR_PHY_TEST_BBB_OBS_SEL_S 19
+
+#define AR_PHY_TEST_RX_OBS_SEL_BIT5_S 23
+#define AR_PHY_TEST_RX_OBS_SEL_BIT5 (1 << AR_PHY_TEST_RX_OBS_SEL_BIT5_S)
+
+#define AR_PHY_TEST_CHAIN_SEL 0xC0000000
+#define AR_PHY_TEST_CHAIN_SEL_S 30
+
+#define AR_PHY_TEST_CTL_STATUS (AR_SM_BASE + 0x164)
+#define AR_PHY_TEST_CTL_TSTDAC_EN 0x1
+#define AR_PHY_TEST_CTL_TSTDAC_EN_S 0
+#define AR_PHY_TEST_CTL_TX_OBS_SEL 0x1C
+#define AR_PHY_TEST_CTL_TX_OBS_SEL_S 2
+#define AR_PHY_TEST_CTL_TX_OBS_MUX_SEL 0x60
+#define AR_PHY_TEST_CTL_TX_OBS_MUX_SEL_S 5
+#define AR_PHY_TEST_CTL_TSTADC_EN 0x100
+#define AR_PHY_TEST_CTL_TSTADC_EN_S 8
+#define AR_PHY_TEST_CTL_RX_OBS_SEL 0x3C00
+#define AR_PHY_TEST_CTL_RX_OBS_SEL_S 10
+
+
+#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
+
+#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c)
+#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + 0x170)
+#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + 0x174)
+#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + 0x178)
+#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + 0x17c)
+#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + 0x180)
+#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + 0x190)
+#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + 0x194)
+
+#define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + 0x1a4)
+#define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8)
+#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
+#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0)
+
+#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0)
+#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4)
+
+#define AR_PHY_TPC_4_B0 (AR_SM_BASE + 0x204)
+#define AR_PHY_TPC_5_B0 (AR_SM_BASE + 0x208)
+#define AR_PHY_TPC_6_B0 (AR_SM_BASE + 0x20c)
+#define AR_PHY_TPC_11_B0 (AR_SM_BASE + 0x220)
+#define AR_PHY_TPC_18 (AR_SM_BASE + 0x23c)
+#define AR_PHY_TPC_19 (AR_SM_BASE + 0x240)
+
+#define AR_PHY_TX_FORCED_GAIN (AR_SM_BASE + 0x258)
+
+#define AR_PHY_PDADC_TAB_0 (AR_SM_BASE + 0x280)
+
+#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + 0x448)
+#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + 0x440)
+#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + 0x48c)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B0 (AR_SM_BASE + 0x450)
+
+#define AR_PHY_PANIC_WD_STATUS (AR_SM_BASE + 0x5c0)
+#define AR_PHY_PANIC_WD_CTL_1 (AR_SM_BASE + 0x5c4)
+#define AR_PHY_PANIC_WD_CTL_2 (AR_SM_BASE + 0x5c8)
+#define AR_PHY_BT_CTL (AR_SM_BASE + 0x5cc)
+#define AR_PHY_ONLY_WARMRESET (AR_SM_BASE + 0x5d0)
+#define AR_PHY_ONLY_CTL (AR_SM_BASE + 0x5d4)
+#define AR_PHY_ECO_CTRL (AR_SM_BASE + 0x5dc)
+#define AR_PHY_BB_THERM_ADC_1 (AR_SM_BASE + 0x248)
+
+#define AR_PHY_65NM_CH0_SYNTH4 0x1608c
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT 0x00000002
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S 1
+#define AR_PHY_65NM_CH0_SYNTH7 0x16098
+#define AR_PHY_65NM_CH0_BIAS1 0x160c0
+#define AR_PHY_65NM_CH0_BIAS2 0x160c4
+#define AR_PHY_65NM_CH0_BIAS4 0x160cc
+#define AR_PHY_65NM_CH0_RXTX4 0x1610c
+#define AR_PHY_65NM_CH0_THERM 0x16290
+
+#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000
+#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
+#define AR_PHY_65NM_CH0_THERM_START 0x20000000
+#define AR_PHY_65NM_CH0_THERM_START_S 29
+#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT 0x0000ff00
+#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT_S 8
+
+#define AR_PHY_65NM_CH0_RXTX1 0x16100
+#define AR_PHY_65NM_CH0_RXTX2 0x16104
+#define AR_PHY_65NM_CH1_RXTX1 0x16500
+#define AR_PHY_65NM_CH1_RXTX2 0x16504
+#define AR_PHY_65NM_CH2_RXTX1 0x16900
+#define AR_PHY_65NM_CH2_RXTX2 0x16904
+
+#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT 0x00380000
+#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT_S 19
+#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT 0x00c00000
+#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT_S 22
+#define AR_PHY_LNAGAIN_LONG_SHIFT 0xe0000000
+#define AR_PHY_LNAGAIN_LONG_SHIFT_S 29
+#define AR_PHY_MXRGAIN_LONG_SHIFT 0x03000000
+#define AR_PHY_MXRGAIN_LONG_SHIFT_S 24
+#define AR_PHY_VGAGAIN_LONG_SHIFT 0x1c000000
+#define AR_PHY_VGAGAIN_LONG_SHIFT_S 26
+#define AR_PHY_SCFIR_GAIN_LONG_SHIFT 0x00000001
+#define AR_PHY_SCFIR_GAIN_LONG_SHIFT_S 0
+#define AR_PHY_MANRXGAIN_LONG_SHIFT 0x00000002
+#define AR_PHY_MANRXGAIN_LONG_SHIFT_S 1
+
+/*
+ * SM Field Definitions
+ */
+#define AR_PHY_CL_CAL_ENABLE 0x00000002
+#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
+
+#define AR_PHY_ADDAC_PARACTL_OFF_PWDADC 0x00008000
+
+#define AR_PHY_FCAL20_CAP_STATUS_0 0x01f00000
+#define AR_PHY_FCAL20_CAP_STATUS_0_S 20
+
+#define AR_PHY_RFBUS_REQ_EN 0x00000001 /* request for RF bus */
+#define AR_PHY_RFBUS_GRANT_EN 0x00000001 /* RF bus granted */
+#define AR_PHY_GC_TURBO_MODE 0x00000001 /* set turbo mode bits */
+#define AR_PHY_GC_TURBO_SHORT 0x00000002 /* set short symbols to turbo mode setting */
+#define AR_PHY_GC_DYN2040_EN 0x00000004 /* enable dyn 20/40 mode */
+#define AR_PHY_GC_DYN2040_PRI_ONLY 0x00000008 /* dyn 20/40 - primary only */
+#define AR_PHY_GC_DYN2040_PRI_CH 0x00000010 /* dyn 20/40 - primary ch offset (0=+10MHz, 1=-10MHz)*/
+#define AR_PHY_GC_DYN2040_PRI_CH_S 4
+#define AR_PHY_GC_DYN2040_EXT_CH 0x00000020 /* dyn 20/40 - ext ch spacing (0=20MHz/ 1=25MHz) */
+#define AR_PHY_GC_HT_EN 0x00000040 /* ht enable */
+#define AR_PHY_GC_SHORT_GI_40 0x00000080 /* allow short GI for HT 40 */
+#define AR_PHY_GC_WALSH 0x00000100 /* walsh spatial spreading for 2 chains,2 streams TX */
+#define AR_PHY_GC_SINGLE_HT_LTF1 0x00000200 /* single length (4us) 1st HT long training symbol */
+#define AR_PHY_GC_GF_DETECT_EN 0x00000400 /* enable Green Field detection. Only affects rx, not tx */
+#define AR_PHY_GC_ENABLE_DAC_FIFO 0x00000800 /* fifo between bb and dac */
+#define AR_PHY_RX_DELAY_DELAY 0x00003FFF /* delay from wakeup to rx ena */
+
+#define AR_PHY_CALMODE_IQ 0x00000000
+#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
+#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
+#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
+#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
+#define AR_PHY_MODE_OFDM 0x00000000
+#define AR_PHY_MODE_CCK 0x00000001
+#define AR_PHY_MODE_DYNAMIC 0x00000004
+#define AR_PHY_MODE_DYNAMIC_S 2
+#define AR_PHY_MODE_HALF 0x00000020
+#define AR_PHY_MODE_QUARTER 0x00000040
+#define AR_PHY_MAC_CLK_MODE 0x00000080
+#define AR_PHY_MODE_DYN_CCK_DISABLE 0x00000100
+#define AR_PHY_MODE_SVD_HALF 0x00000200
+#define AR_PHY_ACTIVE_EN 0x00000001
+#define AR_PHY_ACTIVE_DIS 0x00000000
+#define AR_PHY_FORCE_XPA_CFG 0x000000001
+#define AR_PHY_FORCE_XPA_CFG_S 0
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF 0xFF000000
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF_S 24
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF 0x00FF0000
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF_S 16
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON 0x0000FF00
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON_S 8
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON 0x000000FF
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON_S 0
+#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
+#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
+#define AR_PHY_TX_END_DATA_START 0x000000FF
+#define AR_PHY_TX_END_DATA_START_S 0
+#define AR_PHY_TX_END_PA_ON 0x0000FF00
+#define AR_PHY_TX_END_PA_ON_S 8
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
+#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
+#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
+#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
+#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
+#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
+#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
+#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
+#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
+#define AR_PHY_TPCGR1_FORCED_DAC_GAIN 0x0000003e
+#define AR_PHY_TPCGR1_FORCED_DAC_GAIN_S 1
+#define AR_PHY_TPCGR1_FORCE_DAC_GAIN 0x00000001
+#define AR_PHY_TXGAIN_FORCE 0x00000001
+#define AR_PHY_TXGAIN_FORCED_PADVGNRA 0x00003c00
+#define AR_PHY_TXGAIN_FORCED_PADVGNRA_S 10
+#define AR_PHY_TXGAIN_FORCED_PADVGNRB 0x0003c000
+#define AR_PHY_TXGAIN_FORCED_PADVGNRB_S 14
+#define AR_PHY_TXGAIN_FORCED_PADVGNRD 0x00c00000
+#define AR_PHY_TXGAIN_FORCED_PADVGNRD_S 22
+#define AR_PHY_TXGAIN_FORCED_TXMXRGAIN 0x000003c0
+#define AR_PHY_TXGAIN_FORCED_TXMXRGAIN_S 6
+#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN 0x0000000e
+#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN_S 1
+
+#define AR_PHY_POWER_TX_RATE1 0x9934
+#define AR_PHY_POWER_TX_RATE2 0x9938
+#define AR_PHY_POWER_TX_RATE_MAX 0x993c
+#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
+#define PHY_AGC_CLR 0x10000000
+#define RFSILENT_BB 0x00002000
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_PPM_MASK 0xFFF
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_PPM_SIGNED_BIT 0x800
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
+#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
+#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
+#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
+#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x00000001
+#define AR_PHY_SPECTRAL_SCAN_ENABLE_S 0
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
+#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00
+#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
+#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000
+#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24
+#define AR_PHY_CHANNEL_STATUS_RX_CLEAR 0x00000004
+#define AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT 0x01fc0000
+#define AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT_S 18
+#define AR_PHY_TX_IQCAL_START_DO_CAL 0x00000001
+#define AR_PHY_TX_IQCAL_START_DO_CAL_S 0
+
+#define AR_PHY_TX_IQCAL_STATUS_FAILED 0x00000001
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE 0x00003fff
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S 0
+
+#define AR_PHY_TPC_18_THERM_CAL_VALUE 0xff
+#define AR_PHY_TPC_18_THERM_CAL_VALUE_S 0
+#define AR_PHY_TPC_19_ALPHA_THERM 0xff
+#define AR_PHY_TPC_19_ALPHA_THERM_S 0
+
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
+
+#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff
+#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0
+
+/*
+ * Channel 1 Register Map
+ */
+#define AR_CHAN1_BASE 0xa800
+
+#define AR_PHY_EXT_CCA_1 (AR_CHAN1_BASE + 0x30)
+#define AR_PHY_TX_PHASE_RAMP_1 (AR_CHAN1_BASE + 0xd0)
+#define AR_PHY_ADC_GAIN_DC_CORR_1 (AR_CHAN1_BASE + 0xd4)
+
+#define AR_PHY_SPUR_REPORT_1 (AR_CHAN1_BASE + 0xa8)
+#define AR_PHY_CHAN_INFO_TAB_1 (AR_CHAN1_BASE + 0x300)
+#define AR_PHY_RX_IQCAL_CORR_B1 (AR_CHAN1_BASE + 0xdc)
+
+/*
+ * Channel 1 Field Definitions
+ */
+#define AR_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_CH1_EXT_MINCCA_PWR_S 16
+
+/*
+ * AGC 1 Register Map
+ */
+#define AR_AGC1_BASE 0xae00
+
+#define AR_PHY_FORCEMAX_GAINS_1 (AR_AGC1_BASE + 0x4)
+#define AR_PHY_EXT_ATTEN_CTL_1 (AR_AGC1_BASE + 0x18)
+#define AR_PHY_CCA_1 (AR_AGC1_BASE + 0x1c)
+#define AR_PHY_CCA_CTRL_1 (AR_AGC1_BASE + 0x20)
+#define AR_PHY_RSSI_1 (AR_AGC1_BASE + 0x180)
+#define AR_PHY_SPUR_CCK_REP_1 (AR_AGC1_BASE + 0x184)
+#define AR_PHY_RX_OCGAIN_2 (AR_AGC1_BASE + 0x200)
+
+/*
+ * AGC 1 Field Definitions
+ */
+#define AR_PHY_CH1_MINCCA_PWR 0x1FF00000
+#define AR_PHY_CH1_MINCCA_PWR_S 20
+
+/*
+ * SM 1 Register Map
+ */
+#define AR_SM1_BASE 0xb200
+
+#define AR_PHY_SWITCH_CHAIN_1 (AR_SM1_BASE + 0x84)
+#define AR_PHY_FCAL_2_1 (AR_SM1_BASE + 0xd0)
+#define AR_PHY_DFT_TONE_CTL_1 (AR_SM1_BASE + 0xd4)
+#define AR_PHY_CL_TAB_1 (AR_SM1_BASE + 0x100)
+#define AR_PHY_CHAN_INFO_GAIN_1 (AR_SM1_BASE + 0x180)
+#define AR_PHY_TPC_4_B1 (AR_SM1_BASE + 0x204)
+#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208)
+#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c)
+#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
+#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240)
+#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B1 (AR_SM1_BASE + 0x450)
+
+/*
+ * Channel 2 Register Map
+ */
+#define AR_CHAN2_BASE 0xb800
+
+#define AR_PHY_EXT_CCA_2 (AR_CHAN2_BASE + 0x30)
+#define AR_PHY_TX_PHASE_RAMP_2 (AR_CHAN2_BASE + 0xd0)
+#define AR_PHY_ADC_GAIN_DC_CORR_2 (AR_CHAN2_BASE + 0xd4)
+
+#define AR_PHY_SPUR_REPORT_2 (AR_CHAN2_BASE + 0xa8)
+#define AR_PHY_CHAN_INFO_TAB_2 (AR_CHAN2_BASE + 0x300)
+#define AR_PHY_RX_IQCAL_CORR_B2 (AR_CHAN2_BASE + 0xdc)
+
+/*
+ * Channel 2 Field Definitions
+ */
+#define AR_PHY_CH2_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_CH2_EXT_MINCCA_PWR_S 16
+/*
+ * AGC 2 Register Map
+ */
+#define AR_AGC2_BASE 0xbe00
+
+#define AR_PHY_FORCEMAX_GAINS_2 (AR_AGC2_BASE + 0x4)
+#define AR_PHY_EXT_ATTEN_CTL_2 (AR_AGC2_BASE + 0x18)
+#define AR_PHY_CCA_2 (AR_AGC2_BASE + 0x1c)
+#define AR_PHY_CCA_CTRL_2 (AR_AGC2_BASE + 0x20)
+#define AR_PHY_RSSI_2 (AR_AGC2_BASE + 0x180)
+
+/*
+ * AGC 2 Field Definitions
+ */
+#define AR_PHY_CH2_MINCCA_PWR 0x1FF00000
+#define AR_PHY_CH2_MINCCA_PWR_S 20
+
+/*
+ * SM 2 Register Map
+ */
+#define AR_SM2_BASE 0xc200
+
+#define AR_PHY_SWITCH_CHAIN_2 (AR_SM2_BASE + 0x84)
+#define AR_PHY_FCAL_2_2 (AR_SM2_BASE + 0xd0)
+#define AR_PHY_DFT_TONE_CTL_2 (AR_SM2_BASE + 0xd4)
+#define AR_PHY_CL_TAB_2 (AR_SM2_BASE + 0x100)
+#define AR_PHY_CHAN_INFO_GAIN_2 (AR_SM2_BASE + 0x180)
+#define AR_PHY_TPC_4_B2 (AR_SM2_BASE + 0x204)
+#define AR_PHY_TPC_5_B2 (AR_SM2_BASE + 0x208)
+#define AR_PHY_TPC_6_B2 (AR_SM2_BASE + 0x20c)
+#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220)
+#define AR_PHY_PDADC_TAB_2 (AR_SM2_BASE + 0x240)
+#define AR_PHY_TX_IQCAL_STATUS_B2 (AR_SM2_BASE + 0x48c)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B2 (AR_SM2_BASE + 0x450)
+
+#define AR_PHY_TX_IQCAL_STATUS_B2_FAILED 0x00000001
+
+/*
+ * AGC 3 Register Map
+ */
+#define AR_AGC3_BASE 0xce00
+
+#define AR_PHY_RSSI_3 (AR_AGC3_BASE + 0x180)
+
+/*
+ * Misc helper defines
+ */
+#define AR_PHY_CHAIN_OFFSET (AR_CHAN1_BASE - AR_CHAN_BASE)
+
+#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (AR_PHY_ADC_GAIN_DC_CORR_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_NEW_ADC_DC_GAIN_CORR_9300_10(_i) (AR_PHY_ADC_GAIN_DC_CORR_0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_SWITCH_CHAIN(_i) (AR_PHY_SWITCH_CHAIN_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_EXT_ATTEN_CTL(_i) (AR_PHY_EXT_ATTEN_CTL_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+
+#define AR_PHY_RXGAIN(_i) (AR_PHY_FORCEMAX_GAINS_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_TPCRG5(_i) (AR_PHY_TPC_5_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_PDADC_TAB(_i) (AR_PHY_PDADC_TAB_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+
+#define AR_PHY_CAL_MEAS_0(_i) (AR_PHY_IQ_ADC_MEAS_0_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_1(_i) (AR_PHY_IQ_ADC_MEAS_1_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_2(_i) (AR_PHY_IQ_ADC_MEAS_2_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_3(_i) (AR_PHY_IQ_ADC_MEAS_3_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_0_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_0_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_1_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_1_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_2_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_3_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+
+#define AR_PHY_BB_PANIC_NON_IDLE_ENABLE 0x00000001
+#define AR_PHY_BB_PANIC_IDLE_ENABLE 0x00000002
+#define AR_PHY_BB_PANIC_IDLE_MASK 0xFFFF0000
+#define AR_PHY_BB_PANIC_NON_IDLE_MASK 0x0000FFFC
+
+#define AR_PHY_BB_PANIC_RST_ENABLE 0x00000002
+#define AR_PHY_BB_PANIC_IRQ_ENABLE 0x00000004
+#define AR_PHY_BB_PANIC_CNTL2_MASK 0xFFFFFFF9
+
+#define AR_PHY_BB_WD_STATUS 0x00000007
+#define AR_PHY_BB_WD_STATUS_S 0
+#define AR_PHY_BB_WD_DET_HANG 0x00000008
+#define AR_PHY_BB_WD_DET_HANG_S 3
+#define AR_PHY_BB_WD_RADAR_SM 0x000000F0
+#define AR_PHY_BB_WD_RADAR_SM_S 4
+#define AR_PHY_BB_WD_RX_OFDM_SM 0x00000F00
+#define AR_PHY_BB_WD_RX_OFDM_SM_S 8
+#define AR_PHY_BB_WD_RX_CCK_SM 0x0000F000
+#define AR_PHY_BB_WD_RX_CCK_SM_S 12
+#define AR_PHY_BB_WD_TX_OFDM_SM 0x000F0000
+#define AR_PHY_BB_WD_TX_OFDM_SM_S 16
+#define AR_PHY_BB_WD_TX_CCK_SM 0x00F00000
+#define AR_PHY_BB_WD_TX_CCK_SM_S 20
+#define AR_PHY_BB_WD_AGC_SM 0x0F000000
+#define AR_PHY_BB_WD_AGC_SM_S 24
+#define AR_PHY_BB_WD_SRCH_SM 0xF0000000
+#define AR_PHY_BB_WD_SRCH_SM_S 28
+
+#define AR_PHY_BB_WD_STATUS_CLR 0x00000008
+
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
+
+#endif /* AR9003_PHY_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 83c7ea4..fbb7dec 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -114,8 +114,10 @@ enum buffer_type {
#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY)
#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY)
+#define ATH_TXSTATUS_RING_SIZE 64
+
struct ath_descdma {
- struct ath_desc *dd_desc;
+ void *dd_desc;
dma_addr_t dd_desc_paddr;
u32 dd_desc_len;
struct ath_buf *dd_bufptr;
@@ -123,7 +125,7 @@ struct ath_descdma {
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct list_head *head, const char *name,
- int nbuf, int ndesc);
+ int nbuf, int ndesc, bool is_tx);
void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
struct list_head *head);
@@ -178,9 +180,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
#define BAW_WITHIN(_start, _bawsz, _seqno) \
((((_seqno) - (_start)) & 4095) < (_bawsz))
-#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum)
-#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low)
-#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA)
#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
#define ATH_TX_COMPLETE_POLL_INT 1000
@@ -191,6 +190,7 @@ enum ATH_AGGR_STATUS {
ATH_AGGR_LIMITED,
};
+#define ATH_TXFIFO_DEPTH 8
struct ath_txq {
u32 axq_qnum;
u32 *axq_link;
@@ -200,6 +200,10 @@ struct ath_txq {
bool stopped;
bool axq_tx_inprogress;
struct list_head axq_acq;
+ struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
+ struct list_head txq_fifo_pending;
+ u8 txq_headidx;
+ u8 txq_tailidx;
};
#define AGGR_CLEANUP BIT(1)
@@ -226,6 +230,12 @@ struct ath_tx {
struct ath_descdma txdma;
};
+struct ath_rx_edma {
+ struct sk_buff_head rx_fifo;
+ struct sk_buff_head rx_buffers;
+ u32 rx_fifo_hwsize;
+};
+
struct ath_rx {
u8 defant;
u8 rxotherant;
@@ -235,6 +245,8 @@ struct ath_rx {
spinlock_t rxbuflock;
struct list_head rxbuf;
struct ath_descdma rxdma;
+ struct ath_buf *rx_bufptr;
+ struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
};
int ath_startrecv(struct ath_softc *sc);
@@ -243,7 +255,7 @@ void ath_flushrecv(struct ath_softc *sc);
u32 ath_calcrxfilter(struct ath_softc *sc);
int ath_rx_init(struct ath_softc *sc, int nbufs);
void ath_rx_cleanup(struct ath_softc *sc);
-int ath_rx_tasklet(struct ath_softc *sc, int flush);
+int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
int ath_tx_setup(struct ath_softc *sc, int haltype);
@@ -261,6 +273,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_tx_control *txctl);
void ath_tx_tasklet(struct ath_softc *sc);
+void ath_tx_edma_tasklet(struct ath_softc *sc);
void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb);
bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno);
void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -483,7 +496,6 @@ struct ath_softc {
bool ps_enabled;
bool ps_idle;
unsigned long ps_usecount;
- enum ath9k_int imask;
struct ath_config config;
struct ath_rx rx;
@@ -511,6 +523,8 @@ struct ath_softc {
struct ath_beacon_config cur_beacon_conf;
struct delayed_work tx_complete_work;
struct ath_btcoex btcoex;
+
+ struct ath_descdma txsdma;
};
struct ath_wiphy {
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index b4a31a4..c8a4558 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -93,8 +93,6 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1);
}
- ds->ds_data = bf->bf_buf_addr;
-
sband = &sc->sbands[common->hw->conf.channel->band];
rate = sband->bitrates[rateidx].hw_value;
if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
@@ -109,7 +107,8 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
/* NB: beacon's BufLen must be a multiple of 4 bytes */
ath9k_hw_filltxdesc(ah, ds, roundup(skb->len, 4),
- true, true, ds);
+ true, true, ds, bf->bf_buf_addr,
+ sc->beacon.beaconq);
memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
series[0].Tries = 1;
@@ -524,6 +523,7 @@ static void ath9k_beacon_init(struct ath_softc *sc,
static void ath_beacon_config_ap(struct ath_softc *sc,
struct ath_beacon_config *conf)
{
+ struct ath_hw *ah = sc->sc_ah;
u32 nexttbtt, intval;
/* NB: the beacon interval is kept internally in TU's */
@@ -539,15 +539,15 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
* prepare beacon frames.
*/
intval |= ATH9K_BEACON_ENA;
- sc->imask |= ATH9K_INT_SWBA;
+ ah->imask |= ATH9K_INT_SWBA;
ath_beaconq_config(sc);
/* Set the computed AP beacon timers */
- ath9k_hw_set_interrupts(sc->sc_ah, 0);
+ ath9k_hw_set_interrupts(ah, 0);
ath9k_beacon_init(sc, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
- ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
/* Clear the reset TSF flag, so that subsequent beacon updation
will not reset the HW TSF. */
@@ -566,7 +566,8 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
static void ath_beacon_config_sta(struct ath_softc *sc,
struct ath_beacon_config *conf)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_beacon_state bs;
int dtimperiod, dtimcount, sleepduration;
int cfpperiod, cfpcount;
@@ -605,7 +606,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
* Pull nexttbtt forward to reflect the current
* TSF and calculate dtim+cfp state for the result.
*/
- tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ tsf = ath9k_hw_gettsf64(ah);
tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
num_beacons = tsftu / intval + 1;
@@ -678,17 +679,18 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
/* Set the computed STA beacon timers */
- ath9k_hw_set_interrupts(sc->sc_ah, 0);
- ath9k_hw_set_sta_beacon_timers(sc->sc_ah, &bs);
- sc->imask |= ATH9K_INT_BMISS;
- ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, 0);
+ ath9k_hw_set_sta_beacon_timers(ah, &bs);
+ ah->imask |= ATH9K_INT_BMISS;
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
static void ath_beacon_config_adhoc(struct ath_softc *sc,
struct ath_beacon_config *conf,
struct ieee80211_vif *vif)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
u64 tsf;
u32 tsftu, intval, nexttbtt;
@@ -703,7 +705,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
else if (intval)
nexttbtt = roundup(nexttbtt, intval);
- tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ tsf = ath9k_hw_gettsf64(ah);
tsftu = TSF_TO_TU((u32)(tsf>>32), (u32)tsf) + FUDGE;
do {
nexttbtt += intval;
@@ -719,20 +721,20 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
* self-linked tx descriptor and let the hardware deal with things.
*/
intval |= ATH9K_BEACON_ENA;
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL))
- sc->imask |= ATH9K_INT_SWBA;
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_VEOL))
+ ah->imask |= ATH9K_INT_SWBA;
ath_beaconq_config(sc);
/* Set the computed ADHOC beacon timers */
- ath9k_hw_set_interrupts(sc->sc_ah, 0);
+ ath9k_hw_set_interrupts(ah, 0);
ath9k_beacon_init(sc, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
- ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
/* FIXME: Handle properly when vif is NULL */
- if (vif && sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)
+ if (vif && ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)
ath_beacon_start_adhoc(sc, vif);
}
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 238a574..07b8fa6 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -15,6 +15,9 @@
*/
#include "hw.h"
+#include "hw-ops.h"
+
+/* Common calibration code */
/* We can tune this as we go by monitoring really low values */
#define ATH9K_NF_TOO_LOW -60
@@ -83,93 +86,11 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
}
}
- return;
}
-static void ath9k_hw_do_getnf(struct ath_hw *ah,
- int16_t nfarray[NUM_NF_READINGS])
-{
- struct ath_common *common = ath9k_hw_common(ah);
- int16_t nf;
-
- if (AR_SREV_9280_10_OR_LATER(ah))
- nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
- else
- nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
-
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ctl] [chain 0] is %d\n", nf);
- nfarray[0] = nf;
-
- if (!AR_SREV_9285(ah)) {
- if (AR_SREV_9280_10_OR_LATER(ah))
- nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
- AR9280_PHY_CH1_MINCCA_PWR);
- else
- nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
- AR_PHY_CH1_MINCCA_PWR);
-
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ctl] [chain 1] is %d\n", nf);
- nfarray[1] = nf;
-
- if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
- nf = MS(REG_READ(ah, AR_PHY_CH2_CCA),
- AR_PHY_CH2_MINCCA_PWR);
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ctl] [chain 2] is %d\n", nf);
- nfarray[2] = nf;
- }
- }
-
- if (AR_SREV_9280_10_OR_LATER(ah))
- nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
- AR9280_PHY_EXT_MINCCA_PWR);
- else
- nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
- AR_PHY_EXT_MINCCA_PWR);
-
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ext] [chain 0] is %d\n", nf);
- nfarray[3] = nf;
-
- if (!AR_SREV_9285(ah)) {
- if (AR_SREV_9280_10_OR_LATER(ah))
- nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
- AR9280_PHY_CH1_EXT_MINCCA_PWR);
- else
- nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
- AR_PHY_CH1_EXT_MINCCA_PWR);
-
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ext] [chain 1] is %d\n", nf);
- nfarray[4] = nf;
-
- if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
- nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA),
- AR_PHY_CH2_EXT_MINCCA_PWR);
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ext] [chain 2] is %d\n", nf);
- nfarray[5] = nf;
- }
- }
-}
-
-static bool getNoiseFloorThresh(struct ath_hw *ah,
- enum ieee80211_band band,
- int16_t *nft)
+static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
+ enum ieee80211_band band,
+ int16_t *nft)
{
switch (band) {
case IEEE80211_BAND_5GHZ:
@@ -186,44 +107,8 @@ static bool getNoiseFloorThresh(struct ath_hw *ah,
return true;
}
-static void ath9k_hw_setup_calibration(struct ath_hw *ah,
- struct ath9k_cal_list *currCal)
-{
- struct ath_common *common = ath9k_hw_common(ah);
-
- REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
- AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
- currCal->calData->calCountMax);
-
- switch (currCal->calData->calType) {
- case IQ_MISMATCH_CAL:
- REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting IQ Mismatch Calibration\n");
- break;
- case ADC_GAIN_CAL:
- REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting ADC Gain Calibration\n");
- break;
- case ADC_DC_CAL:
- REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting ADC DC Calibration\n");
- break;
- case ADC_DC_INIT_CAL:
- REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting Init ADC DC Calibration\n");
- break;
- }
-
- REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
- AR_PHY_TIMING_CTRL4_DO_CAL);
-}
-
-static void ath9k_hw_reset_calibration(struct ath_hw *ah,
- struct ath9k_cal_list *currCal)
+void ath9k_hw_reset_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
{
int i;
@@ -241,324 +126,6 @@ static void ath9k_hw_reset_calibration(struct ath_hw *ah,
ah->cal_samples = 0;
}
-static bool ath9k_hw_per_calibration(struct ath_hw *ah,
- struct ath9k_channel *ichan,
- u8 rxchainmask,
- struct ath9k_cal_list *currCal)
-{
- bool iscaldone = false;
-
- if (currCal->calState == CAL_RUNNING) {
- if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
- AR_PHY_TIMING_CTRL4_DO_CAL)) {
-
- currCal->calData->calCollect(ah);
- ah->cal_samples++;
-
- if (ah->cal_samples >= currCal->calData->calNumSamples) {
- int i, numChains = 0;
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- if (rxchainmask & (1 << i))
- numChains++;
- }
-
- currCal->calData->calPostProc(ah, numChains);
- ichan->CalValid |= currCal->calData->calType;
- currCal->calState = CAL_DONE;
- iscaldone = true;
- } else {
- ath9k_hw_setup_calibration(ah, currCal);
- }
- }
- } else if (!(ichan->CalValid & currCal->calData->calType)) {
- ath9k_hw_reset_calibration(ah, currCal);
- }
-
- return iscaldone;
-}
-
-/* Assumes you are talking about the currently configured channel */
-static bool ath9k_hw_iscal_supported(struct ath_hw *ah,
- enum ath9k_cal_types calType)
-{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
-
- switch (calType & ah->supp_cals) {
- case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
- return true;
- case ADC_GAIN_CAL:
- case ADC_DC_CAL:
- if (!(conf->channel->band == IEEE80211_BAND_2GHZ &&
- conf_is_ht20(conf)))
- return true;
- break;
- }
- return false;
-}
-
-static void ath9k_hw_iqcal_collect(struct ath_hw *ah)
-{
- int i;
-
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- ah->totalPowerMeasI[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
- ah->totalPowerMeasQ[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
- ah->totalIqCorrMeas[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
- ah->cal_samples, i, ah->totalPowerMeasI[i],
- ah->totalPowerMeasQ[i],
- ah->totalIqCorrMeas[i]);
- }
-}
-
-static void ath9k_hw_adc_gaincal_collect(struct ath_hw *ah)
-{
- int i;
-
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- ah->totalAdcIOddPhase[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
- ah->totalAdcIEvenPhase[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
- ah->totalAdcQOddPhase[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ah->totalAdcQEvenPhase[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
- "oddq=0x%08x; evenq=0x%08x;\n",
- ah->cal_samples, i,
- ah->totalAdcIOddPhase[i],
- ah->totalAdcIEvenPhase[i],
- ah->totalAdcQOddPhase[i],
- ah->totalAdcQEvenPhase[i]);
- }
-}
-
-static void ath9k_hw_adc_dccal_collect(struct ath_hw *ah)
-{
- int i;
-
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- ah->totalAdcDcOffsetIOddPhase[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
- ah->totalAdcDcOffsetIEvenPhase[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
- ah->totalAdcDcOffsetQOddPhase[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ah->totalAdcDcOffsetQEvenPhase[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
- "oddq=0x%08x; evenq=0x%08x;\n",
- ah->cal_samples, i,
- ah->totalAdcDcOffsetIOddPhase[i],
- ah->totalAdcDcOffsetIEvenPhase[i],
- ah->totalAdcDcOffsetQOddPhase[i],
- ah->totalAdcDcOffsetQEvenPhase[i]);
- }
-}
-
-static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 powerMeasQ, powerMeasI, iqCorrMeas;
- u32 qCoffDenom, iCoffDenom;
- int32_t qCoff, iCoff;
- int iqCorrNeg, i;
-
- for (i = 0; i < numChains; i++) {
- powerMeasI = ah->totalPowerMeasI[i];
- powerMeasQ = ah->totalPowerMeasQ[i];
- iqCorrMeas = ah->totalIqCorrMeas[i];
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Starting IQ Cal and Correction for Chain %d\n",
- i);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Orignal: Chn %diq_corr_meas = 0x%08x\n",
- i, ah->totalIqCorrMeas[i]);
-
- iqCorrNeg = 0;
-
- if (iqCorrMeas > 0x80000000) {
- iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
- iqCorrNeg = 1;
- }
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
- ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
- iqCorrNeg);
-
- iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
- qCoffDenom = powerMeasQ / 64;
-
- if ((powerMeasQ != 0) && (iCoffDenom != 0) &&
- (qCoffDenom != 0)) {
- iCoff = iqCorrMeas / iCoffDenom;
- qCoff = powerMeasI / qCoffDenom - 64;
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d iCoff = 0x%08x\n", i, iCoff);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d qCoff = 0x%08x\n", i, qCoff);
-
- iCoff = iCoff & 0x3f;
- ath_print(common, ATH_DBG_CALIBRATE,
- "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
- if (iqCorrNeg == 0x0)
- iCoff = 0x40 - iCoff;
-
- if (qCoff > 15)
- qCoff = 15;
- else if (qCoff <= -16)
- qCoff = 16;
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
- i, iCoff, qCoff);
-
- REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
- AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
- iCoff);
- REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
- AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
- qCoff);
- ath_print(common, ATH_DBG_CALIBRATE,
- "IQ Cal and Correction done for Chain %d\n",
- i);
- }
- }
-
- REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
- AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
-}
-
-static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
- u32 qGainMismatch, iGainMismatch, val, i;
-
- for (i = 0; i < numChains; i++) {
- iOddMeasOffset = ah->totalAdcIOddPhase[i];
- iEvenMeasOffset = ah->totalAdcIEvenPhase[i];
- qOddMeasOffset = ah->totalAdcQOddPhase[i];
- qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Starting ADC Gain Cal for Chain %d\n", i);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
- iOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_i = 0x%08x\n", i,
- iEvenMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
- qOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_q = 0x%08x\n", i,
- qEvenMeasOffset);
-
- if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
- iGainMismatch =
- ((iEvenMeasOffset * 32) /
- iOddMeasOffset) & 0x3f;
- qGainMismatch =
- ((qOddMeasOffset * 32) /
- qEvenMeasOffset) & 0x3f;
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d gain_mismatch_i = 0x%08x\n", i,
- iGainMismatch);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d gain_mismatch_q = 0x%08x\n", i,
- qGainMismatch);
-
- val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
- val &= 0xfffff000;
- val |= (qGainMismatch) | (iGainMismatch << 6);
- REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "ADC Gain Cal done for Chain %d\n", i);
- }
- }
-
- REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
- REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
- AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
-}
-
-static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 iOddMeasOffset, iEvenMeasOffset, val, i;
- int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
- const struct ath9k_percal_data *calData =
- ah->cal_list_curr->calData;
- u32 numSamples =
- (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
-
- for (i = 0; i < numChains; i++) {
- iOddMeasOffset = ah->totalAdcDcOffsetIOddPhase[i];
- iEvenMeasOffset = ah->totalAdcDcOffsetIEvenPhase[i];
- qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
- qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Starting ADC DC Offset Cal for Chain %d\n", i);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_i = %d\n", i,
- iOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_i = %d\n", i,
- iEvenMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_q = %d\n", i,
- qOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_q = %d\n", i,
- qEvenMeasOffset);
-
- iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
- numSamples) & 0x1ff;
- qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
- numSamples) & 0x1ff;
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
- iDcMismatch);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
- qDcMismatch);
-
- val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
- val &= 0xc0000fff;
- val |= (qDcMismatch << 12) | (iDcMismatch << 21);
- REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "ADC DC Offset Cal done for Chain %d\n", i);
- }
-
- REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
- REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
- AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
-}
-
/* This is done for the currently configured channel */
bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
{
@@ -605,72 +172,6 @@ void ath9k_hw_start_nfcal(struct ath_hw *ah)
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
}
-void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath9k_nfcal_hist *h;
- int i, j;
- int32_t val;
- const u32 ar5416_cca_regs[6] = {
- AR_PHY_CCA,
- AR_PHY_CH1_CCA,
- AR_PHY_CH2_CCA,
- AR_PHY_EXT_CCA,
- AR_PHY_CH1_EXT_CCA,
- AR_PHY_CH2_EXT_CCA
- };
- u8 chainmask, rx_chain_status;
-
- rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
- if (AR_SREV_9285(ah))
- chainmask = 0x9;
- else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
- if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
- chainmask = 0x1B;
- else
- chainmask = 0x09;
- } else {
- if (rx_chain_status & 0x4)
- chainmask = 0x3F;
- else if (rx_chain_status & 0x2)
- chainmask = 0x1B;
- else
- chainmask = 0x09;
- }
-
- h = ah->nfCalHist;
-
- for (i = 0; i < NUM_NF_READINGS; i++) {
- if (chainmask & (1 << i)) {
- val = REG_READ(ah, ar5416_cca_regs[i]);
- val &= 0xFFFFFE00;
- val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
- REG_WRITE(ah, ar5416_cca_regs[i], val);
- }
- }
-
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_ENABLE_NF);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
-
- for (j = 0; j < 5; j++) {
- if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
- AR_PHY_AGC_CONTROL_NF) == 0)
- break;
- udelay(50);
- }
-
- for (i = 0; i < NUM_NF_READINGS; i++) {
- if (chainmask & (1 << i)) {
- val = REG_READ(ah, ar5416_cca_regs[i]);
- val &= 0xFFFFFE00;
- val |= (((u32) (-50) << 1) & 0x1ff);
- REG_WRITE(ah, ar5416_cca_regs[i], val);
- }
- }
-}
-
int16_t ath9k_hw_getnf(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -690,7 +191,7 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah,
} else {
ath9k_hw_do_getnf(ah, nfarray);
nf = nfarray[0];
- if (getNoiseFloorThresh(ah, c->band, &nfThresh)
+ if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
&& nf > nfThresh) {
ath_print(common, ATH_DBG_CALIBRATE,
"noise floor failed detected; "
@@ -715,7 +216,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah)
if (AR_SREV_9280(ah))
noise_floor = AR_PHY_CCA_MAX_AR9280_GOOD_VALUE;
- else if (AR_SREV_9285(ah))
+ else if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
noise_floor = AR_PHY_CCA_MAX_AR9285_GOOD_VALUE;
else if (AR_SREV_9287(ah))
noise_floor = AR_PHY_CCA_MAX_AR9287_GOOD_VALUE;
@@ -748,508 +249,3 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
return nf;
}
EXPORT_SYMBOL(ath9k_hw_getchan_noise);
-
-static void ath9k_olc_temp_compensation_9287(struct ath_hw *ah)
-{
- u32 rddata;
- int32_t delta, currPDADC, slope;
-
- rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
- currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
-
- if (ah->initPDADC == 0 || currPDADC == 0) {
- /*
- * Zero value indicates that no frames have been transmitted yet,
- * can't do temperature compensation until frames are transmitted.
- */
- return;
- } else {
- slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
-
- if (slope == 0) { /* to avoid divide by zero case */
- delta = 0;
- } else {
- delta = ((currPDADC - ah->initPDADC)*4) / slope;
- }
- REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
- AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
- REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
- AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
- }
-}
-
-static void ath9k_olc_temp_compensation(struct ath_hw *ah)
-{
- u32 rddata, i;
- int delta, currPDADC, regval;
-
- if (OLC_FOR_AR9287_10_LATER) {
- ath9k_olc_temp_compensation_9287(ah);
- } else {
- rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
- currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
-
- if (ah->initPDADC == 0 || currPDADC == 0) {
- return;
- } else {
- if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
- delta = (currPDADC - ah->initPDADC + 4) / 8;
- else
- delta = (currPDADC - ah->initPDADC + 5) / 10;
-
- if (delta != ah->PDADCdelta) {
- ah->PDADCdelta = delta;
- for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
- regval = ah->originalGain[i] - delta;
- if (regval < 0)
- regval = 0;
-
- REG_RMW_FIELD(ah,
- AR_PHY_TX_GAIN_TBL1 + i * 4,
- AR_PHY_TX_GAIN, regval);
- }
- }
- }
- }
-}
-
-static void ath9k_hw_9271_pa_cal(struct ath_hw *ah, bool is_reset)
-{
- u32 regVal;
- unsigned int i;
- u32 regList [][2] = {
- { 0x786c, 0 },
- { 0x7854, 0 },
- { 0x7820, 0 },
- { 0x7824, 0 },
- { 0x7868, 0 },
- { 0x783c, 0 },
- { 0x7838, 0 } ,
- { 0x7828, 0 } ,
- };
-
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- regList[i][1] = REG_READ(ah, regList[i][0]);
-
- regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1));
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal |= (0x1 << 27);
- REG_WRITE(ah, 0x9808, regVal);
-
- /* 786c,b23,1, pwddac=1 */
- REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
- /* 7854, b5,1, pdrxtxbb=1 */
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
- /* 7854, b7,1, pdv2i=1 */
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
- /* 7854, b8,1, pddacinterface=1 */
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
- /* 7824,b12,0, offcal=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
- /* 7838, b1,0, pwddb=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
- /* 7820,b11,0, enpacal=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
- /* 7820,b25,1, pdpadrv1=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
- /* 7820,b24,0, pdpadrv2=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1,AR9285_AN_RF2G1_PDPADRV2,0);
- /* 7820,b23,0, pdpaout=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
- /* 783c,b14-16,7, padrvgn2tab_0=7 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G8,AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
- /*
- * 7838,b29-31,0, padrvgn1tab_0=0
- * does not matter since we turn it off
- */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7,AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
-
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);
-
- /* Set:
- * localmode=1,bmode=1,bmoderxtx=1,synthon=1,
- * txon=1,paon=1,oscon=1,synthon_force=1
- */
- REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
- udelay(30);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0);
-
- /* find off_6_1; */
- for (i = 6; i > 0; i--) {
- regVal = REG_READ(ah, 0x7834);
- regVal |= (1 << (20 + i));
- REG_WRITE(ah, 0x7834, regVal);
- udelay(1);
- //regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1 << (20 + i)));
- regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9)
- << (20 + i));
- REG_WRITE(ah, 0x7834, regVal);
- }
-
- regVal = (regVal >>20) & 0x7f;
-
- /* Update PA cal info */
- if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
- if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
- ah->pacal_info.max_skipcount =
- 2 * ah->pacal_info.max_skipcount;
- ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
- } else {
- ah->pacal_info.max_skipcount = 1;
- ah->pacal_info.skipcount = 0;
- ah->pacal_info.prev_offset = regVal;
- }
-
- regVal = REG_READ(ah, 0x7834);
- regVal |= 0x1;
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal &= (~(0x1 << 27));
- REG_WRITE(ah, 0x9808, regVal);
-
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- REG_WRITE(ah, regList[i][0], regList[i][1]);
-}
-
-static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 regVal;
- int i, offset, offs_6_1, offs_0;
- u32 ccomp_org, reg_field;
- u32 regList[][2] = {
- { 0x786c, 0 },
- { 0x7854, 0 },
- { 0x7820, 0 },
- { 0x7824, 0 },
- { 0x7868, 0 },
- { 0x783c, 0 },
- { 0x7838, 0 },
- };
-
- ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
-
- /* PA CAL is not needed for high power solution */
- if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
- AR5416_EEP_TXGAIN_HIGH_POWER)
- return;
-
- if (AR_SREV_9285_11(ah)) {
- REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
- udelay(10);
- }
-
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- regList[i][1] = REG_READ(ah, regList[i][0]);
-
- regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1));
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal |= (0x1 << 27);
- REG_WRITE(ah, 0x9808, regVal);
-
- REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
- ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 0xf);
-
- REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
- udelay(30);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0);
-
- for (i = 6; i > 0; i--) {
- regVal = REG_READ(ah, 0x7834);
- regVal |= (1 << (19 + i));
- REG_WRITE(ah, 0x7834, regVal);
- udelay(1);
- regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1 << (19 + i)));
- reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
- regVal |= (reg_field << (19 + i));
- REG_WRITE(ah, 0x7834, regVal);
- }
-
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1);
- udelay(1);
- reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9), AR9285_AN_RXTXBB1_SPARE9);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, reg_field);
- offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS);
- offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP);
-
- offset = (offs_6_1<<1) | offs_0;
- offset = offset - 0;
- offs_6_1 = offset>>1;
- offs_0 = offset & 1;
-
- if ((!is_reset) && (ah->pacal_info.prev_offset == offset)) {
- if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
- ah->pacal_info.max_skipcount =
- 2 * ah->pacal_info.max_skipcount;
- ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
- } else {
- ah->pacal_info.max_skipcount = 1;
- ah->pacal_info.skipcount = 0;
- ah->pacal_info.prev_offset = offset;
- }
-
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0);
-
- regVal = REG_READ(ah, 0x7834);
- regVal |= 0x1;
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal &= (~(0x1 << 27));
- REG_WRITE(ah, 0x9808, regVal);
-
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- REG_WRITE(ah, regList[i][0], regList[i][1]);
-
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);
-
- if (AR_SREV_9285_11(ah))
- REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
-
-}
-
-bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
- u8 rxchainmask, bool longcal)
-{
- bool iscaldone = true;
- struct ath9k_cal_list *currCal = ah->cal_list_curr;
-
- if (currCal &&
- (currCal->calState == CAL_RUNNING ||
- currCal->calState == CAL_WAITING)) {
- iscaldone = ath9k_hw_per_calibration(ah, chan,
- rxchainmask, currCal);
- if (iscaldone) {
- ah->cal_list_curr = currCal = currCal->calNext;
-
- if (currCal->calState == CAL_WAITING) {
- iscaldone = false;
- ath9k_hw_reset_calibration(ah, currCal);
- }
- }
- }
-
- /* Do NF cal only at longer intervals */
- if (longcal) {
- /* Do periodic PAOffset Cal */
- if (AR_SREV_9271(ah))
- ath9k_hw_9271_pa_cal(ah, false);
- else if (AR_SREV_9285_11_OR_LATER(ah)) {
- if (!ah->pacal_info.skipcount)
- ath9k_hw_9285_pa_cal(ah, false);
- else
- ah->pacal_info.skipcount--;
- }
-
- if (OLC_FOR_AR9280_20_LATER || OLC_FOR_AR9287_10_LATER)
- ath9k_olc_temp_compensation(ah);
-
- /* Get the value from the previous NF cal and update history buffer */
- ath9k_hw_getnf(ah, chan);
-
- /*
- * Load the NF from history buffer of the current channel.
- * NF is slow time-variant, so it is OK to use a historical value.
- */
- ath9k_hw_loadnf(ah, ah->curchan);
-
- ath9k_hw_start_nfcal(ah);
- }
-
- return iscaldone;
-}
-EXPORT_SYMBOL(ath9k_hw_calibrate);
-
-/* Carrier leakage Calibration fix */
-static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath_common *common = ath9k_hw_common(ah);
-
- REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
- if (IS_CHAN_HT20(chan)) {
- REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
- REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_FLTR_CAL);
- REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE, "offset "
- "calibration failed to complete in "
- "1ms; noisy ??\n");
- return false;
- }
- REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
- REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
- REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
- }
- REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
- REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
- 0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE, "offset calibration "
- "failed to complete in 1ms; noisy ??\n");
- return false;
- }
-
- REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
-
- return true;
-}
-
-bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
- if (!ar9285_clc(ah, chan))
- return false;
- } else {
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- if (!AR_SREV_9287_10_OR_LATER(ah))
- REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
- AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_FLTR_CAL);
- }
-
- /* Calibrate the AGC */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
- AR_PHY_AGC_CONTROL_CAL);
-
- /* Poll for offset calibration complete */
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
- 0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "offset calibration failed to "
- "complete in 1ms; noisy environment?\n");
- return false;
- }
-
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- if (!AR_SREV_9287_10_OR_LATER(ah))
- REG_SET_BIT(ah, AR_PHY_ADC_CTL,
- AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_FLTR_CAL);
- }
- }
-
- /* Do PA Calibration */
- if (AR_SREV_9271(ah))
- ath9k_hw_9271_pa_cal(ah, true);
- else if (AR_SREV_9285_11_OR_LATER(ah))
- ath9k_hw_9285_pa_cal(ah, true);
-
- /* Do NF Calibration after DC offset and other calibrations */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF);
-
- ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
-
- /* Enable IQ, ADC Gain and ADC DC offset CALs */
- if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
- if (ath9k_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
- INIT_CAL(&ah->adcgain_caldata);
- INSERT_CAL(ah, &ah->adcgain_caldata);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling ADC Gain Calibration.\n");
- }
- if (ath9k_hw_iscal_supported(ah, ADC_DC_CAL)) {
- INIT_CAL(&ah->adcdc_caldata);
- INSERT_CAL(ah, &ah->adcdc_caldata);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling ADC DC Calibration.\n");
- }
- if (ath9k_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
- INIT_CAL(&ah->iq_caldata);
- INSERT_CAL(ah, &ah->iq_caldata);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling IQ Calibration.\n");
- }
-
- ah->cal_list_curr = ah->cal_list;
-
- if (ah->cal_list_curr)
- ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
- }
-
- chan->CalValid = 0;
-
- return true;
-}
-
-const struct ath9k_percal_data iq_cal_multi_sample = {
- IQ_MISMATCH_CAL,
- MAX_CAL_SAMPLES,
- PER_MIN_LOG_COUNT,
- ath9k_hw_iqcal_collect,
- ath9k_hw_iqcalibrate
-};
-const struct ath9k_percal_data iq_cal_single_sample = {
- IQ_MISMATCH_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
- ath9k_hw_iqcal_collect,
- ath9k_hw_iqcalibrate
-};
-const struct ath9k_percal_data adc_gain_cal_multi_sample = {
- ADC_GAIN_CAL,
- MAX_CAL_SAMPLES,
- PER_MIN_LOG_COUNT,
- ath9k_hw_adc_gaincal_collect,
- ath9k_hw_adc_gaincal_calibrate
-};
-const struct ath9k_percal_data adc_gain_cal_single_sample = {
- ADC_GAIN_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
- ath9k_hw_adc_gaincal_collect,
- ath9k_hw_adc_gaincal_calibrate
-};
-const struct ath9k_percal_data adc_dc_cal_multi_sample = {
- ADC_DC_CAL,
- MAX_CAL_SAMPLES,
- PER_MIN_LOG_COUNT,
- ath9k_hw_adc_dccal_collect,
- ath9k_hw_adc_dccal_calibrate
-};
-const struct ath9k_percal_data adc_dc_cal_single_sample = {
- ADC_DC_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
- ath9k_hw_adc_dccal_collect,
- ath9k_hw_adc_dccal_calibrate
-};
-const struct ath9k_percal_data adc_init_dc_cal = {
- ADC_DC_INIT_CAL,
- MIN_CAL_SAMPLES,
- INIT_LOG_COUNT,
- ath9k_hw_adc_dccal_collect,
- ath9k_hw_adc_dccal_calibrate
-};
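A recurring idiom in the per-chain noise-floor reads removed here is the 9-bit sign extension of the MINCCA_PWR fields: the hardware reports the minimum CCA power as a 9-bit two's-complement number, and the "if (nf & 0x100)" pattern turns it into a negative dBm value (0x1a5 becomes -91, for example). A minimal self-contained sketch of that conversion, as an illustration only (the helper name is not from the patch):

/*
 * Illustrative helper: sign-extend a 9-bit two's-complement
 * MINCCA_PWR reading, exactly as the pattern above does.
 * E.g. 0x1a5 -> -91.
 */
static inline s16 nf_sign_extend(u16 raw)
{
	s16 nf = raw & 0x1ff;

	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);	/* equivalent to nf - 0x200 */

	return nf;
}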
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index b2c873e..24538bd 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -19,14 +19,6 @@
#include "hw.h"
-extern const struct ath9k_percal_data iq_cal_multi_sample;
-extern const struct ath9k_percal_data iq_cal_single_sample;
-extern const struct ath9k_percal_data adc_gain_cal_multi_sample;
-extern const struct ath9k_percal_data adc_gain_cal_single_sample;
-extern const struct ath9k_percal_data adc_dc_cal_multi_sample;
-extern const struct ath9k_percal_data adc_dc_cal_single_sample;
-extern const struct ath9k_percal_data adc_init_dc_cal;
-
#define AR_PHY_CCA_MAX_AR5416_GOOD_VALUE -85
#define AR_PHY_CCA_MAX_AR9280_GOOD_VALUE -112
#define AR_PHY_CCA_MAX_AR9285_GOOD_VALUE -118
@@ -76,7 +68,8 @@ enum ath9k_cal_types {
ADC_DC_INIT_CAL = 0x1,
ADC_GAIN_CAL = 0x2,
ADC_DC_CAL = 0x4,
- IQ_MISMATCH_CAL = 0x8
+ IQ_MISMATCH_CAL = 0x8,
+ TEMP_COMP_CAL = 0x10,
};
enum ath9k_cal_state {
@@ -122,14 +115,12 @@ struct ath9k_pacal_info{
bool ath9k_hw_reset_calvalid(struct ath_hw *ah);
void ath9k_hw_start_nfcal(struct ath_hw *ah);
-void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
int16_t ath9k_hw_getnf(struct ath_hw *ah,
struct ath9k_channel *chan);
void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah);
s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
-bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
- u8 rxchainmask, bool longcal);
-bool ath9k_hw_init_cal(struct ath_hw *ah,
- struct ath9k_channel *chan);
+void ath9k_hw_reset_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal);
+
#endif /* CALIB_H */
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 4d775ae..7707341 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -57,13 +57,19 @@ static bool ath9k_rx_accept(struct ath_common *common,
* rs_more indicates chained descriptors which can be used
* to link buffers together for a sort of scatter-gather
* operation.
- *
+ * Reject the frame: we don't support scatter-gather yet and
+ * the frame is probably corrupt anyway.
+ */
+ if (rx_stats->rs_more)
+ return false;
+
+ /*
* The rx_stats->rs_status will not be set until the end of the
* chained descriptors so it can be ignored if rs_more is set. The
* rs_more will be false at the last element of the chained
* descriptors.
*/
- if (!rx_stats->rs_more && rx_stats->rs_status != 0) {
+ if (rx_stats->rs_status != 0) {
if (rx_stats->rs_status & ATH9K_RXERR_CRC)
rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
if (rx_stats->rs_status & ATH9K_RXERR_PHY)
@@ -102,11 +108,11 @@ static bool ath9k_rx_accept(struct ath_common *common,
return true;
}
-static u8 ath9k_process_rate(struct ath_common *common,
- struct ieee80211_hw *hw,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs,
- struct sk_buff *skb)
+static int ath9k_process_rate(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ struct sk_buff *skb)
{
struct ieee80211_supported_band *sband;
enum ieee80211_band band;
@@ -122,25 +128,32 @@ static u8 ath9k_process_rate(struct ath_common *common,
rxs->flag |= RX_FLAG_40MHZ;
if (rx_stats->rs_flags & ATH9K_RX_GI)
rxs->flag |= RX_FLAG_SHORT_GI;
- return rx_stats->rs_rate & 0x7f;
+ rxs->rate_idx = rx_stats->rs_rate & 0x7f;
+ return 0;
}
for (i = 0; i < sband->n_bitrates; i++) {
- if (sband->bitrates[i].hw_value == rx_stats->rs_rate)
- return i;
+ if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
+ rxs->rate_idx = i;
+ return 0;
+ }
if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
rxs->flag |= RX_FLAG_SHORTPRE;
- return i;
+ rxs->rate_idx = i;
+ return 0;
}
}
- /* No valid hardware bitrate found -- we should not get here */
+ /*
+ * No valid hardware bitrate found -- we should not get here
+ * because hardware has already validated this frame as OK.
+ */
ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
"0x%02x using 1 Mbit\n", rx_stats->rs_rate);
if ((common->debug_mask & ATH_DBG_XMIT))
print_hex_dump_bytes("", DUMP_PREFIX_NONE, skb->data, skb->len);
- return 0;
+ return -EINVAL;
}
static void ath9k_process_rssi(struct ath_common *common,
@@ -202,17 +215,22 @@ int ath9k_cmn_rx_skb_preprocess(struct ath_common *common,
struct ath_hw *ah = common->ah;
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
+ /*
+ * Everything but the rate is checked here; the rate check is done
+ * separately to avoid doing two rate lookups per frame.
+ */
if (!ath9k_rx_accept(common, skb, rx_status, rx_stats, decrypt_error))
return -EINVAL;
ath9k_process_rssi(common, hw, skb, rx_stats);
- rx_status->rate_idx = ath9k_process_rate(common, hw,
- rx_stats, rx_status, skb);
+ if (ath9k_process_rate(common, hw, rx_stats, rx_status, skb))
+ return -EINVAL;
+
rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp);
rx_status->band = hw->conf.channel->band;
rx_status->freq = hw->conf.channel->center_freq;
- rx_status->noise = common->ani.noise_floor;
rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
rx_status->antenna = rx_stats->rs_antenna;
rx_status->flag |= RX_FLAG_TSFT;
@@ -255,7 +273,8 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
keyix = rx_stats->rs_keyix;
- if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
+ if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
+ ieee80211_has_protected(fc)) {
rxs->flag |= RX_FLAG_DECRYPTED;
} else if (ieee80211_has_protected(fc)
&& !decrypt_error && skb->len >= hdrlen + 4) {
@@ -286,6 +305,345 @@ int ath9k_cmn_padpos(__le16 frame_control)
}
EXPORT_SYMBOL(ath9k_cmn_padpos);
+int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+
+ if (tx_info->control.hw_key) {
+ if (tx_info->control.hw_key->alg == ALG_WEP)
+ return ATH9K_KEY_TYPE_WEP;
+ else if (tx_info->control.hw_key->alg == ALG_TKIP)
+ return ATH9K_KEY_TYPE_TKIP;
+ else if (tx_info->control.hw_key->alg == ALG_CCMP)
+ return ATH9K_KEY_TYPE_AES;
+ }
+
+ return ATH9K_KEY_TYPE_CLEAR;
+}
+EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
+
+static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ u32 chanmode = 0;
+
+ switch (chan->band) {
+ case IEEE80211_BAND_2GHZ:
+ switch (channel_type) {
+ case NL80211_CHAN_NO_HT:
+ case NL80211_CHAN_HT20:
+ chanmode = CHANNEL_G_HT20;
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ chanmode = CHANNEL_G_HT40PLUS;
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ chanmode = CHANNEL_G_HT40MINUS;
+ break;
+ }
+ break;
+ case IEEE80211_BAND_5GHZ:
+ switch (channel_type) {
+ case NL80211_CHAN_NO_HT:
+ case NL80211_CHAN_HT20:
+ chanmode = CHANNEL_A_HT20;
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ chanmode = CHANNEL_A_HT40PLUS;
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ chanmode = CHANNEL_A_HT40MINUS;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return chanmode;
+}
+
+/*
+ * Update internal channel flags.
+ */
+void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
+ struct ath9k_channel *ichan)
+{
+ struct ieee80211_channel *chan = hw->conf.channel;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ ichan->channel = chan->center_freq;
+ ichan->chan = chan;
+
+ if (chan->band == IEEE80211_BAND_2GHZ) {
+ ichan->chanmode = CHANNEL_G;
+ ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G;
+ } else {
+ ichan->chanmode = CHANNEL_A;
+ ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
+ }
+
+ if (conf_is_ht(conf))
+ ichan->chanmode = ath9k_get_extchanmode(chan,
+ conf->channel_type);
+}
+EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
+
+/*
+ * Get the internal channel reference.
+ */
+struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
+ struct ath_hw *ah)
+{
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ struct ath9k_channel *channel;
+ u8 chan_idx;
+
+ chan_idx = curchan->hw_value;
+ channel = &ah->channels[chan_idx];
+ ath9k_cmn_update_ichannel(hw, channel);
+
+ return channel;
+}
+EXPORT_SYMBOL(ath9k_cmn_get_curchannel);
+
+static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
+ struct ath9k_keyval *hk, const u8 *addr,
+ bool authenticator)
+{
+ struct ath_hw *ah = common->ah;
+ const u8 *key_rxmic;
+ const u8 *key_txmic;
+
+ key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
+ key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
+
+ if (addr == NULL) {
+ /*
+ * Group key installation - only two key cache entries are used
+ * regardless of splitmic capability since group key is only
+ * used either for TX or RX.
+ */
+ if (authenticator) {
+ memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
+ memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
+ } else {
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
+ }
+ return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
+ }
+ if (!common->splitmic) {
+ /* TX and RX keys share the same key cache entry. */
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
+ return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
+ }
+
+ /* Separate key cache entries for TX and RX */
+
+ /* TX key goes at first index, RX key at +32. */
+ memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
+ if (!ath9k_hw_set_keycache_entry(ah, keyix, hk, NULL)) {
+ /* TX MIC entry failed. No need to proceed further */
+ ath_print(common, ATH_DBG_FATAL,
+ "Setting TX MIC Key Failed\n");
+ return 0;
+ }
+
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ /* XXX delete tx key on failure? */
+ return ath9k_hw_set_keycache_entry(ah, keyix + 32, hk, addr);
+}
+
+static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
+{
+ int i;
+
+ for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
+ if (test_bit(i, common->keymap) ||
+ test_bit(i + 64, common->keymap))
+ continue; /* At least one part of TKIP key allocated */
+ if (common->splitmic &&
+ (test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ continue; /* At least one part of TKIP key allocated */
+
+ /* Found a free slot for a TKIP key */
+ return i;
+ }
+ return -1;
+}
+
+static int ath_reserve_key_cache_slot(struct ath_common *common)
+{
+ int i;
+
+ /* First, try to find slots that would not be available for TKIP. */
+ if (common->splitmic) {
+ for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
+ if (!test_bit(i, common->keymap) &&
+ (test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ return i;
+ if (!test_bit(i + 32, common->keymap) &&
+ (test_bit(i, common->keymap) ||
+ test_bit(i + 64, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ return i + 32;
+ if (!test_bit(i + 64, common->keymap) &&
+ (test_bit(i, common->keymap) ||
+ test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ return i + 64;
+ if (!test_bit(i + 64 + 32, common->keymap) &&
+ (test_bit(i, common->keymap) ||
+ test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64, common->keymap)))
+ return i + 64 + 32;
+ }
+ } else {
+ for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
+ if (!test_bit(i, common->keymap) &&
+ test_bit(i + 64, common->keymap))
+ return i;
+ if (test_bit(i, common->keymap) &&
+ !test_bit(i + 64, common->keymap))
+ return i + 64;
+ }
+ }
+
+ /* No partially used TKIP slots, pick any available slot */
+ for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
+ /* Do not allow slots that could be needed for TKIP group keys
+ * to be used. This limitation could be removed if we know that
+ * TKIP will not be used. */
+ if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
+ continue;
+ if (common->splitmic) {
+ if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
+ continue;
+ if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
+ continue;
+ }
+
+ if (!test_bit(i, common->keymap))
+ return i; /* Found a free slot for a key */
+ }
+
+ /* No free slot found */
+ return -1;
+}
+
+/*
+ * Configure encryption in the HW.
+ */
+int ath9k_cmn_key_config(struct ath_common *common,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath_hw *ah = common->ah;
+ struct ath9k_keyval hk;
+ const u8 *mac = NULL;
+ int ret = 0;
+ int idx;
+
+ memset(&hk, 0, sizeof(hk));
+
+ switch (key->alg) {
+ case ALG_WEP:
+ hk.kv_type = ATH9K_CIPHER_WEP;
+ break;
+ case ALG_TKIP:
+ hk.kv_type = ATH9K_CIPHER_TKIP;
+ break;
+ case ALG_CCMP:
+ hk.kv_type = ATH9K_CIPHER_AES_CCM;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ hk.kv_len = key->keylen;
+ memcpy(hk.kv_val, key->key, key->keylen);
+
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ /* For now, use the default keys for broadcast keys. This may
+ * need to change with virtual interfaces. */
+ idx = key->keyidx;
+ } else if (key->keyidx) {
+ if (WARN_ON(!sta))
+ return -EOPNOTSUPP;
+ mac = sta->addr;
+
+ if (vif->type != NL80211_IFTYPE_AP) {
+ /* Only keyidx 0 should be used with unicast key, but
+ * allow this for client mode for now. */
+ idx = key->keyidx;
+ } else
+ return -EIO;
+ } else {
+ if (WARN_ON(!sta))
+ return -EOPNOTSUPP;
+ mac = sta->addr;
+
+ if (key->alg == ALG_TKIP)
+ idx = ath_reserve_key_cache_slot_tkip(common);
+ else
+ idx = ath_reserve_key_cache_slot(common);
+ if (idx < 0)
+ return -ENOSPC; /* no free key cache entries */
+ }
+
+ if (key->alg == ALG_TKIP)
+ ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
+ vif->type == NL80211_IFTYPE_AP);
+ else
+ ret = ath9k_hw_set_keycache_entry(ah, idx, &hk, mac);
+
+ if (!ret)
+ return -EIO;
+
+ set_bit(idx, common->keymap);
+ if (key->alg == ALG_TKIP) {
+ set_bit(idx + 64, common->keymap);
+ if (common->splitmic) {
+ set_bit(idx + 32, common->keymap);
+ set_bit(idx + 64 + 32, common->keymap);
+ }
+ }
+
+ return idx;
+}
+EXPORT_SYMBOL(ath9k_cmn_key_config);
+
+/*
+ * Delete Key.
+ */
+void ath9k_cmn_key_delete(struct ath_common *common,
+ struct ieee80211_key_conf *key)
+{
+ struct ath_hw *ah = common->ah;
+
+ ath9k_hw_keyreset(ah, key->hw_key_idx);
+ if (key->hw_key_idx < IEEE80211_WEP_NKID)
+ return;
+
+ clear_bit(key->hw_key_idx, common->keymap);
+ if (key->alg != ALG_TKIP)
+ return;
+
+ clear_bit(key->hw_key_idx + 64, common->keymap);
+ if (common->splitmic) {
+ ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
+ clear_bit(key->hw_key_idx + 32, common->keymap);
+ clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
+ }
+}
+EXPORT_SYMBOL(ath9k_cmn_key_delete);
+
static int __init ath9k_cmn_init(void)
{
return 0;
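The key-cache helpers added to common.c encode one TKIP key as up to four hardware entries: slot idx holds the key (the TX key on split-MIC hardware) and idx+64 its MIC, while split-MIC chips additionally use idx+32 for the RX key and idx+64+32 for the RX MIC. The set_bit()/clear_bit() calls in ath9k_cmn_key_config() and ath9k_cmn_key_delete() track exactly those entries in common->keymap. A minimal sketch of the availability test this layout implies, mirroring ath_reserve_key_cache_slot_tkip() above (illustrative helper, not part of the patch; test_bit() is the bitops primitive from <linux/bitops.h>):

/*
 * Illustrative only: a TKIP key at "idx" needs idx and idx+64 free,
 * and on split-MIC hardware also idx+32 and idx+64+32.
 */
static bool tkip_slot_free(const unsigned long *keymap, int idx, bool splitmic)
{
	if (test_bit(idx, keymap) || test_bit(idx + 64, keymap))
		return false;
	if (splitmic &&
	    (test_bit(idx + 32, keymap) || test_bit(idx + 64 + 32, keymap)))
		return false;
	return true;
}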
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 042999c..e08f7e5 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -20,9 +20,12 @@
#include "../debug.h"
#include "hw.h"
+#include "hw-ops.h"
/* Common header for Atheros 802.11n base driver cores */
+#define IEEE80211_WEP_NKID 4
+
#define WME_NUM_TID 16
#define WME_BA_BMP_SIZE 64
#define WME_MAX_BA WME_BA_BMP_SIZE
@@ -74,11 +77,12 @@ struct ath_buf {
an aggregate) */
struct ath_buf *bf_next; /* next subframe in the aggregate */
struct sk_buff *bf_mpdu; /* enclosing frame structure */
- struct ath_desc *bf_desc; /* virtual addr of desc */
+ void *bf_desc; /* virtual addr of desc */
dma_addr_t bf_daddr; /* physical addr of desc */
dma_addr_t bf_buf_addr; /* physical addr of data buffer */
bool bf_stale;
bool bf_isnullfunc;
+ bool bf_tx_aborted;
u16 bf_flags;
struct ath_buf_state bf_state;
dma_addr_t bf_dmacontext;
@@ -125,3 +129,14 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
bool decrypt_error);
int ath9k_cmn_padpos(__le16 frame_control);
+int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
+void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
+ struct ath9k_channel *ichan);
+struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
+ struct ath_hw *ah);
+int ath9k_cmn_key_config(struct ath_common *common,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+void ath9k_cmn_key_delete(struct ath_common *common,
+ struct ieee80211_key_conf *key);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 081e008..29898f8 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -78,6 +78,90 @@ static const struct file_operations fops_debug = {
#define DMA_BUF_LEN 1024
+static ssize_t read_file_tx_chainmask(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ unsigned int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%08x\n", common->tx_chainmask);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx_chainmask(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long mask;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EINVAL;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &mask))
+ return -EINVAL;
+
+ common->tx_chainmask = mask;
+ sc->sc_ah->caps.tx_chainmask = mask;
+ return count;
+}
+
+static const struct file_operations fops_tx_chainmask = {
+ .read = read_file_tx_chainmask,
+ .write = write_file_tx_chainmask,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+
+static ssize_t read_file_rx_chainmask(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ unsigned int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%08x\n", common->rx_chainmask);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_rx_chainmask(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long mask;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EINVAL;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &mask))
+ return -EINVAL;
+
+ common->rx_chainmask = mask;
+ sc->sc_ah->caps.rx_chainmask = mask;
+ return count;
+}
+
+static const struct file_operations fops_rx_chainmask = {
+ .read = read_file_rx_chainmask,
+ .write = write_file_rx_chainmask,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+
static ssize_t read_file_dma(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -157,10 +241,10 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
"txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
- len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x \n",
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
REG_READ_D(ah, AR_OBS_BUS_1));
len += snprintf(buf + len, DMA_BUF_LEN - len,
- "AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR));
+ "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
ath9k_ps_restore(sc);
@@ -180,8 +264,15 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
{
if (status)
sc->debug.stats.istats.total++;
- if (status & ATH9K_INT_RX)
- sc->debug.stats.istats.rxok++;
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (status & ATH9K_INT_RXLP)
+ sc->debug.stats.istats.rxlp++;
+ if (status & ATH9K_INT_RXHP)
+ sc->debug.stats.istats.rxhp++;
+ } else {
+ if (status & ATH9K_INT_RX)
+ sc->debug.stats.istats.rxok++;
+ }
if (status & ATH9K_INT_RXEOL)
sc->debug.stats.istats.rxeol++;
if (status & ATH9K_INT_RXORN)
@@ -223,8 +314,15 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len,
- "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%8s: %10u\n", "RXHP", sc->debug.stats.istats.rxhp);
+ } else {
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
+ }
len += snprintf(buf + len, sizeof(buf) - len,
"%8s: %10u\n", "RXEOL", sc->debug.stats.istats.rxeol);
len += snprintf(buf + len, sizeof(buf) - len,
@@ -557,10 +655,8 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
}
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_buf *bf)
+ struct ath_buf *bf, struct ath_tx_status *ts)
{
- struct ath_desc *ds = bf->bf_desc;
-
if (bf_isampdu(bf)) {
if (bf_isxretried(bf))
TX_STAT_INC(txq->axq_qnum, a_xretries);
@@ -570,17 +666,17 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
TX_STAT_INC(txq->axq_qnum, completed);
}
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO)
+ if (ts->ts_status & ATH9K_TXERR_FIFO)
TX_STAT_INC(txq->axq_qnum, fifo_underrun);
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_XTXOP)
+ if (ts->ts_status & ATH9K_TXERR_XTXOP)
TX_STAT_INC(txq->axq_qnum, xtxop);
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_TIMER_EXPIRED)
+ if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
TX_STAT_INC(txq->axq_qnum, timer_exp);
- if (ds->ds_txstat.ts_flags & ATH9K_TX_DESC_CFG_ERR)
+ if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
TX_STAT_INC(txq->axq_qnum, desc_cfg_err);
- if (ds->ds_txstat.ts_flags & ATH9K_TX_DATA_UNDERRUN)
+ if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
TX_STAT_INC(txq->axq_qnum, data_underrun);
- if (ds->ds_txstat.ts_flags & ATH9K_TX_DELIM_UNDERRUN)
+ if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
TX_STAT_INC(txq->axq_qnum, delim_underrun);
}
@@ -663,30 +759,29 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
#undef PHY_ERR
}
-void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf)
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
{
#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
- struct ath_desc *ds = bf->bf_desc;
u32 phyerr;
- if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
+ if (rs->rs_status & ATH9K_RXERR_CRC)
RX_STAT_INC(crc_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT)
+ if (rs->rs_status & ATH9K_RXERR_DECRYPT)
RX_STAT_INC(decrypt_crc_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC)
+ if (rs->rs_status & ATH9K_RXERR_MIC)
RX_STAT_INC(mic_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_PRE)
+ if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
RX_STAT_INC(pre_delim_crc_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_POST)
+ if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
RX_STAT_INC(post_delim_crc_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RX_DECRYPT_BUSY)
+ if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
RX_STAT_INC(decrypt_busy_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
+ if (rs->rs_status & ATH9K_RXERR_PHY) {
RX_STAT_INC(phy_err);
- phyerr = ds->ds_rxstat.rs_phyerr & 0x24;
+ phyerr = rs->rs_phyerr & 0x24;
RX_PHY_ERR_INC(phyerr);
}
@@ -700,6 +795,86 @@ static const struct file_operations fops_recv = {
.owner = THIS_MODULE
};
+static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%08x\n", sc->debug.regidx);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_regidx(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long regidx;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EINVAL;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &regidx))
+ return -EINVAL;
+
+ sc->debug.regidx = regidx;
+ return count;
+}
+
+static const struct file_operations fops_regidx = {
+ .read = read_file_regidx,
+ .write = write_file_regidx,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+static ssize_t read_file_regval(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ char buf[32];
+ unsigned int len;
+ u32 regval;
+
+ regval = REG_READ_D(ah, sc->debug.regidx);
+ len = snprintf(buf, sizeof(buf), "0x%08x\n", regval);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_regval(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ unsigned long regval;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EINVAL;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &regval))
+ return -EINVAL;
+
+ REG_WRITE_D(ah, sc->debug.regidx, regval);
+ return count;
+}
+
+static const struct file_operations fops_regval = {
+ .read = read_file_regval,
+ .write = write_file_regval,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -711,54 +886,55 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy = debugfs_create_dir(wiphy_name(sc->hw->wiphy),
ath9k_debugfs_root);
if (!sc->debug.debugfs_phy)
- goto err;
+ return -ENOMEM;
#ifdef CONFIG_ATH_DEBUG
- sc->debug.debugfs_debug = debugfs_create_file("debug",
- S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_debug);
- if (!sc->debug.debugfs_debug)
+ if (!debugfs_create_file("debug", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_debug))
goto err;
#endif
- sc->debug.debugfs_dma = debugfs_create_file("dma", S_IRUSR,
- sc->debug.debugfs_phy, sc, &fops_dma);
- if (!sc->debug.debugfs_dma)
+ if (!debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_dma))
+ goto err;
+
+ if (!debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_interrupt))
+ goto err;
+
+ if (!debugfs_create_file("rcstat", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_rcstat))
+ goto err;
+
+ if (!debugfs_create_file("wiphy", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_wiphy))
+ goto err;
+
+ if (!debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_xmit))
goto err;
- sc->debug.debugfs_interrupt = debugfs_create_file("interrupt",
- S_IRUSR,
- sc->debug.debugfs_phy,
- sc, &fops_interrupt);
- if (!sc->debug.debugfs_interrupt)
+ if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_recv))
goto err;
- sc->debug.debugfs_rcstat = debugfs_create_file("rcstat",
- S_IRUSR,
- sc->debug.debugfs_phy,
- sc, &fops_rcstat);
- if (!sc->debug.debugfs_rcstat)
+ if (!debugfs_create_file("rx_chainmask", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_rx_chainmask))
goto err;
- sc->debug.debugfs_wiphy = debugfs_create_file(
- "wiphy", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc,
- &fops_wiphy);
- if (!sc->debug.debugfs_wiphy)
+ if (!debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_tx_chainmask))
goto err;
- sc->debug.debugfs_xmit = debugfs_create_file("xmit",
- S_IRUSR,
- sc->debug.debugfs_phy,
- sc, &fops_xmit);
- if (!sc->debug.debugfs_xmit)
+ if (!debugfs_create_file("regidx", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_regidx))
goto err;
- sc->debug.debugfs_recv = debugfs_create_file("recv",
- S_IRUSR,
- sc->debug.debugfs_phy,
- sc, &fops_recv);
- if (!sc->debug.debugfs_recv)
+ if (!debugfs_create_file("regval", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_regval))
goto err;
+ sc->debug.regidx = 0;
return 0;
err:
ath9k_exit_debug(ah);
@@ -770,14 +946,7 @@ void ath9k_exit_debug(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
- debugfs_remove(sc->debug.debugfs_recv);
- debugfs_remove(sc->debug.debugfs_xmit);
- debugfs_remove(sc->debug.debugfs_wiphy);
- debugfs_remove(sc->debug.debugfs_rcstat);
- debugfs_remove(sc->debug.debugfs_interrupt);
- debugfs_remove(sc->debug.debugfs_dma);
- debugfs_remove(sc->debug.debugfs_debug);
- debugfs_remove(sc->debug.debugfs_phy);
+ debugfs_remove_recursive(sc->debug.debugfs_phy);
}
int ath9k_debug_create_root(void)
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 86780e6..5147b87 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -35,6 +35,8 @@ struct ath_buf;
* struct ath_interrupt_stats - Contains statistics about interrupts
* @total: Total no. of interrupts generated so far
* @rxok: RX with no errors
+ * @rxlp: RX with low priority RX
+ * @rxhp: RX with high priority, uapsd only
* @rxeol: RX with no more RXDESC available
* @rxorn: RX FIFO overrun
* @txok: TX completed at the requested rate
@@ -55,6 +57,8 @@ struct ath_buf;
struct ath_interrupt_stats {
u32 total;
u32 rxok;
+ u32 rxlp;
+ u32 rxhp;
u32 rxeol;
u32 rxorn;
u32 txok;
@@ -149,13 +153,7 @@ struct ath_stats {
struct ath9k_debug {
struct dentry *debugfs_phy;
- struct dentry *debugfs_debug;
- struct dentry *debugfs_dma;
- struct dentry *debugfs_interrupt;
- struct dentry *debugfs_rcstat;
- struct dentry *debugfs_wiphy;
- struct dentry *debugfs_xmit;
- struct dentry *debugfs_recv;
+ u32 regidx;
struct ath_stats stats;
};
@@ -167,8 +165,8 @@ void ath9k_debug_remove_root(void);
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_buf *bf);
-void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf);
+ struct ath_buf *bf, struct ath_tx_status *ts);
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
void ath_debug_stat_retries(struct ath_softc *sc, int rix,
int xretries, int retries, u8 per);
@@ -204,12 +202,13 @@ static inline void ath_debug_stat_rc(struct ath_softc *sc,
static inline void ath_debug_stat_tx(struct ath_softc *sc,
struct ath_txq *txq,
- struct ath_buf *bf)
+ struct ath_buf *bf,
+ struct ath_tx_status *ts)
{
}
static inline void ath_debug_stat_rx(struct ath_softc *sc,
- struct ath_buf *bf)
+ struct ath_rx_status *rs)
{
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index dacaae9..ca8704a 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -36,8 +36,6 @@ void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
if (ah->config.analog_shiftreg)
udelay(100);
-
- return;
}
int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
@@ -256,14 +254,13 @@ int ath9k_hw_eeprom_init(struct ath_hw *ah)
{
int status;
- if (AR_SREV_9287(ah)) {
- ah->eep_map = EEP_MAP_AR9287;
- ah->eep_ops = &eep_AR9287_ops;
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ah->eep_ops = &eep_ar9300_ops;
+ else if (AR_SREV_9287(ah)) {
+ ah->eep_ops = &eep_ar9287_ops;
} else if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
- ah->eep_map = EEP_MAP_4KBITS;
ah->eep_ops = &eep_4k_ops;
} else {
- ah->eep_map = EEP_MAP_DEFAULT;
ah->eep_ops = &eep_def_ops;
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 2f2993b..21354c1 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -19,6 +19,7 @@
#include "../ath.h"
#include <net/cfg80211.h>
+#include "ar9003_eeprom.h"
#define AH_USE_EEPROM 0x1
@@ -93,7 +94,6 @@
*/
#define AR9285_RDEXT_DEFAULT 0x1F
-#define AR_EEPROM_MAC(i) (0x1d+(i))
#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
@@ -155,6 +155,7 @@
#define AR5416_BCHAN_UNUSED 0xFF
#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
#define AR5416_MAX_CHAINS 3
+#define AR9300_MAX_CHAINS 3
#define AR5416_PWR_TABLE_OFFSET_DB -5
/* Rx gain type values */
@@ -249,16 +250,20 @@ enum eeprom_param {
EEP_MINOR_REV,
EEP_TX_MASK,
EEP_RX_MASK,
+ EEP_FSTCLK_5G,
EEP_RXGAIN_TYPE,
- EEP_TXGAIN_TYPE,
EEP_OL_PWRCTRL,
+ EEP_TXGAIN_TYPE,
EEP_RC_CHAIN_MASK,
EEP_DAC_HPWR_5G,
EEP_FRAC_N_5G,
EEP_DEV_TYPE,
EEP_TEMPSENSE_SLOPE,
EEP_TEMPSENSE_SLOPE_PAL_ON,
- EEP_PWR_TABLE_OFFSET
+ EEP_PWR_TABLE_OFFSET,
+ EEP_DRIVE_STRENGTH,
+ EEP_INTERNAL_REGULATOR,
+ EEP_SWREG
};
enum ar5416_rates {
@@ -295,7 +300,8 @@ struct base_eep_header {
u32 binBuildNumber;
u8 deviceType;
u8 pwdclkind;
- u8 futureBase_1[2];
+ u8 fastClk5g;
+ u8 divChain;
u8 rxGainType;
u8 dacHiPwrMode_5G;
u8 openLoopPwrCntl;
@@ -656,13 +662,6 @@ struct ath9k_country_entry {
u8 iso[3];
};
-enum ath9k_eep_map {
- EEP_MAP_DEFAULT = 0x0,
- EEP_MAP_4KBITS,
- EEP_MAP_AR9287,
- EEP_MAP_MAX
-};
-
struct eeprom_ops {
int (*check_eeprom)(struct ath_hw *hw);
u32 (*get_eeprom)(struct ath_hw *hw, enum eeprom_param param);
@@ -713,6 +712,8 @@ int ath9k_hw_eeprom_init(struct ath_hw *ah);
extern const struct eeprom_ops eep_def_ops;
extern const struct eeprom_ops eep_4k_ops;
-extern const struct eeprom_ops eep_AR9287_ops;
+extern const struct eeprom_ops eep_ar9287_ops;
+extern const struct eeprom_ops eep_ar9287_ops;
+extern const struct eeprom_ops eep_ar9300_ops;
#endif /* EEPROM_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 68db166..41a77d1 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "ar9002_phy.h"
static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah)
{
@@ -43,7 +44,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
ath_print(common, ATH_DBG_EEPROM,
- "Unable to read eeprom region \n");
+ "Unable to read eeprom region\n");
return false;
}
eep_data++;
@@ -182,11 +183,11 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
switch (param) {
case EEP_NFTHRESH_2:
return pModal->noiseFloorThreshCh[0];
- case AR_EEPROM_MAC(0):
+ case EEP_MAC_LSW:
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
- case AR_EEPROM_MAC(1):
+ case EEP_MAC_MID:
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
- case AR_EEPROM_MAC(2):
+ case EEP_MAC_MSW:
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
case EEP_REG_0:
return pBase->regDmn[0];
@@ -453,6 +454,8 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
&tMinCalPower, gainBoundaries,
pdadcValues, numXpdGain);
+ ENABLE_REGWRITE_BUFFER(ah);
+
if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset,
SM(pdGainOverlap_t2,
@@ -493,6 +496,9 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
regOffset += 4;
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
}
@@ -758,6 +764,8 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2;
}
+ ENABLE_REGWRITE_BUFFER(ah);
+
/* OFDM power per rate */
REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
ATH9K_POW_SM(ratesArray[rate18mb], 24)
@@ -820,6 +828,9 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
| ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
| ATH9K_POW_SM(ratesArray[rateDupCck], 0));
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
static void ath9k_hw_4k_set_addac(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 839d05a..b471db5 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "ar9002_phy.h"
static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah)
{
@@ -44,7 +45,7 @@ static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah)
if (!ath9k_hw_nvram_read(common,
addr + eep_start_loc, eep_data)) {
ath_print(common, ATH_DBG_EEPROM,
- "Unable to read eeprom region \n");
+ "Unable to read eeprom region\n");
return false;
}
eep_data++;
@@ -172,11 +173,11 @@ static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah,
switch (param) {
case EEP_NFTHRESH_2:
return pModal->noiseFloorThreshCh[0];
- case AR_EEPROM_MAC(0):
+ case EEP_MAC_LSW:
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
- case AR_EEPROM_MAC(1):
+ case EEP_MAC_MID:
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
- case AR_EEPROM_MAC(2):
+ case EEP_MAC_MSW:
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
case EEP_REG_0:
return pBase->regDmn[0];
@@ -1169,7 +1170,7 @@ static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah,
#undef EEP_MAP9287_SPURCHAN
}
-const struct eeprom_ops eep_AR9287_ops = {
+const struct eeprom_ops eep_ar9287_ops = {
.check_eeprom = ath9k_hw_AR9287_check_eeprom,
.get_eeprom = ath9k_hw_AR9287_get_eeprom,
.fill_eeprom = ath9k_hw_AR9287_fill_eeprom,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 404a034..7e1ed78 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "ar9002_phy.h"
static void ath9k_get_txgain_index(struct ath_hw *ah,
struct ath9k_channel *chan,
@@ -49,7 +50,6 @@ static void ath9k_get_txgain_index(struct ath_hw *ah,
i++;
*pcdacIdx = i;
- return;
}
static void ath9k_olc_get_pdadcs(struct ath_hw *ah,
@@ -222,6 +222,12 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
return -EINVAL;
}
+ /* Enable fixup for AR_AN_TOP2 if necessary */
+ if (AR_SREV_9280_10_OR_LATER(ah) &&
+ (eep->baseEepHeader.version & 0xff) > 0x0a &&
+ eep->baseEepHeader.pwdclkind == 0)
+ ah->need_an_top2_fixup = 1;
+
return 0;
}
@@ -237,11 +243,11 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
return pModal[0].noiseFloorThreshCh[0];
case EEP_NFTHRESH_2:
return pModal[1].noiseFloorThreshCh[0];
- case AR_EEPROM_MAC(0):
+ case EEP_MAC_LSW:
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
- case AR_EEPROM_MAC(1):
+ case EEP_MAC_MID:
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
- case AR_EEPROM_MAC(2):
+ case EEP_MAC_MSW:
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
case EEP_REG_0:
return pBase->regDmn[0];
@@ -267,6 +273,8 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
return pBase->txMask;
case EEP_RX_MASK:
return pBase->rxMask;
+ case EEP_FSTCLK_5G:
+ return pBase->fastClk5g;
case EEP_RXGAIN_TYPE:
return pBase->rxGainType;
case EEP_TXGAIN_TYPE:
@@ -742,8 +750,6 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
pPDADCValues[k] = pPDADCValues[k - 1];
k++;
}
-
- return;
}
static int16_t ath9k_change_gain_boundary_setting(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index deab8be..0ee75e7 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -283,22 +283,17 @@ static void ath9k_gen_timer_start(struct ath_hw *ah,
u32 timer_next,
u32 timer_period)
{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
- if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
+ if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
ath9k_hw_set_interrupts(ah, 0);
- sc->imask |= ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ah->imask |= ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
}
static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
ath9k_hw_gen_timer_stop(ah, timer);
@@ -306,8 +301,8 @@ static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
/* if no timer is enabled, turn off interrupt mask */
if (timer_table->timer_mask.val == 0) {
ath9k_hw_set_interrupts(ah, 0);
- sc->imask &= ~ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ah->imask &= ~ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
}
@@ -364,7 +359,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "no stomp timer running \n");
+ "no stomp timer running\n");
spin_lock_bh(&btcoex->btcoex_lock);
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
new file mode 100644
index 0000000..46dc41a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -0,0 +1,1008 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+#define ATH9K_FW_USB_DEV(devid, fw) \
+ { USB_DEVICE(0x0cf3, devid), .driver_info = (unsigned long) fw }
+
+static struct usb_device_id ath9k_hif_usb_ids[] = {
+ ATH9K_FW_USB_DEV(0x9271, "ar9271.fw"),
+ ATH9K_FW_USB_DEV(0x1006, "ar9271.fw"),
+ { },
+};
+
+MODULE_DEVICE_TABLE(usb, ath9k_hif_usb_ids);
+
+static int __hif_usb_tx(struct hif_device_usb *hif_dev);
+
+static void hif_usb_regout_cb(struct urb *urb)
+{
+ struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ break;
+ }
+
+ if (cmd) {
+ ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
+ cmd->skb, 1);
+ kfree(cmd);
+ }
+
+ return;
+free:
+ kfree_skb(cmd->skb);
+ kfree(cmd);
+}
+
+static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
+ struct sk_buff *skb)
+{
+ struct urb *urb;
+ struct cmd_buf *cmd;
+ int ret = 0;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (urb == NULL)
+ return -ENOMEM;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ cmd->skb = skb;
+ cmd->hif_dev = hif_dev;
+
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_sndintpipe(hif_dev->udev, USB_REG_OUT_PIPE),
+ skb->data, skb->len,
+ hif_usb_regout_cb, cmd, 1);
+
+ usb_anchor_urb(urb, &hif_dev->regout_submitted);
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ kfree(cmd);
+ }
+ usb_free_urb(urb);
+
+ return ret;
+}
+
+static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev,
+ struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(list)) != NULL) {
+ dev_kfree_skb_any(skb);
+ TX_STAT_INC(skb_dropped);
+ }
+}
+
+static void hif_usb_tx_cb(struct urb *urb)
+{
+ struct tx_buf *tx_buf = (struct tx_buf *) urb->context;
+ struct hif_device_usb *hif_dev = tx_buf->hif_dev;
+ struct sk_buff *skb;
+
+ if (!hif_dev || !tx_buf)
+ return;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ /*
+ * The URB has been killed, free the SKBs
+ * and return.
+ */
+ ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
+ return;
+ default:
+ break;
+ }
+
+ /* Check if TX has been stopped */
+ spin_lock(&hif_dev->tx.tx_lock);
+ if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
+ spin_unlock(&hif_dev->tx.tx_lock);
+ ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
+ goto add_free;
+ }
+ spin_unlock(&hif_dev->tx.tx_lock);
+
+ /* Complete the queued SKBs. */
+ while ((skb = __skb_dequeue(&tx_buf->skb_queue)) != NULL) {
+ ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
+ skb, 1);
+ TX_STAT_INC(skb_completed);
+ }
+
+add_free:
+ /* Re-initialize the SKB queue */
+ tx_buf->len = tx_buf->offset = 0;
+ __skb_queue_head_init(&tx_buf->skb_queue);
+
+ /* Add this TX buffer to the free list */
+ spin_lock(&hif_dev->tx.tx_lock);
+ list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
+ hif_dev->tx.tx_buf_cnt++;
+ if (!(hif_dev->tx.flags & HIF_USB_TX_STOP))
+ __hif_usb_tx(hif_dev); /* Check for pending SKBs */
+ TX_STAT_INC(buf_completed);
+ spin_unlock(&hif_dev->tx.tx_lock);
+}
+
+/* TX lock has to be taken */
+static int __hif_usb_tx(struct hif_device_usb *hif_dev)
+{
+ struct tx_buf *tx_buf = NULL;
+ struct sk_buff *nskb = NULL;
+ int ret = 0, i;
+ u16 *hdr, tx_skb_cnt = 0;
+ u8 *buf;
+
+ if (hif_dev->tx.tx_skb_cnt == 0)
+ return 0;
+
+ /* Check if a free TX buffer is available */
+ if (list_empty(&hif_dev->tx.tx_buf))
+ return 0;
+
+ tx_buf = list_first_entry(&hif_dev->tx.tx_buf, struct tx_buf, list);
+ list_move_tail(&tx_buf->list, &hif_dev->tx.tx_pending);
+ hif_dev->tx.tx_buf_cnt--;
+
+ tx_skb_cnt = min_t(u16, hif_dev->tx.tx_skb_cnt, MAX_TX_AGGR_NUM);
+
+ for (i = 0; i < tx_skb_cnt; i++) {
+ nskb = __skb_dequeue(&hif_dev->tx.tx_skb_queue);
+
+ /* Should never be NULL */
+ BUG_ON(!nskb);
+
+ hif_dev->tx.tx_skb_cnt--;
+
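+ /*
+ * Stream-mode framing: prefix each frame with a 4-byte header
+ * (16-bit length, 16-bit stream tag) and start the next frame
+ * on a 4-byte aligned offset.
+ */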
+ buf = tx_buf->buf;
+ buf += tx_buf->offset;
+ hdr = (u16 *)buf;
+ *hdr++ = nskb->len;
+ *hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
+ buf += 4;
+ memcpy(buf, nskb->data, nskb->len);
+ tx_buf->len = nskb->len + 4;
+
+ if (i < (tx_skb_cnt - 1))
+ tx_buf->offset += (((tx_buf->len - 1) / 4) + 1) * 4;
+
+ if (i == (tx_skb_cnt - 1))
+ tx_buf->len += tx_buf->offset;
+
+ __skb_queue_tail(&tx_buf->skb_queue, nskb);
+ TX_STAT_INC(skb_queued);
+ }
+
+ usb_fill_bulk_urb(tx_buf->urb, hif_dev->udev,
+ usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE),
+ tx_buf->buf, tx_buf->len,
+ hif_usb_tx_cb, tx_buf);
+
+ ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC);
+ if (ret) {
+ tx_buf->len = tx_buf->offset = 0;
+ ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
+ __skb_queue_head_init(&tx_buf->skb_queue);
+ list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
+ hif_dev->tx.tx_buf_cnt++;
+ }
+
+ if (!ret)
+ TX_STAT_INC(buf_queued);
+
+ return ret;
+}
+
+static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb,
+ struct ath9k_htc_tx_ctl *tx_ctl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+
+ if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ return -ENODEV;
+ }
+
+ /* Check if the max queue count has been reached */
+ if (hif_dev->tx.tx_skb_cnt > MAX_TX_BUF_NUM) {
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ return -ENOMEM;
+ }
+
+ __skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb);
+ hif_dev->tx.tx_skb_cnt++;
+
+ /* Send normal frames immediately */
+ if (!tx_ctl || (tx_ctl && (tx_ctl->type == ATH9K_HTC_NORMAL)))
+ __hif_usb_tx(hif_dev);
+
+ /* Check if AMPDUs have to be sent immediately */
+ if (tx_ctl && (tx_ctl->type == ATH9K_HTC_AMPDU) &&
+ (hif_dev->tx.tx_buf_cnt == MAX_TX_URB_NUM) &&
+ (hif_dev->tx.tx_skb_cnt < 2)) {
+ __hif_usb_tx(hif_dev);
+ }
+
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ return 0;
+}
+
+static void hif_usb_start(void *hif_handle, u8 pipe_id)
+{
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ unsigned long flags;
+
+ hif_dev->flags |= HIF_USB_START;
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ hif_dev->tx.flags &= ~HIF_USB_TX_STOP;
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+}
+
+static void hif_usb_stop(void *hif_handle, u8 pipe_id)
+{
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ ath9k_skb_queue_purge(hif_dev, &hif_dev->tx.tx_skb_queue);
+ hif_dev->tx.tx_skb_cnt = 0;
+ hif_dev->tx.flags |= HIF_USB_TX_STOP;
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+}
+
+static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb,
+ struct ath9k_htc_tx_ctl *tx_ctl)
+{
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ int ret = 0;
+
+ switch (pipe_id) {
+ case USB_WLAN_TX_PIPE:
+ ret = hif_usb_send_tx(hif_dev, skb, tx_ctl);
+ break;
+ case USB_REG_OUT_PIPE:
+ ret = hif_usb_send_regout(hif_dev, skb);
+ break;
+ default:
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Invalid TX pipe: %d\n", pipe_id);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static struct ath9k_htc_hif hif_usb = {
+ .transport = ATH9K_HIF_USB,
+ .name = "ath9k_hif_usb",
+
+ .control_ul_pipe = USB_REG_OUT_PIPE,
+ .control_dl_pipe = USB_REG_IN_PIPE,
+
+ .start = hif_usb_start,
+ .stop = hif_usb_stop,
+ .send = hif_usb_send,
+};
+
+static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
+ struct sk_buff *skb)
+{
+ struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
+ int index = 0, i = 0, chk_idx, len = skb->len;
+ int rx_remain_len = 0, rx_pkt_len = 0;
+ u16 pkt_len, pkt_tag, pool_index = 0;
+ u8 *ptr;
+
+ spin_lock(&hif_dev->rx_lock);
+
+ rx_remain_len = hif_dev->rx_remain_len;
+ rx_pkt_len = hif_dev->rx_transfer_len;
+
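+ /*
+ * A frame can straddle two USB transfers. If the previous transfer
+ * ended mid-frame, finish filling the saved remain_skb with the
+ * leading bytes of this transfer before parsing new frames.
+ */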
+ if (rx_remain_len != 0) {
+ struct sk_buff *remain_skb = hif_dev->remain_skb;
+
+ if (remain_skb) {
+ ptr = (u8 *) remain_skb->data;
+
+ index = rx_remain_len;
+ rx_remain_len -= hif_dev->rx_pad_len;
+ ptr += rx_pkt_len;
+
+ memcpy(ptr, skb->data, rx_remain_len);
+
+ rx_pkt_len += rx_remain_len;
+ hif_dev->rx_remain_len = 0;
+ skb_put(remain_skb, rx_pkt_len);
+
+ skb_pool[pool_index++] = remain_skb;
+
+ } else {
+ index = rx_remain_len;
+ }
+ }
+
+ spin_unlock(&hif_dev->rx_lock);
+
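+ /*
+ * Each frame in the transfer starts with a 4-byte little-endian
+ * header: a 16-bit payload length followed by the 16-bit stream
+ * tag, with the payload padded out to a 4-byte boundary.
+ */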
+ while (index < len) {
+ ptr = (u8 *) skb->data;
+
+ pkt_len = ptr[index] + (ptr[index+1] << 8);
+ pkt_tag = ptr[index+2] + (ptr[index+3] << 8);
+
+ if (pkt_tag == ATH_USB_RX_STREAM_MODE_TAG) {
+ u16 pad_len;
+
+ pad_len = 4 - (pkt_len & 0x3);
+ if (pad_len == 4)
+ pad_len = 0;
+
+ chk_idx = index;
+ index = index + 4 + pkt_len + pad_len;
+
+ if (index > MAX_RX_BUF_SIZE) {
+ spin_lock(&hif_dev->rx_lock);
+ hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
+ hif_dev->rx_transfer_len =
+ MAX_RX_BUF_SIZE - chk_idx - 4;
+ hif_dev->rx_pad_len = pad_len;
+
+ nskb = __dev_alloc_skb(pkt_len + 32,
+ GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: RX memory allocation"
+ " error\n");
+ spin_unlock(&hif_dev->rx_lock);
+ goto err;
+ }
+ skb_reserve(nskb, 32);
+ RX_STAT_INC(skb_allocated);
+
+ memcpy(nskb->data, &(skb->data[chk_idx+4]),
+ hif_dev->rx_transfer_len);
+
+ /* Record the buffer pointer */
+ hif_dev->remain_skb = nskb;
+ spin_unlock(&hif_dev->rx_lock);
+ } else {
+ nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: RX memory allocation"
+ " error\n");
+ goto err;
+ }
+ skb_reserve(nskb, 32);
+ RX_STAT_INC(skb_allocated);
+
+ memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
+ skb_put(nskb, pkt_len);
+ skb_pool[pool_index++] = nskb;
+ }
+ } else {
+ RX_STAT_INC(skb_dropped);
+ return;
+ }
+ }
+
+err:
+ for (i = 0; i < pool_index; i++) {
+ ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
+ skb_pool[i]->len, USB_WLAN_RX_PIPE);
+ RX_STAT_INC(skb_completed);
+ }
+}
+
+static void ath9k_hif_usb_rx_cb(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)
+ usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ int ret;
+
+ if (!skb)
+ return;
+
+ if (!hif_dev)
+ goto free;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ goto resubmit;
+ }
+
+ if (likely(urb->actual_length != 0)) {
+ skb_put(skb, urb->actual_length);
+ ath9k_hif_usb_rx_stream(hif_dev, skb);
+ }
+
+resubmit:
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ usb_anchor_urb(urb, &hif_dev->rx_submitted);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ goto free;
+ }
+
+ return;
+free:
+ kfree_skb(skb);
+}
+
+static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct sk_buff *nskb;
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)
+ usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ int ret;
+
+ if (!skb)
+ return;
+
+ if (!hif_dev)
+ goto free;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ goto resubmit;
+ }
+
+ if (likely(urb->actual_length != 0)) {
+ skb_put(skb, urb->actual_length);
+
+ /* Process the command first */
+ ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
+ skb->len, USB_REG_IN_PIPE);
+
+
+ nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: REG_IN memory allocation failure\n");
+ urb->context = NULL;
+ return;
+ }
+
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE),
+ nskb->data, MAX_REG_IN_BUF_SIZE,
+ ath9k_hif_usb_reg_in_cb, nskb, 1);
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ kfree_skb(nskb);
+ urb->context = NULL;
+ }
+
+ return;
+ }
+
+resubmit:
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret)
+ goto free;
+
+ return;
+free:
+ kfree_skb(skb);
+ urb->context = NULL;
+}
+
+static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
+{
+ struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
+
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_buf, list) {
+ usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+ }
+
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_pending, list) {
+ usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+ }
+}
+
+static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
+{
+ struct tx_buf *tx_buf;
+ int i;
+
+ INIT_LIST_HEAD(&hif_dev->tx.tx_buf);
+ INIT_LIST_HEAD(&hif_dev->tx.tx_pending);
+ spin_lock_init(&hif_dev->tx.tx_lock);
+ __skb_queue_head_init(&hif_dev->tx.tx_skb_queue);
+
+ for (i = 0; i < MAX_TX_URB_NUM; i++) {
+ tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
+ if (!tx_buf)
+ goto err;
+
+ tx_buf->buf = kzalloc(MAX_TX_BUF_SIZE, GFP_KERNEL);
+ if (!tx_buf->buf)
+ goto err;
+
+ tx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tx_buf->urb)
+ goto err;
+
+ tx_buf->hif_dev = hif_dev;
+ __skb_queue_head_init(&tx_buf->skb_queue);
+
+ list_add_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
+ }
+
+ hif_dev->tx.tx_buf_cnt = MAX_TX_URB_NUM;
+
+ return 0;
+err:
+ ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
+ return -ENOMEM;
+}
+
+static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
+{
+ usb_kill_anchored_urbs(&hif_dev->rx_submitted);
+}
+
+static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
+{
+ struct urb *urb = NULL;
+ struct sk_buff *skb = NULL;
+ int i, ret;
+
+ init_usb_anchor(&hif_dev->rx_submitted);
+ spin_lock_init(&hif_dev->rx_lock);
+
+ for (i = 0; i < MAX_RX_URB_NUM; i++) {
+
+ /* Allocate URB */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (urb == NULL) {
+ ret = -ENOMEM;
+ goto err_urb;
+ }
+
+ /* Allocate buffer */
+ skb = alloc_skb(MAX_RX_BUF_SIZE, GFP_KERNEL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto err_skb;
+ }
+
+ usb_fill_bulk_urb(urb, hif_dev->udev,
+ usb_rcvbulkpipe(hif_dev->udev,
+ USB_WLAN_RX_PIPE),
+ skb->data, MAX_RX_BUF_SIZE,
+ ath9k_hif_usb_rx_cb, skb);
+
+ /* Anchor URB */
+ usb_anchor_urb(urb, &hif_dev->rx_submitted);
+
+ /* Submit URB */
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ goto err_submit;
+ }
+
+ /*
+ * Drop reference count.
+ * This ensures that the URB is freed when killing them.
+ */
+ usb_free_urb(urb);
+ }
+
+ return 0;
+
+err_submit:
+ kfree_skb(skb);
+err_skb:
+ usb_free_urb(urb);
+err_urb:
+ ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
+ return ret;
+}
+
+static void ath9k_hif_usb_dealloc_reg_in_urb(struct hif_device_usb *hif_dev)
+{
+ if (hif_dev->reg_in_urb) {
+ usb_kill_urb(hif_dev->reg_in_urb);
+ if (hif_dev->reg_in_urb->context)
+ kfree_skb((void *)hif_dev->reg_in_urb->context);
+ usb_free_urb(hif_dev->reg_in_urb);
+ hif_dev->reg_in_urb = NULL;
+ }
+}
+
+static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev)
+{
+ struct sk_buff *skb;
+
+ hif_dev->reg_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (hif_dev->reg_in_urb == NULL)
+ return -ENOMEM;
+
+ skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL);
+ if (!skb)
+ goto err;
+
+ usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE),
+ skb->data, MAX_REG_IN_BUF_SIZE,
+ ath9k_hif_usb_reg_in_cb, skb, 1);
+
+ if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0)
+ goto err;
+
+ return 0;
+
+err:
+ ath9k_hif_usb_dealloc_reg_in_urb(hif_dev);
+ return -ENOMEM;
+}
+
+static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
+{
+ /* Register Write */
+ init_usb_anchor(&hif_dev->regout_submitted);
+
+ /* TX */
+ if (ath9k_hif_usb_alloc_tx_urbs(hif_dev) < 0)
+ goto err;
+
+ /* RX */
+ if (ath9k_hif_usb_alloc_rx_urbs(hif_dev) < 0)
+ goto err;
+
+ /* Register Read */
+ if (ath9k_hif_usb_alloc_reg_in_urb(hif_dev) < 0)
+ goto err;
+
+ return 0;
+err:
+ return -ENOMEM;
+}
+
+static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
+{
+ usb_kill_anchored_urbs(&hif_dev->regout_submitted);
+ ath9k_hif_usb_dealloc_reg_in_urb(hif_dev);
+ ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
+ ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
+}
+
+static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
+{
+ int transfer, err;
+ const void *data = hif_dev->firmware->data;
+ size_t len = hif_dev->firmware->size;
+ u32 addr = AR9271_FIRMWARE;
+ u8 *buf = kzalloc(4096, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
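+ /*
+ * Push the image to the target in 4 KB chunks using vendor control
+ * transfers; the target load address (right-shifted by 8) goes in
+ * the wValue field of each request.
+ */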
+ while (len) {
+ transfer = min_t(int, len, 4096);
+ memcpy(buf, data, transfer);
+
+ err = usb_control_msg(hif_dev->udev,
+ usb_sndctrlpipe(hif_dev->udev, 0),
+ FIRMWARE_DOWNLOAD, 0x40 | USB_DIR_OUT,
+ addr >> 8, 0, buf, transfer, HZ);
+ if (err < 0) {
+ kfree(buf);
+ return err;
+ }
+
+ len -= transfer;
+ data += transfer;
+ addr += transfer;
+ }
+ kfree(buf);
+
+ /*
+ * Issue FW download complete command to firmware.
+ */
+ err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0),
+ FIRMWARE_DOWNLOAD_COMP,
+ 0x40 | USB_DIR_OUT,
+ AR9271_FIRMWARE_TEXT >> 8, 0, NULL, 0, HZ);
+ if (err)
+ return -EIO;
+
+ dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n",
+ "ar9271.fw", (unsigned long) hif_dev->firmware->size);
+
+ return 0;
+}
+
+static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev,
+ const char *fw_name)
+{
+ int ret;
+
+ /* Request firmware */
+ ret = request_firmware(&hif_dev->firmware, fw_name, &hif_dev->udev->dev);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Firmware - %s not found\n", fw_name);
+ goto err_fw_req;
+ }
+
+ /* Alloc URBs */
+ ret = ath9k_hif_usb_alloc_urbs(hif_dev);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Unable to allocate URBs\n");
+ goto err_urb;
+ }
+
+ /* Download firmware */
+ ret = ath9k_hif_usb_download_fw(hif_dev);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Firmware - %s download failed\n", fw_name);
+ goto err_fw_download;
+ }
+
+ return 0;
+
+err_fw_download:
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+err_urb:
+ release_firmware(hif_dev->firmware);
+err_fw_req:
+ hif_dev->firmware = NULL;
+ return ret;
+}
+
+static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
+{
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+ if (hif_dev->firmware)
+ release_firmware(hif_dev->firmware);
+}
+
+static int ath9k_hif_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct hif_device_usb *hif_dev;
+ const char *fw_name = (const char *) id->driver_info;
+ int ret = 0;
+
+ hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
+ if (!hif_dev) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ usb_get_dev(udev);
+ hif_dev->udev = udev;
+ hif_dev->interface = interface;
+ hif_dev->device_id = id->idProduct;
+#ifdef CONFIG_PM
+ udev->reset_resume = 1;
+#endif
+ usb_set_intfdata(interface, hif_dev);
+
+ hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb,
+ &hif_dev->udev->dev);
+ if (hif_dev->htc_handle == NULL) {
+ ret = -ENOMEM;
+ goto err_htc_hw_alloc;
+ }
+
+ ret = ath9k_hif_usb_dev_init(hif_dev, fw_name);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_hif_init_usb;
+ }
+
+ ret = ath9k_htc_hw_init(hif_dev->htc_handle,
+ &hif_dev->udev->dev, hif_dev->device_id);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_htc_hw_init;
+ }
+
+ dev_info(&hif_dev->udev->dev, "ath9k_htc: USB layer initialized\n");
+
+ return 0;
+
+err_htc_hw_init:
+ ath9k_hif_usb_dev_deinit(hif_dev);
+err_hif_init_usb:
+ ath9k_htc_hw_free(hif_dev->htc_handle);
+err_htc_hw_alloc:
+ usb_set_intfdata(interface, NULL);
+ kfree(hif_dev);
+ usb_put_dev(udev);
+err_alloc:
+ return ret;
+}
+
+static void ath9k_hif_usb_reboot(struct usb_device *udev)
+{
+ u32 reboot_cmd = 0xffffffff;
+ void *buf;
+ int ret;
+
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ memcpy(buf, &reboot_cmd, 4);
+
+ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE),
+ buf, 4, NULL, HZ);
+ if (ret)
+ dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n");
+
+ kfree(buf);
+}
+
+static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct hif_device_usb *hif_dev =
+ (struct hif_device_usb *) usb_get_intfdata(interface);
+
+ if (hif_dev) {
+ ath9k_htc_hw_deinit(hif_dev->htc_handle,
+ (udev->state == USB_STATE_NOTATTACHED) ? true : false);
+ ath9k_htc_hw_free(hif_dev->htc_handle);
+ ath9k_hif_usb_dev_deinit(hif_dev);
+ usb_set_intfdata(interface, NULL);
+ }
+
+ if (hif_dev->flags & HIF_USB_START)
+ ath9k_hif_usb_reboot(udev);
+
+ kfree(hif_dev);
+ dev_info(&udev->dev, "ath9k_htc: USB layer deinitialized\n");
+ usb_put_dev(udev);
+}
+
+#ifdef CONFIG_PM
+static int ath9k_hif_usb_suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ struct hif_device_usb *hif_dev =
+ (struct hif_device_usb *) usb_get_intfdata(interface);
+
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+
+ return 0;
+}
+
+static int ath9k_hif_usb_resume(struct usb_interface *interface)
+{
+ struct hif_device_usb *hif_dev =
+ (struct hif_device_usb *) usb_get_intfdata(interface);
+ int ret;
+
+ ret = ath9k_hif_usb_alloc_urbs(hif_dev);
+ if (ret)
+ return ret;
+
+ if (hif_dev->firmware) {
+ ret = ath9k_hif_usb_download_fw(hif_dev);
+ if (ret)
+ goto fail_resume;
+ } else {
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+ return -EIO;
+ }
+
+ mdelay(100);
+
+ ret = ath9k_htc_resume(hif_dev->htc_handle);
+
+ if (ret)
+ goto fail_resume;
+
+ return 0;
+
+fail_resume:
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+
+ return ret;
+}
+#endif
+
+static struct usb_driver ath9k_hif_usb_driver = {
+ .name = "ath9k_hif_usb",
+ .probe = ath9k_hif_usb_probe,
+ .disconnect = ath9k_hif_usb_disconnect,
+#ifdef CONFIG_PM
+ .suspend = ath9k_hif_usb_suspend,
+ .resume = ath9k_hif_usb_resume,
+ .reset_resume = ath9k_hif_usb_resume,
+#endif
+ .id_table = ath9k_hif_usb_ids,
+ .soft_unbind = 1,
+};
+
+int ath9k_hif_usb_init(void)
+{
+ return usb_register(&ath9k_hif_usb_driver);
+}
+
+void ath9k_hif_usb_exit(void)
+{
+ usb_deregister(&ath9k_hif_usb_driver);
+}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
new file mode 100644
index 0000000..0aca49b6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_USB_H
+#define HTC_USB_H
+
+#define AR9271_FIRMWARE 0x501000
+#define AR9271_FIRMWARE_TEXT 0x903000
+
+#define FIRMWARE_DOWNLOAD 0x30
+#define FIRMWARE_DOWNLOAD_COMP 0x31
+
+#define ATH_USB_RX_STREAM_MODE_TAG 0x4e00
+#define ATH_USB_TX_STREAM_MODE_TAG 0x697e
+
+/* FIXME: Verify these numbers (with Windows) */
+#define MAX_TX_URB_NUM 8
+#define MAX_TX_BUF_NUM 1024
+#define MAX_TX_BUF_SIZE 32768
+#define MAX_TX_AGGR_NUM 20
+
+#define MAX_RX_URB_NUM 8
+#define MAX_RX_BUF_SIZE 16384
+#define MAX_PKT_NUM_IN_TRANSFER 10
+
+#define MAX_REG_OUT_URB_NUM 1
+#define MAX_REG_OUT_BUF_NUM 8
+
+#define MAX_REG_IN_BUF_SIZE 64
+
+/* USB Endpoint definition */
+#define USB_WLAN_TX_PIPE 1
+#define USB_WLAN_RX_PIPE 2
+#define USB_REG_IN_PIPE 3
+#define USB_REG_OUT_PIPE 4
+
+#define HIF_USB_MAX_RXPIPES 2
+#define HIF_USB_MAX_TXPIPES 4
+
+struct tx_buf {
+ u8 *buf;
+ u16 len;
+ u16 offset;
+ struct urb *urb;
+ struct sk_buff_head skb_queue;
+ struct hif_device_usb *hif_dev;
+ struct list_head list;
+};
+
+#define HIF_USB_TX_STOP BIT(0)
+
+struct hif_usb_tx {
+ u8 flags;
+ u8 tx_buf_cnt;
+ u16 tx_skb_cnt;
+ struct sk_buff_head tx_skb_queue;
+ struct list_head tx_buf;
+ struct list_head tx_pending;
+ spinlock_t tx_lock;
+};
+
+struct cmd_buf {
+ struct sk_buff *skb;
+ struct hif_device_usb *hif_dev;
+};
+
+#define HIF_USB_START BIT(0)
+
+struct hif_device_usb {
+ u16 device_id;
+ struct usb_device *udev;
+ struct usb_interface *interface;
+ const struct firmware *firmware;
+ struct htc_target *htc_handle;
+ struct hif_usb_tx tx;
+ struct urb *reg_in_urb;
+ struct usb_anchor regout_submitted;
+ struct usb_anchor rx_submitted;
+ struct sk_buff *remain_skb;
+ int rx_remain_len;
+ int rx_pkt_len;
+ int rx_transfer_len;
+ int rx_pad_len;
+ spinlock_t rx_lock;
+ u8 flags; /* HIF_USB_* */
+};
+
+int ath9k_hif_usb_init(void);
+void ath9k_hif_usb_exit(void);
+
+#endif /* HTC_USB_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
new file mode 100644
index 0000000..ad556aa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -0,0 +1,464 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_H
+#define HTC_H
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/firmware.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/leds.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+#include "htc_hst.h"
+#include "hif_usb.h"
+#include "wmi.h"
+
+#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
+#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */
+#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
+#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
+
+#define ATH_DEFAULT_BMISS_LIMIT 10
+#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
+#define TSF_TO_TU(_h, _l) \
+ ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
+
+extern struct ieee80211_ops ath9k_htc_ops;
+extern int htc_modparam_nohwcrypt;
+
+enum htc_phymode {
+ HTC_MODE_AUTO = 0,
+ HTC_MODE_11A = 1,
+ HTC_MODE_11B = 2,
+ HTC_MODE_11G = 3,
+ HTC_MODE_FH = 4,
+ HTC_MODE_TURBO_A = 5,
+ HTC_MODE_TURBO_G = 6,
+ HTC_MODE_11NA = 7,
+ HTC_MODE_11NG = 8
+};
+
+enum htc_opmode {
+ HTC_M_STA = 1,
+ HTC_M_IBSS = 0,
+ HTC_M_AHDEMO = 3,
+ HTC_M_HOSTAP = 6,
+ HTC_M_MONITOR = 8,
+ HTC_M_WDS = 2
+};
+
+#define ATH9K_HTC_HDRSPACE sizeof(struct htc_frame_hdr)
+#define ATH9K_HTC_AMPDU 1
+#define ATH9K_HTC_NORMAL 2
+
+#define ATH9K_HTC_TX_CTSONLY 0x1
+#define ATH9K_HTC_TX_RTSCTS 0x2
+#define ATH9K_HTC_TX_USE_MIN_RATE 0x100
+
+struct tx_frame_hdr {
+ u8 data_type;
+ u8 node_idx;
+ u8 vif_idx;
+ u8 tidno;
+ u32 flags; /* ATH9K_HTC_TX_* */
+ u8 key_type;
+ u8 keyix;
+ u8 reserved[26];
+} __packed;
+
+struct tx_mgmt_hdr {
+ u8 node_idx;
+ u8 vif_idx;
+ u8 tidno;
+ u8 flags;
+ u8 key_type;
+ u8 keyix;
+ u16 reserved;
+} __packed;
+
+struct tx_beacon_header {
+ u8 len_changed;
+ u8 vif_index;
+ u16 rev;
+} __packed;
+
+struct ath9k_htc_target_hw {
+ u32 flags;
+ u32 flags_ext;
+ u32 ampdu_limit;
+ u8 ampdu_subframes;
+ u8 tx_chainmask;
+ u8 tx_chainmask_legacy;
+ u8 rtscts_ratecode;
+ u8 protmode;
+} __packed;
+
+struct ath9k_htc_cap_target {
+ u32 flags;
+ u32 flags_ext;
+ u32 ampdu_limit;
+ u8 ampdu_subframes;
+ u8 tx_chainmask;
+ u8 tx_chainmask_legacy;
+ u8 rtscts_ratecode;
+ u8 protmode;
+} __packed;
+
+struct ath9k_htc_target_vif {
+ u8 index;
+ u8 des_bssid[ETH_ALEN];
+ __be32 opmode;
+ u8 myaddr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ u32 flags;
+ u32 flags_ext;
+ u16 ps_sta;
+ __be16 rtsthreshold;
+ u8 ath_cap;
+ u8 node;
+ s8 mcast_rate;
+} __packed;
+
+#define ATH_HTC_STA_AUTH 0x0001
+#define ATH_HTC_STA_QOS 0x0002
+#define ATH_HTC_STA_ERP 0x0004
+#define ATH_HTC_STA_HT 0x0008
+
+/* FIXME: UAPSD variables */
+struct ath9k_htc_target_sta {
+ u16 associd;
+ u16 txpower;
+ u32 ucastkey;
+ u8 macaddr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ u8 sta_index;
+ u8 vif_index;
+ u8 vif_sta;
+ __be16 flags; /* ATH_HTC_STA_* */
+ u16 htcap;
+ u8 valid;
+ u16 capinfo;
+ struct ath9k_htc_target_hw *hw;
+ struct ath9k_htc_target_vif *vif;
+ u16 txseqmgmt;
+ u8 is_vif_sta;
+ u16 maxampdu;
+ u16 iv16;
+ u32 iv32;
+} __packed;
+
+struct ath9k_htc_target_aggr {
+ u8 sta_index;
+ u8 tidno;
+ u8 aggr_enable;
+ u8 padding;
+} __packed;
+
+#define ATH_HTC_RATE_MAX 30
+
+#define WLAN_RC_DS_FLAG 0x01
+#define WLAN_RC_40_FLAG 0x02
+#define WLAN_RC_SGI_FLAG 0x04
+#define WLAN_RC_HT_FLAG 0x08
+
+struct ath9k_htc_rateset {
+ u8 rs_nrates;
+ u8 rs_rates[ATH_HTC_RATE_MAX];
+};
+
+struct ath9k_htc_rate {
+ struct ath9k_htc_rateset legacy_rates;
+ struct ath9k_htc_rateset ht_rates;
+} __packed;
+
+struct ath9k_htc_target_rate {
+ u8 sta_index;
+ u8 isnew;
+ __be32 capflags;
+ struct ath9k_htc_rate rates;
+};
+
+struct ath9k_htc_target_stats {
+ __be32 tx_shortretry;
+ __be32 tx_longretry;
+ __be32 tx_xretries;
+ __be32 ht_txunaggr_xretry;
+ __be32 ht_tx_xretries;
+} __packed;
+
+struct ath9k_htc_vif {
+ u8 index;
+};
+
+#define ATH9K_HTC_MAX_STA 8
+#define ATH9K_HTC_MAX_TID 8
+
+enum tid_aggr_state {
+ AGGR_STOP = 0,
+ AGGR_PROGRESS,
+ AGGR_START,
+ AGGR_OPERATIONAL
+};
+
+struct ath9k_htc_sta {
+ u8 index;
+ enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
+};
+
+struct ath9k_htc_aggr_work {
+ u16 tid;
+ u8 sta_addr[ETH_ALEN];
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
+ enum ieee80211_ampdu_mlme_action action;
+ struct mutex mutex;
+};
+
+#define ATH9K_HTC_RXBUF 256
+#define HTC_RX_FRAME_HEADER_SIZE 40
+
+struct ath9k_htc_rxbuf {
+ bool in_process;
+ struct sk_buff *skb;
+ struct ath_htc_rx_status rxstatus;
+ struct list_head list;
+};
+
+struct ath9k_htc_rx {
+ int last_rssi; /* FIXME: per-STA */
+ struct list_head rxbuf;
+ spinlock_t rxbuflock;
+};
+
+struct ath9k_htc_tx_ctl {
+ u8 type; /* ATH9K_HTC_* */
+};
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+
+#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
+#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
+
+struct ath_tx_stats {
+ u32 buf_queued;
+ u32 buf_completed;
+ u32 skb_queued;
+ u32 skb_completed;
+ u32 skb_dropped;
+};
+
+struct ath_rx_stats {
+ u32 skb_allocated;
+ u32 skb_completed;
+ u32 skb_dropped;
+};
+
+struct ath9k_debug {
+ struct dentry *debugfs_phy;
+ struct dentry *debugfs_tgt_stats;
+ struct dentry *debugfs_xmit;
+ struct dentry *debugfs_recv;
+ struct ath_tx_stats tx_stats;
+ struct ath_rx_stats rx_stats;
+ u32 txrate;
+};
+
+#else
+
+#define TX_STAT_INC(c) do { } while (0)
+#define RX_STAT_INC(c) do { } while (0)
+
+#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
+
+#define ATH_LED_PIN_DEF 1
+#define ATH_LED_PIN_9287 8
+#define ATH_LED_PIN_9271 15
+#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */
+#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */
+
+enum ath_led_type {
+ ATH_LED_RADIO,
+ ATH_LED_ASSOC,
+ ATH_LED_TX,
+ ATH_LED_RX
+};
+
+struct ath_led {
+ struct ath9k_htc_priv *priv;
+ struct led_classdev led_cdev;
+ enum ath_led_type led_type;
+ struct delayed_work brightness_work;
+ char name[32];
+ bool registered;
+ int brightness;
+};
+
+struct htc_beacon_config {
+ u16 beacon_interval;
+ u16 listen_interval;
+ u16 dtim_period;
+ u16 bmiss_timeout;
+ u8 dtim_count;
+};
+
+#define OP_INVALID BIT(0)
+#define OP_SCANNING BIT(1)
+#define OP_FULL_RESET BIT(2)
+#define OP_LED_ASSOCIATED BIT(3)
+#define OP_LED_ON BIT(4)
+#define OP_PREAMBLE_SHORT BIT(5)
+#define OP_PROTECT_ENABLE BIT(6)
+#define OP_TXAGGR BIT(7)
+#define OP_ASSOCIATED BIT(8)
+#define OP_ENABLE_BEACON BIT(9)
+#define OP_LED_DEINIT BIT(10)
+#define OP_UNPLUGGED BIT(11)
+
+struct ath9k_htc_priv {
+ struct device *dev;
+ struct ieee80211_hw *hw;
+ struct ath_hw *ah;
+ struct htc_target *htc;
+ struct wmi *wmi;
+
+ enum htc_endpoint_id wmi_cmd_ep;
+ enum htc_endpoint_id beacon_ep;
+ enum htc_endpoint_id cab_ep;
+ enum htc_endpoint_id uapsd_ep;
+ enum htc_endpoint_id mgmt_ep;
+ enum htc_endpoint_id data_be_ep;
+ enum htc_endpoint_id data_bk_ep;
+ enum htc_endpoint_id data_vi_ep;
+ enum htc_endpoint_id data_vo_ep;
+
+ u16 op_flags;
+ u16 curtxpow;
+ u16 txpowlimit;
+ u16 nvifs;
+ u16 nstations;
+ u16 seq_no;
+ u32 bmiss_cnt;
+
+ spinlock_t beacon_lock;
+
+ bool tx_queues_stop;
+ spinlock_t tx_lock;
+
+ struct ieee80211_vif *vif;
+ struct htc_beacon_config cur_beacon_conf;
+ unsigned int rxfilter;
+ struct tasklet_struct wmi_tasklet;
+ struct tasklet_struct rx_tasklet;
+ struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ath9k_htc_rx rx;
+ struct tasklet_struct tx_tasklet;
+ struct sk_buff_head tx_queue;
+ struct ath9k_htc_aggr_work aggr_work;
+ struct delayed_work ath9k_aggr_work;
+ struct delayed_work ath9k_ani_work;
+ struct work_struct ps_work;
+
+ struct mutex htc_pm_lock;
+ unsigned long ps_usecount;
+ bool ps_enabled;
+ bool ps_idle;
+
+ struct ath_led radio_led;
+ struct ath_led assoc_led;
+ struct ath_led tx_led;
+ struct ath_led rx_led;
+ struct delayed_work ath9k_led_blink_work;
+ int led_on_duration;
+ int led_off_duration;
+ int led_on_cnt;
+ int led_off_cnt;
+ int hwq_map[ATH9K_WME_AC_VO+1];
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+ struct ath9k_debug debug;
+#endif
+ struct ath9k_htc_target_rate tgt_rate;
+
+ struct mutex mutex;
+};
+
+static inline void ath_read_cachesize(struct ath_common *common, int *csz)
+{
+ common->bus_ops->read_cachesize(common, csz);
+}
+
+void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif);
+void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending);
+
+void ath9k_htc_rxep(void *priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id);
+void ath9k_htc_txep(void *priv, struct sk_buff *skb, enum htc_endpoint_id ep_id,
+ bool txok);
+void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id, bool txok);
+
+void ath9k_htc_station_work(struct work_struct *work);
+void ath9k_htc_aggr_work(struct work_struct *work);
+void ath9k_ani_work(struct work_struct *work);
+
+int ath9k_tx_init(struct ath9k_htc_priv *priv);
+void ath9k_tx_tasklet(unsigned long data);
+int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb);
+void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
+bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
+ enum ath9k_tx_queue_subtype qtype);
+int get_hw_qnum(u16 queue, int *hwq_map);
+int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
+ struct ath9k_tx_queue_info *qinfo);
+
+int ath9k_rx_init(struct ath9k_htc_priv *priv);
+void ath9k_rx_cleanup(struct ath9k_htc_priv *priv);
+void ath9k_host_rx_init(struct ath9k_htc_priv *priv);
+void ath9k_rx_tasklet(unsigned long data);
+u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv);
+
+void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv);
+void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
+void ath9k_ps_work(struct work_struct *work);
+
+void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
+void ath9k_init_leds(struct ath9k_htc_priv *priv);
+void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
+
+int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
+ u16 devid);
+void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug);
+#ifdef CONFIG_PM
+int ath9k_htc_resume(struct htc_target *htc_handle);
+#endif
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+int ath9k_htc_debug_create_root(void);
+void ath9k_htc_debug_remove_root(void);
+int ath9k_htc_init_debug(struct ath_hw *ah);
+void ath9k_htc_exit_debug(struct ath_hw *ah);
+#else
+static inline int ath9k_htc_debug_create_root(void) { return 0; };
+static inline void ath9k_htc_debug_remove_root(void) {};
+static inline int ath9k_htc_init_debug(struct ath_hw *ah) { return 0; };
+static inline void ath9k_htc_exit_debug(struct ath_hw *ah) {};
+#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
+
+#endif /* HTC_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
new file mode 100644
index 0000000..c10c7d0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+#define FUDGE 2
+
+static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
+ struct htc_beacon_config *bss_conf)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_beacon_state bs;
+ enum ath9k_int imask = 0;
+ int dtimperiod, dtimcount, sleepduration;
+ int cfpperiod, cfpcount, bmiss_timeout;
+ u32 nexttbtt = 0, intval, tsftu;
+ __be32 htc_imask = 0;
+ u64 tsf;
+ int num_beacons, offset, dtim_dec_count, cfp_dec_count;
+ int ret;
+ u8 cmd_rsp;
+
+ memset(&bs, 0, sizeof(bs));
+
+ intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+ bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);
+
+ /*
+ * Setup dtim and cfp parameters according to
+ * last beacon we received (which may be none).
+ */
+ dtimperiod = bss_conf->dtim_period;
+ if (dtimperiod <= 0) /* NB: 0 if not known */
+ dtimperiod = 1;
+ dtimcount = 1;
+ if (dtimcount >= dtimperiod) /* NB: sanity check */
+ dtimcount = 0;
+ cfpperiod = 1; /* NB: no PCF support yet */
+ cfpcount = 0;
+
+ sleepduration = intval;
+ if (sleepduration <= 0)
+ sleepduration = intval;
+
+ /*
+ * Pull nexttbtt forward to reflect the current
+ * TSF and calculate dtim+cfp state for the result.
+ */
+ tsf = ath9k_hw_gettsf64(priv->ah);
+ tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
+
+ num_beacons = tsftu / intval + 1;
+ offset = tsftu % intval;
+ nexttbtt = tsftu - offset;
+ if (offset)
+ nexttbtt += intval;
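+ /*
+ * For example, with intval = 100 TU and tsftu = 1234 this yields
+ * num_beacons = 13, offset = 34 and nexttbtt = 1300, i.e. tsftu
+ * rounded up to the next beacon boundary.
+ */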
+
+ /* DTIM Beacon every dtimperiod Beacon */
+ dtim_dec_count = num_beacons % dtimperiod;
+ /* CFP every cfpperiod DTIM Beacon */
+ cfp_dec_count = (num_beacons / dtimperiod) % cfpperiod;
+ if (dtim_dec_count)
+ cfp_dec_count++;
+
+ dtimcount -= dtim_dec_count;
+ if (dtimcount < 0)
+ dtimcount += dtimperiod;
+
+ cfpcount -= cfp_dec_count;
+ if (cfpcount < 0)
+ cfpcount += cfpperiod;
+
+ bs.bs_intval = intval;
+ bs.bs_nexttbtt = nexttbtt;
+ bs.bs_dtimperiod = dtimperiod*intval;
+ bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
+ bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
+ bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
+ bs.bs_cfpmaxduration = 0;
+
+ /*
+ * Calculate the number of consecutive beacons to miss before taking
+ * a BMISS interrupt. The configuration is specified in TU so we only
+ * need to calculate based on the beacon interval. Note that we clamp
+ * the result to at most 15 beacons.
+ */
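+ /*
+ * For example, with the default bmiss limit of 10 and a 100 TU
+ * beacon interval, bmiss_timeout is 1000 TU and the threshold
+ * computed below is DIV_ROUND_UP(1000, 100) = 10 beacons.
+ */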
+ if (sleepduration > intval) {
+ bs.bs_bmissthreshold = ATH_DEFAULT_BMISS_LIMIT / 2;
+ } else {
+ bs.bs_bmissthreshold = DIV_ROUND_UP(bmiss_timeout, intval);
+ if (bs.bs_bmissthreshold > 15)
+ bs.bs_bmissthreshold = 15;
+ else if (bs.bs_bmissthreshold <= 0)
+ bs.bs_bmissthreshold = 1;
+ }
+
+ /*
+ * Calculate sleep duration. The configuration is given in ms.
+ * We ensure a multiple of the beacon period is used. Also, if the sleep
+ * duration is greater than the DTIM period then it makes sense
+ * to make it a multiple of that.
+ *
+ * XXX fixed at 100ms
+ */
+
+ bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100), sleepduration);
+ if (bs.bs_sleepduration > bs.bs_dtimperiod)
+ bs.bs_sleepduration = bs.bs_dtimperiod;
+
+ /* TSF out of range threshold fixed at 1 second */
+ bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
+
+ ath_print(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
+ ath_print(common, ATH_DBG_BEACON,
+ "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
+ bs.bs_bmissthreshold, bs.bs_sleepduration,
+ bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
+
+ /* Set the computed STA beacon timers */
+
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ ath9k_hw_set_sta_beacon_timers(priv->ah, &bs);
+ imask |= ATH9K_INT_BMISS;
+ htc_imask = cpu_to_be32(imask);
+ WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+}
+
+static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
+ struct htc_beacon_config *bss_conf)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ enum ath9k_int imask = 0;
+ u32 nexttbtt, intval;
+ __be32 htc_imask = 0;
+ int ret;
+ u8 cmd_rsp;
+
+ intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+ nexttbtt = intval;
+ intval |= ATH9K_BEACON_ENA;
+ if (priv->op_flags & OP_ENABLE_BEACON)
+ imask |= ATH9K_INT_SWBA;
+
+ ath_print(common, ATH_DBG_BEACON,
+ "IBSS Beacon config, intval: %d, imask: 0x%x\n",
+ bss_conf->beacon_interval, imask);
+
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
+ priv->bmiss_cnt = 0;
+ htc_imask = cpu_to_be32(imask);
+ WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+}
+
+void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id, bool txok)
+{
+ dev_kfree_skb_any(skb);
+}
+
+void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending)
+{
+ struct ath9k_htc_vif *avp = (void *)priv->vif->drv_priv;
+ struct tx_beacon_header beacon_hdr;
+ struct ath9k_htc_tx_ctl tx_ctl;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *beacon;
+ u8 *tx_fhdr;
+
+ memset(&beacon_hdr, 0, sizeof(struct tx_beacon_header));
+ memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
+
+ /* FIXME: Handle BMISS */
+ if (beacon_pending != 0) {
+ priv->bmiss_cnt++;
+ return;
+ }
+
+ spin_lock_bh(&priv->beacon_lock);
+
+ if (unlikely(priv->op_flags & OP_SCANNING)) {
+ spin_unlock_bh(&priv->beacon_lock);
+ return;
+ }
+
+ /* Get a new beacon */
+ beacon = ieee80211_beacon_get(priv->hw, priv->vif);
+ if (!beacon) {
+ spin_unlock_bh(&priv->beacon_lock);
+ return;
+ }
+
+ info = IEEE80211_SKB_CB(beacon);
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ struct ieee80211_hdr *hdr =
+ (struct ieee80211_hdr *) beacon->data;
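+ /*
+ * The sequence number occupies bits 4-15 of seq_ctrl (the low four
+ * bits are the fragment number), so stepping by 0x10 advances the
+ * sequence number by one while the IEEE80211_SCTL_FRAG mask below
+ * preserves the fragment field.
+ */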
+ priv->seq_no += 0x10;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(priv->seq_no);
+ }
+
+ tx_ctl.type = ATH9K_HTC_NORMAL;
+ beacon_hdr.vif_index = avp->index;
+ tx_fhdr = skb_push(beacon, sizeof(beacon_hdr));
+ memcpy(tx_fhdr, (u8 *) &beacon_hdr, sizeof(beacon_hdr));
+
+ htc_send(priv->htc, beacon, priv->beacon_ep, &tx_ctl);
+
+ spin_unlock_bh(&priv->beacon_lock);
+}
+
+void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ cur_conf->beacon_interval = bss_conf->beacon_int;
+ if (cur_conf->beacon_interval == 0)
+ cur_conf->beacon_interval = 100;
+
+ cur_conf->dtim_period = bss_conf->dtim_period;
+ cur_conf->listen_interval = 1;
+ cur_conf->dtim_count = 1;
+ cur_conf->bmiss_timeout =
+ ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ ath9k_htc_beacon_config_sta(priv, cur_conf);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ath9k_htc_beacon_config_adhoc(priv, cur_conf);
+ break;
+ default:
+ ath_print(common, ATH_DBG_CONFIG,
+ "Unsupported beaconing mode\n");
+ return;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
new file mode 100644
index 0000000..dc01507
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -0,0 +1,834 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Atheros driver for 802.11n HTC-based wireless devices");
+
+static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
+module_param_named(debug, ath9k_debug, uint, 0);
+MODULE_PARM_DESC(debug, "Debugging mask");
+
+int htc_modparam_nohwcrypt;
+module_param_named(nohwcrypt, htc_modparam_nohwcrypt, int, 0444);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
+
+#define CHAN2G(_freq, _idx) { \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+static struct ieee80211_channel ath9k_2ghz_channels[] = {
+ CHAN2G(2412, 0), /* Channel 1 */
+ CHAN2G(2417, 1), /* Channel 2 */
+ CHAN2G(2422, 2), /* Channel 3 */
+ CHAN2G(2427, 3), /* Channel 4 */
+ CHAN2G(2432, 4), /* Channel 5 */
+ CHAN2G(2437, 5), /* Channel 6 */
+ CHAN2G(2442, 6), /* Channel 7 */
+ CHAN2G(2447, 7), /* Channel 8 */
+ CHAN2G(2452, 8), /* Channel 9 */
+ CHAN2G(2457, 9), /* Channel 10 */
+ CHAN2G(2462, 10), /* Channel 11 */
+ CHAN2G(2467, 11), /* Channel 12 */
+ CHAN2G(2472, 12), /* Channel 13 */
+ CHAN2G(2484, 13), /* Channel 14 */
+};
+
+/* Atheros hardware rate code addition for short preamble */
+#define SHPCHECK(__hw_rate, __flags) \
+ ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
+}
+
+static struct ieee80211_rate ath9k_legacy_rates[] = {
+ RATE(10, 0x1b, 0),
+ RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp: 0x1e */
+ RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp: 0x1d */
+ RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp: 0x1c */
+ RATE(60, 0x0b, 0),
+ RATE(90, 0x0f, 0),
+ RATE(120, 0x0a, 0),
+ RATE(180, 0x0e, 0),
+ RATE(240, 0x09, 0),
+ RATE(360, 0x0d, 0),
+ RATE(480, 0x08, 0),
+ RATE(540, 0x0c, 0),
+};
+
+static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
+{
+ int time_left;
+
+ if (atomic_read(&priv->htc->tgt_ready) > 0) {
+ atomic_dec(&priv->htc->tgt_ready);
+ return 0;
+ }
+
+ /* Firmware can take up to 50ms to get ready, to be safe use 1 second */
+ time_left = wait_for_completion_timeout(&priv->htc->target_wait, HZ);
+ if (!time_left) {
+ dev_err(priv->dev, "ath9k_htc: Target is unresponsive\n");
+ return -ETIMEDOUT;
+ }
+
+ atomic_dec(&priv->htc->tgt_ready);
+
+ return 0;
+}
+
+static void ath9k_deinit_priv(struct ath9k_htc_priv *priv)
+{
+ ath9k_htc_exit_debug(priv->ah);
+ ath9k_hw_deinit(priv->ah);
+ tasklet_kill(&priv->wmi_tasklet);
+ tasklet_kill(&priv->rx_tasklet);
+ tasklet_kill(&priv->tx_tasklet);
+ kfree(priv->ah);
+ priv->ah = NULL;
+}
+
+static void ath9k_deinit_device(struct ath9k_htc_priv *priv)
+{
+ struct ieee80211_hw *hw = priv->hw;
+
+ wiphy_rfkill_stop_polling(hw->wiphy);
+ ath9k_deinit_leds(priv);
+ ieee80211_unregister_hw(hw);
+ ath9k_rx_cleanup(priv);
+ ath9k_tx_cleanup(priv);
+ ath9k_deinit_priv(priv);
+}
+
+static inline int ath9k_htc_connect_svc(struct ath9k_htc_priv *priv,
+ u16 service_id,
+ void (*tx) (void *,
+ struct sk_buff *,
+ enum htc_endpoint_id,
+ bool txok),
+ enum htc_endpoint_id *ep_id)
+{
+ struct htc_service_connreq req;
+
+ memset(&req, 0, sizeof(struct htc_service_connreq));
+
+ req.service_id = service_id;
+ req.ep_callbacks.priv = priv;
+ req.ep_callbacks.rx = ath9k_htc_rxep;
+ req.ep_callbacks.tx = tx;
+
+ return htc_connect_service(priv->htc, &req, ep_id);
+}
+
+static int ath9k_init_htc_services(struct ath9k_htc_priv *priv)
+{
+ int ret;
+
+ /* WMI CMD */
+ ret = ath9k_wmi_connect(priv->htc, priv->wmi, &priv->wmi_cmd_ep);
+ if (ret)
+ goto err;
+
+ /* Beacon */
+ ret = ath9k_htc_connect_svc(priv, WMI_BEACON_SVC, ath9k_htc_beaconep,
+ &priv->beacon_ep);
+ if (ret)
+ goto err;
+
+ /* CAB */
+ ret = ath9k_htc_connect_svc(priv, WMI_CAB_SVC, ath9k_htc_txep,
+ &priv->cab_ep);
+ if (ret)
+ goto err;
+
+ /* UAPSD */
+ ret = ath9k_htc_connect_svc(priv, WMI_UAPSD_SVC, ath9k_htc_txep,
+ &priv->uapsd_ep);
+ if (ret)
+ goto err;
+
+ /* MGMT */
+ ret = ath9k_htc_connect_svc(priv, WMI_MGMT_SVC, ath9k_htc_txep,
+ &priv->mgmt_ep);
+ if (ret)
+ goto err;
+
+ /* DATA BE */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_BE_SVC, ath9k_htc_txep,
+ &priv->data_be_ep);
+ if (ret)
+ goto err;
+
+ /* DATA BK */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_BK_SVC, ath9k_htc_txep,
+ &priv->data_bk_ep);
+ if (ret)
+ goto err;
+
+ /* DATA VI */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_VI_SVC, ath9k_htc_txep,
+ &priv->data_vi_ep);
+ if (ret)
+ goto err;
+
+ /* DATA VO */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_VO_SVC, ath9k_htc_txep,
+ &priv->data_vo_ep);
+ if (ret)
+ goto err;
+
+ ret = htc_init(priv->htc);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(priv->dev, "ath9k_htc: Unable to initialize HTC services\n");
+ return ret;
+}
+
+static int ath9k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ return ath_reg_notifier_apply(wiphy, request,
+ ath9k_hw_regulatory(priv->ah));
+}
+
+static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ __be32 val, reg = cpu_to_be32(reg_offset);
+ int r;
+
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
+ (u8 *) &reg, sizeof(reg),
+ (u8 *) &val, sizeof(val),
+ 100);
+ if (unlikely(r)) {
+ ath_print(common, ATH_DBG_WMI,
+ "REGISTER READ FAILED: (0x%04x, %d)\n",
+ reg_offset, r);
+ return -EIO;
+ }
+
+ return be32_to_cpu(val);
+}
+
+static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ __be32 buf[2] = {
+ cpu_to_be32(reg_offset),
+ cpu_to_be32(val),
+ };
+ int r;
+
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+ (u8 *) &buf, sizeof(buf),
+ (u8 *) &val, sizeof(val),
+ 100);
+ if (unlikely(r)) {
+ ath_print(common, ATH_DBG_WMI,
+ "REGISTER WRITE FAILED:(0x%04x, %d)\n",
+ reg_offset, r);
+ }
+}
+
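+/*
+ * Buffered register writes: each write is queued in the WMI multi_write
+ * array and a single WMI_REG_WRITE_CMDID command carrying the whole batch
+ * is sent once MAX_CMD_NUMBER entries have accumulated (or on an explicit
+ * flush), reducing the number of round trips to the target.
+ */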
+static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ u32 rsp_status;
+ int r;
+
+ mutex_lock(&priv->wmi->multi_write_mutex);
+
+ /* Store the register/value */
+ priv->wmi->multi_write[priv->wmi->multi_write_idx].reg =
+ cpu_to_be32(reg_offset);
+ priv->wmi->multi_write[priv->wmi->multi_write_idx].val =
+ cpu_to_be32(val);
+
+ priv->wmi->multi_write_idx++;
+
+ /* If the buffer is full, send it out. */
+ if (priv->wmi->multi_write_idx == MAX_CMD_NUMBER) {
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+ (u8 *) &priv->wmi->multi_write,
+ sizeof(struct register_write) * priv->wmi->multi_write_idx,
+ (u8 *) &rsp_status, sizeof(rsp_status),
+ 100);
+ if (unlikely(r)) {
+ ath_print(common, ATH_DBG_WMI,
+ "REGISTER WRITE FAILED, multi len: %d\n",
+ priv->wmi->multi_write_idx);
+ }
+ priv->wmi->multi_write_idx = 0;
+ }
+
+ mutex_unlock(&priv->wmi->multi_write_mutex);
+}
+
+static void ath9k_regwrite(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
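+ /* Batch writes only while a write-buffer section is open (mwrite_cnt > 0) */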
+ if (atomic_read(&priv->wmi->mwrite_cnt))
+ ath9k_regwrite_buffer(hw_priv, val, reg_offset);
+ else
+ ath9k_regwrite_single(hw_priv, val, reg_offset);
+}
+
+static void ath9k_enable_regwrite_buffer(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ atomic_inc(&priv->wmi->mwrite_cnt);
+}
+
+static void ath9k_disable_regwrite_buffer(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ atomic_dec(&priv->wmi->mwrite_cnt);
+}
+
+static void ath9k_regwrite_flush(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ u32 rsp_status;
+ int r;
+
+ mutex_lock(&priv->wmi->multi_write_mutex);
+
+ if (priv->wmi->multi_write_idx) {
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+ (u8 *) &priv->wmi->multi_write,
+ sizeof(struct register_write) * priv->wmi->multi_write_idx,
+ (u8 *) &rsp_status, sizeof(rsp_status),
+ 100);
+ if (unlikely(r)) {
+ ath_print(common, ATH_DBG_WMI,
+ "REGISTER WRITE FAILED, multi len: %d\n",
+ priv->wmi->multi_write_idx);
+ }
+ priv->wmi->multi_write_idx = 0;
+ }
+
+ mutex_unlock(&priv->wmi->multi_write_mutex);
+}
+
+static const struct ath_ops ath9k_common_ops = {
+ .read = ath9k_regread,
+ .write = ath9k_regwrite,
+ .enable_write_buffer = ath9k_enable_regwrite_buffer,
+ .disable_write_buffer = ath9k_disable_regwrite_buffer,
+ .write_flush = ath9k_regwrite_flush,
+};
+
+static void ath_usb_read_cachesize(struct ath_common *common, int *csz)
+{
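+ /* Reported in 32-bit words; the caller converts back to bytes (csz << 2). */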
+ *csz = L1_CACHE_BYTES >> 2;
+}
+
+static bool ath_usb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
+{
+ struct ath_hw *ah = (struct ath_hw *) common->ah;
+
+ (void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
+
+ if (!ath9k_hw_wait(ah,
+ AR_EEPROM_STATUS_DATA,
+ AR_EEPROM_STATUS_DATA_BUSY |
+ AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
+ AH_WAIT_TIMEOUT))
+ return false;
+
+ *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
+ AR_EEPROM_STATUS_DATA_VAL);
+
+ return true;
+}
+
+static const struct ath_bus_ops ath9k_usb_bus_ops = {
+ .ath_bus_type = ATH_USB,
+ .read_cachesize = ath_usb_read_cachesize,
+ .eeprom_read = ath_usb_eeprom_read,
+};
+
+static void setup_ht_cap(struct ath9k_htc_priv *priv,
+ struct ieee80211_sta_ht_cap *ht_info)
+{
+ ht_info->ht_supported = true;
+ ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SM_PS |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+ ht_info->mcs.rx_mask[0] = 0xff;
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+}
+
+static int ath9k_init_queues(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++)
+ priv->hwq_map[i] = -1;
+
+ if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BE)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BE traffic\n");
+ goto err;
+ }
+
+ if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BK)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BK traffic\n");
+ goto err;
+ }
+ if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VI)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VI traffic\n");
+ goto err;
+ }
+ if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VO)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VO traffic\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ return -EINVAL;
+}
+
+static void ath9k_init_crypto(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int i = 0;
+
+ /* Get the hardware key cache size. */
+ common->keymax = priv->ah->caps.keycache_size;
+ if (common->keymax > ATH_KEYMAX) {
+ ath_print(common, ATH_DBG_ANY,
+ "Warning, using only %u entries in %u key cache\n",
+ ATH_KEYMAX, common->keymax);
+ common->keymax = ATH_KEYMAX;
+ }
+
+ /*
+ * Reset the key cache since some parts do not
+ * reset the contents on initial power up.
+ */
+ for (i = 0; i < common->keymax; i++)
+ ath9k_hw_keyreset(priv->ah, (u16) i);
+
+ if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)) {
+ /*
+ * Whether we should enable h/w TKIP MIC.
+ * XXX: if we don't support WME TKIP MIC, then we wouldn't
+ * report WMM capable, so it's always safe to turn on
+ * TKIP MIC in this case.
+ */
+ ath9k_hw_setcapability(priv->ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
+ }
+
+ /*
+ * Check whether the separate key cache entries
+ * are required to handle both tx+rx MIC keys.
+ * With split mic keys the number of stations is limited
+ * to 27, otherwise 59.
+ */
+ if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)
+ && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_MIC, NULL)
+ && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_TKIP_SPLIT,
+ 0, NULL))
+ common->splitmic = 1;
+
+ /* turn on mcast key search if possible */
+ if (!ath9k_hw_getcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
+ (void)ath9k_hw_setcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH,
+ 1, 1, NULL);
+}
+
+static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
+{
+ if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes)) {
+ priv->sbands[IEEE80211_BAND_2GHZ].channels =
+ ath9k_2ghz_channels;
+ priv->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
+ priv->sbands[IEEE80211_BAND_2GHZ].n_channels =
+ ARRAY_SIZE(ath9k_2ghz_channels);
+ priv->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+ priv->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates);
+ }
+}
+
+static void ath9k_init_misc(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+
+ common->tx_chainmask = priv->ah->caps.tx_chainmask;
+ common->rx_chainmask = priv->ah->caps.rx_chainmask;
+
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
+
+ priv->op_flags |= OP_TXAGGR;
+ priv->ah->opmode = NL80211_IFTYPE_STATION;
+}
+
+static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid)
+{
+ struct ath_hw *ah = NULL;
+ struct ath_common *common;
+ int ret = 0, csz = 0;
+
+ priv->op_flags |= OP_INVALID;
+
+ ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
+ if (!ah)
+ return -ENOMEM;
+
+ ah->hw_version.devid = devid;
+ ah->hw_version.subsysid = 0; /* FIXME */
+ priv->ah = ah;
+
+ common = ath9k_hw_common(ah);
+ common->ops = &ath9k_common_ops;
+ common->bus_ops = &ath9k_usb_bus_ops;
+ common->ah = ah;
+ common->hw = priv->hw;
+ common->priv = priv;
+ common->debug_mask = ath9k_debug;
+
+ spin_lock_init(&priv->wmi->wmi_lock);
+ spin_lock_init(&priv->beacon_lock);
+ spin_lock_init(&priv->tx_lock);
+ mutex_init(&priv->mutex);
+ mutex_init(&priv->aggr_work.mutex);
+ mutex_init(&priv->htc_pm_lock);
+ tasklet_init(&priv->wmi_tasklet, ath9k_wmi_tasklet,
+ (unsigned long)priv);
+ tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
+ (unsigned long)priv);
+ tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, (unsigned long)priv);
+ INIT_DELAYED_WORK(&priv->ath9k_aggr_work, ath9k_htc_aggr_work);
+ INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work);
+ INIT_WORK(&priv->ps_work, ath9k_ps_work);
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ ath_read_cachesize(common, &csz);
+ common->cachelsz = csz << 2; /* convert to bytes */
+
+ ret = ath9k_hw_init(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize hardware; "
+ "initialization status: %d\n", ret);
+ goto err_hw;
+ }
+
+ ret = ath9k_htc_init_debug(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to create debugfs files\n");
+ goto err_debug;
+ }
+
+ ret = ath9k_init_queues(priv);
+ if (ret)
+ goto err_queues;
+
+ ath9k_init_crypto(priv);
+ ath9k_init_channels_rates(priv);
+ ath9k_init_misc(priv);
+
+ return 0;
+
+err_queues:
+ ath9k_htc_exit_debug(ah);
+err_debug:
+ ath9k_hw_deinit(ah);
+err_hw:
+
+ kfree(ah);
+ priv->ah = NULL;
+
+ return ret;
+}
+
+static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
+ struct ieee80211_hw *hw)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK;
+
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
+
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->queues = 4;
+ hw->channel_change_time = 5000;
+ hw->max_listen_interval = 10;
+ hw->vif_data_size = sizeof(struct ath9k_htc_vif);
+ hw->sta_data_size = sizeof(struct ath9k_htc_sta);
+
+ /* tx_frame_hdr is larger than tx_mgmt_hdr anyway */
+ hw->extra_tx_headroom = sizeof(struct tx_frame_hdr) +
+ sizeof(struct htc_frame_hdr) + 4;
+
+ if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &priv->sbands[IEEE80211_BAND_2GHZ];
+
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
+ setup_ht_cap(priv,
+ &priv->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+ }
+
+ SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
+}
+
+static int ath9k_init_device(struct ath9k_htc_priv *priv, u16 devid)
+{
+ struct ieee80211_hw *hw = priv->hw;
+ struct ath_common *common;
+ struct ath_hw *ah;
+ int error = 0;
+ struct ath_regulatory *reg;
+
+ /* Bring up device */
+ error = ath9k_init_priv(priv, devid);
+ if (error != 0)
+ goto err_init;
+
+ ah = priv->ah;
+ common = ath9k_hw_common(ah);
+ ath9k_set_hw_capab(priv, hw);
+
+ /* Initialize regulatory */
+ error = ath_regd_init(&common->regulatory, priv->hw->wiphy,
+ ath9k_reg_notifier);
+ if (error)
+ goto err_regd;
+
+ reg = &common->regulatory;
+
+ /* Setup TX */
+ error = ath9k_tx_init(priv);
+ if (error != 0)
+ goto err_tx;
+
+ /* Setup RX */
+ error = ath9k_rx_init(priv);
+ if (error != 0)
+ goto err_rx;
+
+ /* Register with mac80211 */
+ error = ieee80211_register_hw(hw);
+ if (error)
+ goto err_register;
+
+ /* Handle world regulatory */
+ if (!ath_is_world_regd(reg)) {
+ error = regulatory_hint(hw->wiphy, reg->alpha2);
+ if (error)
+ goto err_world;
+ }
+
+ ath9k_init_leds(priv);
+ ath9k_start_rfkill_poll(priv);
+
+ return 0;
+
+err_world:
+ ieee80211_unregister_hw(hw);
+err_register:
+ ath9k_rx_cleanup(priv);
+err_rx:
+ ath9k_tx_cleanup(priv);
+err_tx:
+ /* Nothing */
+err_regd:
+ ath9k_deinit_priv(priv);
+err_init:
+ return error;
+}
+
+int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
+ u16 devid)
+{
+ struct ieee80211_hw *hw;
+ struct ath9k_htc_priv *priv;
+ int ret;
+
+ hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops);
+ if (!hw)
+ return -ENOMEM;
+
+ priv = hw->priv;
+ priv->hw = hw;
+ priv->htc = htc_handle;
+ priv->dev = dev;
+ htc_handle->drv_priv = priv;
+ SET_IEEE80211_DEV(hw, priv->dev);
+
+ ret = ath9k_htc_wait_for_target(priv);
+ if (ret)
+ goto err_free;
+
+ priv->wmi = ath9k_init_wmi(priv);
+ if (!priv->wmi) {
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ ret = ath9k_init_htc_services(priv);
+ if (ret)
+ goto err_init;
+
+ /* The device may have been unplugged earlier. */
+ priv->op_flags &= ~OP_UNPLUGGED;
+
+ ret = ath9k_init_device(priv, devid);
+ if (ret)
+ goto err_init;
+
+ return 0;
+
+err_init:
+ ath9k_deinit_wmi(priv);
+err_free:
+ ieee80211_free_hw(hw);
+ return ret;
+}
+
+void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
+{
+ if (htc_handle->drv_priv) {
+
+ /* Check if the device has been yanked out. */
+ if (hotunplug)
+ htc_handle->drv_priv->op_flags |= OP_UNPLUGGED;
+
+ ath9k_deinit_device(htc_handle->drv_priv);
+ ath9k_deinit_wmi(htc_handle->drv_priv);
+ ieee80211_free_hw(htc_handle->drv_priv->hw);
+ }
+}
+
+#ifdef CONFIG_PM
+int ath9k_htc_resume(struct htc_target *htc_handle)
+{
+ int ret;
+
+ ret = ath9k_htc_wait_for_target(htc_handle->drv_priv);
+ if (ret)
+ return ret;
+
+ ret = ath9k_init_htc_services(htc_handle->drv_priv);
+ return ret;
+}
+#endif
+
+static int __init ath9k_htc_init(void)
+{
+ int error;
+
+ error = ath9k_htc_debug_create_root();
+ if (error < 0) {
+ printk(KERN_ERR
+ "ath9k_htc: Unable to create debugfs root: %d\n",
+ error);
+ goto err_dbg;
+ }
+
+ error = ath9k_hif_usb_init();
+ if (error < 0) {
+ printk(KERN_ERR
+ "ath9k_htc: No USB devices found,"
+ " driver not installed.\n");
+ error = -ENODEV;
+ goto err_usb;
+ }
+
+ return 0;
+
+err_usb:
+ ath9k_htc_debug_remove_root();
+err_dbg:
+ return error;
+}
+module_init(ath9k_htc_init);
+
+static void __exit ath9k_htc_exit(void)
+{
+ ath9k_hif_usb_exit();
+ ath9k_htc_debug_remove_root();
+ printk(KERN_INFO "ath9k_htc: Driver unloaded\n");
+}
+module_exit(ath9k_htc_exit);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
new file mode 100644
index 0000000..9d371c1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -0,0 +1,1775 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+static struct dentry *ath9k_debugfs_root;
+#endif
+
+/*************/
+/* Utilities */
+/*************/
+
+static void ath_update_txpow(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ u32 txpow;
+
+ if (priv->curtxpow != priv->txpowlimit) {
+ ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit);
+ /* read back in case value is clamped */
+ ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
+ priv->curtxpow = txpow;
+ }
+}
+
+/* HACK Alert: Use 11NG for 2.4 GHz, 11NA for 5 GHz */
+static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
+ struct ath9k_channel *ichan)
+{
+ enum htc_phymode mode;
+
+ mode = HTC_MODE_AUTO;
+
+ switch (ichan->chanmode) {
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ mode = HTC_MODE_11NG;
+ break;
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ mode = HTC_MODE_11NA;
+ break;
+ default:
+ break;
+ }
+
+ return mode;
+}
+
+static bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
+ enum ath9k_power_mode mode)
+{
+ bool ret;
+
+ mutex_lock(&priv->htc_pm_lock);
+ ret = ath9k_hw_setpower(priv->ah, mode);
+ mutex_unlock(&priv->htc_pm_lock);
+
+ return ret;
+}
+
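+/*
+ * Power-save references are counted: the first ath9k_htc_ps_wakeup() call
+ * forces the chip awake and nested calls only bump ps_usecount; the matching
+ * final ath9k_htc_ps_restore() drops the chip back to full or network sleep
+ * depending on ps_idle/ps_enabled, or leaves it awake otherwise.
+ */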
+void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv)
+{
+ mutex_lock(&priv->htc_pm_lock);
+ if (++priv->ps_usecount != 1)
+ goto unlock;
+ ath9k_hw_setpower(priv->ah, ATH9K_PM_AWAKE);
+
+unlock:
+ mutex_unlock(&priv->htc_pm_lock);
+}
+
+void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv)
+{
+ mutex_lock(&priv->htc_pm_lock);
+ if (--priv->ps_usecount != 0)
+ goto unlock;
+
+ if (priv->ps_idle)
+ ath9k_hw_setpower(priv->ah, ATH9K_PM_FULL_SLEEP);
+ else if (priv->ps_enabled)
+ ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP);
+
+unlock:
+ mutex_unlock(&priv->htc_pm_lock);
+}
+
+void ath9k_ps_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv =
+ container_of(work, struct ath9k_htc_priv,
+ ps_work);
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+
+ /* The chip wakes up after receiving the first beacon
+ while network sleep is enabled. For the driver to
+ be in sync with the hw, set the chip to awake and
+ only then set it to sleep.
+ */
+ ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
+}
+
+static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
+ struct ieee80211_hw *hw,
+ struct ath9k_channel *hchan)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_conf *conf = &common->hw->conf;
+ bool fastcc = true;
+ struct ieee80211_channel *channel = hw->conf.channel;
+ enum htc_phymode mode;
+ __be16 htc_mode;
+ u8 cmd_rsp;
+ int ret;
+
+ if (priv->op_flags & OP_INVALID)
+ return -EIO;
+
+ if (priv->op_flags & OP_FULL_RESET)
+ fastcc = false;
+
+ /* Fiddle around with fastcc later on, for now just use full reset */
+ fastcc = false;
+ ath9k_htc_ps_wakeup(priv);
+ htc_stop(priv->htc);
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
+ WMI_CMD(WMI_STOP_RECV_CMDID);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "(%u MHz) -> (%u MHz), HT: %d, HT40: %d\n",
+ priv->ah->curchan->channel,
+ channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf));
+
+ ret = ath9k_hw_reset(ah, hchan, fastcc);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to reset channel (%u Mhz) "
+ "reset status %d\n", channel->center_freq, ret);
+ goto err;
+ }
+
+ ath_update_txpow(priv);
+
+ WMI_CMD(WMI_START_RECV_CMDID);
+ if (ret)
+ goto err;
+
+ ath9k_host_rx_init(priv);
+
+ mode = ath9k_htc_get_curmode(priv, hchan);
+ htc_mode = cpu_to_be16(mode);
+ WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
+ if (ret)
+ goto err;
+
+ WMI_CMD(WMI_ENABLE_INTR_CMDID);
+ if (ret)
+ goto err;
+
+ htc_start(priv->htc);
+
+ priv->op_flags &= ~OP_FULL_RESET;
+err:
+ ath9k_htc_ps_restore(priv);
+ return ret;
+}
+
+static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ if (priv->nvifs > 0)
+ return -ENOBUFS;
+
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
+
+ hvif.opmode = cpu_to_be32(HTC_M_MONITOR);
+ priv->ah->opmode = NL80211_IFTYPE_MONITOR;
+ hvif.index = priv->nvifs;
+
+ WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
+ if (ret)
+ return ret;
+
+ priv->nvifs++;
+ return 0;
+}
+
+static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
+ hvif.index = 0; /* Should do for now */
+ WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
+ priv->nvifs--;
+
+ return ret;
+}
+
+static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_sta tsta;
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
+ struct ath9k_htc_sta *ista;
+ int ret;
+ u8 cmd_rsp;
+
+ if (priv->nstations >= ATH9K_HTC_MAX_STA)
+ return -ENOBUFS;
+
+ memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
+
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ memcpy(&tsta.macaddr, sta->addr, ETH_ALEN);
+ memcpy(&tsta.bssid, common->curbssid, ETH_ALEN);
+ tsta.associd = common->curaid;
+ tsta.is_vif_sta = 0;
+ tsta.valid = true;
+ ista->index = priv->nstations;
+ } else {
+ memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
+ tsta.is_vif_sta = 1;
+ }
+
+ tsta.sta_index = priv->nstations;
+ tsta.vif_index = avp->index;
+ tsta.maxampdu = 0xffff;
+ if (sta && sta->ht_cap.ht_supported)
+ tsta.flags = cpu_to_be16(ATH_HTC_STA_HT);
+
+ WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
+ if (ret) {
+ if (sta)
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to add station entry for: %pM\n", sta->addr);
+ return ret;
+ }
+
+ if (sta)
+ ath_print(common, ATH_DBG_CONFIG,
+ "Added a station entry for: %pM (idx: %d)\n",
+ sta->addr, tsta.sta_index);
+
+ priv->nstations++;
+ return 0;
+}
+
+static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_sta *ista;
+ int ret;
+ u8 cmd_rsp, sta_idx;
+
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ sta_idx = ista->index;
+ } else {
+ sta_idx = 0;
+ }
+
+ WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
+ if (ret) {
+ if (sta)
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to remove station entry for: %pM\n",
+ sta->addr);
+ return ret;
+ }
+
+ if (sta)
+ ath_print(common, ATH_DBG_CONFIG,
+ "Removed a station entry for: %pM (idx: %d)\n",
+ sta->addr, sta_idx);
+
+ priv->nstations--;
+ return 0;
+}
+
+static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
+{
+ struct ath9k_htc_cap_target tcap;
+ int ret;
+ u8 cmd_rsp;
+
+ memset(&tcap, 0, sizeof(struct ath9k_htc_cap_target));
+
+ /* FIXME: Values are hardcoded */
+ tcap.flags = 0x240c40;
+ tcap.flags_ext = 0x80601000;
+ tcap.ampdu_limit = 0xffff0000;
+ tcap.ampdu_subframes = 20;
+ tcap.tx_chainmask_legacy = 1;
+ tcap.protmode = 1;
+ tcap.tx_chainmask = 1;
+
+ WMI_CMD_BUF(WMI_TARGET_IC_UPDATE_CMDID, &tcap);
+
+ return ret;
+}
+
+static int ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ struct ieee80211_supported_band *sband;
+ struct ath9k_htc_target_rate trate;
+ u32 caps = 0;
+ u8 cmd_rsp;
+ int i, j, ret;
+
+ memset(&trate, 0, sizeof(trate));
+
+ /* Only 2GHz is supported */
+ sband = priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+
+ for (i = 0, j = 0; i < sband->n_bitrates; i++) {
+ if (sta->supp_rates[sband->band] & BIT(i)) {
+ priv->tgt_rate.rates.legacy_rates.rs_rates[j]
+ = (sband->bitrates[i].bitrate * 2) / 10;
+ j++;
+ }
+ }
+ priv->tgt_rate.rates.legacy_rates.rs_nrates = j;
+
+ if (sta->ht_cap.ht_supported) {
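+ /* Walk the MCS rx_mask bitmap; 802.11n defines MCS indices 0-76. */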
+ for (i = 0, j = 0; i < 77; i++) {
+ if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
+ priv->tgt_rate.rates.ht_rates.rs_rates[j++] = i;
+ if (j == ATH_HTC_RATE_MAX)
+ break;
+ }
+ priv->tgt_rate.rates.ht_rates.rs_nrates = j;
+
+ caps = WLAN_RC_HT_FLAG;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ caps |= WLAN_RC_40_FLAG;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ caps |= WLAN_RC_SGI_FLAG;
+
+ }
+
+ priv->tgt_rate.sta_index = ista->index;
+ priv->tgt_rate.isnew = 1;
+ trate = priv->tgt_rate;
+ priv->tgt_rate.capflags = cpu_to_be32(caps);
+ trate.capflags = cpu_to_be32(caps);
+
+ WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize Rate information on target\n");
+ return ret;
+ }
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Updated target STA: %pM (caps: 0x%x)\n", sta->addr, caps);
+ return 0;
+}
+
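+/*
+ * Decide whether the target's rate-control capabilities need refreshing:
+ * only when HT is in use, the interface is associated and not scanning,
+ * and the HT20/HT40 width in the mac80211 conf no longer matches the
+ * current hardware channel mode.
+ */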
+static bool check_rc_update(struct ieee80211_hw *hw, bool *cw40)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (!conf_is_ht(conf))
+ return false;
+
+ if (!(priv->op_flags & OP_ASSOCIATED) ||
+ (priv->op_flags & OP_SCANNING))
+ return false;
+
+ if (conf_is_ht40(conf)) {
+ if (priv->ah->curchan->chanmode &
+ (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)) {
+ return false;
+ } else {
+ *cw40 = true;
+ return true;
+ }
+ } else { /* ht20 */
+ if (priv->ah->curchan->chanmode & CHANNEL_HT20)
+ return false;
+ else
+ return true;
+ }
+}
+
+static void ath9k_htc_rc_update(struct ath9k_htc_priv *priv, bool is_cw40)
+{
+ struct ath9k_htc_target_rate trate;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int ret;
+ u32 caps = be32_to_cpu(priv->tgt_rate.capflags);
+ u8 cmd_rsp;
+
+ memset(&trate, 0, sizeof(trate));
+
+ trate = priv->tgt_rate;
+
+ if (is_cw40)
+ caps |= WLAN_RC_40_FLAG;
+ else
+ caps &= ~WLAN_RC_40_FLAG;
+
+ priv->tgt_rate.capflags = cpu_to_be32(caps);
+ trate.capflags = cpu_to_be32(caps);
+
+ WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to update Rate information on target\n");
+ return;
+ }
+
+ ath_print(common, ATH_DBG_CONFIG, "Rate control updated with "
+ "caps:0x%x on target\n", priv->tgt_rate.capflags);
+}
+
+static int ath9k_htc_aggr_oper(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ u8 *sta_addr, u8 tid, bool oper)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_aggr aggr;
+ struct ieee80211_sta *sta = NULL;
+ struct ath9k_htc_sta *ista;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ if (tid >= ATH9K_HTC_MAX_TID)
+ return -EINVAL;
+
+ memset(&aggr, 0, sizeof(struct ath9k_htc_target_aggr));
+
+ rcu_read_lock();
+
+ /* Check if we are able to retrieve the station */
+ sta = ieee80211_find_sta(vif, sta_addr);
+ if (!sta) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+
+ if (oper)
+ ista->tid_state[tid] = AGGR_START;
+ else
+ ista->tid_state[tid] = AGGR_STOP;
+
+ aggr.sta_index = ista->index;
+
+ rcu_read_unlock();
+
+ aggr.tidno = tid;
+ aggr.aggr_enable = oper;
+
+ WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
+ if (ret)
+ ath_print(common, ATH_DBG_CONFIG,
+ "Unable to %s TX aggregation for (%pM, %d)\n",
+ (oper) ? "start" : "stop", sta->addr, tid);
+ else
+ ath_print(common, ATH_DBG_CONFIG,
+ "%s aggregation for (%pM, %d)\n",
+ (oper) ? "Starting" : "Stopping", sta->addr, tid);
+
+ return ret;
+}
+
+void ath9k_htc_aggr_work(struct work_struct *work)
+{
+ int ret = 0;
+ struct ath9k_htc_priv *priv =
+ container_of(work, struct ath9k_htc_priv,
+ ath9k_aggr_work.work);
+ struct ath9k_htc_aggr_work *wk = &priv->aggr_work;
+
+ mutex_lock(&wk->mutex);
+
+ switch (wk->action) {
+ case IEEE80211_AMPDU_TX_START:
+ ret = ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
+ wk->tid, true);
+ if (!ret)
+ ieee80211_start_tx_ba_cb(wk->vif, wk->sta_addr,
+ wk->tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
+ wk->tid, false);
+ ieee80211_stop_tx_ba_cb(wk->vif, wk->sta_addr, wk->tid);
+ break;
+ default:
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
+ "Unknown AMPDU action\n");
+ }
+
+ mutex_unlock(&wk->mutex);
+}
+
+/*********/
+/* DEBUG */
+/*********/
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+
+static int ath9k_debugfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv =
+ (struct ath9k_htc_priv *) file->private_data;
+ struct ath9k_htc_target_stats cmd_rsp;
+ char buf[512];
+ unsigned int len = 0;
+ int ret = 0;
+
+ memset(&cmd_rsp, 0, sizeof(cmd_rsp));
+
+ WMI_CMD(WMI_TGT_STATS_CMDID);
+ if (ret)
+ return -EINVAL;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Short Retries",
+ be32_to_cpu(cmd_rsp.tx_shortretry));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Long Retries",
+ be32_to_cpu(cmd_rsp.tx_longretry));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Xretries",
+ be32_to_cpu(cmd_rsp.tx_xretries));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Unaggr. Xretries",
+ be32_to_cpu(cmd_rsp.ht_txunaggr_xretry));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Xretries (HT)",
+ be32_to_cpu(cmd_rsp.ht_tx_xretries));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Rate", priv->debug.txrate);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_tgt_stats = {
+ .read = read_file_tgt_stats,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv =
+ (struct ath9k_htc_priv *) file->private_data;
+ char buf[512];
+ unsigned int len = 0;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers queued",
+ priv->debug.tx_stats.buf_queued);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers completed",
+ priv->debug.tx_stats.buf_completed);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs queued",
+ priv->debug.tx_stats.skb_queued);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs completed",
+ priv->debug.tx_stats.skb_completed);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs dropped",
+ priv->debug.tx_stats.skb_dropped);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_xmit = {
+ .read = read_file_xmit,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+static ssize_t read_file_recv(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv =
+ (struct ath9k_htc_priv *) file->private_data;
+ char buf[512];
+ unsigned int len = 0;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs allocated",
+ priv->debug.rx_stats.skb_allocated);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs completed",
+ priv->debug.rx_stats.skb_completed);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs Dropped",
+ priv->debug.rx_stats.skb_dropped);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_recv = {
+ .read = read_file_recv,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+int ath9k_htc_init_debug(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ if (!ath9k_debugfs_root)
+ return -ENOENT;
+
+ priv->debug.debugfs_phy = debugfs_create_dir(wiphy_name(priv->hw->wiphy),
+ ath9k_debugfs_root);
+ if (!priv->debug.debugfs_phy)
+ goto err;
+
+ priv->debug.debugfs_tgt_stats = debugfs_create_file("tgt_stats", S_IRUSR,
+ priv->debug.debugfs_phy,
+ priv, &fops_tgt_stats);
+ if (!priv->debug.debugfs_tgt_stats)
+ goto err;
+
+ priv->debug.debugfs_xmit = debugfs_create_file("xmit", S_IRUSR,
+ priv->debug.debugfs_phy,
+ priv, &fops_xmit);
+ if (!priv->debug.debugfs_xmit)
+ goto err;
+
+ priv->debug.debugfs_recv = debugfs_create_file("recv", S_IRUSR,
+ priv->debug.debugfs_phy,
+ priv, &fops_recv);
+ if (!priv->debug.debugfs_recv)
+ goto err;
+
+ return 0;
+
+err:
+ ath9k_htc_exit_debug(ah);
+ return -ENOMEM;
+}
+
+void ath9k_htc_exit_debug(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ debugfs_remove(priv->debug.debugfs_recv);
+ debugfs_remove(priv->debug.debugfs_xmit);
+ debugfs_remove(priv->debug.debugfs_tgt_stats);
+ debugfs_remove(priv->debug.debugfs_phy);
+}
+
+int ath9k_htc_debug_create_root(void)
+{
+ ath9k_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!ath9k_debugfs_root)
+ return -ENOENT;
+
+ return 0;
+}
+
+void ath9k_htc_debug_remove_root(void)
+{
+ debugfs_remove(ath9k_debugfs_root);
+ ath9k_debugfs_root = NULL;
+}
+
+#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
+
+/*******/
+/* ANI */
+/*******/
+
+static void ath_start_ani(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ unsigned long timestamp = jiffies_to_msecs(jiffies);
+
+ common->ani.longcal_timer = timestamp;
+ common->ani.shortcal_timer = timestamp;
+ common->ani.checkani_timer = timestamp;
+
+ ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
+ msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
+}
+
+void ath9k_ani_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv =
+ container_of(work, struct ath9k_htc_priv,
+ ath9k_ani_work.work);
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool longcal = false;
+ bool shortcal = false;
+ bool aniflag = false;
+ unsigned int timestamp = jiffies_to_msecs(jiffies);
+ u32 cal_interval, short_cal_interval;
+
+ short_cal_interval = ATH_STA_SHORT_CALINTERVAL;
+
+ /* Only calibrate if awake */
+ if (ah->power_mode != ATH9K_PM_AWAKE)
+ goto set_timer;
+
+ /* Long calibration runs independently of short calibration. */
+ if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
+ longcal = true;
+ ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
+ common->ani.longcal_timer = timestamp;
+ }
+
+ /* Short calibration applies only while caldone is false */
+ if (!common->ani.caldone) {
+ if ((timestamp - common->ani.shortcal_timer) >=
+ short_cal_interval) {
+ shortcal = true;
+ ath_print(common, ATH_DBG_ANI,
+ "shortcal @%lu\n", jiffies);
+ common->ani.shortcal_timer = timestamp;
+ common->ani.resetcal_timer = timestamp;
+ }
+ } else {
+ if ((timestamp - common->ani.resetcal_timer) >=
+ ATH_RESTART_CALINTERVAL) {
+ common->ani.caldone = ath9k_hw_reset_calvalid(ah);
+ if (common->ani.caldone)
+ common->ani.resetcal_timer = timestamp;
+ }
+ }
+
+ /* Verify whether we must check ANI */
+ if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+ aniflag = true;
+ common->ani.checkani_timer = timestamp;
+ }
+
+ /* Skip all processing if there's nothing to do. */
+ if (longcal || shortcal || aniflag) {
+
+ ath9k_htc_ps_wakeup(priv);
+
+ /* Call ANI routine if necessary */
+ if (aniflag)
+ ath9k_hw_ani_monitor(ah, ah->curchan);
+
+ /* Perform calibration if necessary */
+ if (longcal || shortcal) {
+ common->ani.caldone =
+ ath9k_hw_calibrate(ah, ah->curchan,
+ common->rx_chainmask,
+ longcal);
+
+ if (longcal)
+ common->ani.noise_floor =
+ ath9k_hw_getchan_noise(ah, ah->curchan);
+
+ ath_print(common, ATH_DBG_ANI,
+ " calibrate chan %u/%x nf: %d\n",
+ ah->curchan->channel,
+ ah->curchan->channelFlags,
+ common->ani.noise_floor);
+ }
+
+ ath9k_htc_ps_restore(priv);
+ }
+
+set_timer:
+ /*
+ * Set timer interval based on previous results.
+ * The interval must be the shortest necessary to satisfy ANI,
+ * short calibration and long calibration.
+ */
+ cal_interval = ATH_LONG_CALINTERVAL;
+ if (priv->ah->config.enable_ani)
+ cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
+ if (!common->ani.caldone)
+ cal_interval = min(cal_interval, (u32)short_cal_interval);
+
+ ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
+ msecs_to_jiffies(cal_interval));
+}
+
+/*******/
+/* LED */
+/*******/
+
+static void ath9k_led_blink_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
+ ath9k_led_blink_work.work);
+
+ if (!(priv->op_flags & OP_LED_ASSOCIATED))
+ return;
+
+ if ((priv->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
+ (priv->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
+ else
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
+ (priv->op_flags & OP_LED_ON) ? 1 : 0);
+
+ ieee80211_queue_delayed_work(priv->hw,
+ &priv->ath9k_led_blink_work,
+ (priv->op_flags & OP_LED_ON) ?
+ msecs_to_jiffies(priv->led_off_duration) :
+ msecs_to_jiffies(priv->led_on_duration));
+
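+ /*
+ * Scale the next blink durations by recent activity: the more tx/rx
+ * events counted since the last run, the shorter the on/off periods,
+ * bounded below at 25 (on) and 10 (off), in ms given the
+ * msecs_to_jiffies conversion above.
+ */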
+ priv->led_on_duration = priv->led_on_cnt ?
+ max((ATH_LED_ON_DURATION_IDLE - priv->led_on_cnt), 25) :
+ ATH_LED_ON_DURATION_IDLE;
+ priv->led_off_duration = priv->led_off_cnt ?
+ max((ATH_LED_OFF_DURATION_IDLE - priv->led_off_cnt), 10) :
+ ATH_LED_OFF_DURATION_IDLE;
+ priv->led_on_cnt = priv->led_off_cnt = 0;
+
+ if (priv->op_flags & OP_LED_ON)
+ priv->op_flags &= ~OP_LED_ON;
+ else
+ priv->op_flags |= OP_LED_ON;
+}
+
+static void ath9k_led_brightness_work(struct work_struct *work)
+{
+ struct ath_led *led = container_of(work, struct ath_led,
+ brightness_work.work);
+ struct ath9k_htc_priv *priv = led->priv;
+
+ switch (led->brightness) {
+ case LED_OFF:
+ if (led->led_type == ATH_LED_ASSOC ||
+ led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
+ (led->led_type == ATH_LED_RADIO));
+ priv->op_flags &= ~OP_LED_ASSOCIATED;
+ if (led->led_type == ATH_LED_RADIO)
+ priv->op_flags &= ~OP_LED_ON;
+ } else {
+ priv->led_off_cnt++;
+ }
+ break;
+ case LED_FULL:
+ if (led->led_type == ATH_LED_ASSOC) {
+ priv->op_flags |= OP_LED_ASSOCIATED;
+ ieee80211_queue_delayed_work(priv->hw,
+ &priv->ath9k_led_blink_work, 0);
+ } else if (led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
+ priv->op_flags |= OP_LED_ON;
+ } else {
+ priv->led_on_cnt++;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath9k_led_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
+ struct ath9k_htc_priv *priv = led->priv;
+
+ led->brightness = brightness;
+ if (!(priv->op_flags & OP_LED_DEINIT))
+ ieee80211_queue_delayed_work(priv->hw,
+ &led->brightness_work, 0);
+}
+
+static void ath9k_led_stop_brightness(struct ath9k_htc_priv *priv)
+{
+ cancel_delayed_work_sync(&priv->radio_led.brightness_work);
+ cancel_delayed_work_sync(&priv->assoc_led.brightness_work);
+ cancel_delayed_work_sync(&priv->tx_led.brightness_work);
+ cancel_delayed_work_sync(&priv->rx_led.brightness_work);
+}
+
+static int ath9k_register_led(struct ath9k_htc_priv *priv, struct ath_led *led,
+ char *trigger)
+{
+ int ret;
+
+ led->priv = priv;
+ led->led_cdev.name = led->name;
+ led->led_cdev.default_trigger = trigger;
+ led->led_cdev.brightness_set = ath9k_led_brightness;
+
+ ret = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_cdev);
+ if (ret)
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
+ "Failed to register led:%s", led->name);
+ else
+ led->registered = 1;
+
+ INIT_DELAYED_WORK(&led->brightness_work, ath9k_led_brightness_work);
+
+ return ret;
+}
+
+static void ath9k_unregister_led(struct ath_led *led)
+{
+ if (led->registered) {
+ led_classdev_unregister(&led->led_cdev);
+ led->registered = 0;
+ }
+}
+
+void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
+{
+ priv->op_flags |= OP_LED_DEINIT;
+ ath9k_unregister_led(&priv->assoc_led);
+ priv->op_flags &= ~OP_LED_ASSOCIATED;
+ ath9k_unregister_led(&priv->tx_led);
+ ath9k_unregister_led(&priv->rx_led);
+ ath9k_unregister_led(&priv->radio_led);
+}
+
+void ath9k_init_leds(struct ath9k_htc_priv *priv)
+{
+ char *trigger;
+ int ret;
+
+ if (AR_SREV_9287(priv->ah))
+ priv->ah->led_pin = ATH_LED_PIN_9287;
+ else if (AR_SREV_9271(priv->ah))
+ priv->ah->led_pin = ATH_LED_PIN_9271;
+ else
+ priv->ah->led_pin = ATH_LED_PIN_DEF;
+
+ /* Configure gpio 1 for output */
+ ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ /* LED off, active low */
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
+
+ INIT_DELAYED_WORK(&priv->ath9k_led_blink_work, ath9k_led_blink_work);
+
+ trigger = ieee80211_get_radio_led_name(priv->hw);
+ snprintf(priv->radio_led.name, sizeof(priv->radio_led.name),
+ "ath9k-%s::radio", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->radio_led, trigger);
+ priv->radio_led.led_type = ATH_LED_RADIO;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_assoc_led_name(priv->hw);
+ snprintf(priv->assoc_led.name, sizeof(priv->assoc_led.name),
+ "ath9k-%s::assoc", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->assoc_led, trigger);
+ priv->assoc_led.led_type = ATH_LED_ASSOC;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_tx_led_name(priv->hw);
+ snprintf(priv->tx_led.name, sizeof(priv->tx_led.name),
+ "ath9k-%s::tx", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->tx_led, trigger);
+ priv->tx_led.led_type = ATH_LED_TX;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_rx_led_name(priv->hw);
+ snprintf(priv->rx_led.name, sizeof(priv->rx_led.name),
+ "ath9k-%s::rx", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->rx_led, trigger);
+ priv->rx_led.led_type = ATH_LED_RX;
+ if (ret)
+ goto fail;
+
+ priv->op_flags &= ~OP_LED_DEINIT;
+
+ return;
+
+fail:
+ cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+ ath9k_deinit_leds(priv);
+}
+
+/*******************/
+/* Rfkill */
+/*******************/
+
+static bool ath_is_rfkill_set(struct ath9k_htc_priv *priv)
+{
+ return ath9k_hw_gpio_get(priv->ah, priv->ah->rfkill_gpio) ==
+ priv->ah->rfkill_polarity;
+}
+
+static void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ bool blocked = !!ath_is_rfkill_set(priv);
+
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+}
+
+void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
+{
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+ wiphy_rfkill_start_polling(priv->hw->wiphy);
+}
+
+/**********************/
+/* mac80211 Callbacks */
+/**********************/
+
+static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct ath9k_htc_priv *priv = hw->priv;
+ int padpos, padsize, ret;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ /* Add the padding after the header if this is not already done */
+ padpos = ath9k_cmn_padpos(hdr->frame_control);
+ padsize = padpos & 3;
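+ /*
+ * The header (padpos bytes) must end on a 4-byte boundary; expand the
+ * headroom by padsize and shift the header to the front, leaving the
+ * pad bytes between the header and the payload.
+ */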
+ if (padsize && skb->len > padpos) {
+ if (skb_headroom(skb) < padsize)
+ return -1;
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data + padsize, padpos);
+ }
+
+ ret = ath9k_htc_tx_start(priv, skb);
+ if (ret != 0) {
+ if (ret == -ENOMEM) {
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "Stopping TX queues\n");
+ ieee80211_stop_queues(hw);
+ spin_lock_bh(&priv->tx_lock);
+ priv->tx_queues_stop = true;
+ spin_unlock_bh(&priv->tx_lock);
+ } else {
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "Tx failed");
+ }
+ goto fail_tx;
+ }
+
+ return 0;
+
+fail_tx:
+ dev_kfree_skb_any(skb);
+ return 0;
+}
+
+static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ struct ath9k_channel *init_channel;
+ int ret = 0;
+ enum htc_phymode mode;
+ __be16 htc_mode;
+ u8 cmd_rsp;
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Starting driver with initial channel: %d MHz\n",
+ curchan->center_freq);
+
+ /* setup initial channel */
+ init_channel = ath9k_cmn_get_curchannel(hw, ah);
+
+ /* Reset SERDES registers */
+ ath9k_hw_configpcipowersave(ah, 0, 0);
+
+ ath9k_hw_htc_resetinit(ah);
+ ret = ath9k_hw_reset(ah, init_channel, false);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to reset hardware; reset status %d "
+ "(freq %u MHz)\n", ret, curchan->center_freq);
+ return ret;
+ }
+
+ ath_update_txpow(priv);
+
+ mode = ath9k_htc_get_curmode(priv, init_channel);
+ htc_mode = cpu_to_be16(mode);
+ WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
+ WMI_CMD(WMI_ATH_INIT_CMDID);
+ WMI_CMD(WMI_START_RECV_CMDID);
+
+ ath9k_host_rx_init(priv);
+
+ priv->op_flags &= ~OP_INVALID;
+ htc_start(priv->htc);
+
+ spin_lock_bh(&priv->tx_lock);
+ priv->tx_queues_stop = false;
+ spin_unlock_bh(&priv->tx_lock);
+
+ if (led) {
+ /* Enable LED */
+ ath9k_hw_cfg_output(ah, ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_set_gpio(ah, ah->led_pin, 0);
+ }
+
+ ieee80211_wake_queues(hw);
+
+ return ret;
+}
+
+static int ath9k_htc_start(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&priv->mutex);
+ ret = ath9k_htc_radio_enable(hw, false);
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ret = 0;
+ u8 cmd_rsp;
+
+ if (priv->op_flags & OP_INVALID) {
+ ath_print(common, ATH_DBG_ANY, "Device not present\n");
+ return;
+ }
+
+ if (led) {
+ /* Disable LED */
+ ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+ ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
+ }
+
+ /* Cancel all the running timers/work .. */
+ cancel_work_sync(&priv->ps_work);
+ cancel_delayed_work_sync(&priv->ath9k_ani_work);
+ cancel_delayed_work_sync(&priv->ath9k_aggr_work);
+ cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+ ath9k_led_stop_brightness(priv);
+
+ ath9k_htc_ps_wakeup(priv);
+ htc_stop(priv->htc);
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
+ WMI_CMD(WMI_STOP_RECV_CMDID);
+ ath9k_hw_phy_disable(ah);
+ ath9k_hw_disable(ah);
+ ath9k_hw_configpcipowersave(ah, 1, 1);
+ ath9k_htc_ps_restore(priv);
+ ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
+
+ skb_queue_purge(&priv->tx_queue);
+
+ /* Remove monitor interface here */
+ if (ah->opmode == NL80211_IFTYPE_MONITOR) {
+ if (ath9k_htc_remove_monitor_interface(priv))
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to remove monitor interface\n");
+ else
+ ath_print(common, ATH_DBG_CONFIG,
+ "Monitor interface removed\n");
+ }
+
+ priv->op_flags |= OP_INVALID;
+
+ ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
+}
+
+static void ath9k_htc_stop(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_radio_disable(hw, false);
+ mutex_unlock(&priv->mutex);
+}
+
+static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ mutex_lock(&priv->mutex);
+
+ /* Only one interface for now */
+ if (priv->nvifs > 0) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ ath9k_htc_ps_wakeup(priv);
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ hvif.opmode = cpu_to_be32(HTC_M_STA);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ hvif.opmode = cpu_to_be32(HTC_M_IBSS);
+ break;
+ default:
+ ath_print(common, ATH_DBG_FATAL,
+ "Interface type %d not yet supported\n", vif->type);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Attach a VIF of type: %d\n", vif->type);
+
+ priv->ah->opmode = vif->type;
+
+ /* Index starts from zero on the target */
+ avp->index = hvif.index = priv->nvifs;
+ hvif.rtsthreshold = cpu_to_be16(2304);
+ WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
+ if (ret)
+ goto out;
+
+ priv->nvifs++;
+
+ /*
+ * We need a node in target to tx mgmt frames
+ * before association.
+ */
+ ret = ath9k_htc_add_station(priv, vif, NULL);
+ if (ret)
+ goto out;
+
+ ret = ath9k_htc_update_cap_target(priv);
+ if (ret)
+ ath_print(common, ATH_DBG_CONFIG, "Failed to update"
+ " capability in target \n");
+
+ priv->vif = vif;
+out:
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+ return ret;
+}
+
+static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
+
+ mutex_lock(&priv->mutex);
+
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
+ hvif.index = avp->index;
+ WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
+ priv->nvifs--;
+
+ ath9k_htc_remove_station(priv, vif, NULL);
+ priv->vif = NULL;
+
+ mutex_unlock(&priv->mutex);
+}
+
+static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ieee80211_conf *conf = &hw->conf;
+
+ mutex_lock(&priv->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ bool enable_radio = false;
+ bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
+
+ if (!idle && priv->ps_idle)
+ enable_radio = true;
+
+ priv->ps_idle = idle;
+
+ if (enable_radio) {
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+ ath9k_htc_radio_enable(hw, true);
+ ath_print(common, ATH_DBG_CONFIG,
+ "not-idle: enabling radio\n");
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ int pos = curchan->hw_value;
+ bool is_cw40 = false;
+
+ ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
+ curchan->center_freq);
+
+ if (check_rc_update(hw, &is_cw40))
+ ath9k_htc_rc_update(priv, is_cw40);
+
+ ath9k_cmn_update_ichannel(hw, &priv->ah->channels[pos]);
+
+ if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to set channel\n");
+ mutex_unlock(&priv->mutex);
+ return -EINVAL;
+ }
+
+ }
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if (conf->flags & IEEE80211_CONF_PS) {
+ ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
+ priv->ps_enabled = true;
+ } else {
+ priv->ps_enabled = false;
+ cancel_work_sync(&priv->ps_work);
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ if (ath9k_htc_add_monitor_interface(priv))
+ ath_print(common, ATH_DBG_FATAL,
+ "Failed to set monitor mode\n");
+ else
+ ath_print(common, ATH_DBG_CONFIG,
+ "HW opmode set to Monitor mode\n");
+ }
+ }
+
+ if (priv->ps_idle) {
+ ath_print(common, ATH_DBG_CONFIG,
+ "idle: disabling radio\n");
+ ath9k_htc_radio_disable(hw, true);
+ }
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+
+#define SUPPORTED_FILTERS \
+ (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_FCSFAIL)
+
+static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ u32 rfilt;
+
+ mutex_lock(&priv->mutex);
+
+ ath9k_htc_ps_wakeup(priv);
+ changed_flags &= SUPPORTED_FILTERS;
+ *total_flags &= SUPPORTED_FILTERS;
+
+ priv->rxfilter = *total_flags;
+ rfilt = ath9k_htc_calcrxfilter(priv);
+ ath9k_hw_setrxfilter(priv->ah, rfilt);
+
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG,
+ "Set HW RX filter: 0x%x\n", rfilt);
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static void ath9k_htc_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ int ret;
+
+ mutex_lock(&priv->mutex);
+
+ switch (cmd) {
+ case STA_NOTIFY_ADD:
+ ret = ath9k_htc_add_station(priv, vif, sta);
+ if (!ret)
+ ath9k_htc_init_rate(priv, vif, sta);
+ break;
+ case STA_NOTIFY_REMOVE:
+ ath9k_htc_remove_station(priv, vif, sta);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&priv->mutex);
+}
+
+static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_tx_queue_info qi;
+ int ret = 0, qnum;
+
+ if (queue >= WME_NUM_AC)
+ return 0;
+
+ mutex_lock(&priv->mutex);
+
+ memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
+
+ qi.tqi_aifs = params->aifs;
+ qi.tqi_cwmin = params->cw_min;
+ qi.tqi_cwmax = params->cw_max;
+ qi.tqi_burstTime = params->txop;
+
+ qnum = get_hw_qnum(queue, priv->hwq_map);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Configure tx [queue/hwq] [%d/%d], "
+ "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
+ queue, qnum, params->aifs, params->cw_min,
+ params->cw_max, params->txop);
+
+ ret = ath_htc_txq_update(priv, qnum, &qi);
+ if (ret)
+ ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
+
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static int ath9k_htc_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int ret = 0;
+
+ if (htc_modparam_nohwcrypt)
+ return -ENOSPC;
+
+ mutex_lock(&priv->mutex);
+ ath_print(common, ATH_DBG_CONFIG, "Set HW Key\n");
+ ath9k_htc_ps_wakeup(priv);
+
+ switch (cmd) {
+ case SET_KEY:
+ ret = ath9k_cmn_key_config(common, vif, sta, key);
+ if (ret >= 0) {
+ key->hw_key_idx = ret;
+ /* push IV and Michael MIC generation to stack */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ if (key->alg == ALG_TKIP)
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ if (priv->ah->sw_mgmt_crypto && key->alg == ALG_CCMP)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+ ret = 0;
+ }
+ break;
+ case DISABLE_KEY:
+ ath9k_cmn_key_delete(common, key);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ common->curaid = bss_conf->assoc ?
+ bss_conf->aid : 0;
+ ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
+ bss_conf->assoc);
+
+ if (bss_conf->assoc) {
+ priv->op_flags |= OP_ASSOCIATED;
+ ath_start_ani(priv);
+ } else {
+ priv->op_flags &= ~OP_ASSOCIATED;
+ cancel_work_sync(&priv->ps_work);
+ cancel_delayed_work_sync(&priv->ath9k_ani_work);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ /* Set BSSID */
+ memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ ath9k_hw_write_associd(ah);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "BSSID: %pM aid: 0x%x\n",
+ common->curbssid, common->curaid);
+ }
+
+ if ((changed & BSS_CHANGED_BEACON_INT) ||
+ (changed & BSS_CHANGED_BEACON) ||
+ ((changed & BSS_CHANGED_BEACON_ENABLED) &&
+ bss_conf->enable_beacon)) {
+ priv->op_flags |= OP_ENABLE_BEACON;
+ ath9k_htc_beacon_config(priv, vif);
+ }
+
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
+ !bss_conf->enable_beacon) {
+ priv->op_flags &= ~OP_ENABLE_BEACON;
+ ath9k_htc_beacon_config(priv, vif);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ ath_print(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
+ bss_conf->use_short_preamble);
+ if (bss_conf->use_short_preamble)
+ priv->op_flags |= OP_PREAMBLE_SHORT;
+ else
+ priv->op_flags &= ~OP_PREAMBLE_SHORT;
+ }
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ ath_print(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
+ bss_conf->use_cts_prot);
+ if (bss_conf->use_cts_prot &&
+ hw->conf.channel->band != IEEE80211_BAND_5GHZ)
+ priv->op_flags |= OP_PROTECT_ENABLE;
+ else
+ priv->op_flags &= ~OP_PROTECT_ENABLE;
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (bss_conf->use_short_slot)
+ ah->slottime = 9;
+ else
+ ah->slottime = 20;
+
+ ath9k_hw_init_global_settings(ah);
+ }
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static u64 ath9k_htc_get_tsf(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ u64 tsf;
+
+ mutex_lock(&priv->mutex);
+ tsf = ath9k_hw_gettsf64(priv->ah);
+ mutex_unlock(&priv->mutex);
+
+ return tsf;
+}
+
+static void ath9k_htc_set_tsf(struct ieee80211_hw *hw, u64 tsf)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ ath9k_hw_settsf64(priv->ah, tsf);
+ mutex_unlock(&priv->mutex);
+}
+
+static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ ath9k_htc_ps_wakeup(priv);
+ mutex_lock(&priv->mutex);
+ ath9k_hw_reset_tsf(priv->ah);
+ mutex_unlock(&priv->mutex);
+ ath9k_htc_ps_restore(priv);
+}
+
+static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta,
+ u16 tid, u16 *ssn)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_aggr_work *work = &priv->aggr_work;
+ struct ath9k_htc_sta *ista;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP:
+ if (!(priv->op_flags & OP_TXAGGR))
+ return -ENOTSUPP;
+ memcpy(work->sta_addr, sta->addr, ETH_ALEN);
+ work->hw = hw;
+ work->vif = vif;
+ work->action = action;
+ work->tid = tid;
+ ieee80211_queue_delayed_work(hw, &priv->ath9k_aggr_work, 0);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ ista->tid_state[tid] = AGGR_OPERATIONAL;
+ break;
+ default:
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
+ "Unknown AMPDU action\n");
+ }
+
+ return 0;
+}
+
+static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ spin_lock_bh(&priv->beacon_lock);
+ priv->op_flags |= OP_SCANNING;
+ spin_unlock_bh(&priv->beacon_lock);
+ cancel_work_sync(&priv->ps_work);
+ cancel_delayed_work_sync(&priv->ath9k_ani_work);
+ mutex_unlock(&priv->mutex);
+}
+
+static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ ath9k_htc_ps_wakeup(priv);
+ mutex_lock(&priv->mutex);
+ spin_lock_bh(&priv->beacon_lock);
+ priv->op_flags &= ~OP_SCANNING;
+ spin_unlock_bh(&priv->beacon_lock);
+ priv->op_flags |= OP_FULL_RESET;
+ if (priv->op_flags & OP_ASSOCIATED)
+ ath9k_htc_beacon_config(priv, priv->vif);
+ ath_start_ani(priv);
+ mutex_unlock(&priv->mutex);
+ ath9k_htc_ps_restore(priv);
+}
+
+static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ return 0;
+}
+
+static void ath9k_htc_set_coverage_class(struct ieee80211_hw *hw,
+ u8 coverage_class)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ priv->ah->coverage_class = coverage_class;
+ ath9k_hw_init_global_settings(priv->ah);
+ mutex_unlock(&priv->mutex);
+}
+
+struct ieee80211_ops ath9k_htc_ops = {
+ .tx = ath9k_htc_tx,
+ .start = ath9k_htc_start,
+ .stop = ath9k_htc_stop,
+ .add_interface = ath9k_htc_add_interface,
+ .remove_interface = ath9k_htc_remove_interface,
+ .config = ath9k_htc_config,
+ .configure_filter = ath9k_htc_configure_filter,
+ .sta_notify = ath9k_htc_sta_notify,
+ .conf_tx = ath9k_htc_conf_tx,
+ .bss_info_changed = ath9k_htc_bss_info_changed,
+ .set_key = ath9k_htc_set_key,
+ .get_tsf = ath9k_htc_get_tsf,
+ .set_tsf = ath9k_htc_set_tsf,
+ .reset_tsf = ath9k_htc_reset_tsf,
+ .ampdu_action = ath9k_htc_ampdu_action,
+ .sw_scan_start = ath9k_htc_sw_scan_start,
+ .sw_scan_complete = ath9k_htc_sw_scan_complete,
+ .set_rts_threshold = ath9k_htc_set_rts_threshold,
+ .rfkill_poll = ath9k_htc_rfkill_poll_state,
+ .set_coverage_class = ath9k_htc_set_coverage_class,
+};
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
new file mode 100644
index 0000000..2571b44
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -0,0 +1,707 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+/******/
+/* TX */
+/******/
+
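+/*
+ * Map a mac80211 queue number (0: VO, 1: VI, 2: BE, 3: BK) to the
+ * hardware queue number stored in the driver's queue map.
+ */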
+int get_hw_qnum(u16 queue, int *hwq_map)
+{
+ switch (queue) {
+ case 0:
+ return hwq_map[ATH9K_WME_AC_VO];
+ case 1:
+ return hwq_map[ATH9K_WME_AC_VI];
+ case 2:
+ return hwq_map[ATH9K_WME_AC_BE];
+ case 3:
+ return hwq_map[ATH9K_WME_AC_BK];
+ default:
+ return hwq_map[ATH9K_WME_AC_BE];
+ }
+}
+
+int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
+ struct ath9k_tx_queue_info *qinfo)
+{
+ struct ath_hw *ah = priv->ah;
+ int error = 0;
+ struct ath9k_tx_queue_info qi;
+
+ ath9k_hw_get_txq_props(ah, qnum, &qi);
+
+ qi.tqi_aifs = qinfo->tqi_aifs;
+ qi.tqi_cwmin = qinfo->tqi_cwmin / 2; /* XXX */
+ qi.tqi_cwmax = qinfo->tqi_cwmax;
+ qi.tqi_burstTime = qinfo->tqi_burstTime;
+ qi.tqi_readyTime = qinfo->tqi_readyTime;
+
+ if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+ "Unable to update hardware queue %u!\n", qnum);
+ error = -EIO;
+ } else {
+ ath9k_hw_resettxqueue(ah, qnum);
+ }
+
+ return error;
+}
+
+int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = tx_info->control.sta;
+ struct ath9k_htc_sta *ista;
+ struct ath9k_htc_vif *avp;
+ struct ath9k_htc_tx_ctl tx_ctl;
+ enum htc_endpoint_id epid;
+ u16 qnum, hw_qnum;
+ __le16 fc;
+ u8 *tx_fhdr;
+ u8 sta_idx;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = hdr->frame_control;
+
+ avp = (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv;
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ sta_idx = ista->index;
+ } else {
+ sta_idx = 0;
+ }
+
+ memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
+
+ if (ieee80211_is_data(fc)) {
+ struct tx_frame_hdr tx_hdr;
+ u8 *qc;
+
+ memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
+
+ tx_hdr.node_idx = sta_idx;
+ tx_hdr.vif_idx = avp->index;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ tx_ctl.type = ATH9K_HTC_AMPDU;
+ tx_hdr.data_type = ATH9K_HTC_AMPDU;
+ } else {
+ tx_ctl.type = ATH9K_HTC_NORMAL;
+ tx_hdr.data_type = ATH9K_HTC_NORMAL;
+ }
+
+ if (ieee80211_is_data(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ }
+
+ /* Check for RTS protection */
+ if (priv->hw->wiphy->rts_threshold != (u32) -1)
+ if (skb->len > priv->hw->wiphy->rts_threshold)
+ tx_hdr.flags |= ATH9K_HTC_TX_RTSCTS;
+
+ /* CTS-to-self */
+ if (!(tx_hdr.flags & ATH9K_HTC_TX_RTSCTS) &&
+ (priv->op_flags & OP_PROTECT_ENABLE))
+ tx_hdr.flags |= ATH9K_HTC_TX_CTSONLY;
+
+ tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
+ if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
+ tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
+ else
+ tx_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
+
+ tx_fhdr = skb_push(skb, sizeof(tx_hdr));
+ memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));
+
+ qnum = skb_get_queue_mapping(skb);
+ hw_qnum = get_hw_qnum(qnum, priv->hwq_map);
+
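+		/* Choose the HTC data endpoint that backs this hardware queue */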
+ switch (hw_qnum) {
+ case 0:
+ epid = priv->data_be_ep;
+ break;
+ case 2:
+ epid = priv->data_vi_ep;
+ break;
+ case 3:
+ epid = priv->data_vo_ep;
+ break;
+ case 1:
+ default:
+ epid = priv->data_bk_ep;
+ break;
+ }
+ } else {
+ struct tx_mgmt_hdr mgmt_hdr;
+
+ memset(&mgmt_hdr, 0, sizeof(struct tx_mgmt_hdr));
+
+ tx_ctl.type = ATH9K_HTC_NORMAL;
+
+ mgmt_hdr.node_idx = sta_idx;
+ mgmt_hdr.vif_idx = avp->index;
+ mgmt_hdr.tidno = 0;
+ mgmt_hdr.flags = 0;
+
+ mgmt_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
+ if (mgmt_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
+ mgmt_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
+ else
+ mgmt_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
+
+ tx_fhdr = skb_push(skb, sizeof(mgmt_hdr));
+ memcpy(tx_fhdr, (u8 *) &mgmt_hdr, sizeof(mgmt_hdr));
+ epid = priv->mgmt_ep;
+ }
+
+ return htc_send(priv->htc, skb, epid, &tx_ctl);
+}
+
+void ath9k_tx_tasklet(unsigned long data)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info;
+ struct sk_buff *skb = NULL;
+ __le16 fc;
+
+ while ((skb = skb_dequeue(&priv->tx_queue)) != NULL) {
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = hdr->frame_control;
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ memset(&tx_info->status, 0, sizeof(tx_info->status));
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+ if (!sta) {
+ rcu_read_unlock();
+ ieee80211_tx_status(priv->hw, skb);
+ continue;
+ }
+
+ /* Check if we need to start aggregation */
+
+ if (sta && conf_is_ht(&priv->hw->conf) &&
+ (priv->op_flags & OP_TXAGGR)
+ && !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc, tid;
+ struct ath9k_htc_sta *ista;
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ ista = (struct ath9k_htc_sta *)sta->drv_priv;
+
+ if ((tid < ATH9K_HTC_MAX_TID) &&
+ ista->tid_state[tid] == AGGR_STOP) {
+ ieee80211_start_tx_ba_session(sta, tid);
+ ista->tid_state[tid] = AGGR_PROGRESS;
+ }
+ }
+ }
+
+ rcu_read_unlock();
+
+ /* Send status to mac80211 */
+ ieee80211_tx_status(priv->hw, skb);
+ }
+
+ /* Wake TX queues if needed */
+ spin_lock_bh(&priv->tx_lock);
+ if (priv->tx_queues_stop) {
+ priv->tx_queues_stop = false;
+ spin_unlock_bh(&priv->tx_lock);
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "Waking up TX queues\n");
+ ieee80211_wake_queues(priv->hw);
+ return;
+ }
+ spin_unlock_bh(&priv->tx_lock);
+}
+
+void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id, bool txok)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) drv_priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ieee80211_tx_info *tx_info;
+
+ if (!skb)
+ return;
+
+ if (ep_id == priv->mgmt_ep) {
+ skb_pull(skb, sizeof(struct tx_mgmt_hdr));
+ } else if ((ep_id == priv->data_bk_ep) ||
+ (ep_id == priv->data_be_ep) ||
+ (ep_id == priv->data_vi_ep) ||
+ (ep_id == priv->data_vo_ep)) {
+ skb_pull(skb, sizeof(struct tx_frame_hdr));
+ } else {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unsupported TX EPID: %d\n", ep_id);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ if (txok)
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+
+ skb_queue_tail(&priv->tx_queue, skb);
+ tasklet_schedule(&priv->tx_tasklet);
+}
+
+int ath9k_tx_init(struct ath9k_htc_priv *priv)
+{
+ skb_queue_head_init(&priv->tx_queue);
+ return 0;
+}
+
+void ath9k_tx_cleanup(struct ath9k_htc_priv *priv)
+{
+
+}
+
+bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
+ enum ath9k_tx_queue_subtype subtype)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_tx_queue_info qi;
+ int qnum;
+
+ memset(&qi, 0, sizeof(qi));
+
+ qi.tqi_subtype = subtype;
+ qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_physCompBuf = 0;
+ qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | TXQ_FLAG_TXDESCINT_ENABLE;
+
+ qnum = ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_DATA, &qi);
+ if (qnum == -1)
+ return false;
+
+ if (qnum >= ARRAY_SIZE(priv->hwq_map)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "qnum %u out of range, max %u!\n",
+ qnum, (unsigned int)ARRAY_SIZE(priv->hwq_map));
+ ath9k_hw_releasetxqueue(ah, qnum);
+ return false;
+ }
+
+ priv->hwq_map[subtype] = qnum;
+ return true;
+}
+
+/******/
+/* RX */
+/******/
+
+/*
+ * Calculate the RX filter to be set in the HW.
+ */
+u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
+{
+#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
+
+ struct ath_hw *ah = priv->ah;
+ u32 rfilt;
+
+ rfilt = (ath9k_hw_getrxfilter(ah) & RX_FILTER_PRESERVE)
+ | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
+ | ATH9K_RX_FILTER_MCAST;
+
+ /* If not a STA, enable processing of Probe Requests */
+ if (ah->opmode != NL80211_IFTYPE_STATION)
+ rfilt |= ATH9K_RX_FILTER_PROBEREQ;
+
+ /*
+ * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
+ * mode interface or when in monitor mode. AP mode does not need this
+ * since it receives all in-BSS frames anyway.
+ */
+ if (((ah->opmode != NL80211_IFTYPE_AP) &&
+ (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
+ (ah->opmode == NL80211_IFTYPE_MONITOR))
+ rfilt |= ATH9K_RX_FILTER_PROM;
+
+ if (priv->rxfilter & FIF_CONTROL)
+ rfilt |= ATH9K_RX_FILTER_CONTROL;
+
+ if ((ah->opmode == NL80211_IFTYPE_STATION) &&
+ !(priv->rxfilter & FIF_BCN_PRBRESP_PROMISC))
+ rfilt |= ATH9K_RX_FILTER_MYBEACON;
+ else
+ rfilt |= ATH9K_RX_FILTER_BEACON;
+
+ if (conf_is_ht(&priv->hw->conf))
+ rfilt |= ATH9K_RX_FILTER_COMP_BAR;
+
+ return rfilt;
+
+#undef RX_FILTER_PRESERVE
+}
+
+/*
+ * Recv initialization for opmode change.
+ */
+static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ u32 rfilt, mfilt[2];
+
+ /* configure rx filter */
+ rfilt = ath9k_htc_calcrxfilter(priv);
+ ath9k_hw_setrxfilter(ah, rfilt);
+
+ /* configure bssid mask */
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ ath_hw_setbssidmask(common);
+
+ /* configure operational mode */
+ ath9k_hw_setopmode(ah);
+
+ /* Handle any link-level address change. */
+ ath9k_hw_setmac(ah, common->macaddr);
+
+ /* calculate and install multicast filter */
+ mfilt[0] = mfilt[1] = ~0;
+ ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
+}
+
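+/*
+ * Start host-side RX: enable the RX engine, program the RX filters
+ * for the current opmode and start the PCU receive unit.
+ */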
+void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
+{
+ ath9k_hw_rxena(priv->ah);
+ ath9k_htc_opmode_init(priv);
+ ath9k_hw_startpcureceive(priv->ah);
+ priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
+}
+
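+/*
+ * Translate the hardware RX rate code into a mac80211 rate index,
+ * setting the HT, 40MHz, short-GI and short-preamble flags as needed.
+ */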
+static void ath9k_process_rate(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *rxs,
+ u8 rx_rate, u8 rs_flags)
+{
+ struct ieee80211_supported_band *sband;
+ enum ieee80211_band band;
+ unsigned int i = 0;
+
+ if (rx_rate & 0x80) {
+ /* HT rate */
+ rxs->flag |= RX_FLAG_HT;
+ if (rs_flags & ATH9K_RX_2040)
+ rxs->flag |= RX_FLAG_40MHZ;
+ if (rs_flags & ATH9K_RX_GI)
+ rxs->flag |= RX_FLAG_SHORT_GI;
+ rxs->rate_idx = rx_rate & 0x7f;
+ return;
+ }
+
+ band = hw->conf.channel->band;
+ sband = hw->wiphy->bands[band];
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (sband->bitrates[i].hw_value == rx_rate) {
+ rxs->rate_idx = i;
+ return;
+ }
+ if (sband->bitrates[i].hw_value_short == rx_rate) {
+ rxs->rate_idx = i;
+ rxs->flag |= RX_FLAG_SHORTPRE;
+ return;
+ }
+ }
+}
+
+static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
+ struct ath9k_htc_rxbuf *rxbuf,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_hw *hw = priv->hw;
+ struct sk_buff *skb = rxbuf->skb;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath_htc_rx_status *rxstatus;
+ int hdrlen, padpos, padsize;
+ int last_rssi = ATH_RSSI_DUMMY_MARKER;
+ __le16 fc;
+
+ if (skb->len <= HTC_RX_FRAME_HEADER_SIZE) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Corrupted RX frame, dropping\n");
+ goto rx_next;
+ }
+
+ rxstatus = (struct ath_htc_rx_status *)skb->data;
+
+ if (be16_to_cpu(rxstatus->rs_datalen) -
+ (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Corrupted RX data len, dropping "
+ "(dlen: %d, skblen: %d)\n",
+			  be16_to_cpu(rxstatus->rs_datalen), skb->len);
+ goto rx_next;
+ }
+
+ /* Get the RX status information */
+ memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
+ skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+
+ padpos = ath9k_cmn_padpos(fc);
+
+ padsize = padpos & 3;
+ if (padsize && skb->len >= padpos+padsize+FCS_LEN) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ }
+
+ memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
+ if (rxbuf->rxstatus.rs_status != 0) {
+ if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_CRC)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_PHY)
+ goto rx_next;
+
+ if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT) {
+ /* FIXME */
+ } else if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_MIC) {
+ if (ieee80211_is_ctl(fc))
+ /*
+ * Sometimes, we get invalid
+ * MIC failures on valid control frames.
+ * Remove these mic errors.
+ */
+ rxbuf->rxstatus.rs_status &= ~ATH9K_RXERR_MIC;
+ else
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+ }
+
+ /*
+ * Reject error frames with the exception of
+ * decryption and MIC failures. For monitor mode,
+ * we also ignore the CRC error.
+ */
+ if (priv->ah->opmode == NL80211_IFTYPE_MONITOR) {
+ if (rxbuf->rxstatus.rs_status &
+ ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_CRC))
+ goto rx_next;
+ } else {
+ if (rxbuf->rxstatus.rs_status &
+ ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
+ goto rx_next;
+ }
+ }
+ }
+
+ if (!(rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT)) {
+ u8 keyix;
+ keyix = rxbuf->rxstatus.rs_keyix;
+ if (keyix != ATH9K_RXKEYIX_INVALID) {
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ } else if (ieee80211_has_protected(fc) &&
+ skb->len >= hdrlen + 4) {
+ keyix = skb->data[hdrlen + 3] >> 6;
+ if (test_bit(keyix, common->keymap))
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ }
+ }
+
+ ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
+ rxbuf->rxstatus.rs_flags);
+
+ if (priv->op_flags & OP_ASSOCIATED) {
+ if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
+ !rxbuf->rxstatus.rs_moreaggr)
+ ATH_RSSI_LPF(priv->rx.last_rssi,
+ rxbuf->rxstatus.rs_rssi);
+
+ last_rssi = priv->rx.last_rssi;
+
+ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+ rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
+ ATH_RSSI_EP_MULTIPLIER);
+
+ if (rxbuf->rxstatus.rs_rssi < 0)
+ rxbuf->rxstatus.rs_rssi = 0;
+
+ if (ieee80211_is_beacon(fc))
+ priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
+ }
+
+ rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
+ rx_status->band = hw->conf.channel->band;
+ rx_status->freq = hw->conf.channel->center_freq;
+ rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
+ rx_status->antenna = rxbuf->rxstatus.rs_antenna;
+ rx_status->flag |= RX_FLAG_TSFT;
+
+ return true;
+
+rx_next:
+ return false;
+}
+
+/*
+ * FIXME: Handle FLUSH later on.
+ */
+void ath9k_rx_tasklet(unsigned long data)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
+ struct ieee80211_rx_status rx_status;
+ struct sk_buff *skb;
+ unsigned long flags;
+ struct ieee80211_hdr *hdr;
+
+ do {
+ spin_lock_irqsave(&priv->rx.rxbuflock, flags);
+ list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
+ if (tmp_buf->in_process) {
+ rxbuf = tmp_buf;
+ break;
+ }
+ }
+
+ if (rxbuf == NULL) {
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
+ break;
+ }
+
+ if (!rxbuf->skb)
+ goto requeue;
+
+ if (!ath9k_rx_prepare(priv, rxbuf, &rx_status)) {
+ dev_kfree_skb_any(rxbuf->skb);
+ goto requeue;
+ }
+
+ memcpy(IEEE80211_SKB_RXCB(rxbuf->skb), &rx_status,
+ sizeof(struct ieee80211_rx_status));
+ skb = rxbuf->skb;
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ if (ieee80211_is_beacon(hdr->frame_control) && priv->ps_enabled)
+ ieee80211_queue_work(priv->hw, &priv->ps_work);
+
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
+
+ ieee80211_rx(priv->hw, skb);
+
+ spin_lock_irqsave(&priv->rx.rxbuflock, flags);
+requeue:
+ rxbuf->in_process = false;
+ rxbuf->skb = NULL;
+ list_move_tail(&rxbuf->list, &priv->rx.rxbuf);
+ rxbuf = NULL;
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
+ } while (1);
+}
+
+void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)drv_priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
+
+ spin_lock(&priv->rx.rxbuflock);
+ list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
+ if (!tmp_buf->in_process) {
+ rxbuf = tmp_buf;
+ break;
+ }
+ }
+ spin_unlock(&priv->rx.rxbuflock);
+
+ if (rxbuf == NULL) {
+ ath_print(common, ATH_DBG_ANY,
+ "No free RX buffer\n");
+ goto err;
+ }
+
+ spin_lock(&priv->rx.rxbuflock);
+ rxbuf->skb = skb;
+ rxbuf->in_process = true;
+ spin_unlock(&priv->rx.rxbuflock);
+
+ tasklet_schedule(&priv->rx_tasklet);
+ return;
+err:
+ dev_kfree_skb_any(skb);
+}
+
+/* FIXME: Locking for cleanup/init */
+
+void ath9k_rx_cleanup(struct ath9k_htc_priv *priv)
+{
+ struct ath9k_htc_rxbuf *rxbuf, *tbuf;
+
+ list_for_each_entry_safe(rxbuf, tbuf, &priv->rx.rxbuf, list) {
+ list_del(&rxbuf->list);
+ if (rxbuf->skb)
+ dev_kfree_skb_any(rxbuf->skb);
+ kfree(rxbuf);
+ }
+}
+
+int ath9k_rx_init(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_rxbuf *rxbuf;
+ int i = 0;
+
+ INIT_LIST_HEAD(&priv->rx.rxbuf);
+ spin_lock_init(&priv->rx.rxbuflock);
+
+ for (i = 0; i < ATH9K_HTC_RXBUF; i++) {
+ rxbuf = kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
+ if (rxbuf == NULL) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to allocate RX buffers\n");
+ goto err;
+ }
+ list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
+ }
+
+ return 0;
+
+err:
+ ath9k_rx_cleanup(priv);
+ return -ENOMEM;
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
new file mode 100644
index 0000000..064397f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
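+/*
+ * Prepend an HTC frame header to the skb and hand it to the HIF
+ * layer for transmission on the endpoint's upload pipe.
+ */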
+static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
+ u16 len, u8 flags, u8 epid,
+ struct ath9k_htc_tx_ctl *tx_ctl)
+{
+ struct htc_frame_hdr *hdr;
+ struct htc_endpoint *endpoint = &target->endpoint[epid];
+ int status;
+
+ hdr = (struct htc_frame_hdr *)
+ skb_push(skb, sizeof(struct htc_frame_hdr));
+ hdr->endpoint_id = epid;
+ hdr->flags = flags;
+ hdr->payload_len = cpu_to_be16(len);
+
+ status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb,
+ tx_ctl);
+ return status;
+}
+
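+/* Return the highest-numbered endpoint not yet bound to a service */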
+static struct htc_endpoint *get_next_avail_ep(struct htc_endpoint *endpoint)
+{
+ enum htc_endpoint_id avail_epid;
+
+ for (avail_epid = (ENDPOINT_MAX - 1); avail_epid > ENDPOINT0; avail_epid--)
+ if (endpoint[avail_epid].service_id == 0)
+ return &endpoint[avail_epid];
+ return NULL;
+}
+
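+/* Map an HTC service ID to its upload (host-to-target) HIF pipe */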
+static u8 service_to_ulpipe(u16 service_id)
+{
+ switch (service_id) {
+ case WMI_CONTROL_SVC:
+ return 4;
+ case WMI_BEACON_SVC:
+ case WMI_CAB_SVC:
+ case WMI_UAPSD_SVC:
+ case WMI_MGMT_SVC:
+ case WMI_DATA_VO_SVC:
+ case WMI_DATA_VI_SVC:
+ case WMI_DATA_BE_SVC:
+ case WMI_DATA_BK_SVC:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
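+/* Map an HTC service ID to its download (target-to-host) HIF pipe */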
+static u8 service_to_dlpipe(u16 service_id)
+{
+ switch (service_id) {
+ case WMI_CONTROL_SVC:
+ return 3;
+ case WMI_BEACON_SVC:
+ case WMI_CAB_SVC:
+ case WMI_UAPSD_SVC:
+ case WMI_MGMT_SVC:
+ case WMI_DATA_VO_SVC:
+ case WMI_DATA_VI_SVC:
+ case WMI_DATA_BE_SVC:
+ case WMI_DATA_BK_SVC:
+ return 2;
+ default:
+ return 0;
+ }
+}
+
+static void htc_process_target_rdy(struct htc_target *target,
+ void *buf)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_ready_msg *htc_ready_msg = (struct htc_ready_msg *) buf;
+
+ target->credits = be16_to_cpu(htc_ready_msg->credits);
+ target->credit_size = be16_to_cpu(htc_ready_msg->credit_size);
+
+ endpoint = &target->endpoint[ENDPOINT0];
+ endpoint->service_id = HTC_CTRL_RSVD_SVC;
+ endpoint->max_msglen = HTC_MAX_CONTROL_MESSAGE_LENGTH;
+ atomic_inc(&target->tgt_ready);
+ complete(&target->target_wait);
+}
+
+static void htc_process_conn_rsp(struct htc_target *target,
+ struct htc_frame_hdr *htc_hdr)
+{
+ struct htc_conn_svc_rspmsg *svc_rspmsg;
+ struct htc_endpoint *endpoint, *tmp_endpoint = NULL;
+ u16 service_id;
+ u16 max_msglen;
+ enum htc_endpoint_id epid, tepid;
+
+ svc_rspmsg = (struct htc_conn_svc_rspmsg *)
+ ((void *) htc_hdr + sizeof(struct htc_frame_hdr));
+
+ if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) {
+ epid = svc_rspmsg->endpoint_id;
+ service_id = be16_to_cpu(svc_rspmsg->service_id);
+ max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len);
+ endpoint = &target->endpoint[epid];
+
+ for (tepid = (ENDPOINT_MAX - 1); tepid > ENDPOINT0; tepid--) {
+ tmp_endpoint = &target->endpoint[tepid];
+ if (tmp_endpoint->service_id == service_id) {
+ tmp_endpoint->service_id = 0;
+ break;
+ }
+ }
+
+ if (tepid == ENDPOINT0)
+ return;
+
+ endpoint->service_id = service_id;
+ endpoint->max_txqdepth = tmp_endpoint->max_txqdepth;
+ endpoint->ep_callbacks = tmp_endpoint->ep_callbacks;
+ endpoint->ul_pipeid = tmp_endpoint->ul_pipeid;
+ endpoint->dl_pipeid = tmp_endpoint->dl_pipeid;
+ endpoint->max_msglen = max_msglen;
+ target->conn_rsp_epid = epid;
+ complete(&target->cmd_wait);
+ } else {
+ target->conn_rsp_epid = ENDPOINT_UNUSED;
+ }
+}
+
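+/*
+ * Ask the target to assign TX credits to the WLAN TX pipe and wait
+ * for the configuration response.
+ */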
+static int htc_config_pipe_credits(struct htc_target *target)
+{
+ struct sk_buff *skb;
+ struct htc_config_pipe_msg *cp_msg;
+ int ret, time_left;
+
+ skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
+ if (!skb) {
+ dev_err(target->dev, "failed to allocate send buffer\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, sizeof(struct htc_frame_hdr));
+
+ cp_msg = (struct htc_config_pipe_msg *)
+ skb_put(skb, sizeof(struct htc_config_pipe_msg));
+
+ cp_msg->message_id = cpu_to_be16(HTC_MSG_CONFIG_PIPE_ID);
+ cp_msg->pipe_id = USB_WLAN_TX_PIPE;
+ cp_msg->credits = 28;
+
+ target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS;
+
+ ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
+ if (ret)
+ goto err;
+
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "HTC credit config timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+err:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static int htc_setup_complete(struct htc_target *target)
+{
+ struct sk_buff *skb;
+ struct htc_comp_msg *comp_msg;
+ int ret = 0, time_left;
+
+ skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
+ if (!skb) {
+ dev_err(target->dev, "failed to allocate send buffer\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, sizeof(struct htc_frame_hdr));
+
+ comp_msg = (struct htc_comp_msg *)
+ skb_put(skb, sizeof(struct htc_comp_msg));
+ comp_msg->msg_id = cpu_to_be16(HTC_MSG_SETUP_COMPLETE_ID);
+
+ target->htc_flags |= HTC_OP_START_WAIT;
+
+ ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
+ if (ret)
+ goto err;
+
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "HTC start timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+
+err:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+/* HTC APIs */
+
+int htc_init(struct htc_target *target)
+{
+ int ret;
+
+ ret = htc_config_pipe_credits(target);
+ if (ret)
+ return ret;
+
+ return htc_setup_complete(target);
+}
+
+int htc_connect_service(struct htc_target *target,
+ struct htc_service_connreq *service_connreq,
+ enum htc_endpoint_id *conn_rsp_epid)
+{
+ struct sk_buff *skb;
+ struct htc_endpoint *endpoint;
+ struct htc_conn_svc_msg *conn_msg;
+ int ret, time_left;
+
+ /* Find an available endpoint */
+ endpoint = get_next_avail_ep(target->endpoint);
+ if (!endpoint) {
+ dev_err(target->dev, "Endpoint is not available for"
+ "service %d\n", service_connreq->service_id);
+ return -EINVAL;
+ }
+
+ endpoint->service_id = service_connreq->service_id;
+ endpoint->max_txqdepth = service_connreq->max_send_qdepth;
+ endpoint->ul_pipeid = service_to_ulpipe(service_connreq->service_id);
+ endpoint->dl_pipeid = service_to_dlpipe(service_connreq->service_id);
+ endpoint->ep_callbacks = service_connreq->ep_callbacks;
+
+ skb = alloc_skb(sizeof(struct htc_conn_svc_msg) +
+ sizeof(struct htc_frame_hdr), GFP_ATOMIC);
+ if (!skb) {
+ dev_err(target->dev, "Failed to allocate buf to send"
+ "service connect req\n");
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, sizeof(struct htc_frame_hdr));
+
+ conn_msg = (struct htc_conn_svc_msg *)
+ skb_put(skb, sizeof(struct htc_conn_svc_msg));
+ conn_msg->service_id = cpu_to_be16(service_connreq->service_id);
+ conn_msg->msg_id = cpu_to_be16(HTC_MSG_CONNECT_SERVICE_ID);
+ conn_msg->con_flags = cpu_to_be16(service_connreq->con_flags);
+ conn_msg->dl_pipeid = endpoint->dl_pipeid;
+ conn_msg->ul_pipeid = endpoint->ul_pipeid;
+
+ ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
+ if (ret)
+ goto err;
+
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "Service connection timeout for: %d\n",
+ service_connreq->service_id);
+ return -ETIMEDOUT;
+ }
+
+ *conn_rsp_epid = target->conn_rsp_epid;
+ return 0;
+err:
+ kfree_skb(skb);
+ return ret;
+}
+
+int htc_send(struct htc_target *target, struct sk_buff *skb,
+ enum htc_endpoint_id epid, struct ath9k_htc_tx_ctl *tx_ctl)
+{
+ return htc_issue_send(target, skb, skb->len, 0, epid, tx_ctl);
+}
+
+void htc_stop(struct htc_target *target)
+{
+ enum htc_endpoint_id epid;
+ struct htc_endpoint *endpoint;
+
+ for (epid = ENDPOINT0; epid < ENDPOINT_MAX; epid++) {
+ endpoint = &target->endpoint[epid];
+ if (endpoint->service_id != 0)
+ target->hif->stop(target->hif_dev, endpoint->ul_pipeid);
+ }
+}
+
+void htc_start(struct htc_target *target)
+{
+ enum htc_endpoint_id epid;
+ struct htc_endpoint *endpoint;
+
+ for (epid = ENDPOINT0; epid < ENDPOINT_MAX; epid++) {
+ endpoint = &target->endpoint[epid];
+ if (endpoint->service_id != 0)
+ target->hif->start(target->hif_dev,
+ endpoint->ul_pipeid);
+ }
+}
+
+void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
+ struct sk_buff *skb, bool txok)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_frame_hdr *htc_hdr = NULL;
+
+ if (htc_handle->htc_flags & HTC_OP_CONFIG_PIPE_CREDITS) {
+ complete(&htc_handle->cmd_wait);
+ htc_handle->htc_flags &= ~HTC_OP_CONFIG_PIPE_CREDITS;
+ goto ret;
+ }
+
+ if (htc_handle->htc_flags & HTC_OP_START_WAIT) {
+ complete(&htc_handle->cmd_wait);
+ htc_handle->htc_flags &= ~HTC_OP_START_WAIT;
+ goto ret;
+ }
+
+ if (skb) {
+ htc_hdr = (struct htc_frame_hdr *) skb->data;
+ endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
+ skb_pull(skb, sizeof(struct htc_frame_hdr));
+
+ if (endpoint->ep_callbacks.tx) {
+ endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv,
+ skb, htc_hdr->endpoint_id,
+ txok);
+ }
+ }
+
+ return;
+ret:
+ /* HTC-generated packets are freed here. */
+ if (htc_hdr && htc_hdr->endpoint_id != ENDPOINT0)
+ dev_kfree_skb_any(skb);
+ else
+ kfree_skb(skb);
+}
+
+/*
+ * HTC Messages are handled directly here and the obtained SKB
+ * is freed.
+ *
+ * Service messages (data, WMI) are passed to the corresponding
+ * endpoint RX handlers, which have to free the SKB.
+ */
+void ath9k_htc_rx_msg(struct htc_target *htc_handle,
+ struct sk_buff *skb, u32 len, u8 pipe_id)
+{
+ struct htc_frame_hdr *htc_hdr;
+ enum htc_endpoint_id epid;
+ struct htc_endpoint *endpoint;
+ __be16 *msg_id;
+
+ if (!htc_handle || !skb)
+ return;
+
+ htc_hdr = (struct htc_frame_hdr *) skb->data;
+ epid = htc_hdr->endpoint_id;
+
+ if (epid >= ENDPOINT_MAX) {
+ if (pipe_id != USB_REG_IN_PIPE)
+ dev_kfree_skb_any(skb);
+ else
+ kfree_skb(skb);
+ return;
+ }
+
+ if (epid == ENDPOINT0) {
+
+ /* Handle trailer */
+ if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) {
+ if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000)
+ /* Move past the Watchdog pattern */
+ htc_hdr = (struct htc_frame_hdr *)(skb->data + 4);
+ }
+
+ /* Get the message ID */
+ msg_id = (__be16 *) ((void *) htc_hdr +
+ sizeof(struct htc_frame_hdr));
+
+ /* Now process HTC messages */
+ switch (be16_to_cpu(*msg_id)) {
+ case HTC_MSG_READY_ID:
+ htc_process_target_rdy(htc_handle, htc_hdr);
+ break;
+ case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID:
+ htc_process_conn_rsp(htc_handle, htc_hdr);
+ break;
+ default:
+ break;
+ }
+
+ kfree_skb(skb);
+
+ } else {
+ if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER)
+ skb_trim(skb, len - htc_hdr->control[0]);
+
+ skb_pull(skb, sizeof(struct htc_frame_hdr));
+
+ endpoint = &htc_handle->endpoint[epid];
+ if (endpoint->ep_callbacks.rx)
+ endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv,
+ skb, epid);
+ }
+}
+
+struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
+ struct ath9k_htc_hif *hif,
+ struct device *dev)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_target *target;
+
+ target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
+ if (!target) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ "target device\n");
+ return NULL;
+ }
+
+ init_completion(&target->target_wait);
+ init_completion(&target->cmd_wait);
+
+ target->hif = hif;
+ target->hif_dev = hif_handle;
+ target->dev = dev;
+
+ /* Assign control endpoint pipe IDs */
+ endpoint = &target->endpoint[ENDPOINT0];
+ endpoint->ul_pipeid = hif->control_ul_pipe;
+ endpoint->dl_pipeid = hif->control_dl_pipe;
+
+ atomic_set(&target->tgt_ready, 0);
+
+ return target;
+}
+
+void ath9k_htc_hw_free(struct htc_target *htc)
+{
+ kfree(htc);
+}
+
+int ath9k_htc_hw_init(struct htc_target *target,
+ struct device *dev, u16 devid)
+{
+ if (ath9k_htc_probe_device(target, dev, devid)) {
+ printk(KERN_ERR "Failed to initialize the device\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug)
+{
+ if (target)
+ ath9k_htc_disconnect_device(target, hot_unplug);
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
new file mode 100644
index 0000000..faba679
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_HST_H
+#define HTC_HST_H
+
+struct ath9k_htc_priv;
+struct htc_target;
+struct ath9k_htc_tx_ctl;
+
+enum ath9k_hif_transports {
+ ATH9K_HIF_USB,
+};
+
+struct ath9k_htc_hif {
+ struct list_head list;
+ const enum ath9k_hif_transports transport;
+ const char *name;
+
+ u8 control_dl_pipe;
+ u8 control_ul_pipe;
+
+ void (*start) (void *hif_handle, u8 pipe);
+ void (*stop) (void *hif_handle, u8 pipe);
+ int (*send) (void *hif_handle, u8 pipe, struct sk_buff *buf,
+ struct ath9k_htc_tx_ctl *tx_ctl);
+};
+
+enum htc_endpoint_id {
+ ENDPOINT_UNUSED = -1,
+ ENDPOINT0 = 0,
+ ENDPOINT1 = 1,
+ ENDPOINT2 = 2,
+ ENDPOINT3 = 3,
+ ENDPOINT4 = 4,
+ ENDPOINT5 = 5,
+ ENDPOINT6 = 6,
+ ENDPOINT7 = 7,
+ ENDPOINT8 = 8,
+ ENDPOINT_MAX = 22
+};
+
+/* Htc frame hdr flags */
+#define HTC_FLAGS_RECV_TRAILER (1 << 1)
+
+struct htc_frame_hdr {
+ u8 endpoint_id;
+ u8 flags;
+ __be16 payload_len;
+ u8 control[4];
+} __packed;
+
+struct htc_ready_msg {
+ __be16 message_id;
+ __be16 credits;
+ __be16 credit_size;
+ u8 max_endpoints;
+ u8 pad;
+} __packed;
+
+struct htc_config_pipe_msg {
+ __be16 message_id;
+ u8 pipe_id;
+ u8 credits;
+} __packed;
+
+struct htc_packet {
+ void *pktcontext;
+ u8 *buf;
+ u8 *buf_payload;
+ u32 buflen;
+ u32 payload_len;
+
+ int endpoint;
+ int status;
+
+ void *context;
+ u32 reserved;
+};
+
+struct htc_ep_callbacks {
+ void *priv;
+ void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok);
+ void (*rx) (void *, struct sk_buff *, enum htc_endpoint_id);
+};
+
+#define HTC_TX_QUEUE_SIZE 256
+
+struct htc_txq {
+ struct sk_buff *buf[HTC_TX_QUEUE_SIZE];
+ u32 txqdepth;
+ u16 txbuf_cnt;
+ u16 txq_head;
+ u16 txq_tail;
+};
+
+struct htc_endpoint {
+ u16 service_id;
+
+ struct htc_ep_callbacks ep_callbacks;
+ struct htc_txq htc_txq;
+ u32 max_txqdepth;
+ int max_msglen;
+
+ u8 ul_pipeid;
+ u8 dl_pipeid;
+};
+
+#define HTC_MAX_CONTROL_MESSAGE_LENGTH 255
+#define HTC_CONTROL_BUFFER_SIZE \
+ (HTC_MAX_CONTROL_MESSAGE_LENGTH + sizeof(struct htc_frame_hdr))
+
+struct htc_control_buf {
+ struct htc_packet htc_pkt;
+ u8 buf[HTC_CONTROL_BUFFER_SIZE];
+};
+
+#define HTC_OP_START_WAIT BIT(0)
+#define HTC_OP_CONFIG_PIPE_CREDITS BIT(1)
+
+struct htc_target {
+ void *hif_dev;
+ struct ath9k_htc_priv *drv_priv;
+ struct device *dev;
+ struct ath9k_htc_hif *hif;
+ struct htc_endpoint endpoint[ENDPOINT_MAX];
+ struct completion target_wait;
+ struct completion cmd_wait;
+ struct list_head list;
+ enum htc_endpoint_id conn_rsp_epid;
+ u16 credits;
+ u16 credit_size;
+ u8 htc_flags;
+ atomic_t tgt_ready;
+};
+
+enum htc_msg_id {
+ HTC_MSG_READY_ID = 1,
+ HTC_MSG_CONNECT_SERVICE_ID,
+ HTC_MSG_CONNECT_SERVICE_RESPONSE_ID,
+ HTC_MSG_SETUP_COMPLETE_ID,
+ HTC_MSG_CONFIG_PIPE_ID,
+ HTC_MSG_CONFIG_PIPE_RESPONSE_ID,
+};
+
+struct htc_service_connreq {
+ u16 service_id;
+ u16 con_flags;
+ u32 max_send_qdepth;
+ struct htc_ep_callbacks ep_callbacks;
+};
+
+/* Current service IDs */
+
+enum htc_service_group_ids {
+ RSVD_SERVICE_GROUP = 0,
+ WMI_SERVICE_GROUP = 1,
+
+ HTC_SERVICE_GROUP_LAST = 255
+};
+
+#define MAKE_SERVICE_ID(group, index) \
+ (int)(((int)group << 8) | (int)(index))
+
+/* NOTE: service ID of 0x0000 is reserved and should never be used */
+#define HTC_CTRL_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 1)
+#define HTC_LOOPBACK_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 2)
+
+#define WMI_CONTROL_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 0)
+#define WMI_BEACON_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 1)
+#define WMI_CAB_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 2)
+#define WMI_UAPSD_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 3)
+#define WMI_MGMT_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4)
+#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 5)
+#define WMI_DATA_VI_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 6)
+#define WMI_DATA_BE_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 7)
+#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 8)
+
+struct htc_conn_svc_msg {
+ __be16 msg_id;
+ __be16 service_id;
+ __be16 con_flags;
+ u8 dl_pipeid;
+ u8 ul_pipeid;
+ u8 svc_meta_len;
+ u8 pad;
+} __packed;
+
+/* connect response status codes */
+#define HTC_SERVICE_SUCCESS 0
+#define HTC_SERVICE_NOT_FOUND 1
+#define HTC_SERVICE_FAILED 2
+#define HTC_SERVICE_NO_RESOURCES 3
+#define HTC_SERVICE_NO_MORE_EP 4
+
+struct htc_conn_svc_rspmsg {
+ __be16 msg_id;
+ __be16 service_id;
+ u8 status;
+ u8 endpoint_id;
+ __be16 max_msg_len;
+ u8 svc_meta_len;
+ u8 pad;
+} __packed;
+
+struct htc_comp_msg {
+ __be16 msg_id;
+} __packed;
+
+int htc_init(struct htc_target *target);
+int htc_connect_service(struct htc_target *target,
+ struct htc_service_connreq *service_connreq,
+ enum htc_endpoint_id *conn_rsp_eid);
+int htc_send(struct htc_target *target, struct sk_buff *skb,
+ enum htc_endpoint_id eid, struct ath9k_htc_tx_ctl *tx_ctl);
+void htc_stop(struct htc_target *target);
+void htc_start(struct htc_target *target);
+
+void ath9k_htc_rx_msg(struct htc_target *htc_handle,
+ struct sk_buff *skb, u32 len, u8 pipe_id);
+void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
+ struct sk_buff *skb, bool txok);
+
+struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
+ struct ath9k_htc_hif *hif,
+ struct device *dev);
+void ath9k_htc_hw_free(struct htc_target *htc);
+int ath9k_htc_hw_init(struct htc_target *target,
+ struct device *dev, u16 devid);
+void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug);
+
+#endif /* HTC_HST_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
new file mode 100644
index 0000000..624422a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH9K_HW_OPS_H
+#define ATH9K_HW_OPS_H
+
+#include "hw.h"
+
+/* Hardware core and driver accessible callbacks */
+
+static inline void ath9k_hw_configpcipowersave(struct ath_hw *ah,
+ int restore,
+ int power_off)
+{
+ ath9k_hw_ops(ah)->config_pci_powersave(ah, restore, power_off);
+}
+
+static inline void ath9k_hw_rxena(struct ath_hw *ah)
+{
+ ath9k_hw_ops(ah)->rx_enable(ah);
+}
+
+static inline void ath9k_hw_set_desc_link(struct ath_hw *ah, void *ds,
+ u32 link)
+{
+ ath9k_hw_ops(ah)->set_desc_link(ds, link);
+}
+
+static inline void ath9k_hw_get_desc_link(struct ath_hw *ah, void *ds,
+ u32 **link)
+{
+ ath9k_hw_ops(ah)->get_desc_link(ds, link);
+}
+static inline bool ath9k_hw_calibrate(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal)
+{
+ return ath9k_hw_ops(ah)->calibrate(ah, chan, rxchainmask, longcal);
+}
+
+static inline bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
+{
+ return ath9k_hw_ops(ah)->get_isr(ah, masked);
+}
+
+static inline void ath9k_hw_filltxdesc(struct ath_hw *ah, void *ds, u32 seglen,
+ bool is_firstseg, bool is_lastseg,
+ const void *ds0, dma_addr_t buf_addr,
+ unsigned int qcu)
+{
+ ath9k_hw_ops(ah)->fill_txdesc(ah, ds, seglen, is_firstseg, is_lastseg,
+ ds0, buf_addr, qcu);
+}
+
+static inline int ath9k_hw_txprocdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts)
+{
+ return ath9k_hw_ops(ah)->proc_txdesc(ah, ds, ts);
+}
+
+static inline void ath9k_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
+ u32 pktLen, enum ath9k_pkt_type type,
+ u32 txPower, u32 keyIx,
+ enum ath9k_key_type keyType,
+ u32 flags)
+{
+ ath9k_hw_ops(ah)->set11n_txdesc(ah, ds, pktLen, type, txPower, keyIx,
+ keyType, flags);
+}
+
+static inline void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
+ void *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags)
+{
+ ath9k_hw_ops(ah)->set11n_ratescenario(ah, ds, lastds, durUpdateEn,
+ rtsctsRate, rtsctsDuration, series,
+ nseries, flags);
+}
+
+static inline void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
+ u32 aggrLen)
+{
+ ath9k_hw_ops(ah)->set11n_aggr_first(ah, ds, aggrLen);
+}
+
+static inline void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
+ u32 numDelims)
+{
+ ath9k_hw_ops(ah)->set11n_aggr_middle(ah, ds, numDelims);
+}
+
+static inline void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
+{
+ ath9k_hw_ops(ah)->set11n_aggr_last(ah, ds);
+}
+
+static inline void ath9k_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
+{
+ ath9k_hw_ops(ah)->clr11n_aggr(ah, ds);
+}
+
+static inline void ath9k_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
+ u32 burstDuration)
+{
+ ath9k_hw_ops(ah)->set11n_burstduration(ah, ds, burstDuration);
+}
+
+static inline void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
+ u32 vmf)
+{
+ ath9k_hw_ops(ah)->set11n_virtualmorefrag(ah, ds, vmf);
+}
+
+/* Private hardware call ops */
+
+/* PHY ops */
+
+static inline int ath9k_hw_rf_set_freq(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->rf_set_freq(ah, chan);
+}
+
+static inline void ath9k_hw_spur_mitigate_freq(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ath9k_hw_private_ops(ah)->spur_mitigate_freq(ah, chan);
+}
+
+static inline int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->rf_alloc_ext_banks)
+ return 0;
+
+ return ath9k_hw_private_ops(ah)->rf_alloc_ext_banks(ah);
+}
+
+static inline void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->rf_free_ext_banks)
+ return;
+
+ ath9k_hw_private_ops(ah)->rf_free_ext_banks(ah);
+}
+
+static inline bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 modesIndex)
+{
+ if (!ath9k_hw_private_ops(ah)->set_rf_regs)
+ return true;
+
+ return ath9k_hw_private_ops(ah)->set_rf_regs(ah, chan, modesIndex);
+}
+
+static inline void ath9k_hw_init_bb(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->init_bb(ah, chan);
+}
+
+static inline void ath9k_hw_set_channel_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->set_channel_regs(ah, chan);
+}
+
+static inline int ath9k_hw_process_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->process_ini(ah, chan);
+}
+
+static inline void ath9k_olc_init(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->olc_init)
+ return;
+
+ return ath9k_hw_private_ops(ah)->olc_init(ah);
+}
+
+static inline void ath9k_hw_set_rfmode(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->set_rfmode(ah, chan);
+}
+
+static inline void ath9k_hw_mark_phy_inactive(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->mark_phy_inactive(ah);
+}
+
+static inline void ath9k_hw_set_delta_slope(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->set_delta_slope(ah, chan);
+}
+
+static inline bool ath9k_hw_rfbus_req(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->rfbus_req(ah);
+}
+
+static inline void ath9k_hw_rfbus_done(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->rfbus_done(ah);
+}
+
+static inline void ath9k_enable_rfkill(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->enable_rfkill(ah);
+}
+
+static inline void ath9k_hw_restore_chainmask(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->restore_chainmask)
+ return;
+
+ return ath9k_hw_private_ops(ah)->restore_chainmask(ah);
+}
+
+static inline void ath9k_hw_set_diversity(struct ath_hw *ah, bool value)
+{
+ return ath9k_hw_private_ops(ah)->set_diversity(ah, value);
+}
+
+static inline bool ath9k_hw_ani_control(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd, int param)
+{
+ return ath9k_hw_private_ops(ah)->ani_control(ah, cmd, param);
+}
+
+static inline void ath9k_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ ath9k_hw_private_ops(ah)->do_getnf(ah, nfarray);
+}
+
+static inline void ath9k_hw_loadnf(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ath9k_hw_private_ops(ah)->loadnf(ah, chan);
+}
+
+static inline bool ath9k_hw_init_cal(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->init_cal(ah, chan);
+}
+
+static inline void ath9k_hw_setup_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
+{
+ ath9k_hw_private_ops(ah)->setup_calibration(ah, currCal);
+}
+
+static inline bool ath9k_hw_iscal_supported(struct ath_hw *ah,
+ enum ath9k_cal_types calType)
+{
+ return ath9k_hw_private_ops(ah)->iscal_supported(ah, calType);
+}
+
+#endif /* ATH9K_HW_OPS_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 78b5711..c33f17d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -19,18 +19,16 @@
#include <asm/unaligned.h>
#include "hw.h"
+#include "hw-ops.h"
#include "rc.h"
-#include "initvals.h"
+#include "ar9003_mac.h"
#define ATH9K_CLOCK_RATE_CCK 22
#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
+#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
-static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan);
-static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
- struct ar5416_eeprom_def *pEepData,
- u32 reg, u32 value);
MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
@@ -49,6 +47,39 @@ static void __exit ath9k_exit(void)
}
module_exit(ath9k_exit);
+/* Private hardware callbacks */
+
+static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
+{
+ ath9k_hw_private_ops(ah)->init_cal_settings(ah);
+}
+
+static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
+{
+ ath9k_hw_private_ops(ah)->init_mode_regs(ah);
+}
+
+static bool ath9k_hw_macversion_supported(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ return priv_ops->macversion_supported(ah->hw_version.macVersion);
+}
+
+static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan);
+}
+
+static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs)
+ return;
+
+ ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
+}
+
/********************/
/* Helper Functions */
/********************/
@@ -61,7 +92,11 @@ static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
return usecs *ATH9K_CLOCK_RATE_CCK;
if (conf->channel->band == IEEE80211_BAND_2GHZ)
return usecs *ATH9K_CLOCK_RATE_2GHZ_OFDM;
- return usecs *ATH9K_CLOCK_RATE_5GHZ_OFDM;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
+ return usecs * ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
+ else
+ return usecs * ATH9K_CLOCK_RATE_5GHZ_OFDM;
}
static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
@@ -236,21 +271,6 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
}
}
-static int ath9k_hw_get_radiorev(struct ath_hw *ah)
-{
- u32 val;
- int i;
-
- REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
-
- for (i = 0; i < 8; i++)
- REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
- val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
- val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
-
- return ath9k_hw_reverse_bits(val, 8);
-}
-
/************************************/
/* HW Attach, Detach, Init Routines */
/************************************/
@@ -260,6 +280,8 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
if (AR_SREV_9100(ah))
return;
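+	/* Buffer the SERDES writes below so they can be flushed to the
+	 * hardware as a single batch. */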
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
@@ -271,20 +293,30 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);
REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
+/* This should work for all families including legacy */
static bool ath9k_hw_chip_test(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) };
+ u32 regAddr[2] = { AR_STA_ID0 };
u32 regHold[2];
u32 patternData[4] = { 0x55555555,
0xaaaaaaaa,
0x66666666,
0x99999999 };
- int i, j;
+ int i, j, loop_max;
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ loop_max = 2;
+ regAddr[1] = AR_PHY_BASE + (8 << 2);
+ } else
+ loop_max = 1;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < loop_max; i++) {
u32 addr = regAddr[i];
u32 wrData, rdData;
@@ -339,7 +371,13 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.ofdm_trig_high = 500;
ah->config.cck_trig_high = 200;
ah->config.cck_trig_low = 100;
- ah->config.enable_ani = 1;
+
+ /*
+	 * ANI is disabled for AR9003 for now; it is still
+	 * being tested.
+ */
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ah->config.enable_ani = 1;
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
ah->config.spurchans[i][0] = AR_NO_SPUR;
@@ -354,6 +392,12 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.rx_intr_mitigation = true;
/*
+ * Tx IQ Calibration (ah->config.tx_iq_calibration) is only
+ * used by AR9003, but it is showing reliability issues.
+	 * It will take a while to fix, so it is currently disabled.
+ */
+
+ /*
* We need this for PCI devices only (Cardbus, PCI, miniPCI)
* _and_ if on non-uniprocessor systems (Multiprocessor/HT).
* This means we use it for all AR5416 devices, and the few
@@ -372,7 +416,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
if (num_possible_cpus() > 1)
ah->config.serialize_regmode = SER_REG_MODE_AUTO;
}
-EXPORT_SYMBOL(ath9k_hw_init);
static void ath9k_hw_init_defaults(struct ath_hw *ah)
{
@@ -386,8 +429,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->hw_version.subvendorid = 0;
ah->ah_flags = 0;
- if (ah->hw_version.devid == AR5416_AR9100_DEVID)
- ah->hw_version.macVersion = AR_SREV_VERSION_9100;
if (!AR_SREV_9100(ah))
ah->ah_flags = AH_USE_EEPROM;
@@ -400,44 +441,17 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->power_mode = ATH9K_PM_UNDEFINED;
}
-static int ath9k_hw_rf_claim(struct ath_hw *ah)
-{
- u32 val;
-
- REG_WRITE(ah, AR_PHY(0), 0x00000007);
-
- val = ath9k_hw_get_radiorev(ah);
- switch (val & AR_RADIO_SREV_MAJOR) {
- case 0:
- val = AR_RAD5133_SREV_MAJOR;
- break;
- case AR_RAD5133_SREV_MAJOR:
- case AR_RAD5122_SREV_MAJOR:
- case AR_RAD2133_SREV_MAJOR:
- case AR_RAD2122_SREV_MAJOR:
- break;
- default:
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "Radio Chip Rev 0x%02X not supported\n",
- val & AR_RADIO_SREV_MAJOR);
- return -EOPNOTSUPP;
- }
-
- ah->hw_version.analog5GhzRev = val;
-
- return 0;
-}
-
static int ath9k_hw_init_macaddr(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 sum;
int i;
u16 eeval;
+ u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };
sum = 0;
for (i = 0; i < 3; i++) {
- eeval = ah->eep_ops->get_eeprom(ah, AR_EEPROM_MAC(i));
+ eeval = ah->eep_ops->get_eeprom(ah, EEP_MAC[i]);
sum += eeval;
common->macaddr[2 * i] = eeval >> 8;
common->macaddr[2 * i + 1] = eeval & 0xff;
@@ -448,64 +462,20 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
return 0;
}
-static void ath9k_hw_init_rxgain_ini(struct ath_hw *ah)
-{
- u32 rxgain_type;
-
- if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_17) {
- rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
-
- if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_backoff_13db_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6);
- else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_backoff_23db_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6);
- else
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_original_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
- } else {
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_original_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
- }
-}
-
-static void ath9k_hw_init_txgain_ini(struct ath_hw *ah)
-{
- u32 txgain_type;
-
- if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_19) {
- txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
-
- if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_high_power_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6);
- else
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_original_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
- } else {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_original_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
- }
-}
-
static int ath9k_hw_post_init(struct ath_hw *ah)
{
int ecode;
- if (!ath9k_hw_chip_test(ah))
- return -ENODEV;
+ if (!AR_SREV_9271(ah)) {
+ if (!ath9k_hw_chip_test(ah))
+ return -ENODEV;
+ }
- ecode = ath9k_hw_rf_claim(ah);
- if (ecode != 0)
- return ecode;
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ ecode = ar9002_hw_rf_claim(ah);
+ if (ecode != 0)
+ return ecode;
+ }
ecode = ath9k_hw_eeprom_init(ah);
if (ecode != 0)
@@ -516,14 +486,12 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
ah->eep_ops->get_eeprom_ver(ah),
ah->eep_ops->get_eeprom_rev(ah));
- if (!AR_SREV_9280_10_OR_LATER(ah)) {
- ecode = ath9k_hw_rf_alloc_ext_banks(ah);
- if (ecode) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "Failed allocating banks for "
- "external radio\n");
- return ecode;
- }
+ ecode = ath9k_hw_rf_alloc_ext_banks(ah);
+ if (ecode) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+ "Failed allocating banks for "
+ "external radio\n");
+ return ecode;
}
if (!AR_SREV_9100(ah)) {
@@ -534,321 +502,22 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
return 0;
}
-static bool ath9k_hw_devid_supported(u16 devid)
+static void ath9k_hw_attach_ops(struct ath_hw *ah)
{
- switch (devid) {
- case AR5416_DEVID_PCI:
- case AR5416_DEVID_PCIE:
- case AR5416_AR9100_DEVID:
- case AR9160_DEVID_PCI:
- case AR9280_DEVID_PCI:
- case AR9280_DEVID_PCIE:
- case AR9285_DEVID_PCIE:
- case AR5416_DEVID_AR9287_PCI:
- case AR5416_DEVID_AR9287_PCIE:
- case AR9271_USB:
- case AR2427_DEVID_PCIE:
- return true;
- default:
- break;
- }
- return false;
-}
-
-static bool ath9k_hw_macversion_supported(u32 macversion)
-{
- switch (macversion) {
- case AR_SREV_VERSION_5416_PCI:
- case AR_SREV_VERSION_5416_PCIE:
- case AR_SREV_VERSION_9160:
- case AR_SREV_VERSION_9100:
- case AR_SREV_VERSION_9280:
- case AR_SREV_VERSION_9285:
- case AR_SREV_VERSION_9287:
- case AR_SREV_VERSION_9271:
- return true;
- default:
- break;
- }
- return false;
-}
-
-static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
-{
- if (AR_SREV_9160_10_OR_LATER(ah)) {
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- ah->iq_caldata.calData = &iq_cal_single_sample;
- ah->adcgain_caldata.calData =
- &adc_gain_cal_single_sample;
- ah->adcdc_caldata.calData =
- &adc_dc_cal_single_sample;
- ah->adcdc_calinitdata.calData =
- &adc_init_dc_cal;
- } else {
- ah->iq_caldata.calData = &iq_cal_multi_sample;
- ah->adcgain_caldata.calData =
- &adc_gain_cal_multi_sample;
- ah->adcdc_caldata.calData =
- &adc_dc_cal_multi_sample;
- ah->adcdc_calinitdata.calData =
- &adc_init_dc_cal;
- }
- ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
- }
-}
-
-static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
-{
- if (AR_SREV_9271(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
- ARRAY_SIZE(ar9271Modes_9271), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
- ARRAY_SIZE(ar9271Common_9271), 2);
- INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
- ar9271Modes_9271_1_0_only,
- ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
- return;
- }
-
- if (AR_SREV_9287_11_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
- ARRAY_SIZE(ar9287Modes_9287_1_1), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
- ARRAY_SIZE(ar9287Common_9287_1_1), 2);
- if (ah->config.pcie_clock_req)
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_off_L1_9287_1_1,
- ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2);
- else
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_always_on_L1_9287_1_1,
- ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1),
- 2);
- } else if (AR_SREV_9287_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0,
- ARRAY_SIZE(ar9287Modes_9287_1_0), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0,
- ARRAY_SIZE(ar9287Common_9287_1_0), 2);
-
- if (ah->config.pcie_clock_req)
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_off_L1_9287_1_0,
- ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2);
- else
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_always_on_L1_9287_1_0,
- ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0),
- 2);
- } else if (AR_SREV_9285_12_OR_LATER(ah)) {
-
-
- INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
- ARRAY_SIZE(ar9285Modes_9285_1_2), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
- ARRAY_SIZE(ar9285Common_9285_1_2), 2);
-
- if (ah->config.pcie_clock_req) {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_off_L1_9285_1_2,
- ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_always_on_L1_9285_1_2,
- ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
- 2);
- }
- } else if (AR_SREV_9285_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285,
- ARRAY_SIZE(ar9285Modes_9285), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285,
- ARRAY_SIZE(ar9285Common_9285), 2);
-
- if (ah->config.pcie_clock_req) {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_off_L1_9285,
- ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_always_on_L1_9285,
- ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2);
- }
- } else if (AR_SREV_9280_20_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
- ARRAY_SIZE(ar9280Modes_9280_2), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
- ARRAY_SIZE(ar9280Common_9280_2), 2);
-
- if (ah->config.pcie_clock_req) {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_off_L1_9280,
- ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280),2);
- } else {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_always_on_L1_9280,
- ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
- }
- INIT_INI_ARRAY(&ah->iniModesAdditional,
- ar9280Modes_fast_clock_9280_2,
- ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
- } else if (AR_SREV_9280_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280,
- ARRAY_SIZE(ar9280Modes_9280), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280,
- ARRAY_SIZE(ar9280Common_9280), 2);
- } else if (AR_SREV_9160_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
- ARRAY_SIZE(ar5416Modes_9160), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
- ARRAY_SIZE(ar5416Common_9160), 2);
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160,
- ARRAY_SIZE(ar5416Bank0_9160), 2);
- INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160,
- ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160,
- ARRAY_SIZE(ar5416Bank1_9160), 2);
- INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160,
- ARRAY_SIZE(ar5416Bank2_9160), 2);
- INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160,
- ARRAY_SIZE(ar5416Bank3_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160,
- ARRAY_SIZE(ar5416Bank6_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160,
- ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160,
- ARRAY_SIZE(ar5416Bank7_9160), 2);
- if (AR_SREV_9160_11(ah)) {
- INIT_INI_ARRAY(&ah->iniAddac,
- ar5416Addac_91601_1,
- ARRAY_SIZE(ar5416Addac_91601_1), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
- ARRAY_SIZE(ar5416Addac_9160), 2);
- }
- } else if (AR_SREV_9100_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
- ARRAY_SIZE(ar5416Modes_9100), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
- ARRAY_SIZE(ar5416Common_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100,
- ARRAY_SIZE(ar5416Bank0_9100), 2);
- INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100,
- ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100,
- ARRAY_SIZE(ar5416Bank1_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100,
- ARRAY_SIZE(ar5416Bank2_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100,
- ARRAY_SIZE(ar5416Bank3_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
- ARRAY_SIZE(ar5416Bank6_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
- ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100,
- ARRAY_SIZE(ar5416Bank7_9100), 2);
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
- ARRAY_SIZE(ar5416Addac_9100), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
- ARRAY_SIZE(ar5416Modes), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
- ARRAY_SIZE(ar5416Common), 2);
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
- ARRAY_SIZE(ar5416Bank0), 2);
- INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
- ARRAY_SIZE(ar5416BB_RfGain), 3);
- INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
- ARRAY_SIZE(ar5416Bank1), 2);
- INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
- ARRAY_SIZE(ar5416Bank2), 2);
- INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
- ARRAY_SIZE(ar5416Bank3), 3);
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
- ARRAY_SIZE(ar5416Bank6), 3);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
- ARRAY_SIZE(ar5416Bank6TPC), 3);
- INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
- ARRAY_SIZE(ar5416Bank7), 2);
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
- ARRAY_SIZE(ar5416Addac), 2);
- }
-}
-
-static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
-{
- if (AR_SREV_9287_11_OR_LATER(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9287Modes_rx_gain_9287_1_1,
- ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
- else if (AR_SREV_9287_10(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9287Modes_rx_gain_9287_1_0,
- ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6);
- else if (AR_SREV_9280_20(ah))
- ath9k_hw_init_rxgain_ini(ah);
-
- if (AR_SREV_9287_11_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9287Modes_tx_gain_9287_1_1,
- ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
- } else if (AR_SREV_9287_10(ah)) {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9287Modes_tx_gain_9287_1_0,
- ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6);
- } else if (AR_SREV_9280_20(ah)) {
- ath9k_hw_init_txgain_ini(ah);
- } else if (AR_SREV_9285_12_OR_LATER(ah)) {
- u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
-
- /* txgain table */
- if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9285Modes_high_power_tx_gain_9285_1_2,
- ARRAY_SIZE(ar9285Modes_high_power_tx_gain_9285_1_2), 6);
- } else {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9285Modes_original_tx_gain_9285_1_2,
- ARRAY_SIZE(ar9285Modes_original_tx_gain_9285_1_2), 6);
- }
-
- }
-}
-
-static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
-{
- u32 i, j;
-
- if (ah->hw_version.devid == AR9280_DEVID_PCI) {
-
- /* EEPROM Fixup */
- for (i = 0; i < ah->iniModes.ia_rows; i++) {
- u32 reg = INI_RA(&ah->iniModes, i, 0);
-
- for (j = 1; j < ah->iniModes.ia_columns; j++) {
- u32 val = INI_RA(&ah->iniModes, i, j);
-
- INI_RA(&ah->iniModes, i, j) =
- ath9k_hw_ini_fixup(ah,
- &ah->eeprom.def,
- reg, val);
- }
- }
- }
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ar9003_hw_attach_ops(ah);
+ else
+ ar9002_hw_attach_ops(ah);
}
-int ath9k_hw_init(struct ath_hw *ah)
+/* Called for all hardware families */
+static int __ath9k_hw_init(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
int r = 0;
- if (!ath9k_hw_devid_supported(ah->hw_version.devid)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unsupported device ID: 0x%0x\n",
- ah->hw_version.devid);
- return -EOPNOTSUPP;
- }
-
- ath9k_hw_init_defaults(ah);
- ath9k_hw_init_config(ah);
+ if (ah->hw_version.devid == AR5416_AR9100_DEVID)
+ ah->hw_version.macVersion = AR_SREV_VERSION_9100;
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
ath_print(common, ATH_DBG_FATAL,
@@ -856,6 +525,11 @@ int ath9k_hw_init(struct ath_hw *ah)
return -EIO;
}
+ ath9k_hw_init_defaults(ah);
+ ath9k_hw_init_config(ah);
+
+ ath9k_hw_attach_ops(ah);
+
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
ath_print(common, ATH_DBG_FATAL, "Couldn't wakeup chip\n");
return -EIO;
@@ -880,7 +554,7 @@ int ath9k_hw_init(struct ath_hw *ah)
else
ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
- if (!ath9k_hw_macversion_supported(ah->hw_version.macVersion)) {
+ if (!ath9k_hw_macversion_supported(ah)) {
ath_print(common, ATH_DBG_FATAL,
"Mac Chip Rev 0x%02x.%x is not supported by "
"this driver\n", ah->hw_version.macVersion,
@@ -888,45 +562,45 @@ int ath9k_hw_init(struct ath_hw *ah)
return -EOPNOTSUPP;
}
- if (AR_SREV_9100(ah)) {
- ah->iq_caldata.calData = &iq_cal_multi_sample;
- ah->supp_cals = IQ_MISMATCH_CAL;
- ah->is_pciexpress = false;
- }
-
- if (AR_SREV_9271(ah))
+ if (AR_SREV_9271(ah) || AR_SREV_9100(ah))
ah->is_pciexpress = false;
ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
-
ath9k_hw_init_cal_settings(ah);
ah->ani_function = ATH9K_ANI_ALL;
- if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (AR_SREV_9280_10_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
- ah->ath9k_hw_rf_set_freq = &ath9k_hw_ar9280_set_channel;
- ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_9280_spur_mitigate;
- } else {
- ah->ath9k_hw_rf_set_freq = &ath9k_hw_set_channel;
- ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_spur_mitigate;
- }
ath9k_hw_init_mode_regs(ah);
+ /*
+	 * Configure PCIe after INI init. SERDES values now come from the INI file.
+	 * This enables PCIe low power mode.
+ */
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ u32 regval;
+ unsigned int i;
+
+ /* Set Bits 16 and 17 in the AR_WA register. */
+ regval = REG_READ(ah, AR_WA);
+ regval |= 0x00030000;
+ REG_WRITE(ah, AR_WA, regval);
+
+ for (i = 0; i < ah->iniPcieSerdesLowPower.ia_rows; i++) {
+ REG_WRITE(ah,
+ INI_RA(&ah->iniPcieSerdesLowPower, i, 0),
+ INI_RA(&ah->iniPcieSerdesLowPower, i, 1));
+ }
+ }
+
if (ah->is_pciexpress)
ath9k_hw_configpcipowersave(ah, 0, 0);
else
ath9k_hw_disablepcie(ah);
- /* Support for Japan ch.14 (2484) spread */
- if (AR_SREV_9287_11_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniCckfirNormal,
- ar9287Common_normal_cck_fir_coeff_92871_1,
- ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1), 2);
- INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
- ar9287Common_japan_2484_cck_fir_coeff_92871_1,
- ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1), 2);
- }
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ar9002_hw_cck_chan14_spread(ah);
r = ath9k_hw_post_init(ah);
if (r)
@@ -937,8 +611,6 @@ int ath9k_hw_init(struct ath_hw *ah)
if (r)
return r;
- ath9k_hw_init_eeprom_fix(ah);
-
r = ath9k_hw_init_macaddr(ah);
if (r) {
ath_print(common, ATH_DBG_FATAL,
@@ -951,6 +623,9 @@ int ath9k_hw_init(struct ath_hw *ah)
else
ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ar9003_hw_set_nf_limits(ah);
+
ath9k_init_nfcal_hist_buffer(ah);
common->state = ATH_HW_INITIALIZED;
@@ -958,24 +633,50 @@ int ath9k_hw_init(struct ath_hw *ah)
return 0;
}
-static void ath9k_hw_init_bb(struct ath_hw *ah,
- struct ath9k_channel *chan)
+int ath9k_hw_init(struct ath_hw *ah)
{
- u32 synthDelay;
+ int ret;
+ struct ath_common *common = ath9k_hw_common(ah);
- synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
- if (IS_CHAN_B(chan))
- synthDelay = (4 * synthDelay) / 22;
- else
- synthDelay /= 10;
+	/* PCI/PCIe device IDs for the AR5008/AR9001/AR9002 and AR9003 hardware families */
+ switch (ah->hw_version.devid) {
+ case AR5416_DEVID_PCI:
+ case AR5416_DEVID_PCIE:
+ case AR5416_AR9100_DEVID:
+ case AR9160_DEVID_PCI:
+ case AR9280_DEVID_PCI:
+ case AR9280_DEVID_PCIE:
+ case AR9285_DEVID_PCIE:
+ case AR9287_DEVID_PCI:
+ case AR9287_DEVID_PCIE:
+ case AR2427_DEVID_PCIE:
+ case AR9300_DEVID_PCIE:
+ break;
+ default:
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ break;
+ ath_print(common, ATH_DBG_FATAL,
+ "Hardware device ID 0x%04x not supported\n",
+ ah->hw_version.devid);
+ return -EOPNOTSUPP;
+ }
- REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+ ret = __ath9k_hw_init(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize hardware; "
+ "initialization status: %d\n", ret);
+ return ret;
+ }
- udelay(synthDelay + BASE_ACTIVATE_DELAY);
+ return 0;
}
+EXPORT_SYMBOL(ath9k_hw_init);
static void ath9k_hw_init_qos(struct ath_hw *ah)
{
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
@@ -989,105 +690,22 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
-}
-
-static void ath9k_hw_change_target_baud(struct ath_hw *ah, u32 freq, u32 baud)
-{
- u32 lcr;
- u32 baud_divider = freq * 1000 * 1000 / 16 / baud;
-
- lcr = REG_READ(ah , 0x5100c);
- lcr |= 0x80;
- REG_WRITE(ah, 0x5100c, lcr);
- REG_WRITE(ah, 0x51004, (baud_divider >> 8));
- REG_WRITE(ah, 0x51000, (baud_divider & 0xff));
-
- lcr &= ~0x80;
- REG_WRITE(ah, 0x5100c, lcr);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
static void ath9k_hw_init_pll(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- u32 pll;
-
- if (AR_SREV_9100(ah)) {
- if (chan && IS_CHAN_5GHZ(chan))
- pll = 0x1450;
- else
- pll = 0x1458;
- } else {
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
-
- if (chan && IS_CHAN_HALF_RATE(chan))
- pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
- else if (chan && IS_CHAN_QUARTER_RATE(chan))
- pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
-
- if (chan && IS_CHAN_5GHZ(chan)) {
- pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
-
-
- if (AR_SREV_9280_20(ah)) {
- if (((chan->channel % 20) == 0)
- || ((chan->channel % 10) == 0))
- pll = 0x2850;
- else
- pll = 0x142c;
- }
- } else {
- pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
- }
-
- } else if (AR_SREV_9160_10_OR_LATER(ah)) {
-
- pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
-
- if (chan && IS_CHAN_HALF_RATE(chan))
- pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
- else if (chan && IS_CHAN_QUARTER_RATE(chan))
- pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
-
- if (chan && IS_CHAN_5GHZ(chan))
- pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
- else
- pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
- } else {
- pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
-
- if (chan && IS_CHAN_HALF_RATE(chan))
- pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
- else if (chan && IS_CHAN_QUARTER_RATE(chan))
- pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
+ u32 pll = ath9k_hw_compute_pll_control(ah, chan);
- if (chan && IS_CHAN_5GHZ(chan))
- pll |= SM(0xa, AR_RTC_PLL_DIV);
- else
- pll |= SM(0xb, AR_RTC_PLL_DIV);
- }
- }
REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
/* Switch the core clock for ar9271 to 117Mhz */
if (AR_SREV_9271(ah)) {
- if ((pll == 0x142c) || (pll == 0x2850) ) {
- udelay(500);
- /* set CLKOBS to output AHB clock */
- REG_WRITE(ah, 0x7020, 0xe);
- /*
- * 0x304: 117Mhz, ahb_ratio: 1x1
- * 0x306: 40Mhz, ahb_ratio: 1x1
- */
- REG_WRITE(ah, 0x50040, 0x304);
- /*
- * makes adjustments for the baud dividor to keep the
- * targetted baud rate based on the used core clock.
- */
- ath9k_hw_change_target_baud(ah, AR9271_CORE_CLOCK,
- AR9271_TARGET_BAUD_RATE);
- }
+ udelay(500);
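+		/* 0x304: 117 MHz core clock, AHB ratio 1x1 */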
+ REG_WRITE(ah, 0x50040, 0x304);
}
udelay(RTC_PLL_SETTLE_DELAY);
@@ -1095,70 +713,58 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
}
-static void ath9k_hw_init_chain_masks(struct ath_hw *ah)
-{
- int rx_chainmask, tx_chainmask;
-
- rx_chainmask = ah->rxchainmask;
- tx_chainmask = ah->txchainmask;
-
- switch (rx_chainmask) {
- case 0x5:
- REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
- AR_PHY_SWAP_ALT_CHAIN);
- case 0x3:
- if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
- break;
- }
- case 0x1:
- case 0x2:
- case 0x7:
- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
- break;
- default:
- break;
- }
-
- REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
- if (tx_chainmask == 0x5) {
- REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
- AR_PHY_SWAP_ALT_CHAIN);
- }
- if (AR_SREV_9100(ah))
- REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
- REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
-}
-
static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
enum nl80211_iftype opmode)
{
- ah->mask_reg = AR_IMR_TXERR |
+ u32 imr_reg = AR_IMR_TXERR |
AR_IMR_TXURN |
AR_IMR_RXERR |
AR_IMR_RXORN |
AR_IMR_BCNMISC;
- if (ah->config.rx_intr_mitigation)
- ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
- else
- ah->mask_reg |= AR_IMR_RXOK;
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
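+		/* AR9300 and later have separate high and low priority RX
+		 * interrupts; the HP one is always enabled, while the LP one
+		 * is replaced by the mitigation timers when mitigation is on. */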
+ imr_reg |= AR_IMR_RXOK_HP;
+ if (ah->config.rx_intr_mitigation)
+ imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
+ else
+ imr_reg |= AR_IMR_RXOK_LP;
- ah->mask_reg |= AR_IMR_TXOK;
+ } else {
+ if (ah->config.rx_intr_mitigation)
+ imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
+ else
+ imr_reg |= AR_IMR_RXOK;
+ }
+
+ if (ah->config.tx_intr_mitigation)
+ imr_reg |= AR_IMR_TXINTM | AR_IMR_TXMINTR;
+ else
+ imr_reg |= AR_IMR_TXOK;
if (opmode == NL80211_IFTYPE_AP)
- ah->mask_reg |= AR_IMR_MIB;
+ imr_reg |= AR_IMR_MIB;
+
+ ENABLE_REGWRITE_BUFFER(ah);
- REG_WRITE(ah, AR_IMR, ah->mask_reg);
- REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT);
+ REG_WRITE(ah, AR_IMR, imr_reg);
+ ah->imrs2_reg |= AR_IMR_S2_GTT;
+ REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
if (!AR_SREV_9100(ah)) {
REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT);
REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK, 0);
+ }
}
static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
@@ -1241,19 +847,13 @@ void ath9k_hw_deinit(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- if (common->state <= ATH_HW_INITIALIZED)
+ if (common->state < ATH_HW_INITIALIZED)
goto free_hw;
- if (!AR_SREV_9100(ah))
- ath9k_hw_ani_disable(ah);
-
ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
free_hw:
- if (!AR_SREV_9280_10_OR_LATER(ah))
- ath9k_hw_rf_free_ext_banks(ah);
- kfree(ah);
- ah = NULL;
+ ath9k_hw_rf_free_ext_banks(ah);
}
EXPORT_SYMBOL(ath9k_hw_deinit);
@@ -1261,136 +861,7 @@ EXPORT_SYMBOL(ath9k_hw_deinit);
/* INI */
/*******/
-static void ath9k_hw_override_ini(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- u32 val;
-
- if (AR_SREV_9271(ah)) {
- /*
- * Enable spectral scan to solution for issues with stuck
- * beacons on AR9271 1.0. The beacon stuck issue is not seeon on
- * AR9271 1.1
- */
- if (AR_SREV_9271_10(ah)) {
- val = REG_READ(ah, AR_PHY_SPECTRAL_SCAN) |
- AR_PHY_SPECTRAL_SCAN_ENABLE;
- REG_WRITE(ah, AR_PHY_SPECTRAL_SCAN, val);
- }
- else if (AR_SREV_9271_11(ah))
- /*
- * change AR_PHY_RF_CTL3 setting to fix MAC issue
- * present on AR9271 1.1
- */
- REG_WRITE(ah, AR_PHY_RF_CTL3, 0x3a020001);
- return;
- }
-
- /*
- * Set the RX_ABORT and RX_DIS and clear if off only after
- * RXE is set for MAC. This prevents frames with corrupted
- * descriptor status.
- */
- REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
-
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- val = REG_READ(ah, AR_PCU_MISC_MODE2) &
- (~AR_PCU_MISC_MODE2_HWWAR1);
-
- if (AR_SREV_9287_10_OR_LATER(ah))
- val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
-
- REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
- }
-
- if (!AR_SREV_5416_20_OR_LATER(ah) ||
- AR_SREV_9280_10_OR_LATER(ah))
- return;
- /*
- * Disable BB clock gating
- * Necessary to avoid issues on AR5416 2.0
- */
- REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
-
- /*
- * Disable RIFS search on some chips to avoid baseband
- * hang issues.
- */
- if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
- val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
- val &= ~AR_PHY_RIFS_INIT_DELAY;
- REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
- }
-}
-
-static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah,
- struct ar5416_eeprom_def *pEepData,
- u32 reg, u32 value)
-{
- struct base_eep_header *pBase = &(pEepData->baseEepHeader);
- struct ath_common *common = ath9k_hw_common(ah);
-
- switch (ah->hw_version.devid) {
- case AR9280_DEVID_PCI:
- if (reg == 0x7894) {
- ath_print(common, ATH_DBG_EEPROM,
- "ini VAL: %x EEPROM: %x\n", value,
- (pBase->version & 0xff));
-
- if ((pBase->version & 0xff) > 0x0a) {
- ath_print(common, ATH_DBG_EEPROM,
- "PWDCLKIND: %d\n",
- pBase->pwdclkind);
- value &= ~AR_AN_TOP2_PWDCLKIND;
- value |= AR_AN_TOP2_PWDCLKIND &
- (pBase->pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
- } else {
- ath_print(common, ATH_DBG_EEPROM,
- "PWDCLKIND Earlier Rev\n");
- }
-
- ath_print(common, ATH_DBG_EEPROM,
- "final ini VAL: %x\n", value);
- }
- break;
- }
-
- return value;
-}
-
-static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
- struct ar5416_eeprom_def *pEepData,
- u32 reg, u32 value)
-{
- if (ah->eep_map == EEP_MAP_4KBITS)
- return value;
- else
- return ath9k_hw_def_ini_fixup(ah, pEepData, reg, value);
-}
-
-static void ath9k_olc_init(struct ath_hw *ah)
-{
- u32 i;
-
- if (OLC_FOR_AR9287_10_LATER) {
- REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9,
- AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL);
- ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0,
- AR9287_AN_TXPC0_TXPCMODE,
- AR9287_AN_TXPC0_TXPCMODE_S,
- AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE);
- udelay(100);
- } else {
- for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++)
- ah->originalGain[i] =
- MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4),
- AR_PHY_TX_GAIN);
- ah->PDADCdelta = 0;
- }
-}
-
-static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg,
- struct ath9k_channel *chan)
+u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
{
u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
@@ -1404,173 +875,24 @@ static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg,
return ctl;
}
-static int ath9k_hw_process_ini(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
- int i, regWrites = 0;
- struct ieee80211_channel *channel = chan->chan;
- u32 modesIndex, freqIndex;
-
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- freqIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
- freqIndex = 1;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- freqIndex = 2;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
- freqIndex = 2;
- break;
-
- default:
- return -EINVAL;
- }
-
- REG_WRITE(ah, AR_PHY(0), 0x00000007);
- REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
- ah->eep_ops->set_addac(ah, chan);
-
- if (AR_SREV_5416_22_OR_LATER(ah)) {
- REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
- } else {
- struct ar5416IniArray temp;
- u32 addacSize =
- sizeof(u32) * ah->iniAddac.ia_rows *
- ah->iniAddac.ia_columns;
-
- memcpy(ah->addac5416_21,
- ah->iniAddac.ia_array, addacSize);
-
- (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
-
- temp.ia_array = ah->addac5416_21;
- temp.ia_columns = ah->iniAddac.ia_columns;
- temp.ia_rows = ah->iniAddac.ia_rows;
- REG_WRITE_ARRAY(&temp, 1, regWrites);
- }
-
- REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
-
- for (i = 0; i < ah->iniModes.ia_rows; i++) {
- u32 reg = INI_RA(&ah->iniModes, i, 0);
- u32 val = INI_RA(&ah->iniModes, i, modesIndex);
-
- REG_WRITE(ah, reg, val);
-
- if (reg >= 0x7800 && reg < 0x78a0
- && ah->config.analog_shiftreg) {
- udelay(100);
- }
-
- DO_DELAY(regWrites);
- }
-
- if (AR_SREV_9280(ah) || AR_SREV_9287_10_OR_LATER(ah))
- REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
-
- if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) ||
- AR_SREV_9287_10_OR_LATER(ah))
- REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
-
- for (i = 0; i < ah->iniCommon.ia_rows; i++) {
- u32 reg = INI_RA(&ah->iniCommon, i, 0);
- u32 val = INI_RA(&ah->iniCommon, i, 1);
-
- REG_WRITE(ah, reg, val);
-
- if (reg >= 0x7800 && reg < 0x78a0
- && ah->config.analog_shiftreg) {
- udelay(100);
- }
-
- DO_DELAY(regWrites);
- }
-
- ath9k_hw_write_regs(ah, freqIndex, regWrites);
-
- if (AR_SREV_9271_10(ah))
- REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
- modesIndex, regWrites);
-
- if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
- REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
- regWrites);
- }
-
- ath9k_hw_override_ini(ah, chan);
- ath9k_hw_set_regs(ah, chan);
- ath9k_hw_init_chain_masks(ah);
-
- if (OLC_FOR_AR9280_20_LATER)
- ath9k_olc_init(ah);
-
- ah->eep_ops->set_txpower(ah, chan,
- ath9k_regd_get_ctl(regulatory, chan),
- channel->max_antenna_gain * 2,
- channel->max_power * 2,
- min((u32) MAX_RATE_POWER,
- (u32) regulatory->power_limit));
-
- if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "ar5416SetRfRegs failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
/****************************************/
/* Reset and Channel Switching Routines */
/****************************************/
-static void ath9k_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- u32 rfMode = 0;
-
- if (chan == NULL)
- return;
-
- rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
- ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
-
- if (!AR_SREV_9280_10_OR_LATER(ah))
- rfMode |= (IS_CHAN_5GHZ(chan)) ?
- AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
-
- if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan))
- rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
-
- REG_WRITE(ah, AR_PHY_MODE, rfMode);
-}
-
-static void ath9k_hw_mark_phy_inactive(struct ath_hw *ah)
-{
- REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
-}
-
static inline void ath9k_hw_set_dma(struct ath_hw *ah)
{
+ struct ath_common *common = ath9k_hw_common(ah);
u32 regval;
+ ENABLE_REGWRITE_BUFFER(ah);
+
/*
* set AHB_MODE not to do cacheline prefetches
*/
- regval = REG_READ(ah, AR_AHB_MODE);
- REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ regval = REG_READ(ah, AR_AHB_MODE);
+ REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
+ }
/*
* let mac dma reads be in 128 byte chunks
@@ -1578,12 +900,18 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
/*
* Restore TX Trigger Level to its pre-reset value.
* The initial value depends on whether aggregation is enabled, and is
* adjusted whenever underruns are detected.
*/
- REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level);
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level);
+
+ ENABLE_REGWRITE_BUFFER(ah);
/*
* let mac dma writes be in 128 byte chunks
@@ -1596,6 +924,14 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
*/
REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);
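+	/* AR9300: set the RX back-pressure thresholds for the high and low
+	 * priority queues and program the usable RX buffer size (total
+	 * buffer minus the space reserved for the RX status). */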
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_HP, 0x1);
+ REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_LP, 0x1);
+
+ ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
+ ah->caps.rx_status_len);
+ }
+
/*
* reduce the number of usable entries in PCU TXBUF to avoid
* wrap around issues.
@@ -1611,6 +947,12 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
AR_PCU_TXBUF_CTRL_USABLE_SIZE);
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ath9k_hw_reset_txstatus_ring(ah);
}
static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
@@ -1638,10 +980,8 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
}
}
-static inline void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah,
- u32 coef_scaled,
- u32 *coef_mantissa,
- u32 *coef_exponent)
+void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
+ u32 *coef_mantissa, u32 *coef_exponent)
{
u32 coef_exp, coef_man;
@@ -1657,40 +997,6 @@ static inline void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah,
*coef_exponent = coef_exp - 16;
}
-static void ath9k_hw_set_delta_slope(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- u32 coef_scaled, ds_coef_exp, ds_coef_man;
- u32 clockMhzScaled = 0x64000000;
- struct chan_centers centers;
-
- if (IS_CHAN_HALF_RATE(chan))
- clockMhzScaled = clockMhzScaled >> 1;
- else if (IS_CHAN_QUARTER_RATE(chan))
- clockMhzScaled = clockMhzScaled >> 2;
-
- ath9k_hw_get_channel_centers(ah, chan, &centers);
- coef_scaled = clockMhzScaled / centers.synth_center;
-
- ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
- &ds_coef_exp);
-
- REG_RMW_FIELD(ah, AR_PHY_TIMING3,
- AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
- REG_RMW_FIELD(ah, AR_PHY_TIMING3,
- AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
-
- coef_scaled = (9 * coef_scaled) / 10;
-
- ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
- &ds_coef_exp);
-
- REG_RMW_FIELD(ah, AR_PHY_HALFGI,
- AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
- REG_RMW_FIELD(ah, AR_PHY_HALFGI,
- AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
-}
-
static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
{
u32 rst_flags;
@@ -1704,6 +1010,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
(void)REG_READ(ah, AR_RTC_DERIVED_CLK);
}
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
AR_RTC_FORCE_WAKE_ON_INT);
@@ -1715,11 +1023,16 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
if (tmpReg &
(AR_INTR_SYNC_LOCAL_TIMEOUT |
AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
+ u32 val;
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
- REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
- } else {
+
+ val = AR_RC_HOSTIF;
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ val |= AR_RC_AHB;
+ REG_WRITE(ah, AR_RC, val);
+
+ } else if (!AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB);
- }
rst_flags = AR_RTC_RC_MAC_WARM;
if (type == ATH9K_RESET_COLD)
@@ -1727,6 +1040,10 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
}
REG_WRITE(ah, AR_RTC_RC, rst_flags);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
udelay(50);
REG_WRITE(ah, AR_RTC_RC, 0);
@@ -1747,16 +1064,23 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
{
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
AR_RTC_FORCE_WAKE_ON_INT);
- if (!AR_SREV_9100(ah))
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB);
REG_WRITE(ah, AR_RTC_RESET, 0);
- udelay(2);
- if (!AR_SREV_9100(ah))
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ udelay(2);
+
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, 0);
REG_WRITE(ah, AR_RTC_RESET, 1);
@@ -1792,34 +1116,6 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
}
}
-static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- u32 phymode;
- u32 enableDacFifo = 0;
-
- if (AR_SREV_9285_10_OR_LATER(ah))
- enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
- AR_PHY_FC_ENABLE_DAC_FIFO);
-
- phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
- | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo;
-
- if (IS_CHAN_HT40(chan)) {
- phymode |= AR_PHY_FC_DYN2040_EN;
-
- if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
- (chan->chanmode == CHANNEL_G_HT40PLUS))
- phymode |= AR_PHY_FC_DYN2040_PRI_CH;
-
- }
- REG_WRITE(ah, AR_PHY_TURBO, phymode);
-
- ath9k_hw_set11nmac2040(ah);
-
- REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
- REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
-}
-
static bool ath9k_hw_chip_reset(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -1845,7 +1141,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_channel *channel = chan->chan;
- u32 synthDelay, qnum;
+ u32 qnum;
int r;
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1857,17 +1153,15 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
}
}
- REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
- if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
- AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT)) {
+ if (!ath9k_hw_rfbus_req(ah)) {
ath_print(common, ATH_DBG_FATAL,
"Could not kill baseband RX\n");
return false;
}
- ath9k_hw_set_regs(ah, chan);
+ ath9k_hw_set_channel_regs(ah, chan);
- r = ah->ath9k_hw_rf_set_freq(ah, chan);
+ r = ath9k_hw_rf_set_freq(ah, chan);
if (r) {
ath_print(common, ATH_DBG_FATAL,
"Failed to set channel\n");
@@ -1881,20 +1175,12 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
min((u32) MAX_RATE_POWER,
(u32) regulatory->power_limit));
- synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
- if (IS_CHAN_B(chan))
- synthDelay = (4 * synthDelay) / 22;
- else
- synthDelay /= 10;
-
- udelay(synthDelay + BASE_ACTIVATE_DELAY);
-
- REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
+ ath9k_hw_rfbus_done(ah);
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
ath9k_hw_set_delta_slope(ah, chan);
- ah->ath9k_hw_spur_mitigate_freq(ah, chan);
+ ath9k_hw_spur_mitigate_freq(ah, chan);
if (!chan->oneTimeCalsDone)
chan->oneTimeCalsDone = true;
@@ -1902,17 +1188,33 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
return true;
}
-static void ath9k_enable_rfkill(struct ath_hw *ah)
+bool ath9k_hw_check_alive(struct ath_hw *ah)
{
- REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
- AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
+ int count = 50;
+ u32 reg;
+
+ if (AR_SREV_9285_10_OR_LATER(ah))
+ return true;
+
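+	/* Poll the MAC observation bus; if it never leaves one of the known
+	 * idle/stuck signatures below, report the hardware as not alive. */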
+ do {
+ reg = REG_READ(ah, AR_OBS_BUS_1);
- REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
- AR_GPIO_INPUT_MUX2_RFSILENT);
+ if ((reg & 0x7E7FFFEF) == 0x00702400)
+ continue;
- ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
- REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
+ switch (reg & 0x7E000B00) {
+ case 0x1E000000:
+ case 0x52000B00:
+ case 0x18000B00:
+ continue;
+ default:
+ return true;
+ }
+ } while (count-- > 0);
+
+ return false;
}
+EXPORT_SYMBOL(ath9k_hw_check_alive);
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
bool bChannelChange)
@@ -1923,11 +1225,18 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 saveDefAntenna;
u32 macStaId1;
u64 tsf = 0;
- int i, rx_chainmask, r;
+ int i, r;
ah->txchainmask = common->tx_chainmask;
ah->rxchainmask = common->rx_chainmask;
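+	/* Unless the chip is in full sleep, quiesce the receive path
+	 * (abort the PCU receive and stop RX DMA) before resetting. */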
+ if (!ah->chip_fullsleep) {
+ ath9k_hw_abortpcurecv(ah);
+ if (!ath9k_hw_stopdmarecv(ah))
+ ath_print(common, ATH_DBG_XMIT,
+ "Failed to stop receive dma\n");
+ }
+
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
@@ -1940,8 +1249,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
(chan->channel != ah->curchan->channel) &&
((chan->channelFlags & CHANNEL_ALL) ==
(ah->curchan->channelFlags & CHANNEL_ALL)) &&
- !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) ||
- IS_CHAN_A_5MHZ_SPACED(ah->curchan))) {
+ !AR_SREV_9280(ah)) {
if (ath9k_hw_channel_change(ah, chan)) {
ath9k_hw_loadnf(ah, ah->curchan);
@@ -1966,6 +1274,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_mark_phy_inactive(ah);
+ /* Only required on the first reset */
if (AR_SREV_9271(ah) && ah->htc_reset_init) {
REG_WRITE(ah,
AR9271_RESET_POWER_DOWN_CONTROL,
@@ -1978,6 +1287,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
return -EINVAL;
}
+ /* Only required on the first reset */
if (AR_SREV_9271(ah) && ah->htc_reset_init) {
ah->htc_reset_init = false;
REG_WRITE(ah,
@@ -1993,16 +1303,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (AR_SREV_9280_10_OR_LATER(ah))
REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
- if (AR_SREV_9287_12_OR_LATER(ah)) {
- /* Enable ASYNC FIFO */
- REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
- AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
- REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
- REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
- AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
- REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
- AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
- }
r = ath9k_hw_process_ini(ah, chan);
if (r)
return r;
@@ -2027,9 +1327,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
ath9k_hw_set_delta_slope(ah, chan);
- ah->ath9k_hw_spur_mitigate_freq(ah, chan);
+ ath9k_hw_spur_mitigate_freq(ah, chan);
ah->eep_ops->set_board_values(ah, chan);
+ ath9k_hw_set_operating_mode(ah, ah->opmode);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
| macStaId1
@@ -2037,25 +1341,27 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
| (ah->config.
ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
| ah->sta_id1_defaults);
- ath9k_hw_set_operating_mode(ah, ah->opmode);
-
ath_hw_setbssidmask(common);
-
REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
-
ath9k_hw_write_associd(ah);
-
REG_WRITE(ah, AR_ISR, ~0);
-
REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
- r = ah->ath9k_hw_rf_set_freq(ah, chan);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ r = ath9k_hw_rf_set_freq(ah, chan);
if (r)
return r;
+ ENABLE_REGWRITE_BUFFER(ah);
+
for (i = 0; i < AR_NUM_DCU; i++)
REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
ah->intr_txqs = 0;
for (i = 0; i < ah->caps.total_queues; i++)
ath9k_hw_resettxqueue(ah, i);
@@ -2068,25 +1374,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_global_settings(ah);
- if (AR_SREV_9287_12_OR_LATER(ah)) {
- REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
- AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR);
- REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
- AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR);
- REG_WRITE(ah, AR_D_GBL_IFS_EIFS,
- AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR);
-
- REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR);
- REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR);
-
- REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
- AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
- REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
- AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
- }
- if (AR_SREV_9287_12_OR_LATER(ah)) {
- REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
- AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ ar9002_hw_enable_async_fifo(ah);
+ ar9002_hw_enable_wep_aggregation(ah);
}
REG_WRITE(ah, AR_STA_ID1,
@@ -2101,19 +1391,24 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
}
+ if (ah->config.tx_intr_mitigation) {
+ REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300);
+ REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750);
+ }
+
ath9k_hw_init_bb(ah, chan);
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
- rx_chainmask = ah->rxchainmask;
- if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
- }
+ ENABLE_REGWRITE_BUFFER(ah);
+ ath9k_hw_restore_chainmask(ah);
REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
/*
* For big endian systems turn on swapping for descriptors
*/
@@ -2143,6 +1438,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ah->btcoex_hw.enabled)
ath9k_hw_btcoex_enable(ah);
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ath9k_hw_loadnf(ah, curchan);
+ ath9k_hw_start_nfcal(ah);
+ }
+
return 0;
}
EXPORT_SYMBOL(ath9k_hw_reset);
@@ -2429,21 +1729,35 @@ EXPORT_SYMBOL(ath9k_hw_keyisvalid);
/* Power Management (Chipset) */
/******************************/
+/*
+ * Notify Power Mgt is disabled in self-generated frames.
+ * If requested, force chip to sleep.
+ */
static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
{
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
if (setChip) {
+ /*
+ * Clear the RTC force wake bit to allow the
+ * mac to go to sleep.
+ */
REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
- if (!AR_SREV_9100(ah))
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
- if(!AR_SREV_5416(ah))
+ /* Shutdown chip. Active low */
+ if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah))
REG_CLR_BIT(ah, (AR_RTC_RESET),
AR_RTC_RESET_EN);
}
}
+/*
+ * Notify Power Management is enabled in self-generated
+ * frames. If requested, set the power mode of the chip to
+ * auto/normal. Duration is in units of 128us (1/8 TU).
+ */
static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
{
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
@@ -2451,9 +1765,14 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
struct ath9k_hw_capabilities *pCap = &ah->caps;
if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ /* Set WakeOnInterrupt bit; clear ForceWake bit */
REG_WRITE(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_ON_INT);
} else {
+ /*
+ * Clear the RTC force wake bit to allow the
+ * mac to go to sleep.
+ */
REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
}
@@ -2472,7 +1791,8 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
ATH9K_RESET_POWER_ON) != true) {
return false;
}
- ath9k_hw_init_pll(ah, NULL);
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ath9k_hw_init_pll(ah, NULL);
}
if (AR_SREV_9100(ah))
REG_SET_BIT(ah, AR_RTC_RESET,
@@ -2542,424 +1862,6 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
}
EXPORT_SYMBOL(ath9k_hw_setpower);
-/*
- * Helper for ASPM support.
- *
- * Disable PLL when in L0s as well as receiver clock when in L1.
- * This power saving option must be enabled through the SerDes.
- *
- * Programming the SerDes must go through the same 288 bit serial shift
- * register as the other analog registers. Hence the 9 writes.
- */
-void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off)
-{
- u8 i;
- u32 val;
-
- if (ah->is_pciexpress != true)
- return;
-
- /* Do not touch SerDes registers */
- if (ah->config.pcie_powersave_enable == 2)
- return;
-
- /* Nothing to do on restore for 11N */
- if (!restore) {
- if (AR_SREV_9280_20_OR_LATER(ah)) {
- /*
- * AR9280 2.0 or later chips use SerDes values from the
- * initvals.h initialized depending on chipset during
- * ath9k_hw_init()
- */
- for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
- REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
- INI_RA(&ah->iniPcieSerdes, i, 1));
- }
- } else if (AR_SREV_9280(ah) &&
- (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
- REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
-
- /* RX shut off when elecidle is asserted */
- REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
-
- /* Shut off CLKREQ active in L1 */
- if (ah->config.pcie_clock_req)
- REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
- else
- REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
-
- REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
-
- /* Load the new settings */
- REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
-
- } else {
- REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
-
- /* RX shut off when elecidle is asserted */
- REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
-
- /*
- * Ignore ah->ah_config.pcie_clock_req setting for
- * pre-AR9280 11n
- */
- REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
-
- REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
-
- /* Load the new settings */
- REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
- }
-
- udelay(1000);
-
- /* set bit 19 to allow forcing of pcie core into L1 state */
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
-
- /* Several PCIe massages to ensure proper behaviour */
- if (ah->config.pcie_waen) {
- val = ah->config.pcie_waen;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
- } else {
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
- AR_SREV_9287(ah)) {
- val = AR9285_WA_DEFAULT;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
- } else if (AR_SREV_9280(ah)) {
- /*
- * On AR9280 chips bit 22 of 0x4004 needs to be
- * set otherwise card may disappear.
- */
- val = AR9280_WA_DEFAULT;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
- } else
- val = AR_WA_DEFAULT;
- }
-
- REG_WRITE(ah, AR_WA, val);
- }
-
- if (power_off) {
- /*
- * Set PCIe workaround bits
- * bit 14 in WA register (disable L1) should only
- * be set when device enters D3 and be cleared
- * when device comes back to D0.
- */
- if (ah->config.pcie_waen) {
- if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
- REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
- } else {
- if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
- AR_SREV_9287(ah)) &&
- (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
- (AR_SREV_9280(ah) &&
- (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
- REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
- }
- }
- }
-}
-EXPORT_SYMBOL(ath9k_hw_configpcipowersave);
-
-/**********************/
-/* Interrupt Handling */
-/**********************/
-
-bool ath9k_hw_intrpend(struct ath_hw *ah)
-{
- u32 host_isr;
-
- if (AR_SREV_9100(ah))
- return true;
-
- host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
- if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
- return true;
-
- host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
- if ((host_isr & AR_INTR_SYNC_DEFAULT)
- && (host_isr != AR_INTR_SPURIOUS))
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(ath9k_hw_intrpend);
-
-bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
-{
- u32 isr = 0;
- u32 mask2 = 0;
- struct ath9k_hw_capabilities *pCap = &ah->caps;
- u32 sync_cause = 0;
- bool fatal_int = false;
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (!AR_SREV_9100(ah)) {
- if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
- if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
- == AR_RTC_STATUS_ON) {
- isr = REG_READ(ah, AR_ISR);
- }
- }
-
- sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
- AR_INTR_SYNC_DEFAULT;
-
- *masked = 0;
-
- if (!isr && !sync_cause)
- return false;
- } else {
- *masked = 0;
- isr = REG_READ(ah, AR_ISR);
- }
-
- if (isr) {
- if (isr & AR_ISR_BCNMISC) {
- u32 isr2;
- isr2 = REG_READ(ah, AR_ISR_S2);
- if (isr2 & AR_ISR_S2_TIM)
- mask2 |= ATH9K_INT_TIM;
- if (isr2 & AR_ISR_S2_DTIM)
- mask2 |= ATH9K_INT_DTIM;
- if (isr2 & AR_ISR_S2_DTIMSYNC)
- mask2 |= ATH9K_INT_DTIMSYNC;
- if (isr2 & (AR_ISR_S2_CABEND))
- mask2 |= ATH9K_INT_CABEND;
- if (isr2 & AR_ISR_S2_GTT)
- mask2 |= ATH9K_INT_GTT;
- if (isr2 & AR_ISR_S2_CST)
- mask2 |= ATH9K_INT_CST;
- if (isr2 & AR_ISR_S2_TSFOOR)
- mask2 |= ATH9K_INT_TSFOOR;
- }
-
- isr = REG_READ(ah, AR_ISR_RAC);
- if (isr == 0xffffffff) {
- *masked = 0;
- return false;
- }
-
- *masked = isr & ATH9K_INT_COMMON;
-
- if (ah->config.rx_intr_mitigation) {
- if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
- *masked |= ATH9K_INT_RX;
- }
-
- if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
- *masked |= ATH9K_INT_RX;
- if (isr &
- (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
- AR_ISR_TXEOL)) {
- u32 s0_s, s1_s;
-
- *masked |= ATH9K_INT_TX;
-
- s0_s = REG_READ(ah, AR_ISR_S0_S);
- ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
- ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
-
- s1_s = REG_READ(ah, AR_ISR_S1_S);
- ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
- ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
- }
-
- if (isr & AR_ISR_RXORN) {
- ath_print(common, ATH_DBG_INTERRUPT,
- "receive FIFO overrun interrupt\n");
- }
-
- if (!AR_SREV_9100(ah)) {
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
- u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
- if (isr5 & AR_ISR_S5_TIM_TIMER)
- *masked |= ATH9K_INT_TIM_TIMER;
- }
- }
-
- *masked |= mask2;
- }
-
- if (AR_SREV_9100(ah))
- return true;
-
- if (isr & AR_ISR_GENTMR) {
- u32 s5_s;
-
- s5_s = REG_READ(ah, AR_ISR_S5_S);
- if (isr & AR_ISR_GENTMR) {
- ah->intr_gen_timer_trigger =
- MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
-
- ah->intr_gen_timer_thresh =
- MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
-
- if (ah->intr_gen_timer_trigger)
- *masked |= ATH9K_INT_GENTIMER;
-
- }
- }
-
- if (sync_cause) {
- fatal_int =
- (sync_cause &
- (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
- ? true : false;
-
- if (fatal_int) {
- if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
- ath_print(common, ATH_DBG_ANY,
- "received PCI FATAL interrupt\n");
- }
- if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
- ath_print(common, ATH_DBG_ANY,
- "received PCI PERR interrupt\n");
- }
- *masked |= ATH9K_INT_FATAL;
- }
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
- ath_print(common, ATH_DBG_INTERRUPT,
- "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
- REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
- REG_WRITE(ah, AR_RC, 0);
- *masked |= ATH9K_INT_FATAL;
- }
- if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
- ath_print(common, ATH_DBG_INTERRUPT,
- "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
- }
-
- REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
- (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
- }
-
- return true;
-}
-EXPORT_SYMBOL(ath9k_hw_getisr);
-
-enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
-{
- u32 omask = ah->mask_reg;
- u32 mask, mask2;
- struct ath9k_hw_capabilities *pCap = &ah->caps;
- struct ath_common *common = ath9k_hw_common(ah);
-
- ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
-
- if (omask & ATH9K_INT_GLOBAL) {
- ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
- REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
- (void) REG_READ(ah, AR_IER);
- if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
- (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
-
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
- (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
- }
- }
-
- mask = ints & ATH9K_INT_COMMON;
- mask2 = 0;
-
- if (ints & ATH9K_INT_TX) {
- if (ah->txok_interrupt_mask)
- mask |= AR_IMR_TXOK;
- if (ah->txdesc_interrupt_mask)
- mask |= AR_IMR_TXDESC;
- if (ah->txerr_interrupt_mask)
- mask |= AR_IMR_TXERR;
- if (ah->txeol_interrupt_mask)
- mask |= AR_IMR_TXEOL;
- }
- if (ints & ATH9K_INT_RX) {
- mask |= AR_IMR_RXERR;
- if (ah->config.rx_intr_mitigation)
- mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
- else
- mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
- mask |= AR_IMR_GENTMR;
- }
-
- if (ints & (ATH9K_INT_BMISC)) {
- mask |= AR_IMR_BCNMISC;
- if (ints & ATH9K_INT_TIM)
- mask2 |= AR_IMR_S2_TIM;
- if (ints & ATH9K_INT_DTIM)
- mask2 |= AR_IMR_S2_DTIM;
- if (ints & ATH9K_INT_DTIMSYNC)
- mask2 |= AR_IMR_S2_DTIMSYNC;
- if (ints & ATH9K_INT_CABEND)
- mask2 |= AR_IMR_S2_CABEND;
- if (ints & ATH9K_INT_TSFOOR)
- mask2 |= AR_IMR_S2_TSFOOR;
- }
-
- if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
- mask |= AR_IMR_BCNMISC;
- if (ints & ATH9K_INT_GTT)
- mask2 |= AR_IMR_S2_GTT;
- if (ints & ATH9K_INT_CST)
- mask2 |= AR_IMR_S2_CST;
- }
-
- ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
- REG_WRITE(ah, AR_IMR, mask);
- mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM |
- AR_IMR_S2_DTIM |
- AR_IMR_S2_DTIMSYNC |
- AR_IMR_S2_CABEND |
- AR_IMR_S2_CABTO |
- AR_IMR_S2_TSFOOR |
- AR_IMR_S2_GTT | AR_IMR_S2_CST);
- REG_WRITE(ah, AR_IMR_S2, mask | mask2);
- ah->mask_reg = ints;
-
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
- if (ints & ATH9K_INT_TIM_TIMER)
- REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
- else
- REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
- }
-
- if (ints & ATH9K_INT_GLOBAL) {
- ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
- REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
- if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
- AR_INTR_MAC_IRQ);
- REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
-
-
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
- AR_INTR_SYNC_DEFAULT);
- REG_WRITE(ah, AR_INTR_SYNC_MASK,
- AR_INTR_SYNC_DEFAULT);
- }
- ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
- REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
- }
-
- return omask;
-}
-EXPORT_SYMBOL(ath9k_hw_set_interrupts);
-
/*******************/
/* Beacon Handling */
/*******************/
@@ -2970,6 +1872,8 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
ah->beacon_interval = beacon_period;
+ ENABLE_REGWRITE_BUFFER(ah);
+
switch (ah->opmode) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_MONITOR:
@@ -3013,6 +1917,9 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period));
REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period));
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
beacon_period &= ~ATH9K_BEACON_ENA;
if (beacon_period & ATH9K_BEACON_RESET_TSF) {
ath9k_hw_reset_tsf(ah);
@@ -3029,6 +1936,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
REG_WRITE(ah, AR_BEACON_PERIOD,
@@ -3036,6 +1945,9 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
REG_RMW_FIELD(ah, AR_RSSI_THR,
AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);
@@ -3058,6 +1970,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
ath_print(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
ath_print(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_NEXT_DTIM,
TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));
@@ -3077,6 +1991,9 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
REG_SET_BIT(ah, AR_TIMER_MODE,
AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
AR_DTIM_TIMER_EN);
@@ -3219,7 +2136,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
- if (AR_SREV_9285_10_OR_LATER(ah))
+ if (AR_SREV_9271(ah))
+ pCap->num_gpio_pins = AR9271_NUM_GPIO;
+ else if (AR_SREV_9285_10_OR_LATER(ah))
pCap->num_gpio_pins = AR9285_NUM_GPIO;
else if (AR_SREV_9280_10_OR_LATER(ah))
pCap->num_gpio_pins = AR928X_NUM_GPIO;
@@ -3246,8 +2165,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
}
#endif
-
- pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
+ if (AR_SREV_9271(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
+ else
+ pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
@@ -3291,6 +2212,26 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE;
}
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_LDPC |
+ ATH9K_HW_CAP_FASTCLOCK;
+ pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
+ pCap->rx_lp_qdepth = ATH9K_HW_RX_LP_QDEPTH;
+ pCap->rx_status_len = sizeof(struct ar9003_rxs);
+ pCap->tx_desc_len = sizeof(struct ar9003_txc);
+ pCap->txs_len = sizeof(struct ar9003_txs);
+ } else {
+ pCap->tx_desc_len = sizeof(struct ath_desc);
+ if (AR_SREV_9280_20(ah) &&
+ ((ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) <=
+ AR5416_EEP_MINOR_VER_16) ||
+ ah->eep_ops->get_eeprom(ah, EEP_FSTCLK_5G)))
+ pCap->hw_caps |= ATH9K_HW_CAP_FASTCLOCK;
+ }
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
+
return 0;
}
@@ -3323,10 +2264,6 @@ bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
case ATH9K_CAP_TKIP_SPLIT:
return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ?
false : true;
- case ATH9K_CAP_DIVERSITY:
- return (REG_READ(ah, AR_PHY_CCK_DETECT) &
- AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ?
- true : false;
case ATH9K_CAP_MCAST_KEYSRCH:
switch (capability) {
case 0:
@@ -3369,8 +2306,6 @@ EXPORT_SYMBOL(ath9k_hw_getcapability);
bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
u32 capability, u32 setting, int *status)
{
- u32 v;
-
switch (type) {
case ATH9K_CAP_TKIP_MIC:
if (setting)
@@ -3380,14 +2315,6 @@ bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
ah->sta_id1_defaults &=
~AR_STA_ID1_CRPT_MIC_ENABLE;
return true;
- case ATH9K_CAP_DIVERSITY:
- v = REG_READ(ah, AR_PHY_CCK_DETECT);
- if (setting)
- v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
- else
- v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
- REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
- return true;
case ATH9K_CAP_MCAST_KEYSRCH:
if (setting)
ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH;
@@ -3455,7 +2382,11 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
if (gpio >= ah->caps.num_gpio_pins)
return 0xffffffff;
- if (AR_SREV_9287_10_OR_LATER(ah))
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ return MS_REG_READ(AR9300, gpio) != 0;
+ else if (AR_SREV_9271(ah))
+ return MS_REG_READ(AR9271, gpio) != 0;
+ else if (AR_SREV_9287_10_OR_LATER(ah))
return MS_REG_READ(AR9287, gpio) != 0;
else if (AR_SREV_9285_10_OR_LATER(ah))
return MS_REG_READ(AR9285, gpio) != 0;
@@ -3484,6 +2415,9 @@ EXPORT_SYMBOL(ath9k_hw_cfg_output);
void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
{
+ if (AR_SREV_9271(ah))
+ val = ~val;
+
REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
AR_GPIO_BIT(gpio));
}
@@ -3523,6 +2457,8 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
{
u32 phybits;
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_RX_FILTER, bits);
phybits = 0;
@@ -3538,6 +2474,9 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
else
REG_WRITE(ah, AR_RXCFG,
REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
EXPORT_SYMBOL(ath9k_hw_setrxfilter);
@@ -3610,14 +2549,25 @@ void ath9k_hw_write_associd(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_write_associd);
+#define ATH9K_MAX_TSF_READ 10
+
u64 ath9k_hw_gettsf64(struct ath_hw *ah)
{
- u64 tsf;
+ u32 tsf_lower, tsf_upper1, tsf_upper2;
+ int i;
+
+ tsf_upper1 = REG_READ(ah, AR_TSF_U32);
+ for (i = 0; i < ATH9K_MAX_TSF_READ; i++) {
+ tsf_lower = REG_READ(ah, AR_TSF_L32);
+ tsf_upper2 = REG_READ(ah, AR_TSF_U32);
+ if (tsf_upper2 == tsf_upper1)
+ break;
+ tsf_upper1 = tsf_upper2;
+ }
- tsf = REG_READ(ah, AR_TSF_U32);
- tsf = (tsf << 32) | REG_READ(ah, AR_TSF_L32);
+ WARN_ON(i == ATH9K_MAX_TSF_READ);
- return tsf;
+ return (((u64)tsf_upper1 << 32) | tsf_lower);
}
EXPORT_SYMBOL(ath9k_hw_gettsf64);
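The rewritten ath9k_hw_gettsf64() above guards against the lower 32 bits wrapping between the two register reads: the upper word is sampled before and after the lower word, and the value is only accepted once both upper reads agree (with WARN_ON firing if it never stabilizes). A minimal standalone sketch of the same read-stable-upper pattern, assuming hypothetical read_hi()/read_lo() accessors for a free-running 64-bit counter split across two 32-bit registers:

/* Sketch only: read_hi()/read_lo() stand in for the two 32-bit MMIO reads. */
static u64 read_split_counter64(void)
{
	u32 lo, hi1, hi2;
	int i;

	hi1 = read_hi();
	for (i = 0; i < 10; i++) {	/* cf. ATH9K_MAX_TSF_READ */
		lo = read_lo();
		hi2 = read_hi();
		if (hi2 == hi1)		/* no carry into the upper word */
			break;
		hi1 = hi2;		/* wrapped between reads; retry */
	}

	return ((u64)hi1 << 32) | lo;
}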
@@ -3868,6 +2818,16 @@ void ath_gen_timer_isr(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath_gen_timer_isr);
+/********/
+/* HTC */
+/********/
+
+void ath9k_hw_htc_resetinit(struct ath_hw *ah)
+{
+ ah->htc_reset_init = true;
+}
+EXPORT_SYMBOL(ath9k_hw_htc_resetinit);
+
static struct {
u32 version;
const char * name;
@@ -3882,6 +2842,7 @@ static struct {
{ AR_SREV_VERSION_9285, "9285" },
{ AR_SREV_VERSION_9287, "9287" },
{ AR_SREV_VERSION_9271, "9271" },
+ { AR_SREV_VERSION_9300, "9300" },
};
/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dbbf7ca..77245df 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -41,18 +41,16 @@
#define AR9280_DEVID_PCIE 0x002a
#define AR9285_DEVID_PCIE 0x002b
#define AR2427_DEVID_PCIE 0x002c
+#define AR9287_DEVID_PCI 0x002d
+#define AR9287_DEVID_PCIE 0x002e
+#define AR9300_DEVID_PCIE 0x0030
#define AR5416_AR9100_DEVID 0x000b
-#define AR9271_USB 0x9271
-
#define AR_SUBVENDOR_ID_NOG 0x0e11
#define AR_SUBVENDOR_ID_NEW_A 0x7065
#define AR5416_MAGIC 0x19641014
-#define AR5416_DEVID_AR9287_PCI 0x002D
-#define AR5416_DEVID_AR9287_PCIE 0x002E
-
#define AR9280_COEX2WIRE_SUBSYSID 0x309b
#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
@@ -70,6 +68,24 @@
#define REG_READ(_ah, _reg) \
ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
+#define ENABLE_REGWRITE_BUFFER(_ah) \
+ do { \
+ if (AR_SREV_9271(_ah)) \
+ ath9k_hw_common(_ah)->ops->enable_write_buffer((_ah)); \
+ } while (0)
+
+#define DISABLE_REGWRITE_BUFFER(_ah) \
+ do { \
+ if (AR_SREV_9271(_ah)) \
+ ath9k_hw_common(_ah)->ops->disable_write_buffer((_ah)); \
+ } while (0)
+
+#define REGWRITE_BUFFER_FLUSH(_ah) \
+ do { \
+ if (AR_SREV_9271(_ah)) \
+ ath9k_hw_common(_ah)->ops->write_flush((_ah)); \
+ } while (0)
+
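These three macros only do anything on AR9271, where batching register writes cuts down the number of USB transactions; enable_write_buffer(), write_flush() and disable_write_buffer() are expected to be registered by the AR9271 transport (not shown in this diff), while on PCI parts the macros compile away and every REG_WRITE() still goes straight to the device. Call sites throughout this patch bracket multi-write sequences the same way; a pattern sketch mirroring the ath9k_hw_setrxfilter() hunk below, not a new call site:

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_RX_FILTER, bits);	/* queued on AR9271, immediate elsewhere */
	REG_WRITE(ah, AR_RXCFG,
		  REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA);

	REGWRITE_BUFFER_FLUSH(ah);		/* push any queued writes to the device */
	DISABLE_REGWRITE_BUFFER(ah);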
#define SM(_v, _f) (((_v) << _f##_S) & _f)
#define MS(_v, _f) (((_v) & _f) >> _f##_S)
#define REG_RMW(_a, _r, _set, _clr) \
@@ -77,6 +93,8 @@
#define REG_RMW_FIELD(_a, _r, _f, _v) \
REG_WRITE(_a, _r, \
(REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f))
+#define REG_READ_FIELD(_a, _r, _f) \
+ (((REG_READ(_a, _r) & _f) >> _f##_S))
#define REG_SET_BIT(_a, _r, _f) \
REG_WRITE(_a, _r, REG_READ(_a, _r) | _f)
#define REG_CLR_BIT(_a, _r, _f) \
@@ -137,6 +155,16 @@
#define TU_TO_USEC(_tu) ((_tu) << 10)
+#define ATH9K_HW_RX_HP_QDEPTH 16
+#define ATH9K_HW_RX_LP_QDEPTH 128
+
+enum ath_ini_subsys {
+ ATH_INI_PRE = 0,
+ ATH_INI_CORE,
+ ATH_INI_POST,
+ ATH_INI_NUM_SPLIT,
+};
+
enum wireless_mode {
ATH9K_MODE_11A = 0,
ATH9K_MODE_11G,
@@ -167,13 +195,16 @@ enum ath9k_hw_caps {
ATH9K_HW_CAP_ENHANCEDPM = BIT(14),
ATH9K_HW_CAP_AUTOSLEEP = BIT(15),
ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(16),
+ ATH9K_HW_CAP_EDMA = BIT(17),
+ ATH9K_HW_CAP_RAC_SUPPORTED = BIT(18),
+ ATH9K_HW_CAP_LDPC = BIT(19),
+ ATH9K_HW_CAP_FASTCLOCK = BIT(20),
};
enum ath9k_capability_type {
ATH9K_CAP_CIPHER = 0,
ATH9K_CAP_TKIP_MIC,
ATH9K_CAP_TKIP_SPLIT,
- ATH9K_CAP_DIVERSITY,
ATH9K_CAP_TXPOW,
ATH9K_CAP_MCAST_KEYSRCH,
ATH9K_CAP_DS
@@ -194,6 +225,11 @@ struct ath9k_hw_capabilities {
u8 num_gpio_pins;
u8 num_antcfg_2ghz;
u8 num_antcfg_5ghz;
+ u8 rx_hp_qdepth;
+ u8 rx_lp_qdepth;
+ u8 rx_status_len;
+ u8 tx_desc_len;
+ u8 txs_len;
};
struct ath9k_ops_config {
@@ -214,6 +250,7 @@ struct ath9k_ops_config {
u32 enable_ani;
int serialize_regmode;
bool rx_intr_mitigation;
+ bool tx_intr_mitigation;
#define SPUR_DISABLE 0
#define SPUR_ENABLE_IOCTL 1
#define SPUR_ENABLE_EEPROM 2
@@ -225,6 +262,7 @@ struct ath9k_ops_config {
#define AR_BASE_FREQ_5GHZ 4900
#define AR_SPUR_FEEQ_BOUND_HT40 19
#define AR_SPUR_FEEQ_BOUND_HT20 10
+ bool tx_iq_calibration; /* Only available for >= AR9003 */
int spurmode;
u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
u8 max_txtrig_level;
@@ -233,6 +271,8 @@ struct ath9k_ops_config {
enum ath9k_int {
ATH9K_INT_RX = 0x00000001,
ATH9K_INT_RXDESC = 0x00000002,
+ ATH9K_INT_RXHP = 0x00000001,
+ ATH9K_INT_RXLP = 0x00000002,
ATH9K_INT_RXNOFRM = 0x00000008,
ATH9K_INT_RXEOL = 0x00000010,
ATH9K_INT_RXORN = 0x00000020,
@@ -329,10 +369,9 @@ struct ath9k_channel {
#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
-#define IS_CHAN_A_5MHZ_SPACED(_c) \
+#define IS_CHAN_A_FAST_CLOCK(_ah, _c) \
((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
- (((_c)->channel % 20) != 0) && \
- (((_c)->channel % 10) != 0))
+ ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
/* These macros check chanmode and not channelFlags */
#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B)
@@ -365,6 +404,12 @@ enum ser_reg_mode {
SER_REG_MODE_AUTO = 2,
};
+enum ath9k_rx_qtype {
+ ATH9K_RX_QUEUE_HP,
+ ATH9K_RX_QUEUE_LP,
+ ATH9K_RX_QUEUE_MAX,
+};
+
struct ath9k_beacon_state {
u32 bs_nexttbtt;
u32 bs_nextdtim;
@@ -442,6 +487,124 @@ struct ath_gen_timer_table {
} timer_mask;
};
+/**
+ * struct ath_hw_private_ops - callbacks used internally by hardware code
+ *
+ * This structure contains private callbacks designed to be used only internally
+ * by the hardware core.
+ *
+ * @init_cal_settings: setup types of calibrations supported
+ * @init_cal: starts actual calibration
+ *
+ * @init_mode_regs: Initializes mode registers
+ * @init_mode_gain_regs: Initialize TX/RX gain registers
+ * @macversion_supported: checks whether this specific mac revision is supported
+ *
+ * @rf_set_freq: change frequency
+ * @spur_mitigate_freq: spur mitigation
+ * @rf_alloc_ext_banks: allocate the external analog banks used to program the RF on non single-chip devices
+ * @rf_free_ext_banks: free the banks allocated by @rf_alloc_ext_banks
+ * @set_rf_regs: write the RF bank registers for the given channel
+ * @compute_pll_control: compute the PLL control value to use for
+ * AR_RTC_PLL_CONTROL for a given channel
+ * @setup_calibration: set up calibration
+ * @iscal_supported: used to query if a type of calibration is supported
+ * @loadnf: load the noise floor readings from each chain into the CCA registers
+ */
+struct ath_hw_private_ops {
+ /* Calibration ops */
+ void (*init_cal_settings)(struct ath_hw *ah);
+ bool (*init_cal)(struct ath_hw *ah, struct ath9k_channel *chan);
+
+ void (*init_mode_regs)(struct ath_hw *ah);
+ void (*init_mode_gain_regs)(struct ath_hw *ah);
+ bool (*macversion_supported)(u32 macversion);
+ void (*setup_calibration)(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal);
+ bool (*iscal_supported)(struct ath_hw *ah,
+ enum ath9k_cal_types calType);
+
+ /* PHY ops */
+ int (*rf_set_freq)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ void (*spur_mitigate_freq)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ int (*rf_alloc_ext_banks)(struct ath_hw *ah);
+ void (*rf_free_ext_banks)(struct ath_hw *ah);
+ bool (*set_rf_regs)(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 modesIndex);
+ void (*set_channel_regs)(struct ath_hw *ah, struct ath9k_channel *chan);
+ void (*init_bb)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ int (*process_ini)(struct ath_hw *ah, struct ath9k_channel *chan);
+ void (*olc_init)(struct ath_hw *ah);
+ void (*set_rfmode)(struct ath_hw *ah, struct ath9k_channel *chan);
+ void (*mark_phy_inactive)(struct ath_hw *ah);
+ void (*set_delta_slope)(struct ath_hw *ah, struct ath9k_channel *chan);
+ bool (*rfbus_req)(struct ath_hw *ah);
+ void (*rfbus_done)(struct ath_hw *ah);
+ void (*enable_rfkill)(struct ath_hw *ah);
+ void (*restore_chainmask)(struct ath_hw *ah);
+ void (*set_diversity)(struct ath_hw *ah, bool value);
+ u32 (*compute_pll_control)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ bool (*ani_control)(struct ath_hw *ah, enum ath9k_ani_cmd cmd,
+ int param);
+ void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]);
+ void (*loadnf)(struct ath_hw *ah, struct ath9k_channel *chan);
+};
+
+/**
+ * struct ath_hw_ops - callbacks used by hardware code and driver code
+ *
+ * This structure contains callbacks designed to be used internally by
+ * hardware code and also by the lower level driver.
+ *
+ * @config_pci_powersave: program the PCIe SerDes/ASPM power save settings
+ * @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC
+ */
+struct ath_hw_ops {
+ void (*config_pci_powersave)(struct ath_hw *ah,
+ int restore,
+ int power_off);
+ void (*rx_enable)(struct ath_hw *ah);
+ void (*set_desc_link)(void *ds, u32 link);
+ void (*get_desc_link)(void *ds, u32 **link);
+ bool (*calibrate)(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal);
+ bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked);
+ void (*fill_txdesc)(struct ath_hw *ah, void *ds, u32 seglen,
+ bool is_firstseg, bool is_lastseg,
+ const void *ds0, dma_addr_t buf_addr,
+ unsigned int qcu);
+ int (*proc_txdesc)(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts);
+ void (*set11n_txdesc)(struct ath_hw *ah, void *ds,
+ u32 pktLen, enum ath9k_pkt_type type,
+ u32 txPower, u32 keyIx,
+ enum ath9k_key_type keyType,
+ u32 flags);
+ void (*set11n_ratescenario)(struct ath_hw *ah, void *ds,
+ void *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags);
+ void (*set11n_aggr_first)(struct ath_hw *ah, void *ds,
+ u32 aggrLen);
+ void (*set11n_aggr_middle)(struct ath_hw *ah, void *ds,
+ u32 numDelims);
+ void (*set11n_aggr_last)(struct ath_hw *ah, void *ds);
+ void (*clr11n_aggr)(struct ath_hw *ah, void *ds);
+ void (*set11n_burstduration)(struct ath_hw *ah, void *ds,
+ u32 burstDuration);
+ void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
+ u32 vmf);
+};
+
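Splitting the callbacks into ath_hw_private_ops (filled in by the family attach helpers such as ar9002_hw_attach_ops()/ar9003_hw_attach_ops() declared further down) and ath_hw_ops (visible to the rest of the driver) is what keeps AR9003 support out of the legacy code paths. A hedged sketch of how a caller reaches a hook through the accessors added below; the surrounding locals (chan, longcal) and the error handling are illustrative, not taken from a specific call site:

	/* Driver-visible hook, fetched through ath9k_hw_ops(). */
	if (!ath9k_hw_ops(ah)->calibrate(ah, chan, ah->rxchainmask, longcal))
		return -EIO;

	/* Hardware-internal hook, only hw code goes through ath9k_hw_private_ops(). */
	ath9k_hw_private_ops(ah)->spur_mitigate_freq(ah, chan);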
struct ath_hw {
struct ieee80211_hw *hw;
struct ath_common common;
@@ -455,13 +618,18 @@ struct ath_hw {
struct ar5416_eeprom_def def;
struct ar5416_eeprom_4k map4k;
struct ar9287_eeprom map9287;
+ struct ar9300_eeprom ar9300_eep;
} eeprom;
const struct eeprom_ops *eep_ops;
- enum ath9k_eep_map eep_map;
bool sw_mgmt_crypto;
bool is_pciexpress;
+ bool need_an_top2_fixup;
u16 tx_trig_level;
+ s16 nf_2g_max;
+ s16 nf_2g_min;
+ s16 nf_5g_max;
+ s16 nf_5g_min;
u16 rfsilent;
u32 rfkill_gpio;
u32 rfkill_polarity;
@@ -478,7 +646,8 @@ struct ath_hw {
struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
int16_t curchan_rad_index;
- u32 mask_reg;
+ enum ath9k_int imask;
+ u32 imrs2_reg;
u32 txok_interrupt_mask;
u32 txerr_interrupt_mask;
u32 txdesc_interrupt_mask;
@@ -493,6 +662,7 @@ struct ath_hw {
struct ath9k_cal_list adcgain_caldata;
struct ath9k_cal_list adcdc_calinitdata;
struct ath9k_cal_list adcdc_caldata;
+ struct ath9k_cal_list tempCompCalData;
struct ath9k_cal_list *cal_list;
struct ath9k_cal_list *cal_list_last;
struct ath9k_cal_list *cal_list_curr;
@@ -533,12 +703,10 @@ struct ath_hw {
DONT_USE_32KHZ,
} enable_32kHz_clock;
- /* Callback for radio frequency change */
- int (*ath9k_hw_rf_set_freq)(struct ath_hw *ah, struct ath9k_channel *chan);
-
- /* Callback for baseband spur frequency */
- void (*ath9k_hw_spur_mitigate_freq)(struct ath_hw *ah,
- struct ath9k_channel *chan);
+ /* Private to hardware code */
+ struct ath_hw_private_ops private_ops;
+ /* Accessed by the lower level driver */
+ struct ath_hw_ops ops;
/* Used to program the radio on non single-chip devices */
u32 *analogBank0Data;
@@ -551,6 +719,7 @@ struct ath_hw {
u32 *addac5416_21;
u32 *bank6Temp;
+ u8 txpower_limit;
int16_t txpower_indexoffset;
int coverage_class;
u32 beacon_interval;
@@ -592,16 +761,34 @@ struct ath_hw {
struct ar5416IniArray iniBank7;
struct ar5416IniArray iniAddac;
struct ar5416IniArray iniPcieSerdes;
+ struct ar5416IniArray iniPcieSerdesLowPower;
struct ar5416IniArray iniModesAdditional;
struct ar5416IniArray iniModesRxGain;
struct ar5416IniArray iniModesTxGain;
struct ar5416IniArray iniModes_9271_1_0_only;
struct ar5416IniArray iniCckfirNormal;
struct ar5416IniArray iniCckfirJapan2484;
+ struct ar5416IniArray iniCommon_normal_cck_fir_coeff_9271;
+ struct ar5416IniArray iniCommon_japan_2484_cck_fir_coeff_9271;
+ struct ar5416IniArray iniModes_9271_ANI_reg;
+ struct ar5416IniArray iniModes_high_power_tx_gain_9271;
+ struct ar5416IniArray iniModes_normal_power_tx_gain_9271;
+
+ struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT];
+ struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT];
+ struct ar5416IniArray iniRadio[ATH_INI_NUM_SPLIT];
+ struct ar5416IniArray iniSOC[ATH_INI_NUM_SPLIT];
u32 intr_gen_timer_trigger;
u32 intr_gen_timer_thresh;
struct ath_gen_timer_table hw_gen_timers;
+
+ struct ar9003_txs *ts_ring;
+ void *ts_start;
+ u32 ts_paddr_start;
+ u32 ts_paddr_end;
+ u16 ts_tail;
+ u8 ts_size;
};
static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
@@ -614,6 +801,16 @@ static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
return &(ath9k_hw_common(ah)->regulatory);
}
+static inline struct ath_hw_private_ops *ath9k_hw_private_ops(struct ath_hw *ah)
+{
+ return &ah->private_ops;
+}
+
+static inline struct ath_hw_ops *ath9k_hw_ops(struct ath_hw *ah)
+{
+ return &ah->ops;
+}
+
/* Initialization, Detach, Reset */
const char *ath9k_hw_probe(u16 vendorid, u16 devid);
void ath9k_hw_deinit(struct ath_hw *ah);
@@ -625,6 +822,7 @@ bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
u32 capability, u32 *result);
bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
u32 capability, u32 setting, int *status);
+u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
/* Key Cache Management */
bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry);
@@ -673,16 +871,10 @@ void ath9k_hw_set11nmac2040(struct ath_hw *ah);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
const struct ath9k_beacon_state *bs);
+bool ath9k_hw_check_alive(struct ath_hw *ah);
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
-void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off);
-
-/* Interrupt Handling */
-bool ath9k_hw_intrpend(struct ath_hw *ah);
-bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked);
-enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints);
-
/* Generic hw timer primitives */
struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
void (*trigger)(void *),
@@ -701,6 +893,39 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
+/* HTC */
+void ath9k_hw_htc_resetinit(struct ath_hw *ah);
+
+/* PHY */
+void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
+ u32 *coef_mantissa, u32 *coef_exponent);
+
+/*
+ * Code specific to AR5008, AR9001 or AR9002;
+ * we stuff these here to avoid callbacks for AR9003.
+ */
+void ar9002_hw_cck_chan14_spread(struct ath_hw *ah);
+int ar9002_hw_rf_claim(struct ath_hw *ah);
+void ar9002_hw_enable_async_fifo(struct ath_hw *ah);
+void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah);
+
+/*
+ * Code specific to AR9003; we stuff these here to avoid callbacks
+ * for older families.
+ */
+void ar9003_hw_set_nf_limits(struct ath_hw *ah);
+
+/* Hardware family op attach helpers */
+void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
+void ar9002_hw_attach_phy_ops(struct ath_hw *ah);
+void ar9003_hw_attach_phy_ops(struct ath_hw *ah);
+
+void ar9002_hw_attach_calib_ops(struct ath_hw *ah);
+void ar9003_hw_attach_calib_ops(struct ath_hw *ah);
+
+void ar9002_hw_attach_ops(struct ath_hw *ah);
+void ar9003_hw_attach_ops(struct ath_hw *ah);
+
#define ATH_PCIE_CAP_LINK_CTRL 0x70
#define ATH_PCIE_CAP_LINK_L0S 1
#define ATH_PCIE_CAP_LINK_L1 2
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 3d4d897..d457cb3 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -175,6 +175,18 @@ static const struct ath_ops ath9k_common_ops = {
.write = ath9k_iowrite32,
};
+static int count_streams(unsigned int chainmask, int max)
+{
+ int streams = 0;
+
+ do {
+ if (++streams == max)
+ break;
+ } while ((chainmask = chainmask & (chainmask - 1)));
+
+ return streams;
+}
+
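count_streams() counts the set bits in the chainmask, capped at max: chainmask & (chainmask - 1) clears the lowest set bit on every pass, so each surviving bit costs one iteration. Illustrative values only (the real masks come from common->tx_chainmask/rx_chainmask):

	tx_streams = count_streams(0x7, 3);	/* chains 0,1,2 -> 3 streams */
	rx_streams = count_streams(0x5, 3);	/* chains 0,2   -> 2 streams */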
/**************************/
/* Initialization */
/**************************/
@@ -182,8 +194,10 @@ static const struct ath_ops ath9k_common_ops = {
static void setup_ht_cap(struct ath_softc *sc,
struct ieee80211_sta_ht_cap *ht_info)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
u8 tx_streams, rx_streams;
+ int i, max_streams;
ht_info->ht_supported = true;
ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
@@ -191,28 +205,40 @@ static void setup_ht_cap(struct ath_softc *sc,
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_DSSSCCK40;
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
+ ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ max_streams = 3;
+ else
+ max_streams = 2;
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (max_streams >= 2)
+ ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+ ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+ }
+
/* set up supported mcs set */
memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
- tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
- 1 : 2;
- rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
- 1 : 2;
+ tx_streams = count_streams(common->tx_chainmask, max_streams);
+ rx_streams = count_streams(common->rx_chainmask, max_streams);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "TX streams %d, RX streams: %d\n",
+ tx_streams, rx_streams);
if (tx_streams != rx_streams) {
- ath_print(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
- tx_streams, rx_streams);
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
ht_info->mcs.tx_params |= ((tx_streams - 1) <<
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
}
- ht_info->mcs.rx_mask[0] = 0xff;
- if (rx_streams >= 2)
- ht_info->mcs.rx_mask[1] = 0xff;
+ for (i = 0; i < rx_streams; i++)
+ ht_info->mcs.rx_mask[i] = 0xff;
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}
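Each byte of mcs.rx_mask covers eight MCS indices, so filling one byte per receive stream advertises MCS 0-7, 0-15 or 0-23 for one, two or three chains. Spelled out for the three-chain case the loop above is equivalent to:

	/* Illustrative expansion for rx_streams == 3 */
	ht_info->mcs.rx_mask[0] = 0xff;	/* MCS 0-7   */
	ht_info->mcs.rx_mask[1] = 0xff;	/* MCS 8-15  */
	ht_info->mcs.rx_mask[2] = 0xff;	/* MCS 16-23 */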
@@ -235,31 +261,37 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
*/
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct list_head *head, const char *name,
- int nbuf, int ndesc)
+ int nbuf, int ndesc, bool is_tx)
{
#define DS2PHYS(_dd, _ds) \
((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_desc *ds;
+ u8 *ds;
struct ath_buf *bf;
- int i, bsize, error;
+ int i, bsize, error, desc_len;
ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
name, nbuf, ndesc);
INIT_LIST_HEAD(head);
+
+ if (is_tx)
+ desc_len = sc->sc_ah->caps.tx_desc_len;
+ else
+ desc_len = sizeof(struct ath_desc);
+
/* ath_desc must be a multiple of DWORDs */
- if ((sizeof(struct ath_desc) % 4) != 0) {
+ if ((desc_len % 4) != 0) {
ath_print(common, ATH_DBG_FATAL,
"ath_desc not DWORD aligned\n");
- BUG_ON((sizeof(struct ath_desc) % 4) != 0);
+ BUG_ON((desc_len % 4) != 0);
error = -ENOMEM;
goto fail;
}
- dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
+ dd->dd_desc_len = desc_len * nbuf * ndesc;
/*
* Need additional DMA memory because we can't use
@@ -272,11 +304,11 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
u32 dma_len;
while (ndesc_skipped) {
- dma_len = ndesc_skipped * sizeof(struct ath_desc);
+ dma_len = ndesc_skipped * desc_len;
dd->dd_desc_len += dma_len;
ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
- };
+ }
}
/* allocate descriptors */
@@ -286,7 +318,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
error = -ENOMEM;
goto fail;
}
- ds = dd->dd_desc;
+ ds = (u8 *) dd->dd_desc;
ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
name, ds, (u32) dd->dd_desc_len,
ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
@@ -300,7 +332,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
}
dd->dd_bufptr = bf;
- for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
+ for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
bf->bf_desc = ds;
bf->bf_daddr = DS2PHYS(dd, ds);
@@ -316,7 +348,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
((caddr_t) dd->dd_desc +
dd->dd_desc_len));
- ds += ndesc;
+ ds += (desc_len * ndesc);
bf->bf_desc = ds;
bf->bf_daddr = DS2PHYS(dd, ds);
}
@@ -514,7 +546,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
- ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
+ ath9k_hw_set_diversity(sc->sc_ah, true);
sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
@@ -568,13 +600,10 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
ath_read_cachesize(common, &csz);
common->cachelsz = csz << 2; /* convert to bytes */
+ /* Initializes the hardware for all supported chipsets */
ret = ath9k_hw_init(ah);
- if (ret) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to initialize hardware; "
- "initialization status: %d\n", ret);
+ if (ret)
goto err_hw;
- }
ret = ath9k_init_debug(ah);
if (ret) {
@@ -760,6 +789,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
tasklet_kill(&sc->intr_tq);
tasklet_kill(&sc->bcon_tasklet);
+
+ kfree(sc->sc_ah);
+ sc->sc_ah = NULL;
}
void ath9k_deinit_device(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index efc420c..0e425cb 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -25,14 +25,21 @@ static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
ah->txurn_interrupt_mask);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_IMR_S0,
SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
| SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
REG_WRITE(ah, AR_IMR_S1,
SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
| SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));
- REG_RMW_FIELD(ah, AR_IMR_S2,
- AR_IMR_S2_QCU_TXURN, ah->txurn_interrupt_mask);
+
+ ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
+ ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
+ REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
@@ -55,6 +62,18 @@ void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
}
EXPORT_SYMBOL(ath9k_hw_txstart);
+void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
+ ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
+ ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
+ ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
+ ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+}
+EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
+
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
u32 npend;
@@ -103,7 +122,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
if (ah->tx_trig_level >= ah->config.max_txtrig_level)
return false;
- omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL);
+ omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL);
txcfg = REG_READ(ah, AR_TXCFG);
curLevel = MS(txcfg, AR_FTRIG);
@@ -205,280 +224,6 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
}
EXPORT_SYMBOL(ath9k_hw_stoptxdma);
-void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 segLen, bool firstSeg,
- bool lastSeg, const struct ath_desc *ds0)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- if (firstSeg) {
- ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
- } else if (lastSeg) {
- ads->ds_ctl0 = 0;
- ads->ds_ctl1 = segLen;
- ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
- ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
- } else {
- ads->ds_ctl0 = 0;
- ads->ds_ctl1 = segLen | AR_TxMore;
- ads->ds_ctl2 = 0;
- ads->ds_ctl3 = 0;
- }
- ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
- ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
- ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
- ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
- ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
-}
-EXPORT_SYMBOL(ath9k_hw_filltxdesc);
-
-void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
- ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
- ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
- ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
- ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
-}
-EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
-
-int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- if ((ads->ds_txstatus9 & AR_TxDone) == 0)
- return -EINPROGRESS;
-
- ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
- ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
- ds->ds_txstat.ts_status = 0;
- ds->ds_txstat.ts_flags = 0;
-
- if (ads->ds_txstatus1 & AR_FrmXmitOK)
- ds->ds_txstat.ts_status |= ATH9K_TX_ACKED;
- if (ads->ds_txstatus1 & AR_ExcessiveRetries)
- ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
- if (ads->ds_txstatus1 & AR_Filtered)
- ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
- if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
- ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
- ath9k_hw_updatetxtriglevel(ah, true);
- }
- if (ads->ds_txstatus9 & AR_TxOpExceeded)
- ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
- if (ads->ds_txstatus1 & AR_TxTimerExpired)
- ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
-
- if (ads->ds_txstatus1 & AR_DescCfgErr)
- ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
- if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
- ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
- ath9k_hw_updatetxtriglevel(ah, true);
- }
- if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
- ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
- ath9k_hw_updatetxtriglevel(ah, true);
- }
- if (ads->ds_txstatus0 & AR_TxBaStatus) {
- ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
- ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
- ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
- }
-
- ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
- switch (ds->ds_txstat.ts_rateindex) {
- case 0:
- ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
- break;
- case 1:
- ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
- break;
- case 2:
- ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
- break;
- case 3:
- ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
- break;
- }
-
- ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
- ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
- ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
- ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
- ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
- ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
- ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
- ds->ds_txstat.evm0 = ads->AR_TxEVM0;
- ds->ds_txstat.evm1 = ads->AR_TxEVM1;
- ds->ds_txstat.evm2 = ads->AR_TxEVM2;
- ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
- ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
- ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
- ds->ds_txstat.ts_antenna = 0;
-
- return 0;
-}
-EXPORT_SYMBOL(ath9k_hw_txprocdesc);
-
-void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
- u32 keyIx, enum ath9k_key_type keyType, u32 flags)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- txPower += ah->txpower_indexoffset;
- if (txPower > 63)
- txPower = 63;
-
- ads->ds_ctl0 = (pktLen & AR_FrameLen)
- | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
- | SM(txPower, AR_XmitPower)
- | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
- | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
- | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
- | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
-
- ads->ds_ctl1 =
- (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
- | SM(type, AR_FrameType)
- | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
- | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
- | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
-
- ads->ds_ctl6 = SM(keyType, AR_EncrType);
-
- if (AR_SREV_9285(ah)) {
- ads->ds_ctl8 = 0;
- ads->ds_ctl9 = 0;
- ads->ds_ctl10 = 0;
- ads->ds_ctl11 = 0;
- }
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_txdesc);
-
-void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
- struct ath_desc *lastds,
- u32 durUpdateEn, u32 rtsctsRate,
- u32 rtsctsDuration,
- struct ath9k_11n_rate_series series[],
- u32 nseries, u32 flags)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
- struct ar5416_desc *last_ads = AR5416DESC(lastds);
- u32 ds_ctl0;
-
- if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
- ds_ctl0 = ads->ds_ctl0;
-
- if (flags & ATH9K_TXDESC_RTSENA) {
- ds_ctl0 &= ~AR_CTSEnable;
- ds_ctl0 |= AR_RTSEnable;
- } else {
- ds_ctl0 &= ~AR_RTSEnable;
- ds_ctl0 |= AR_CTSEnable;
- }
-
- ads->ds_ctl0 = ds_ctl0;
- } else {
- ads->ds_ctl0 =
- (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
- }
-
- ads->ds_ctl2 = set11nTries(series, 0)
- | set11nTries(series, 1)
- | set11nTries(series, 2)
- | set11nTries(series, 3)
- | (durUpdateEn ? AR_DurUpdateEna : 0)
- | SM(0, AR_BurstDur);
-
- ads->ds_ctl3 = set11nRate(series, 0)
- | set11nRate(series, 1)
- | set11nRate(series, 2)
- | set11nRate(series, 3);
-
- ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
- | set11nPktDurRTSCTS(series, 1);
-
- ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
- | set11nPktDurRTSCTS(series, 3);
-
- ads->ds_ctl7 = set11nRateFlags(series, 0)
- | set11nRateFlags(series, 1)
- | set11nRateFlags(series, 2)
- | set11nRateFlags(series, 3)
- | SM(rtsctsRate, AR_RTSCTSRate);
- last_ads->ds_ctl2 = ads->ds_ctl2;
- last_ads->ds_ctl3 = ads->ds_ctl3;
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_ratescenario);
-
-void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
- u32 aggrLen)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
- ads->ds_ctl6 &= ~AR_AggrLen;
- ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_aggr_first);
-
-void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
- u32 numDelims)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
- unsigned int ctl6;
-
- ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
-
- ctl6 = ads->ds_ctl6;
- ctl6 &= ~AR_PadDelim;
- ctl6 |= SM(numDelims, AR_PadDelim);
- ads->ds_ctl6 = ctl6;
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_aggr_middle);
-
-void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl1 |= AR_IsAggr;
- ads->ds_ctl1 &= ~AR_MoreAggr;
- ads->ds_ctl6 &= ~AR_PadDelim;
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_aggr_last);
-
-void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
-}
-EXPORT_SYMBOL(ath9k_hw_clr11n_aggr);
-
-void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
- u32 burstDuration)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl2 &= ~AR_BurstDur;
- ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_burstduration);
-
-void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
- u32 vmf)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- if (vmf)
- ads->ds_ctl0 |= AR_VirtMoreFrag;
- else
- ads->ds_ctl0 &= ~AR_VirtMoreFrag;
-}
-
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
*txqs &= ah->intr_txqs;
@@ -730,6 +475,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
} else
cwMin = qi->tqi_cwmin;
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_DLCL_IFS(q),
SM(cwMin, AR_D_LCL_IFS_CWMIN) |
SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
@@ -744,6 +491,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
REG_WRITE(ah, AR_DMISC(q),
AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);
+ REGWRITE_BUFFER_FLUSH(ah);
+
if (qi->tqi_cbrPeriod) {
REG_WRITE(ah, AR_QCBRCFG(q),
SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
@@ -759,6 +508,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
AR_Q_RDYTIMECFG_EN);
}
+ REGWRITE_BUFFER_FLUSH(ah);
+
REG_WRITE(ah, AR_DCHNTIME(q),
SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
(qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
@@ -776,6 +527,10 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
REG_READ(ah, AR_DMISC(q)) |
AR_D_MISC_POST_FR_BKOFF_DIS);
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
REG_WRITE(ah, AR_DMISC(q),
REG_READ(ah, AR_DMISC(q)) |
@@ -783,6 +538,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
}
switch (qi->tqi_type) {
case ATH9K_TX_QUEUE_BEACON:
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
| AR_Q_MISC_FSP_DBA_GATED
| AR_Q_MISC_BEACON_USE
@@ -793,8 +550,20 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
| AR_D_MISC_BEACON_USE
| AR_D_MISC_POST_FR_BKOFF_DIS);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ /* cwmin and cwmax should be 0 for beacon queue */
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
+ | SM(0, AR_D_LCL_IFS_CWMAX)
+ | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
+ }
break;
case ATH9K_TX_QUEUE_CAB:
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
| AR_Q_MISC_FSP_DBA_GATED
| AR_Q_MISC_CBR_INCR_DIS1
@@ -808,6 +577,10 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
| (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
break;
case ATH9K_TX_QUEUE_PSPOLL:
REG_WRITE(ah, AR_QMISC(q),
@@ -829,6 +602,9 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
AR_D_MISC_POST_FR_BKOFF_DIS);
}
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);
+
if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
ah->txok_interrupt_mask |= 1 << q;
else
@@ -856,7 +632,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 pa, struct ath_desc *nds, u64 tsf)
+ struct ath_rx_status *rs, u64 tsf)
{
struct ar5416_desc ads;
struct ar5416_desc *adsp = AR5416DESC(ds);
@@ -867,92 +643,76 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
ads.u.rx = adsp->u.rx;
- ds->ds_rxstat.rs_status = 0;
- ds->ds_rxstat.rs_flags = 0;
+ rs->rs_status = 0;
+ rs->rs_flags = 0;
- ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
- ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;
+ rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
+ rs->rs_tstamp = ads.AR_RcvTimestamp;
if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
- ds->ds_rxstat.rs_rssi = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ctl0 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ctl1 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ctl2 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ext0 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ext1 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ext2 = ATH9K_RSSI_BAD;
+ rs->rs_rssi = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
} else {
- ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
- ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
+ rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt00);
- ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt01);
- ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt02);
- ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt10);
- ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt11);
- ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt12);
}
if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
- ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
+ rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
else
- ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;
+ rs->rs_keyix = ATH9K_RXKEYIX_INVALID;
- ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
- ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
+ rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
+ rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
- ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
- ds->ds_rxstat.rs_moreaggr =
+ rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
+ rs->rs_moreaggr =
(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
- ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
- ds->ds_rxstat.rs_flags =
+ rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
+ rs->rs_flags =
(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
- ds->ds_rxstat.rs_flags |=
+ rs->rs_flags |=
(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
- ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
+ rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
- ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
+ rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
- ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;
+ rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
if (ads.ds_rxstatus8 & AR_CRCErr)
- ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
+ rs->rs_status |= ATH9K_RXERR_CRC;
else if (ads.ds_rxstatus8 & AR_PHYErr) {
- ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
+ rs->rs_status |= ATH9K_RXERR_PHY;
phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
- ds->ds_rxstat.rs_phyerr = phyerr;
+ rs->rs_phyerr = phyerr;
} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
- ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
+ rs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (ads.ds_rxstatus8 & AR_MichaelErr)
- ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
+ rs->rs_status |= ATH9K_RXERR_MIC;
}
return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
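ath9k_hw_rxprocdesc() now fills a caller-supplied struct ath_rx_status instead of the ds_rxstat block embedded in struct ath_desc, which lets AR9003's separately DMA'd RX status share the same consumer code. A hedged caller sketch; the local names, process_frame() and the 0-on-success convention are assumptions for illustration, not taken from this diff:

	struct ath_rx_status rs;

	memset(&rs, 0, sizeof(rs));
	if (ath9k_hw_rxprocdesc(ah, ds, &rs, tsf) == 0 && rs.rs_status == 0)
		/* frame of rs.rs_datalen bytes completed without errors */
		process_frame(skb, &rs);	/* hypothetical consumer */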
-void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 size, u32 flags)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
- struct ath9k_hw_capabilities *pCap = &ah->caps;
-
- ads->ds_ctl1 = size & AR_BufLen;
- if (flags & ATH9K_RXDESC_INTREQ)
- ads->ds_ctl1 |= AR_RxIntrReq;
-
- ads->ds_rxstatus8 &= ~AR_RxDone;
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
- memset(&(ads->u), 0, sizeof(ads->u));
-}
-EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
-
/*
* This can stop or re-enables RX.
*
@@ -996,12 +756,6 @@ void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);
-void ath9k_hw_rxena(struct ath_hw *ah)
-{
- REG_WRITE(ah, AR_CR, AR_CR_RXE);
-}
-EXPORT_SYMBOL(ath9k_hw_rxena);
-
void ath9k_hw_startpcureceive(struct ath_hw *ah)
{
ath9k_enable_mib_counters(ah);
@@ -1020,6 +774,14 @@ void ath9k_hw_stoppcurecv(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_stoppcurecv);
+void ath9k_hw_abortpcurecv(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);
+
+ ath9k_hw_disable_mib_counters(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
+
bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
@@ -1065,3 +827,142 @@ int ath9k_hw_beaconq_setup(struct ath_hw *ah)
return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
+
+bool ath9k_hw_intrpend(struct ath_hw *ah)
+{
+ u32 host_isr;
+
+ if (AR_SREV_9100(ah))
+ return true;
+
+ host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+ if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
+ return true;
+
+ host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
+ if ((host_isr & AR_INTR_SYNC_DEFAULT)
+ && (host_isr != AR_INTR_SPURIOUS))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ath9k_hw_intrpend);
+
+enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
+ enum ath9k_int ints)
+{
+ enum ath9k_int omask = ah->imask;
+ u32 mask, mask2;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
+
+ if (omask & ATH9K_INT_GLOBAL) {
+ ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
+ REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
+ (void) REG_READ(ah, AR_IER);
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
+ (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
+
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
+ (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
+ }
+ }
+
+ /* TODO: global int Ref count */
+ mask = ints & ATH9K_INT_COMMON;
+ mask2 = 0;
+
+ if (ints & ATH9K_INT_TX) {
+ if (ah->config.tx_intr_mitigation)
+ mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
+ else {
+ if (ah->txok_interrupt_mask)
+ mask |= AR_IMR_TXOK;
+ if (ah->txdesc_interrupt_mask)
+ mask |= AR_IMR_TXDESC;
+ }
+ if (ah->txerr_interrupt_mask)
+ mask |= AR_IMR_TXERR;
+ if (ah->txeol_interrupt_mask)
+ mask |= AR_IMR_TXEOL;
+ }
+ if (ints & ATH9K_INT_RX) {
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
+ if (ah->config.rx_intr_mitigation) {
+ mask &= ~AR_IMR_RXOK_LP;
+ mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
+ } else {
+ mask |= AR_IMR_RXOK_LP;
+ }
+ } else {
+ if (ah->config.rx_intr_mitigation)
+ mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
+ else
+ mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
+ }
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ mask |= AR_IMR_GENTMR;
+ }
+
+ if (ints & (ATH9K_INT_BMISC)) {
+ mask |= AR_IMR_BCNMISC;
+ if (ints & ATH9K_INT_TIM)
+ mask2 |= AR_IMR_S2_TIM;
+ if (ints & ATH9K_INT_DTIM)
+ mask2 |= AR_IMR_S2_DTIM;
+ if (ints & ATH9K_INT_DTIMSYNC)
+ mask2 |= AR_IMR_S2_DTIMSYNC;
+ if (ints & ATH9K_INT_CABEND)
+ mask2 |= AR_IMR_S2_CABEND;
+ if (ints & ATH9K_INT_TSFOOR)
+ mask2 |= AR_IMR_S2_TSFOOR;
+ }
+
+ if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
+ mask |= AR_IMR_BCNMISC;
+ if (ints & ATH9K_INT_GTT)
+ mask2 |= AR_IMR_S2_GTT;
+ if (ints & ATH9K_INT_CST)
+ mask2 |= AR_IMR_S2_CST;
+ }
+
+ ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
+ REG_WRITE(ah, AR_IMR, mask);
+ ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
+ AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
+ AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
+ ah->imrs2_reg |= mask2;
+ REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ if (ints & ATH9K_INT_TIM_TIMER)
+ REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
+ else
+ REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
+ }
+
+ if (ints & ATH9K_INT_GLOBAL) {
+ ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
+ REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
+ AR_INTR_MAC_IRQ);
+ REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
+
+
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
+ AR_INTR_SYNC_DEFAULT);
+ REG_WRITE(ah, AR_INTR_SYNC_MASK,
+ AR_INTR_SYNC_DEFAULT);
+ }
+ ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
+ REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
+ }
+
+ return omask;
+}
+EXPORT_SYMBOL(ath9k_hw_set_interrupts);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 29851e6..00f3e0c 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -37,6 +37,8 @@
AR_2040_##_index : 0) \
|((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
AR_GI##_index : 0) \
+ |((_series)[_index].RateFlags & ATH9K_RATESERIES_STBC ? \
+ AR_STBC##_index : 0) \
|SM((_series)[_index].ChSel, AR_ChainSel##_index))
#define CCK_SIFS_TIME 10
@@ -86,7 +88,6 @@
#define ATH9K_TX_DESC_CFG_ERR 0x04
#define ATH9K_TX_DATA_UNDERRUN 0x08
#define ATH9K_TX_DELIM_UNDERRUN 0x10
-#define ATH9K_TX_SW_ABORTED 0x40
#define ATH9K_TX_SW_FILTERED 0x80
/* 64 bytes */
@@ -117,7 +118,10 @@ struct ath_tx_status {
int8_t ts_rssi_ext0;
int8_t ts_rssi_ext1;
int8_t ts_rssi_ext2;
- u8 pad[3];
+ u8 qid;
+ u16 desc_id;
+ u8 tid;
+ u8 pad[2];
u32 ba_low;
u32 ba_high;
u32 evm0;
@@ -148,6 +152,34 @@ struct ath_rx_status {
u32 evm0;
u32 evm1;
u32 evm2;
+ u32 evm3;
+ u32 evm4;
+};
+
+struct ath_htc_rx_status {
+ __be64 rs_tstamp;
+ __be16 rs_datalen;
+ u8 rs_status;
+ u8 rs_phyerr;
+ int8_t rs_rssi;
+ int8_t rs_rssi_ctl0;
+ int8_t rs_rssi_ctl1;
+ int8_t rs_rssi_ctl2;
+ int8_t rs_rssi_ext0;
+ int8_t rs_rssi_ext1;
+ int8_t rs_rssi_ext2;
+ u8 rs_keyix;
+ u8 rs_rate;
+ u8 rs_antenna;
+ u8 rs_more;
+ u8 rs_isaggr;
+ u8 rs_moreaggr;
+ u8 rs_num_delims;
+ u8 rs_flags;
+ u8 rs_dummy;
+ __be32 evm0;
+ __be32 evm1;
+ __be32 evm2;
};
#define ATH9K_RXERR_CRC 0x01
@@ -207,18 +239,9 @@ struct ath_desc {
u32 ds_ctl0;
u32 ds_ctl1;
u32 ds_hw[20];
- union {
- struct ath_tx_status tx;
- struct ath_rx_status rx;
- void *stats;
- } ds_us;
void *ds_vdata;
} __packed;
-#define ds_txstat ds_us.tx
-#define ds_rxstat ds_us.rx
-#define ds_stat ds_us.stats
-
#define ATH9K_TXDESC_CLRDMASK 0x0001
#define ATH9K_TXDESC_NOACK 0x0002
#define ATH9K_TXDESC_RTSENA 0x0004
@@ -242,7 +265,8 @@ struct ath_desc {
#define ATH9K_TXDESC_EXT_AND_CTL 0x0080
#define ATH9K_TXDESC_VMF 0x0100
#define ATH9K_TXDESC_FRAG_IS_ON 0x0200
-#define ATH9K_TXDESC_CAB 0x0400
+#define ATH9K_TXDESC_LOWRXCHAIN 0x0400
+#define ATH9K_TXDESC_LDPC 0x00010000
#define ATH9K_RXDESC_INTREQ 0x0020
@@ -336,7 +360,6 @@ struct ar5416_desc {
#define AR_DestIdxValid 0x40000000
#define AR_CTSEnable 0x80000000
-#define AR_BufLen 0x00000fff
#define AR_TxMore 0x00001000
#define AR_DestIdx 0x000fe000
#define AR_DestIdx_S 13
@@ -393,6 +416,7 @@ struct ar5416_desc {
#define AR_EncrType 0x0c000000
#define AR_EncrType_S 26
#define AR_TxCtlRsvd61 0xf0000000
+#define AR_LDPC 0x80000000
#define AR_2040_0 0x00000001
#define AR_GI0 0x00000002
@@ -412,7 +436,10 @@ struct ar5416_desc {
#define AR_ChainSel3_S 17
#define AR_RTSCTSRate 0x0ff00000
#define AR_RTSCTSRate_S 20
-#define AR_TxCtlRsvd70 0xf0000000
+#define AR_STBC0 0x10000000
+#define AR_STBC1 0x20000000
+#define AR_STBC2 0x40000000
+#define AR_STBC3 0x80000000
#define AR_TxRSSIAnt00 0x000000ff
#define AR_TxRSSIAnt00_S 0
@@ -476,7 +503,6 @@ struct ar5416_desc {
#define AR_RxCTLRsvd00 0xffffffff
-#define AR_BufLen 0x00000fff
#define AR_RxCtlRsvd00 0x00001000
#define AR_RxIntrReq 0x00002000
#define AR_RxCtlRsvd01 0xffffc000
@@ -626,6 +652,7 @@ enum ath9k_rx_filter {
#define ATH9K_RATESERIES_RTS_CTS 0x0001
#define ATH9K_RATESERIES_2040 0x0002
#define ATH9K_RATESERIES_HALFGI 0x0004
+#define ATH9K_RATESERIES_STBC 0x0008
struct ath9k_11n_rate_series {
u32 Tries;
@@ -669,33 +696,10 @@ struct ath9k_channel;
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q);
void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp);
void ath9k_hw_txstart(struct ath_hw *ah, u32 q);
+void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds);
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q);
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel);
bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q);
-void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 segLen, bool firstSeg,
- bool lastSeg, const struct ath_desc *ds0);
-void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds);
-int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds);
-void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
- u32 keyIx, enum ath9k_key_type keyType, u32 flags);
-void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
- struct ath_desc *lastds,
- u32 durUpdateEn, u32 rtsctsRate,
- u32 rtsctsDuration,
- struct ath9k_11n_rate_series series[],
- u32 nseries, u32 flags);
-void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
- u32 aggrLen);
-void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
- u32 numDelims);
-void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds);
-void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds);
-void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
- u32 burstDuration);
-void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
- u32 vmf);
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs);
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
const struct ath9k_tx_queue_info *qinfo);
@@ -706,15 +710,22 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q);
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q);
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 pa, struct ath_desc *nds, u64 tsf);
+ struct ath_rx_status *rs, u64 tsf);
void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
u32 size, u32 flags);
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
-void ath9k_hw_rxena(struct ath_hw *ah);
void ath9k_hw_startpcureceive(struct ath_hw *ah);
void ath9k_hw_stoppcurecv(struct ath_hw *ah);
+void ath9k_hw_abortpcurecv(struct ath_hw *ah);
bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
int ath9k_hw_beaconq_setup(struct ath_hw *ah);
+/* Interrupt Handling */
+bool ath9k_hw_intrpend(struct ath_hw *ah);
+enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
+ enum ath9k_int ints);
+
+void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
+
#endif /* MAC_H */
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 115e1ae..893b552 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -225,7 +225,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
ath_cache_conf_rate(sc, &hw->conf);
ath_update_txpow(sc);
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
ps_restore:
ath9k_ps_restore(sc);
@@ -401,23 +401,41 @@ void ath9k_tasklet(unsigned long data)
struct ath_common *common = ath9k_hw_common(ah);
u32 status = sc->intrstatus;
+ u32 rxmask;
ath9k_ps_wakeup(sc);
- if (status & ATH9K_INT_FATAL) {
+ if ((status & ATH9K_INT_FATAL) ||
+ !ath9k_hw_check_alive(ah)) {
ath_reset(sc, false);
ath9k_ps_restore(sc);
return;
}
- if (status & (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
+ ATH9K_INT_RXORN);
+ else
+ rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
+
+ if (status & rxmask) {
spin_lock_bh(&sc->rx.rxflushlock);
- ath_rx_tasklet(sc, 0);
+
+ /* Check for high priority Rx first */
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+ (status & ATH9K_INT_RXHP))
+ ath_rx_tasklet(sc, 0, true);
+
+ ath_rx_tasklet(sc, 0, false);
spin_unlock_bh(&sc->rx.rxflushlock);
}
- if (status & ATH9K_INT_TX)
- ath_tx_tasklet(sc);
+ if (status & ATH9K_INT_TX) {
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_tx_edma_tasklet(sc);
+ else
+ ath_tx_tasklet(sc);
+ }
if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
/*
@@ -434,7 +452,7 @@ void ath9k_tasklet(unsigned long data)
ath_gen_timer_isr(sc->sc_ah);
/* re-enable hardware interrupt */
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
ath9k_ps_restore(sc);
}
@@ -445,6 +463,8 @@ irqreturn_t ath_isr(int irq, void *dev)
ATH9K_INT_RXORN | \
ATH9K_INT_RXEOL | \
ATH9K_INT_RX | \
+ ATH9K_INT_RXLP | \
+ ATH9K_INT_RXHP | \
ATH9K_INT_TX | \
ATH9K_INT_BMISS | \
ATH9K_INT_CST | \
@@ -477,7 +497,7 @@ irqreturn_t ath_isr(int irq, void *dev)
	 * value to ensure we only process bits we requested.
*/
ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
- status &= sc->imask; /* discard unasked-for bits */
+ status &= ah->imask; /* discard unasked-for bits */
/*
* If there are no status bits set, then this interrupt was not
@@ -496,7 +516,8 @@ irqreturn_t ath_isr(int irq, void *dev)
* If a FATAL or RXORN interrupt is received, we have to reset the
* chip immediately.
*/
- if (status & (ATH9K_INT_FATAL | ATH9K_INT_RXORN))
+ if ((status & ATH9K_INT_FATAL) || ((status & ATH9K_INT_RXORN) &&
+ !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
goto chip_reset;
if (status & ATH9K_INT_SWBA)
@@ -505,6 +526,13 @@ irqreturn_t ath_isr(int irq, void *dev)
if (status & ATH9K_INT_TXURN)
ath9k_hw_updatetxtriglevel(ah, true);
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (status & ATH9K_INT_RXEOL) {
+ ah->imask &= ~(ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
+ ath9k_hw_set_interrupts(ah, ah->imask);
+ }
+ }
+
if (status & ATH9K_INT_MIB) {
/*
* Disable interrupts until we service the MIB
@@ -518,7 +546,7 @@ irqreturn_t ath_isr(int irq, void *dev)
* the interrupt.
*/
ath9k_hw_procmibevent(ah);
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
@@ -536,7 +564,7 @@ chip_reset:
if (sched) {
/* turn off every interrupt except SWBA */
- ath9k_hw_set_interrupts(ah, (sc->imask & ATH9K_INT_SWBA));
+ ath9k_hw_set_interrupts(ah, (ah->imask & ATH9K_INT_SWBA));
tasklet_schedule(&sc->intr_tq);
}
@@ -724,6 +752,7 @@ static int ath_key_config(struct ath_common *common,
struct ath_hw *ah = common->ah;
struct ath9k_keyval hk;
const u8 *mac = NULL;
+ u8 gmac[ETH_ALEN];
int ret = 0;
int idx;
@@ -747,9 +776,30 @@ static int ath_key_config(struct ath_common *common,
memcpy(hk.kv_val, key->key, key->keylen);
if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
- /* For now, use the default keys for broadcast keys. This may
- * need to change with virtual interfaces. */
- idx = key->keyidx;
+
+ if (key->ap_addr) {
+ /*
+ * Group keys on hardware that supports multicast frame
+ * key search use a mac that is the sender's address with
+ * the high bit set instead of the app-specified address.
+ */
+ memcpy(gmac, key->ap_addr, ETH_ALEN);
+ gmac[0] |= 0x80;
+ mac = gmac;
+
+ if (key->alg == ALG_TKIP)
+ idx = ath_reserve_key_cache_slot_tkip(common);
+ else
+ idx = ath_reserve_key_cache_slot(common);
+ if (idx < 0)
+ mac = NULL; /* no free key cache entries */
+ }
+
+ if (!mac) {
+ /* For now, use the default keys for broadcast keys. This may
+ * need to change with virtual interfaces. */
+ idx = key->keyidx;
+ }
} else if (key->keyidx) {
if (WARN_ON(!sta))
return -EOPNOTSUPP;
@@ -887,7 +937,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath_beacon_config(sc, NULL); /* restart beacons */
/* Re-Enable interrupts */
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
/* Enable LED */
ath9k_hw_cfg_output(ah, ah->led_pin,
@@ -977,7 +1027,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
if (sc->sc_flags & SC_OP_BEACONS)
ath_beacon_config(sc, NULL); /* restart beacons */
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
if (retry_tx) {
int i;
@@ -1162,23 +1212,28 @@ static int ath9k_start(struct ieee80211_hw *hw)
}
/* Setup our intr mask. */
- sc->imask = ATH9K_INT_RX | ATH9K_INT_TX
- | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
- | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
+ ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
+ ATH9K_INT_RXORN | ATH9K_INT_FATAL |
+ ATH9K_INT_GLOBAL;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ah->imask |= ATH9K_INT_RXHP | ATH9K_INT_RXLP;
+ else
+ ah->imask |= ATH9K_INT_RX;
if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
- sc->imask |= ATH9K_INT_GTT;
+ ah->imask |= ATH9K_INT_GTT;
if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
- sc->imask |= ATH9K_INT_CST;
+ ah->imask |= ATH9K_INT_CST;
ath_cache_conf_rate(sc, &hw->conf);
sc->sc_flags &= ~SC_OP_INVALID;
/* Disable BMISS interrupt when we're not associated */
- sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
+ ath9k_hw_set_interrupts(ah, ah->imask);
ieee80211_wake_queues(hw);
@@ -1372,14 +1427,15 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
int ret = 0;
mutex_lock(&sc->mutex);
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) &&
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) &&
sc->nvifs > 0) {
ret = -ENOBUFS;
goto out;
@@ -1414,19 +1470,19 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
sc->nvifs++;
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
ath9k_set_bssid_mask(hw);
if (sc->nvifs > 1)
goto out; /* skip global settings for secondary vif */
if (ic_opmode == NL80211_IFTYPE_AP) {
- ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
+ ath9k_hw_set_tsfadjust(ah, 1);
sc->sc_flags |= SC_OP_TSF_RESET;
}
/* Set the device opmode */
- sc->sc_ah->opmode = ic_opmode;
+ ah->opmode = ic_opmode;
/*
* Enable MIB interrupts when there are hardware phy counters.
@@ -1435,11 +1491,12 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
if ((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_ADHOC) ||
(vif->type == NL80211_IFTYPE_MESH_POINT)) {
- sc->imask |= ATH9K_INT_MIB;
- sc->imask |= ATH9K_INT_TSFOOR;
+ if (ah->config.enable_ani)
+ ah->imask |= ATH9K_INT_MIB;
+ ah->imask |= ATH9K_INT_TSFOOR;
}
- ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC ||
@@ -1495,15 +1552,16 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
void ath9k_enable_ps(struct ath_softc *sc)
{
+ struct ath_hw *ah = sc->sc_ah;
+
sc->ps_enabled = true;
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
- if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
- sc->imask |= ATH9K_INT_TIM_TIMER;
- ath9k_hw_set_interrupts(sc->sc_ah,
- sc->imask);
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
+ ah->imask |= ATH9K_INT_TIM_TIMER;
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
}
- ath9k_hw_setrxabort(sc->sc_ah, 1);
+ ath9k_hw_setrxabort(ah, 1);
}
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1579,10 +1637,10 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
PS_WAIT_FOR_CAB |
PS_WAIT_FOR_PSPOLL_DATA |
PS_WAIT_FOR_TX_ACK);
- if (sc->imask & ATH9K_INT_TIM_TIMER) {
- sc->imask &= ~ATH9K_INT_TIM_TIMER;
+ if (ah->imask & ATH9K_INT_TIM_TIMER) {
+ ah->imask &= ~ATH9K_INT_TIM_TIMER;
ath9k_hw_set_interrupts(sc->sc_ah,
- sc->imask);
+ ah->imask);
}
}
}
@@ -1986,6 +2044,25 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
return ret;
}
+static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (idx != 0)
+ return -ENOENT;
+
+ survey->channel = conf->channel;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = common->ani.noise_floor;
+
+ return 0;
+}
+
static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2057,6 +2134,7 @@ struct ieee80211_ops ath9k_ops = {
.set_tsf = ath9k_set_tsf,
.reset_tsf = ath9k_reset_tsf,
.ampdu_action = ath9k_ampdu_action,
+ .get_survey = ath9k_get_survey,
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
.rfkill_poll = ath9k_rfkill_poll_state,
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 9441c67..257b10b 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -28,6 +28,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
+ { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
{ 0 }
};
@@ -88,6 +89,7 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
}
static const struct ath_bus_ops ath_pci_bus_ops = {
+ .ath_bus_type = ATH_PCI,
.read_cachesize = ath_pci_read_cachesize,
.eeprom_read = ath_pci_eeprom_read,
.bt_coex_prep = ath_pci_bt_coex_prep,
diff --git a/drivers/net/wireless/ath/ath9k/phy.c b/drivers/net/wireless/ath/ath9k/phy.c
deleted file mode 100644
index 2547b3c..0000000
--- a/drivers/net/wireless/ath/ath9k/phy.c
+++ /dev/null
@@ -1,978 +0,0 @@
-/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/**
- * DOC: Programming Atheros 802.11n analog front end radios
- *
- * AR5416 MAC based PCI devices and AR5418 MAC based PCI-Express
- * devices have either an external AR2133 analog front end radio for single
- * band 2.4 GHz communication or an AR5133 analog front end radio for dual
- * band 2.4 GHz / 5 GHz communication.
- *
- * All devices after the AR5416 and AR5418 family starting with the AR9280
- * have their analog front end radios, MAC/BB and host PCIe/USB interface embedded
- * into a single-chip and require less programming.
- *
- * The following single-chips exist with a respective embedded radio:
- *
- * AR9280 - 11n dual-band 2x2 MIMO for PCIe
- * AR9281 - 11n single-band 1x2 MIMO for PCIe
- * AR9285 - 11n single-band 1x1 for PCIe
- * AR9287 - 11n single-band 2x2 MIMO for PCIe
- *
- * AR9220 - 11n dual-band 2x2 MIMO for PCI
- * AR9223 - 11n single-band 2x2 MIMO for PCI
- *
- * AR9271 - 11n single-band 1x1 MIMO for USB
- */
-
-#include <linux/slab.h>
-
-#include "hw.h"
-
-/**
- * ath9k_hw_write_regs - write out the BB RF gain initialization values
- *
- * @ah: atheros hardware structure
- * @freqIndex:
- * @regWrites:
- *
- * Used for both the chipsets with an external AR2133/AR5133 radios and
- * single-chip devices.
- */
-void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites)
-{
- REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
-}
-
-/**
- * ath9k_hw_ar9280_set_channel - set channel on single-chip device
- * @ah: atheros hardware structure
- * @chan:
- *
- * This is the function to change channel on single-chip devices, that is
- * the AR9280 and all later devices.
- *
- * This function takes the channel value in MHz and sets the
- * hardware channel value. Assumes writes have been enabled to the analog bus.
- *
- * Actual Expression,
- *
- * For 2GHz channel,
- * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
- * (freq_ref = 40MHz)
- *
- * For 5GHz channel,
- * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
- * (freq_ref = 40MHz/(24>>amodeRefSel))
- */
-int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- u16 bMode, fracMode, aModeRefSel = 0;
- u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
- struct chan_centers centers;
- u32 refDivA = 24;
-
- ath9k_hw_get_channel_centers(ah, chan, &centers);
- freq = centers.synth_center;
-
- reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
- reg32 &= 0xc0000000;
-
- if (freq < 4800) { /* 2 GHz, fractional mode */
- u32 txctl;
- int regWrites = 0;
-
- bMode = 1;
- fracMode = 1;
- aModeRefSel = 0;
- channelSel = (freq * 0x10000) / 15;
-
- if (AR_SREV_9287_11_OR_LATER(ah)) {
- if (freq == 2484) {
- /* Enable channel spreading for channel 14 */
- REG_WRITE_ARRAY(&ah->iniCckfirJapan2484,
- 1, regWrites);
- } else {
- REG_WRITE_ARRAY(&ah->iniCckfirNormal,
- 1, regWrites);
- }
- } else {
- txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
- if (freq == 2484) {
- /* Enable channel spreading for channel 14 */
- REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
- txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
- } else {
- REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
- txctl &~ AR_PHY_CCK_TX_CTRL_JAPAN);
- }
- }
- } else {
- bMode = 0;
- fracMode = 0;
-
- switch(ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) {
- case 0:
- if ((freq % 20) == 0) {
- aModeRefSel = 3;
- } else if ((freq % 10) == 0) {
- aModeRefSel = 2;
- }
- if (aModeRefSel)
- break;
- case 1:
- default:
- aModeRefSel = 0;
- /*
- * Enable 2G (fractional) mode for channels
- * which are 5MHz spaced.
- */
- fracMode = 1;
- refDivA = 1;
- channelSel = (freq * 0x8000) / 15;
-
- /* RefDivA setting */
- REG_RMW_FIELD(ah, AR_AN_SYNTH9,
- AR_AN_SYNTH9_REFDIVA, refDivA);
-
- }
-
- if (!fracMode) {
- ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
- channelSel = ndiv & 0x1ff;
- channelFrac = (ndiv & 0xfffffe00) * 2;
- channelSel = (channelSel << 17) | channelFrac;
- }
- }
-
- reg32 = reg32 |
- (bMode << 29) |
- (fracMode << 28) | (aModeRefSel << 26) | (channelSel);
-
- REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
-
- ah->curchan = chan;
- ah->curchan_rad_index = -1;
-
- return 0;
-}
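A minimal sketch (not part of this patch) of the arithmetic behind the 2 GHz case above: with freq_ref = 40 MHz the synthesizer formula reduces to freq = 30 * (chansel + chanfrac / 2^17), and since the hardware packs the two fields as (chansel << 17) | chanfrac, the programmed value is simply freq * 2^17 / 30, which is the same (freq * 0x10000) / 15 computed by ath9k_hw_ar9280_set_channel() and by the CHANSEL_2G() macro this patch adds to phy.h. The helper below is hypothetical and exists only to illustrate the conversion.

#include <stdio.h>

/* Packed 2 GHz channel-select word: freq * 2^17 / 30 == freq * 0x10000 / 15 */
static unsigned int chansel_2g(unsigned int freq_mhz)
{
	return (freq_mhz * 0x10000) / 15;
}

int main(void)
{
	/* 2412 MHz (2.4 GHz channel 1) prints 0xa0cccc */
	printf("0x%x\n", chansel_2g(2412));
	return 0;
}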
-
-/**
- * ath9k_hw_9280_spur_mitigate - convert baseband spur frequency
- * @ah: atheros hardware structure
- * @chan:
- *
- * For single-chip solutions. Converts to baseband spur frequency given the
- * input channel frequency and computes the register settings below.
- */
-void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- int bb_spur = AR_NO_SPUR;
- int freq;
- int bin, cur_bin;
- int bb_spur_off, spur_subchannel_sd;
- int spur_freq_sd;
- int spur_delta_phase;
- int denominator;
- int upper, lower, cur_vit_mask;
- int tmp, newVal;
- int i;
- int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
- AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
- };
- int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
- AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
- };
- int inc[4] = { 0, 100, 0, 0 };
- struct chan_centers centers;
-
- int8_t mask_m[123];
- int8_t mask_p[123];
- int8_t mask_amt;
- int tmp_mask;
- int cur_bb_spur;
- bool is2GHz = IS_CHAN_2GHZ(chan);
-
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
- ath9k_hw_get_channel_centers(ah, chan, &centers);
- freq = centers.synth_center;
-
- ah->config.spurmode = SPUR_ENABLE_EEPROM;
- for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
- cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
-
- if (is2GHz)
- cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
- else
- cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
-
- if (AR_NO_SPUR == cur_bb_spur)
- break;
- cur_bb_spur = cur_bb_spur - freq;
-
- if (IS_CHAN_HT40(chan)) {
- if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
- (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
- bb_spur = cur_bb_spur;
- break;
- }
- } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
- (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
- bb_spur = cur_bb_spur;
- break;
- }
- }
-
- if (AR_NO_SPUR == bb_spur) {
- REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
- AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
- return;
- } else {
- REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
- AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
- }
-
- bin = bb_spur * 320;
-
- tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
-
- newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
- AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
- AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
- AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
- REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
-
- newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
- AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
- AR_PHY_SPUR_REG_MASK_RATE_SELECT |
- AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
- SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
- REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
-
- if (IS_CHAN_HT40(chan)) {
- if (bb_spur < 0) {
- spur_subchannel_sd = 1;
- bb_spur_off = bb_spur + 10;
- } else {
- spur_subchannel_sd = 0;
- bb_spur_off = bb_spur - 10;
- }
- } else {
- spur_subchannel_sd = 0;
- bb_spur_off = bb_spur;
- }
-
- if (IS_CHAN_HT40(chan))
- spur_delta_phase =
- ((bb_spur * 262144) /
- 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
- else
- spur_delta_phase =
- ((bb_spur * 524288) /
- 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
-
- denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
- spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
-
- newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
- SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
- SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
- REG_WRITE(ah, AR_PHY_TIMING11, newVal);
-
- newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
- REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
-
- cur_bin = -6000;
- upper = bin + 100;
- lower = bin - 100;
-
- for (i = 0; i < 4; i++) {
- int pilot_mask = 0;
- int chan_mask = 0;
- int bp = 0;
- for (bp = 0; bp < 30; bp++) {
- if ((cur_bin > lower) && (cur_bin < upper)) {
- pilot_mask = pilot_mask | 0x1 << bp;
- chan_mask = chan_mask | 0x1 << bp;
- }
- cur_bin += 100;
- }
- cur_bin += inc[i];
- REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
- REG_WRITE(ah, chan_mask_reg[i], chan_mask);
- }
-
- cur_vit_mask = 6100;
- upper = bin + 120;
- lower = bin - 120;
-
- for (i = 0; i < 123; i++) {
- if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
-
- /* workaround for gcc bug #37014 */
- volatile int tmp_v = abs(cur_vit_mask - bin);
-
- if (tmp_v < 75)
- mask_amt = 1;
- else
- mask_amt = 0;
- if (cur_vit_mask < 0)
- mask_m[abs(cur_vit_mask / 100)] = mask_amt;
- else
- mask_p[cur_vit_mask / 100] = mask_amt;
- }
- cur_vit_mask -= 100;
- }
-
- tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
- | (mask_m[48] << 26) | (mask_m[49] << 24)
- | (mask_m[50] << 22) | (mask_m[51] << 20)
- | (mask_m[52] << 18) | (mask_m[53] << 16)
- | (mask_m[54] << 14) | (mask_m[55] << 12)
- | (mask_m[56] << 10) | (mask_m[57] << 8)
- | (mask_m[58] << 6) | (mask_m[59] << 4)
- | (mask_m[60] << 2) | (mask_m[61] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
- REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
-
- tmp_mask = (mask_m[31] << 28)
- | (mask_m[32] << 26) | (mask_m[33] << 24)
- | (mask_m[34] << 22) | (mask_m[35] << 20)
- | (mask_m[36] << 18) | (mask_m[37] << 16)
- | (mask_m[48] << 14) | (mask_m[39] << 12)
- | (mask_m[40] << 10) | (mask_m[41] << 8)
- | (mask_m[42] << 6) | (mask_m[43] << 4)
- | (mask_m[44] << 2) | (mask_m[45] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
-
- tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
- | (mask_m[18] << 26) | (mask_m[18] << 24)
- | (mask_m[20] << 22) | (mask_m[20] << 20)
- | (mask_m[22] << 18) | (mask_m[22] << 16)
- | (mask_m[24] << 14) | (mask_m[24] << 12)
- | (mask_m[25] << 10) | (mask_m[26] << 8)
- | (mask_m[27] << 6) | (mask_m[28] << 4)
- | (mask_m[29] << 2) | (mask_m[30] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
-
- tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
- | (mask_m[2] << 26) | (mask_m[3] << 24)
- | (mask_m[4] << 22) | (mask_m[5] << 20)
- | (mask_m[6] << 18) | (mask_m[7] << 16)
- | (mask_m[8] << 14) | (mask_m[9] << 12)
- | (mask_m[10] << 10) | (mask_m[11] << 8)
- | (mask_m[12] << 6) | (mask_m[13] << 4)
- | (mask_m[14] << 2) | (mask_m[15] << 0);
- REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
-
- tmp_mask = (mask_p[15] << 28)
- | (mask_p[14] << 26) | (mask_p[13] << 24)
- | (mask_p[12] << 22) | (mask_p[11] << 20)
- | (mask_p[10] << 18) | (mask_p[9] << 16)
- | (mask_p[8] << 14) | (mask_p[7] << 12)
- | (mask_p[6] << 10) | (mask_p[5] << 8)
- | (mask_p[4] << 6) | (mask_p[3] << 4)
- | (mask_p[2] << 2) | (mask_p[1] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
-
- tmp_mask = (mask_p[30] << 28)
- | (mask_p[29] << 26) | (mask_p[28] << 24)
- | (mask_p[27] << 22) | (mask_p[26] << 20)
- | (mask_p[25] << 18) | (mask_p[24] << 16)
- | (mask_p[23] << 14) | (mask_p[22] << 12)
- | (mask_p[21] << 10) | (mask_p[20] << 8)
- | (mask_p[19] << 6) | (mask_p[18] << 4)
- | (mask_p[17] << 2) | (mask_p[16] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
-
- tmp_mask = (mask_p[45] << 28)
- | (mask_p[44] << 26) | (mask_p[43] << 24)
- | (mask_p[42] << 22) | (mask_p[41] << 20)
- | (mask_p[40] << 18) | (mask_p[39] << 16)
- | (mask_p[38] << 14) | (mask_p[37] << 12)
- | (mask_p[36] << 10) | (mask_p[35] << 8)
- | (mask_p[34] << 6) | (mask_p[33] << 4)
- | (mask_p[32] << 2) | (mask_p[31] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
-
- tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
- | (mask_p[59] << 26) | (mask_p[58] << 24)
- | (mask_p[57] << 22) | (mask_p[56] << 20)
- | (mask_p[55] << 18) | (mask_p[54] << 16)
- | (mask_p[53] << 14) | (mask_p[52] << 12)
- | (mask_p[51] << 10) | (mask_p[50] << 8)
- | (mask_p[49] << 6) | (mask_p[48] << 4)
- | (mask_p[47] << 2) | (mask_p[46] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
-}
-
-/* All code below is for non single-chip solutions */
-
-/**
- * ath9k_phy_modify_rx_buffer() - perform analog swizzling of parameters
- * @rfBuf:
- * @reg32:
- * @numBits:
- * @firstBit:
- * @column:
- *
- * Performs analog "swizzling" of parameters into their location.
- * Used on external AR2133/AR5133 radios.
- */
-static void ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
- u32 numBits, u32 firstBit,
- u32 column)
-{
- u32 tmp32, mask, arrayEntry, lastBit;
- int32_t bitPosition, bitsLeft;
-
- tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
- arrayEntry = (firstBit - 1) / 8;
- bitPosition = (firstBit - 1) % 8;
- bitsLeft = numBits;
- while (bitsLeft > 0) {
- lastBit = (bitPosition + bitsLeft > 8) ?
- 8 : bitPosition + bitsLeft;
- mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
- (column * 8);
- rfBuf[arrayEntry] &= ~mask;
- rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
- (column * 8)) & mask;
- bitsLeft -= 8 - bitPosition;
- tmp32 = tmp32 >> (8 - bitPosition);
- bitPosition = 0;
- arrayEntry++;
- }
-}
-
-/*
- * Fix on 2.4 GHz band for orientation sensitivity issue by increasing
- * rf_pwd_icsyndiv.
- *
- * Theoretical Rules:
- * if 2 GHz band
- * if forceBiasAuto
- * if synth_freq < 2412
- * bias = 0
- * else if 2412 <= synth_freq <= 2422
- * bias = 1
- * else // synth_freq > 2422
- * bias = 2
- * else if forceBias > 0
- * bias = forceBias & 7
- * else
- * no change, use value from ini file
- * else
- * no change, invalid band
- *
- * 1st Mod:
- * 2422 also uses value of 2
- * <approved>
- *
- * 2nd Mod:
- * Less than 2412 uses value of 0, 2412 and above uses value of 2
- */
-static void ath9k_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 tmp_reg;
- int reg_writes = 0;
- u32 new_bias = 0;
-
- if (!AR_SREV_5416(ah) || synth_freq >= 3000) {
- return;
- }
-
- BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
-
- if (synth_freq < 2412)
- new_bias = 0;
- else if (synth_freq < 2422)
- new_bias = 1;
- else
- new_bias = 2;
-
- /* pre-reverse this field */
- tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
-
- ath_print(common, ATH_DBG_CONFIG,
- "Force rf_pwd_icsyndiv to %1d on %4d\n",
- new_bias, synth_freq);
-
- /* swizzle rf_pwd_icsyndiv */
- ath9k_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
-
- /* write Bank 6 with new params */
- REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
-}
-
-/**
- * ath9k_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
- * @ah: atheros hardware structure
- * @chan:
- *
- * For the external AR2133/AR5133 radios, takes the MHz channel value and sets
- * the channel value. Assumes writes enabled to analog bus and bank6 register
- * cache in ah->analogBank6Data.
- */
-int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 channelSel = 0;
- u32 bModeSynth = 0;
- u32 aModeRefSel = 0;
- u32 reg32 = 0;
- u16 freq;
- struct chan_centers centers;
-
- ath9k_hw_get_channel_centers(ah, chan, &centers);
- freq = centers.synth_center;
-
- if (freq < 4800) {
- u32 txctl;
-
- if (((freq - 2192) % 5) == 0) {
- channelSel = ((freq - 672) * 2 - 3040) / 10;
- bModeSynth = 0;
- } else if (((freq - 2224) % 5) == 0) {
- channelSel = ((freq - 704) * 2 - 3040) / 10;
- bModeSynth = 1;
- } else {
- ath_print(common, ATH_DBG_FATAL,
- "Invalid channel %u MHz\n", freq);
- return -EINVAL;
- }
-
- channelSel = (channelSel << 2) & 0xff;
- channelSel = ath9k_hw_reverse_bits(channelSel, 8);
-
- txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
- if (freq == 2484) {
-
- REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
- txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
- } else {
- REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
- txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
- }
-
- } else if ((freq % 20) == 0 && freq >= 5120) {
- channelSel =
- ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
- aModeRefSel = ath9k_hw_reverse_bits(1, 2);
- } else if ((freq % 10) == 0) {
- channelSel =
- ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
- if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
- aModeRefSel = ath9k_hw_reverse_bits(2, 2);
- else
- aModeRefSel = ath9k_hw_reverse_bits(1, 2);
- } else if ((freq % 5) == 0) {
- channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
- aModeRefSel = ath9k_hw_reverse_bits(1, 2);
- } else {
- ath_print(common, ATH_DBG_FATAL,
- "Invalid channel %u MHz\n", freq);
- return -EINVAL;
- }
-
- ath9k_hw_force_bias(ah, freq);
-
- reg32 =
- (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
- (1 << 5) | 0x1;
-
- REG_WRITE(ah, AR_PHY(0x37), reg32);
-
- ah->curchan = chan;
- ah->curchan_rad_index = -1;
-
- return 0;
-}
-
-/**
- * ath9k_hw_spur_mitigate - convert baseband spur frequency for external radios
- * @ah: atheros hardware structure
- * @chan:
- *
- * For non single-chip solutions. Converts to baseband spur frequency given the
- * input channel frequency and computes the register settings below.
- */
-void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- int bb_spur = AR_NO_SPUR;
- int bin, cur_bin;
- int spur_freq_sd;
- int spur_delta_phase;
- int denominator;
- int upper, lower, cur_vit_mask;
- int tmp, new;
- int i;
- int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
- AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
- };
- int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
- AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
- };
- int inc[4] = { 0, 100, 0, 0 };
-
- int8_t mask_m[123];
- int8_t mask_p[123];
- int8_t mask_amt;
- int tmp_mask;
- int cur_bb_spur;
- bool is2GHz = IS_CHAN_2GHZ(chan);
-
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
- for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
- cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
- if (AR_NO_SPUR == cur_bb_spur)
- break;
- cur_bb_spur = cur_bb_spur - (chan->channel * 10);
- if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
- bb_spur = cur_bb_spur;
- break;
- }
- }
-
- if (AR_NO_SPUR == bb_spur)
- return;
-
- bin = bb_spur * 32;
-
- tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
- new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
- AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
- AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
- AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
-
- REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
-
- new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
- AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
- AR_PHY_SPUR_REG_MASK_RATE_SELECT |
- AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
- SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
- REG_WRITE(ah, AR_PHY_SPUR_REG, new);
-
- spur_delta_phase = ((bb_spur * 524288) / 100) &
- AR_PHY_TIMING11_SPUR_DELTA_PHASE;
-
- denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
- spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
-
- new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
- SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
- SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
- REG_WRITE(ah, AR_PHY_TIMING11, new);
-
- cur_bin = -6000;
- upper = bin + 100;
- lower = bin - 100;
-
- for (i = 0; i < 4; i++) {
- int pilot_mask = 0;
- int chan_mask = 0;
- int bp = 0;
- for (bp = 0; bp < 30; bp++) {
- if ((cur_bin > lower) && (cur_bin < upper)) {
- pilot_mask = pilot_mask | 0x1 << bp;
- chan_mask = chan_mask | 0x1 << bp;
- }
- cur_bin += 100;
- }
- cur_bin += inc[i];
- REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
- REG_WRITE(ah, chan_mask_reg[i], chan_mask);
- }
-
- cur_vit_mask = 6100;
- upper = bin + 120;
- lower = bin - 120;
-
- for (i = 0; i < 123; i++) {
- if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
-
- /* workaround for gcc bug #37014 */
- volatile int tmp_v = abs(cur_vit_mask - bin);
-
- if (tmp_v < 75)
- mask_amt = 1;
- else
- mask_amt = 0;
- if (cur_vit_mask < 0)
- mask_m[abs(cur_vit_mask / 100)] = mask_amt;
- else
- mask_p[cur_vit_mask / 100] = mask_amt;
- }
- cur_vit_mask -= 100;
- }
-
- tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
- | (mask_m[48] << 26) | (mask_m[49] << 24)
- | (mask_m[50] << 22) | (mask_m[51] << 20)
- | (mask_m[52] << 18) | (mask_m[53] << 16)
- | (mask_m[54] << 14) | (mask_m[55] << 12)
- | (mask_m[56] << 10) | (mask_m[57] << 8)
- | (mask_m[58] << 6) | (mask_m[59] << 4)
- | (mask_m[60] << 2) | (mask_m[61] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
- REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
-
- tmp_mask = (mask_m[31] << 28)
- | (mask_m[32] << 26) | (mask_m[33] << 24)
- | (mask_m[34] << 22) | (mask_m[35] << 20)
- | (mask_m[36] << 18) | (mask_m[37] << 16)
- | (mask_m[48] << 14) | (mask_m[39] << 12)
- | (mask_m[40] << 10) | (mask_m[41] << 8)
- | (mask_m[42] << 6) | (mask_m[43] << 4)
- | (mask_m[44] << 2) | (mask_m[45] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
-
- tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
- | (mask_m[18] << 26) | (mask_m[18] << 24)
- | (mask_m[20] << 22) | (mask_m[20] << 20)
- | (mask_m[22] << 18) | (mask_m[22] << 16)
- | (mask_m[24] << 14) | (mask_m[24] << 12)
- | (mask_m[25] << 10) | (mask_m[26] << 8)
- | (mask_m[27] << 6) | (mask_m[28] << 4)
- | (mask_m[29] << 2) | (mask_m[30] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
-
- tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
- | (mask_m[2] << 26) | (mask_m[3] << 24)
- | (mask_m[4] << 22) | (mask_m[5] << 20)
- | (mask_m[6] << 18) | (mask_m[7] << 16)
- | (mask_m[8] << 14) | (mask_m[9] << 12)
- | (mask_m[10] << 10) | (mask_m[11] << 8)
- | (mask_m[12] << 6) | (mask_m[13] << 4)
- | (mask_m[14] << 2) | (mask_m[15] << 0);
- REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
-
- tmp_mask = (mask_p[15] << 28)
- | (mask_p[14] << 26) | (mask_p[13] << 24)
- | (mask_p[12] << 22) | (mask_p[11] << 20)
- | (mask_p[10] << 18) | (mask_p[9] << 16)
- | (mask_p[8] << 14) | (mask_p[7] << 12)
- | (mask_p[6] << 10) | (mask_p[5] << 8)
- | (mask_p[4] << 6) | (mask_p[3] << 4)
- | (mask_p[2] << 2) | (mask_p[1] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
-
- tmp_mask = (mask_p[30] << 28)
- | (mask_p[29] << 26) | (mask_p[28] << 24)
- | (mask_p[27] << 22) | (mask_p[26] << 20)
- | (mask_p[25] << 18) | (mask_p[24] << 16)
- | (mask_p[23] << 14) | (mask_p[22] << 12)
- | (mask_p[21] << 10) | (mask_p[20] << 8)
- | (mask_p[19] << 6) | (mask_p[18] << 4)
- | (mask_p[17] << 2) | (mask_p[16] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
-
- tmp_mask = (mask_p[45] << 28)
- | (mask_p[44] << 26) | (mask_p[43] << 24)
- | (mask_p[42] << 22) | (mask_p[41] << 20)
- | (mask_p[40] << 18) | (mask_p[39] << 16)
- | (mask_p[38] << 14) | (mask_p[37] << 12)
- | (mask_p[36] << 10) | (mask_p[35] << 8)
- | (mask_p[34] << 6) | (mask_p[33] << 4)
- | (mask_p[32] << 2) | (mask_p[31] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
-
- tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
- | (mask_p[59] << 26) | (mask_p[58] << 24)
- | (mask_p[57] << 22) | (mask_p[56] << 20)
- | (mask_p[55] << 18) | (mask_p[54] << 16)
- | (mask_p[53] << 14) | (mask_p[52] << 12)
- | (mask_p[51] << 10) | (mask_p[50] << 8)
- | (mask_p[49] << 6) | (mask_p[48] << 4)
- | (mask_p[47] << 2) | (mask_p[46] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
-}
-
-/**
- * ath9k_hw_rf_alloc_ext_banks - allocates banks for external radio programming
- * @ah: atheros hardware structure
- *
- * Only required for older devices with external AR2133/AR5133 radios.
- */
-int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
-{
-#define ATH_ALLOC_BANK(bank, size) do { \
- bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
- if (!bank) { \
- ath_print(common, ATH_DBG_FATAL, \
- "Cannot allocate RF banks\n"); \
- return -ENOMEM; \
- } \
- } while (0);
-
- struct ath_common *common = ath9k_hw_common(ah);
-
- BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
-
- ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
- ATH_ALLOC_BANK(ah->addac5416_21,
- ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
- ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
-
- return 0;
-#undef ATH_ALLOC_BANK
-}
-
-
-/**
- * ath9k_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
- * @ah: atheros hardware structure
- * For the external AR2133/AR5133 radio banks.
- */
-void
-ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
-{
-#define ATH_FREE_BANK(bank) do { \
- kfree(bank); \
- bank = NULL; \
- } while (0);
-
- BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
-
- ATH_FREE_BANK(ah->analogBank0Data);
- ATH_FREE_BANK(ah->analogBank1Data);
- ATH_FREE_BANK(ah->analogBank2Data);
- ATH_FREE_BANK(ah->analogBank3Data);
- ATH_FREE_BANK(ah->analogBank6Data);
- ATH_FREE_BANK(ah->analogBank6TPCData);
- ATH_FREE_BANK(ah->analogBank7Data);
- ATH_FREE_BANK(ah->addac5416_21);
- ATH_FREE_BANK(ah->bank6Temp);
-
-#undef ATH_FREE_BANK
-}
-
-/**
- * ath9k_hw_set_rf_regs - programs rf registers based on EEPROM
- * @ah: atheros hardware structure
- * @chan:
- * @modesIndex:
- *
- * Used for the external AR2133/AR5133 radios.
- *
- * Reads the EEPROM header info from the device structure and programs
- * all rf registers. This routine requires access to the analog
- * rf device. This is not required for single-chip devices.
- */
-bool ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
- u16 modesIndex)
-{
- u32 eepMinorRev;
- u32 ob5GHz = 0, db5GHz = 0;
- u32 ob2GHz = 0, db2GHz = 0;
- int regWrites = 0;
-
- /*
- * Software does not need to program bank data
- * for single chip devices, that is AR9280 or anything
- * after that.
- */
- if (AR_SREV_9280_10_OR_LATER(ah))
- return true;
-
- /* Setup rf parameters */
- eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
-
- /* Setup Bank 0 Write */
- RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1);
-
- /* Setup Bank 1 Write */
- RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1);
-
- /* Setup Bank 2 Write */
- RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1);
-
-	/* Setup Bank 3 Write */
- RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3,
- modesIndex);
- {
- int i;
- for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) {
- ah->analogBank6Data[i] =
- INI_RA(&ah->iniBank6TPC, i, modesIndex);
- }
- }
-
- /* Only the 5 or 2 GHz OB/DB need to be set for a mode */
- if (eepMinorRev >= 2) {
- if (IS_CHAN_2GHZ(chan)) {
- ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
- db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2);
- ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
- ob2GHz, 3, 197, 0);
- ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
- db2GHz, 3, 194, 0);
- } else {
- ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5);
- db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5);
- ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
- ob5GHz, 3, 203, 0);
- ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
- db5GHz, 3, 200, 0);
- }
- }
-
-	/* Setup Bank 7 Write */
- RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1);
-
- /* Write Analog registers */
- REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
- regWrites);
- REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
- regWrites);
- REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data,
- regWrites);
- REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data,
- regWrites);
- REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data,
- regWrites);
- REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data,
- regWrites);
-
- return true;
-}
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 0999a49..e724c2c 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -17,589 +17,25 @@
#ifndef PHY_H
#define PHY_H
-/* Common between single chip and non single-chip solutions */
-void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites);
-
-/* Single chip radio settings */
-int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan);
-void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
-
-/* Routines below are for non single-chip solutions */
-int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan);
-void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
-
-int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah);
-void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah);
-
-bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
- struct ath9k_channel *chan,
- u16 modesIndex);
+#define CHANSEL_DIV 15
+#define CHANSEL_2G(_freq) (((_freq) * 0x10000) / CHANSEL_DIV)
+#define CHANSEL_5G(_freq) (((_freq) * 0x8000) / CHANSEL_DIV)
#define AR_PHY_BASE 0x9800
#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2))
-#define AR_PHY_TEST 0x9800
-#define PHY_AGC_CLR 0x10000000
-#define RFSILENT_BB 0x00002000
-
-#define AR_PHY_TURBO 0x9804
-#define AR_PHY_FC_TURBO_MODE 0x00000001
-#define AR_PHY_FC_TURBO_SHORT 0x00000002
-#define AR_PHY_FC_DYN2040_EN 0x00000004
-#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
-#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
-/* For 25 MHz channel spacing -- not used but supported by hw */
-#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
-#define AR_PHY_FC_HT_EN 0x00000040
-#define AR_PHY_FC_SHORT_GI_40 0x00000080
-#define AR_PHY_FC_WALSH 0x00000100
-#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
-#define AR_PHY_FC_ENABLE_DAC_FIFO 0x00000800
-
-#define AR_PHY_TEST2 0x9808
-
-#define AR_PHY_TIMING2 0x9810
-#define AR_PHY_TIMING3 0x9814
-#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
-#define AR_PHY_TIMING3_DSC_MAN_S 17
-#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
-#define AR_PHY_TIMING3_DSC_EXP_S 13
-
-#define AR_PHY_CHIP_ID 0x9818
-#define AR_PHY_CHIP_ID_REV_0 0x80
-#define AR_PHY_CHIP_ID_REV_1 0x81
-#define AR_PHY_CHIP_ID_9160_REV_0 0xb0
-
-#define AR_PHY_ACTIVE 0x981C
-#define AR_PHY_ACTIVE_EN 0x00000001
-#define AR_PHY_ACTIVE_DIS 0x00000000
-
-#define AR_PHY_RF_CTL2 0x9824
-#define AR_PHY_TX_END_DATA_START 0x000000FF
-#define AR_PHY_TX_END_DATA_START_S 0
-#define AR_PHY_TX_END_PA_ON 0x0000FF00
-#define AR_PHY_TX_END_PA_ON_S 8
-
-#define AR_PHY_RF_CTL3 0x9828
-#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
-#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
-
-#define AR_PHY_ADC_CTL 0x982C
-#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
-#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
-#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
-#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
-#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000
-#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
-#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16
-
-#define AR_PHY_ADC_SERIAL_CTL 0x9830
-#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000
-#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001
-
-#define AR_PHY_RF_CTL4 0x9834
-#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000
-#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
-#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000
-#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
-#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00
-#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
-#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
-#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
-
-#define AR_PHY_TSTDAC_CONST 0x983c
-
-#define AR_PHY_SETTLING 0x9844
-#define AR_PHY_SETTLING_SWITCH 0x00003F80
-#define AR_PHY_SETTLING_SWITCH_S 7
-
-#define AR_PHY_RXGAIN 0x9848
-#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
-#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
-#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
-#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
-#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
-#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
-#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
-#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
-
-#define AR_PHY_DESIRED_SZ 0x9850
-#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
-#define AR_PHY_DESIRED_SZ_ADC_S 0
-#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
-#define AR_PHY_DESIRED_SZ_PGA_S 8
-#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
-#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
-
-#define AR_PHY_FIND_SIG 0x9858
-#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
-#define AR_PHY_FIND_SIG_FIRSTEP_S 12
-#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
-#define AR_PHY_FIND_SIG_FIRPWR_S 18
-
-#define AR_PHY_AGC_CTL1 0x985C
-#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
-#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
-#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000
-#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15
-
-#define AR_PHY_AGC_CONTROL 0x9860
-#define AR_PHY_AGC_CONTROL_CAL 0x00000001
-#define AR_PHY_AGC_CONTROL_NF 0x00000002
-#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000
-#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000
-#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000
-
-#define AR_PHY_CCA 0x9864
-#define AR_PHY_MINCCA_PWR 0x0FF80000
-#define AR_PHY_MINCCA_PWR_S 19
-#define AR_PHY_CCA_THRESH62 0x0007F000
-#define AR_PHY_CCA_THRESH62_S 12
-#define AR9280_PHY_MINCCA_PWR 0x1FF00000
-#define AR9280_PHY_MINCCA_PWR_S 20
-#define AR9280_PHY_CCA_THRESH62 0x000FF000
-#define AR9280_PHY_CCA_THRESH62_S 12
-
-#define AR_PHY_SFCORR_LOW 0x986C
-#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
-#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
-#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
-#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
-#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
-#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
-#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
-
-#define AR_PHY_SFCORR 0x9868
-#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
-#define AR_PHY_SFCORR_M2COUNT_THR_S 0
-#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
-#define AR_PHY_SFCORR_M1_THRESH_S 17
-#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
-#define AR_PHY_SFCORR_M2_THRESH_S 24
-
-#define AR_PHY_SLEEP_CTR_CONTROL 0x9870
-#define AR_PHY_SLEEP_CTR_LIMIT 0x9874
-#define AR_PHY_SYNTH_CONTROL 0x9874
-#define AR_PHY_SLEEP_SCAL 0x9878
-
-#define AR_PHY_PLL_CTL 0x987c
-#define AR_PHY_PLL_CTL_40 0xaa
-#define AR_PHY_PLL_CTL_40_5413 0x04
-#define AR_PHY_PLL_CTL_44 0xab
-#define AR_PHY_PLL_CTL_44_2133 0xeb
-#define AR_PHY_PLL_CTL_40_2133 0xea
-
-#define AR_PHY_SPECTRAL_SCAN 0x9910 /* AR9280 spectral scan configuration register */
-#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1
-#define AR_PHY_SPECTRAL_SCAN_ENA 0x00000001 /* Enable spectral scan, reg 68, bit 0 */
-#define AR_PHY_SPECTRAL_SCAN_ENA_S 0 /* Enable spectral scan, reg 68, bit 0 */
-#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 /* Activate spectral scan reg 68, bit 1*/
-#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 /* Activate spectral scan reg 68, bit 1*/
-#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 /* Interval for FFT reports, reg 68, bits 4-7*/
-#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
-#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 /* Interval for FFT reports, reg 68, bits 8-15*/
-#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
-#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/
-#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
-#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/
-#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 /* Short repeat, reg 68, bit 24*/
-
-#define AR_PHY_RX_DELAY 0x9914
-#define AR_PHY_SEARCH_START_DELAY 0x9918
-#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
-
-#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12))
-#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F
-#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
-#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0
-#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
-#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
-#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000
-#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
-#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000
-
-#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
-#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
-#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
-#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
-
-#define AR_PHY_TIMING5 0x9924
-#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
-#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
-
-#define AR_PHY_POWER_TX_RATE1 0x9934
-#define AR_PHY_POWER_TX_RATE2 0x9938
-#define AR_PHY_POWER_TX_RATE_MAX 0x993c
-#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
-
-#define AR_PHY_FRAME_CTL 0x9944
-#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038
-#define AR_PHY_FRAME_CTL_TX_CLIP_S 3
-
-#define AR_PHY_TXPWRADJ 0x994C
-#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0
-#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6
-#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000
-#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18
-
-#define AR_PHY_RADAR_EXT 0x9940
-#define AR_PHY_RADAR_EXT_ENA 0x00004000
-
-#define AR_PHY_RADAR_0 0x9954
-#define AR_PHY_RADAR_0_ENA 0x00000001
-#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
-#define AR_PHY_RADAR_0_INBAND 0x0000003e
-#define AR_PHY_RADAR_0_INBAND_S 1
-#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
-#define AR_PHY_RADAR_0_PRSSI_S 6
-#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
-#define AR_PHY_RADAR_0_HEIGHT_S 12
-#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
-#define AR_PHY_RADAR_0_RRSSI_S 18
-#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
-#define AR_PHY_RADAR_0_FIRPWR_S 24
-
-#define AR_PHY_RADAR_1 0x9958
-#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
-#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
-#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
-#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
-#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
-#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
-#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
-#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
-#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
-#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
-#define AR_PHY_RADAR_1_MAXLEN_S 0
-
-#define AR_PHY_SWITCH_CHAIN_0 0x9960
-#define AR_PHY_SWITCH_COM 0x9964
-
-#define AR_PHY_SIGMA_DELTA 0x996C
-#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
-#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0
-#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8
-#define AR_PHY_SIGMA_DELTA_FILT2_S 3
-#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00
-#define AR_PHY_SIGMA_DELTA_FILT1_S 8
-#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000
-#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13
-
-#define AR_PHY_RESTART 0x9970
-#define AR_PHY_RESTART_DIV_GC 0x001C0000
-#define AR_PHY_RESTART_DIV_GC_S 18
-
-#define AR_PHY_RFBUS_REQ 0x997C
-#define AR_PHY_RFBUS_REQ_EN 0x00000001
-
-#define AR_PHY_TIMING7 0x9980
-#define AR_PHY_TIMING8 0x9984
-#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF
-#define AR_PHY_TIMING8_PILOT_MASK_2_S 0
-
-#define AR_PHY_BIN_MASK2_1 0x9988
-#define AR_PHY_BIN_MASK2_2 0x998c
-#define AR_PHY_BIN_MASK2_3 0x9990
-#define AR_PHY_BIN_MASK2_4 0x9994
-
-#define AR_PHY_BIN_MASK_1 0x9900
-#define AR_PHY_BIN_MASK_2 0x9904
-#define AR_PHY_BIN_MASK_3 0x9908
-
-#define AR_PHY_MASK_CTL 0x990c
-
-#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF
-#define AR_PHY_BIN_MASK2_4_MASK_4_S 0
-
-#define AR_PHY_TIMING9 0x9998
-#define AR_PHY_TIMING10 0x999c
-#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF
-#define AR_PHY_TIMING10_PILOT_MASK_2_S 0
-
-#define AR_PHY_TIMING11 0x99a0
-#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
-#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
-#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
-#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
-#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
-#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
-
-#define AR_PHY_RX_CHAINMASK 0x99a4
-#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12))
-#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
-#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
-
-#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
-#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
-#define AR_PHY_9285_ANT_DIV_CTL 0x01000000
-#define AR_PHY_9285_ANT_DIV_CTL_S 24
-#define AR_PHY_9285_ANT_DIV_ALT_LNACONF 0x06000000
-#define AR_PHY_9285_ANT_DIV_ALT_LNACONF_S 25
-#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF 0x18000000
-#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S 27
-#define AR_PHY_9285_ANT_DIV_ALT_GAINTB 0x20000000
-#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
-#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
-#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
-#define AR_PHY_9285_ANT_DIV_LNA1 2
-#define AR_PHY_9285_ANT_DIV_LNA2 1
-#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
-#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
-#define AR_PHY_9285_ANT_DIV_GAINTB_0 0
-#define AR_PHY_9285_ANT_DIV_GAINTB_1 1
+#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX 0x0007E000
+#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX_S 13
+#define AR_PHY_TX_GAIN_CLC 0x0000001E
+#define AR_PHY_TX_GAIN_CLC_S 1
+#define AR_PHY_TX_GAIN 0x0007F000
+#define AR_PHY_TX_GAIN_S 12
-#define AR_PHY_EXT_CCA0 0x99b8
-#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
-#define AR_PHY_EXT_CCA0_THRESH62_S 0
-
-#define AR_PHY_EXT_CCA 0x99bc
-#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00
-#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
-#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
-#define AR_PHY_EXT_CCA_THRESH62_S 16
-#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
-#define AR_PHY_EXT_MINCCA_PWR_S 23
-#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
-#define AR9280_PHY_EXT_MINCCA_PWR_S 16
-
-#define AR_PHY_SFCORR_EXT 0x99c0
-#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
-#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
-#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
-#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
-#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
-#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
-#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
-#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
-#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
-
-#define AR_PHY_HALFGI 0x99D0
-#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0
-#define AR_PHY_HALFGI_DSC_MAN_S 4
-#define AR_PHY_HALFGI_DSC_EXP 0x0000000F
-#define AR_PHY_HALFGI_DSC_EXP_S 0
-
-#define AR_PHY_CHAN_INFO_MEMORY 0x99DC
-#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
-
-#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
-
-#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC
-#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000
-
-#define AR_PHY_M_SLEEP 0x99f0
-#define AR_PHY_REFCLKDLY 0x99f4
-#define AR_PHY_REFCLKPD 0x99f8
-
-#define AR_PHY_CALMODE 0x99f0
-
-#define AR_PHY_CALMODE_IQ 0x00000000
-#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
-#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
-#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
-
-#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12))
-#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12))
-#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12))
-#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12))
-
-#define AR_PHY_CURRENT_RSSI 0x9c1c
-#define AR9280_PHY_CURRENT_RSSI 0x9c3c
-
-#define AR_PHY_RFBUS_GRANT 0x9C20
-#define AR_PHY_RFBUS_GRANT_EN 0x00000001
-
-#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4
-#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
-
-#define AR_PHY_CHAN_INFO_GAIN 0x9CFC
-
-#define AR_PHY_MODE 0xA200
-#define AR_PHY_MODE_ASYNCFIFO 0x80
-#define AR_PHY_MODE_AR2133 0x08
-#define AR_PHY_MODE_AR5111 0x00
-#define AR_PHY_MODE_AR5112 0x08
-#define AR_PHY_MODE_DYNAMIC 0x04
-#define AR_PHY_MODE_RF2GHZ 0x02
-#define AR_PHY_MODE_RF5GHZ 0x00
-#define AR_PHY_MODE_CCK 0x01
-#define AR_PHY_MODE_OFDM 0x00
-#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100
-
-#define AR_PHY_CCK_TX_CTRL 0xA204
-#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
-#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK 0x0000000C
-#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK_S 2
-
-#define AR_PHY_CCK_DETECT 0xA208
-#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
-#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
-/* [12:6] settling time for antenna switch */
-#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
-#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
-#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
-#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV_S 13
-
-#define AR_PHY_GAIN_2GHZ 0xA20C
-#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000
-#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
-#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00
-#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
-#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F
-#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
-
-#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000
-#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
-#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000
-#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
-#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0
-#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
-#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F
-#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
-
-#define AR_PHY_CCK_RXCTRL4 0xA21C
-#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000
-#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
-
-#define AR_PHY_DAG_CTRLCCK 0xA228
-#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
-#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
-#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
-
-#define AR_PHY_FORCE_CLKEN_CCK 0xA22C
-#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
-
-#define AR_PHY_POWER_TX_RATE3 0xA234
-#define AR_PHY_POWER_TX_RATE4 0xA238
-
-#define AR_PHY_SCRM_SEQ_XR 0xA23C
-#define AR_PHY_HEADER_DETECT_XR 0xA240
-#define AR_PHY_CHIRP_DETECTED_XR 0xA244
-#define AR_PHY_BLUETOOTH 0xA254
-
-#define AR_PHY_TPCRG1 0xA258
-#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
-#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
-
-#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
-#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
-#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
-#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
-#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
-#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
-
-#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
-#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
-
-#define AR_PHY_TX_PWRCTRL4 0xa264
-#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID 0x00000001
-#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID_S 0
-#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT 0x000001FE
-#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT_S 1
-
-#define AR_PHY_TX_PWRCTRL6_0 0xa270
-#define AR_PHY_TX_PWRCTRL6_1 0xb270
-#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE 0x03000000
-#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
-
-#define AR_PHY_TX_PWRCTRL7 0xa274
-#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000
-#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
-
-#define AR_PHY_TX_PWRCTRL9 0xa27C
-#define AR_PHY_TX_DESIRED_SCALE_CCK 0x00007C00
-#define AR_PHY_TX_DESIRED_SCALE_CCK_S 10
-#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
-#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
-
-#define AR_PHY_TX_GAIN_TBL1 0xa300
-#define AR_PHY_TX_GAIN 0x0007F000
-#define AR_PHY_TX_GAIN_S 12
-
-#define AR_PHY_CH0_TX_PWRCTRL11 0xa398
-#define AR_PHY_CH1_TX_PWRCTRL11 0xb398
-#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP 0x0000FC00
-#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP_S 10
-
-#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0
-#define AR_PHY_MASK2_M_31_45 0xa3a4
-#define AR_PHY_MASK2_M_16_30 0xa3a8
-#define AR_PHY_MASK2_M_00_15 0xa3ac
-#define AR_PHY_MASK2_P_15_01 0xa3b8
-#define AR_PHY_MASK2_P_30_16 0xa3bc
-#define AR_PHY_MASK2_P_45_31 0xa3c0
-#define AR_PHY_MASK2_P_61_45 0xa3c4
-#define AR_PHY_SPUR_REG 0x994c
-
-#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18)
-#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
-
-#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
-#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9)
-#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
-#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
-#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F
-#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
-
-#define AR_PHY_PILOT_MASK_01_30 0xa3b0
-#define AR_PHY_PILOT_MASK_31_60 0xa3b4
-
-#define AR_PHY_CHANNEL_MASK_01_30 0x99d4
-#define AR_PHY_CHANNEL_MASK_31_60 0x99d8
-
-#define AR_PHY_ANALOG_SWAP 0xa268
-#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
-
-#define AR_PHY_TPCRG5 0xA26C
-#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
-#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
-
-/* Carrier leak calibration control, do it after AGC calibration */
-#define AR_PHY_CL_CAL_CTL 0xA358
-#define AR_PHY_CL_CAL_ENABLE 0x00000002
-#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
-
-#define AR_PHY_POWER_TX_RATE5 0xA38C
-#define AR_PHY_POWER_TX_RATE6 0xA390
-
-#define AR_PHY_CAL_CHAINMASK 0xA39C
-
-#define AR_PHY_POWER_TX_SUB 0xA3C8
-#define AR_PHY_POWER_TX_RATE7 0xA3CC
-#define AR_PHY_POWER_TX_RATE8 0xA3D0
-#define AR_PHY_POWER_TX_RATE9 0xA3D4
-
-#define AR_PHY_XPA_CFG 0xA3D8
-#define AR_PHY_FORCE_XPA_CFG 0x000000001
-#define AR_PHY_FORCE_XPA_CFG_S 0
-
-#define AR_PHY_CH1_CCA 0xa864
-#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000
-#define AR_PHY_CH1_MINCCA_PWR_S 19
-#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000
-#define AR9280_PHY_CH1_MINCCA_PWR_S 20
-
-#define AR_PHY_CH2_CCA 0xb864
-#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000
-#define AR_PHY_CH2_MINCCA_PWR_S 19
-
-#define AR_PHY_CH1_EXT_CCA 0xa9bc
-#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000
-#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23
-#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
-#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16
-
-#define AR_PHY_CH2_EXT_CCA 0xb9bc
-#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
-#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
+#define AR_PHY_CLC_TBL1 0xa35c
+#define AR_PHY_CLC_I0 0x07ff0000
+#define AR_PHY_CLC_I0_S 16
+#define AR_PHY_CLC_Q0 0x0000ffd0
+#define AR_PHY_CLC_Q0_S 5
#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) do { \
int r; \
@@ -615,6 +51,7 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
#define ANTSWAP_AB 0x0001
#define REDUCE_CHAIN_0 0x00000050
#define REDUCE_CHAIN_1 0x00000051
+#define AR_PHY_CHIP_ID 0x9818
#define RF_BANK_SETUP(_bank, _iniarray, _col) do { \
int i; \
@@ -622,4 +59,7 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
(_bank)[i] = INI_RA((_iniarray), i, _col);; \
} while (0)
+#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
+#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
+
#endif
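The FIELD/FIELD_S pairs in this header follow the usual ath9k convention: FIELD is the bit mask of a register field and FIELD_S is its shift. A minimal sketch of how such a pair is read and rewritten, assuming the MS()/SM() field helpers from the driver's hw.h (the variable names are illustrative only):

	/* read-modify-write of the SPUR_FREQ_SD field of AR_PHY_TIMING11 */
	u32 reg = REG_READ(ah, AR_PHY_TIMING11);
	u32 cur = MS(reg, AR_PHY_TIMING11_SPUR_FREQ_SD);	/* (reg & mask) >> shift */

	reg &= ~AR_PHY_TIMING11_SPUR_FREQ_SD;
	reg |= SM(new_sd, AR_PHY_TIMING11_SPUR_FREQ_SD);	/* (val << shift) & mask */
	REG_WRITE(ah, AR_PHY_TIMING11, reg);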
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 244e1c6..8519452 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -691,6 +691,19 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
rate_table = sc->cur_rate_table;
rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe);
+ /*
+ * Enable LDPC if we're in HT mode and both we and our peer support it.
+ * We don't need to check our own device's capabilities as our own
+ * ht capabilities would have already been intersected with our peer's.
+ */
+ if (conf_is_ht(&sc->hw->conf) &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
+ tx_info->flags |= IEEE80211_TX_CTL_LDPC;
+
+ if (conf_is_ht(&sc->hw->conf) &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
+ tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
+
if (is_probe) {
/* set one try for probe rates. For the
* probes don't enable rts */
@@ -1228,8 +1241,12 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
long_retry = rate->count - 1;
}
- if (!priv_sta || !ieee80211_is_data(fc) ||
- !(tx_info->pad[0] & ATH_TX_INFO_UPDATE_RC))
+ if (!priv_sta || !ieee80211_is_data(fc))
+ return;
+
+ /* This packet was aggregated but doesn't carry status info */
+ if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ !(tx_info->flags & IEEE80211_TX_STAT_AMPDU))
return;
if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 4f6d6fd..3d8d40c 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -110,8 +110,8 @@ struct ath_rate_table {
int rate_cnt;
int mcs_start;
struct {
- int valid;
- int valid_single_stream;
+ u8 valid;
+ u8 valid_single_stream;
u8 phy;
u32 ratekbps;
u32 user_ratekbps;
@@ -172,14 +172,13 @@ struct ath_rate_priv {
#define ATH_TX_INFO_FRAME_TYPE_INTERNAL (1 << 0)
#define ATH_TX_INFO_FRAME_TYPE_PAUSE (1 << 1)
-#define ATH_TX_INFO_UPDATE_RC (1 << 2)
#define ATH_TX_INFO_XRETRY (1 << 3)
#define ATH_TX_INFO_UNDERRUN (1 << 4)
enum ath9k_internal_frame_type {
- ATH9K_NOT_INTERNAL,
- ATH9K_INT_PAUSE,
- ATH9K_INT_UNPAUSE
+ ATH9K_IFT_NOT_INTERNAL,
+ ATH9K_IFT_PAUSE,
+ ATH9K_IFT_UNPAUSE
};
int ath_rate_control_register(void);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 1ca42e5..ba13913 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -15,6 +15,9 @@
*/
#include "ath9k.h"
+#include "ar9003_mac.h"
+
+#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
struct ieee80211_hdr *hdr)
@@ -115,56 +118,244 @@ static void ath_opmode_init(struct ath_softc *sc)
ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
-int ath_rx_init(struct ath_softc *sc, int nbufs)
+static bool ath_rx_edma_buf_link(struct ath_softc *sc,
+ enum ath9k_rx_qtype qtype)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_rx_edma *rx_edma;
struct sk_buff *skb;
struct ath_buf *bf;
- int error = 0;
- spin_lock_init(&sc->rx.rxflushlock);
- sc->sc_flags &= ~SC_OP_RXFLUSH;
- spin_lock_init(&sc->rx.rxbuflock);
+ rx_edma = &sc->rx.rx_edma[qtype];
+ if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
+ return false;
- common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
- min(common->cachelsz, (u16)64));
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ list_del_init(&bf->list);
- ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
- common->cachelsz, common->rx_bufsize);
+ skb = bf->bf_mpdu;
+
+ ATH_RXBUF_RESET(bf);
+ memset(skb->data, 0, ah->caps.rx_status_len);
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ ah->caps.rx_status_len, DMA_TO_DEVICE);
- /* Initialize rx descriptors */
+ SKB_CB_ATHBUF(skb) = bf;
+ ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
+ skb_queue_tail(&rx_edma->rx_fifo, skb);
- error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
- "rx", nbufs, 1);
- if (error != 0) {
- ath_print(common, ATH_DBG_FATAL,
- "failed to allocate rx descriptors: %d\n", error);
- goto err;
+ return true;
+}
+
+static void ath_rx_addbuffer_edma(struct ath_softc *sc,
+ enum ath9k_rx_qtype qtype, int size)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u32 nbuf = 0;
+
+ if (list_empty(&sc->rx.rxbuf)) {
+ ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
+ return;
}
+ while (!list_empty(&sc->rx.rxbuf)) {
+ nbuf++;
+
+ if (!ath_rx_edma_buf_link(sc, qtype))
+ break;
+
+ if (nbuf >= size)
+ break;
+ }
+}
+
+static void ath_rx_remove_buffer(struct ath_softc *sc,
+ enum ath9k_rx_qtype qtype)
+{
+ struct ath_buf *bf;
+ struct ath_rx_edma *rx_edma;
+ struct sk_buff *skb;
+
+ rx_edma = &sc->rx.rx_edma[qtype];
+
+ while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
+ bf = SKB_CB_ATHBUF(skb);
+ BUG_ON(!bf);
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ }
+}
+
+static void ath_rx_edma_cleanup(struct ath_softc *sc)
+{
+ struct ath_buf *bf;
+
+ ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
+ ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
+
list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+ if (bf->bf_mpdu)
+ dev_kfree_skb_any(bf->bf_mpdu);
+ }
+
+ INIT_LIST_HEAD(&sc->rx.rxbuf);
+
+ kfree(sc->rx.rx_bufptr);
+ sc->rx.rx_bufptr = NULL;
+}
+
+static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
+{
+ skb_queue_head_init(&rx_edma->rx_fifo);
+ skb_queue_head_init(&rx_edma->rx_buffers);
+ rx_edma->rx_fifo_hwsize = size;
+}
+
+static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct sk_buff *skb;
+ struct ath_buf *bf;
+ int error = 0, i;
+ u32 size;
+
+
+ common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
+ ah->caps.rx_status_len,
+ min(common->cachelsz, (u16)64));
+
+ ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
+ ah->caps.rx_status_len);
+
+ ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
+ ah->caps.rx_lp_qdepth);
+ ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
+ ah->caps.rx_hp_qdepth);
+
+ size = sizeof(struct ath_buf) * nbufs;
+ bf = kzalloc(size, GFP_KERNEL);
+ if (!bf)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&sc->rx.rxbuf);
+ sc->rx.rx_bufptr = bf;
+
+ for (i = 0; i < nbufs; i++, bf++) {
skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
- if (skb == NULL) {
+ if (!skb) {
error = -ENOMEM;
- goto err;
+ goto rx_init_fail;
}
+ memset(skb->data, 0, common->rx_bufsize);
bf->bf_mpdu = skb;
+
bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
common->rx_bufsize,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(sc->dev,
- bf->bf_buf_addr))) {
- dev_kfree_skb_any(skb);
- bf->bf_mpdu = NULL;
+ bf->bf_buf_addr))) {
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ ath_print(common, ATH_DBG_FATAL,
+ "dma_mapping_error() on RX init\n");
+ error = -ENOMEM;
+ goto rx_init_fail;
+ }
+
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ }
+
+ return 0;
+
+rx_init_fail:
+ ath_rx_edma_cleanup(sc);
+ return error;
+}
+
+static void ath_edma_start_recv(struct ath_softc *sc)
+{
+ spin_lock_bh(&sc->rx.rxbuflock);
+
+ ath9k_hw_rxena(sc->sc_ah);
+
+ ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
+ sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);
+
+ ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
+ sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
+
+ spin_unlock_bh(&sc->rx.rxbuflock);
+
+ ath_opmode_init(sc);
+
+ ath9k_hw_startpcureceive(sc->sc_ah);
+}
+
+static void ath_edma_stop_recv(struct ath_softc *sc)
+{
+ spin_lock_bh(&sc->rx.rxbuflock);
+ ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
+ ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
+ spin_unlock_bh(&sc->rx.rxbuflock);
+}
+
+int ath_rx_init(struct ath_softc *sc, int nbufs)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct sk_buff *skb;
+ struct ath_buf *bf;
+ int error = 0;
+
+ spin_lock_init(&sc->rx.rxflushlock);
+ sc->sc_flags &= ~SC_OP_RXFLUSH;
+ spin_lock_init(&sc->rx.rxbuflock);
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ return ath_rx_edma_init(sc, nbufs);
+ } else {
+ common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
+ min(common->cachelsz, (u16)64));
+
+ ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
+ common->cachelsz, common->rx_bufsize);
+
+ /* Initialize rx descriptors */
+
+ error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
+ "rx", nbufs, 1, 0);
+ if (error != 0) {
ath_print(common, ATH_DBG_FATAL,
- "dma_mapping_error() on RX init\n");
- error = -ENOMEM;
+ "failed to allocate rx descriptors: %d\n",
+ error);
goto err;
}
- bf->bf_dmacontext = bf->bf_buf_addr;
+
+ list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+ skb = ath_rxbuf_alloc(common, common->rx_bufsize,
+ GFP_KERNEL);
+ if (skb == NULL) {
+ error = -ENOMEM;
+ goto err;
+ }
+
+ bf->bf_mpdu = skb;
+ bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+ common->rx_bufsize,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(sc->dev,
+ bf->bf_buf_addr))) {
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ ath_print(common, ATH_DBG_FATAL,
+ "dma_mapping_error() on RX init\n");
+ error = -ENOMEM;
+ goto err;
+ }
+ bf->bf_dmacontext = bf->bf_buf_addr;
+ }
+ sc->rx.rxlink = NULL;
}
- sc->rx.rxlink = NULL;
err:
if (error)
@@ -180,17 +371,23 @@ void ath_rx_cleanup(struct ath_softc *sc)
struct sk_buff *skb;
struct ath_buf *bf;
- list_for_each_entry(bf, &sc->rx.rxbuf, list) {
- skb = bf->bf_mpdu;
- if (skb) {
- dma_unmap_single(sc->dev, bf->bf_buf_addr,
- common->rx_bufsize, DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ ath_rx_edma_cleanup(sc);
+ return;
+ } else {
+ list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+ skb = bf->bf_mpdu;
+ if (skb) {
+ dma_unmap_single(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
}
- }
- if (sc->rx.rxdma.dd_desc_len != 0)
- ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
+ if (sc->rx.rxdma.dd_desc_len != 0)
+ ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
+ }
}
/*
@@ -273,6 +470,11 @@ int ath_startrecv(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_buf *bf, *tbf;
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ ath_edma_start_recv(sc);
+ return 0;
+ }
+
spin_lock_bh(&sc->rx.rxbuflock);
if (list_empty(&sc->rx.rxbuf))
goto start_recv;
@@ -306,7 +508,11 @@ bool ath_stoprecv(struct ath_softc *sc)
ath9k_hw_stoppcurecv(ah);
ath9k_hw_setrxfilter(ah, 0);
stopped = ath9k_hw_stopdmarecv(ah);
- sc->rx.rxlink = NULL;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_edma_stop_recv(sc);
+ else
+ sc->rx.rxlink = NULL;
return stopped;
}
@@ -315,7 +521,9 @@ void ath_flushrecv(struct ath_softc *sc)
{
spin_lock_bh(&sc->rx.rxflushlock);
sc->sc_flags |= SC_OP_RXFLUSH;
- ath_rx_tasklet(sc, 1);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_rx_tasklet(sc, 1, true);
+ ath_rx_tasklet(sc, 1, false);
sc->sc_flags &= ~SC_OP_RXFLUSH;
spin_unlock_bh(&sc->rx.rxflushlock);
}
@@ -469,15 +677,148 @@ static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
ieee80211_rx(hw, skb);
}
-int ath_rx_tasklet(struct ath_softc *sc, int flush)
+static bool ath_edma_get_buffers(struct ath_softc *sc,
+ enum ath9k_rx_qtype qtype)
{
-#define PA2DESC(_sc, _pa) \
- ((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc + \
- ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))
+ struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct sk_buff *skb;
+ struct ath_buf *bf;
+ int ret;
+
+ skb = skb_peek(&rx_edma->rx_fifo);
+ if (!skb)
+ return false;
+
+ bf = SKB_CB_ATHBUF(skb);
+ BUG_ON(!bf);
+
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize, DMA_FROM_DEVICE);
+
+ ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
+ if (ret == -EINPROGRESS)
+ return false;
+
+ __skb_unlink(skb, &rx_edma->rx_fifo);
+ if (ret == -EINVAL) {
+ /* corrupt descriptor, skip this one and the following one */
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_edma_buf_link(sc, qtype);
+ skb = skb_peek(&rx_edma->rx_fifo);
+ if (!skb)
+ return true;
+
+ bf = SKB_CB_ATHBUF(skb);
+ BUG_ON(!bf);
+
+ __skb_unlink(skb, &rx_edma->rx_fifo);
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_edma_buf_link(sc, qtype);
+ return true;
+ }
+ skb_queue_tail(&rx_edma->rx_buffers, skb);
+
+ return true;
+}
+static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
+ struct ath_rx_status *rs,
+ enum ath9k_rx_qtype qtype)
+{
+ struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
+ struct sk_buff *skb;
struct ath_buf *bf;
+
+ while (ath_edma_get_buffers(sc, qtype));
+ skb = __skb_dequeue(&rx_edma->rx_buffers);
+ if (!skb)
+ return NULL;
+
+ bf = SKB_CB_ATHBUF(skb);
+ ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
+ return bf;
+}
+
+static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+ struct ath_rx_status *rs)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath_desc *ds;
- struct ath_rx_status *rx_stats;
+ struct ath_buf *bf;
+ int ret;
+
+ if (list_empty(&sc->rx.rxbuf)) {
+ sc->rx.rxlink = NULL;
+ return NULL;
+ }
+
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ ds = bf->bf_desc;
+
+ /*
+ * Must provide the virtual address of the current
+ * descriptor, the physical address, and the virtual
+ * address of the next descriptor in the h/w chain.
+ * This allows the HAL to look ahead to see if the
+ * hardware is done with a descriptor by checking the
+ * done bit in the following descriptor and the address
+ * of the current descriptor the DMA engine is working
+ * on. All this is necessary because of our use of
+ * a self-linked list to avoid rx overruns.
+ */
+ ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
+ if (ret == -EINPROGRESS) {
+ struct ath_rx_status trs;
+ struct ath_buf *tbf;
+ struct ath_desc *tds;
+
+ memset(&trs, 0, sizeof(trs));
+ if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
+ sc->rx.rxlink = NULL;
+ return NULL;
+ }
+
+ tbf = list_entry(bf->list.next, struct ath_buf, list);
+
+ /*
+ * On some hardware the descriptor status words could
+ * get corrupted, including the done bit. Because of
+ * this, check if the next descriptor's done bit is
+ * set or not.
+ *
+ * If the next descriptor's done bit is set, the current
+ * descriptor has been corrupted. Force s/w to discard
+ * this descriptor and continue...
+ */
+
+ tds = tbf->bf_desc;
+ ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
+ if (ret == -EINPROGRESS)
+ return NULL;
+ }
+
+ if (!bf->bf_mpdu)
+ return bf;
+
+ /*
+ * Synchronize the DMA transfer with CPU before
+ * 1. accessing the frame
+ * 2. requeueing the same buffer to h/w
+ */
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize,
+ DMA_FROM_DEVICE);
+
+ return bf;
+}
+
+
+int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
+{
+ struct ath_buf *bf;
struct sk_buff *skb = NULL, *requeue_skb;
struct ieee80211_rx_status *rxs;
struct ath_hw *ah = sc->sc_ah;
@@ -491,7 +832,17 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
struct ieee80211_hdr *hdr;
int retval;
bool decrypt_error = false;
+ struct ath_rx_status rs;
+ enum ath9k_rx_qtype qtype;
+ bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
+ int dma_type;
+ if (edma)
+ dma_type = DMA_FROM_DEVICE;
+ else
+ dma_type = DMA_BIDIRECTIONAL;
+
+ qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
spin_lock_bh(&sc->rx.rxbuflock);
do {
@@ -499,79 +850,25 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
break;
- if (list_empty(&sc->rx.rxbuf)) {
- sc->rx.rxlink = NULL;
- break;
- }
-
- bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
- ds = bf->bf_desc;
-
- /*
- * Must provide the virtual address of the current
- * descriptor, the physical address, and the virtual
- * address of the next descriptor in the h/w chain.
- * This allows the HAL to look ahead to see if the
- * hardware is done with a descriptor by checking the
- * done bit in the following descriptor and the address
- * of the current descriptor the DMA engine is working
- * on. All this is necessary because of our use of
- * a self-linked list to avoid rx overruns.
- */
- retval = ath9k_hw_rxprocdesc(ah, ds,
- bf->bf_daddr,
- PA2DESC(sc, ds->ds_link),
- 0);
- if (retval == -EINPROGRESS) {
- struct ath_buf *tbf;
- struct ath_desc *tds;
-
- if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
- sc->rx.rxlink = NULL;
- break;
- }
+ memset(&rs, 0, sizeof(rs));
+ if (edma)
+ bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
+ else
+ bf = ath_get_next_rx_buf(sc, &rs);
- tbf = list_entry(bf->list.next, struct ath_buf, list);
-
- /*
- * On some hardware the descriptor status words could
- * get corrupted, including the done bit. Because of
- * this, check if the next descriptor's done bit is
- * set or not.
- *
- * If the next descriptor's done bit is set, the current
- * descriptor has been corrupted. Force s/w to discard
- * this descriptor and continue...
- */
-
- tds = tbf->bf_desc;
- retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
- PA2DESC(sc, tds->ds_link), 0);
- if (retval == -EINPROGRESS) {
- break;
- }
- }
+ if (!bf)
+ break;
skb = bf->bf_mpdu;
if (!skb)
continue;
- /*
- * Synchronize the DMA transfer with CPU before
- * 1. accessing the frame
- * 2. requeueing the same buffer to h/w
- */
- dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
- common->rx_bufsize,
- DMA_FROM_DEVICE);
-
hdr = (struct ieee80211_hdr *) skb->data;
rxs = IEEE80211_SKB_RXCB(skb);
hw = ath_get_virt_hw(sc, hdr);
- rx_stats = &ds->ds_rxstat;
- ath_debug_stat_rx(sc, bf);
+ ath_debug_stat_rx(sc, &rs);
/*
* If we're asked to flush receive queue, directly
@@ -580,7 +877,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
if (flush)
goto requeue;
- retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, rx_stats,
+ retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs,
rxs, &decrypt_error);
if (retval)
goto requeue;
@@ -599,18 +896,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
/* Unmap the frame */
dma_unmap_single(sc->dev, bf->bf_buf_addr,
common->rx_bufsize,
- DMA_FROM_DEVICE);
+ dma_type);
- skb_put(skb, rx_stats->rs_datalen);
+ skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
+ if (ah->caps.rx_status_len)
+ skb_pull(skb, ah->caps.rx_status_len);
- ath9k_cmn_rx_skb_postprocess(common, skb, rx_stats,
+ ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
rxs, decrypt_error);
/* We will now give hardware our shiny new allocated skb */
bf->bf_mpdu = requeue_skb;
bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
common->rx_bufsize,
- DMA_FROM_DEVICE);
+ dma_type);
if (unlikely(dma_mapping_error(sc->dev,
bf->bf_buf_addr))) {
dev_kfree_skb_any(requeue_skb);
@@ -626,9 +925,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
* change the default rx antenna if rx diversity chooses the
* other antenna 3 times in a row.
*/
- if (sc->rx.defant != ds->ds_rxstat.rs_antenna) {
+ if (sc->rx.defant != rs.rs_antenna) {
if (++sc->rx.rxotherant >= 3)
- ath_setdefantenna(sc, rx_stats->rs_antenna);
+ ath_setdefantenna(sc, rs.rs_antenna);
} else {
sc->rx.rxotherant = 0;
}
@@ -641,12 +940,16 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
ath_rx_send_to_mac80211(hw, sc, skb, rxs);
requeue:
- list_move_tail(&bf->list, &sc->rx.rxbuf);
- ath_rx_buf_link(sc, bf);
+ if (edma) {
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_edma_buf_link(sc, qtype);
+ } else {
+ list_move_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_buf_link(sc, bf);
+ }
} while (1);
spin_unlock_bh(&sc->rx.rxbuflock);
return 0;
-#undef PA2DESC
}
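With the extra queue argument, ath_rx_tasklet() is now invoked once per RX FIFO on EDMA chips and once with hp == false on legacy chips, mirroring what ath_flushrecv() above already does. A caller-side sketch, assuming only the capability bit used in this patch:

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_tasklet(sc, 0, true);	/* high-priority RX FIFO */
		ath_rx_tasklet(sc, 0, false);	/* low-priority RX FIFO */
	} else {
		ath_rx_tasklet(sc, 0, false);	/* legacy single descriptor list */
	}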
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 72cfa8e..d4371a4 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -20,7 +20,7 @@
#include "../reg.h"
#define AR_CR 0x0008
-#define AR_CR_RXE 0x00000004
+#define AR_CR_RXE (AR_SREV_9300_20_OR_LATER(ah) ? 0x0000000c : 0x00000004)
#define AR_CR_RXD 0x00000020
#define AR_CR_SWI 0x00000040
@@ -39,6 +39,12 @@
#define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000
#define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17
+#define AR_RXBP_THRESH 0x0018
+#define AR_RXBP_THRESH_HP 0x0000000f
+#define AR_RXBP_THRESH_HP_S 0
+#define AR_RXBP_THRESH_LP 0x00003f00
+#define AR_RXBP_THRESH_LP_S 8
+
#define AR_MIRT 0x0020
#define AR_MIRT_VAL 0x0000ffff
#define AR_MIRT_VAL_S 16
@@ -144,6 +150,9 @@
#define AR_MACMISC_MISC_OBS_BUS_MSB_S 15
#define AR_MACMISC_MISC_OBS_BUS_1 1
+#define AR_DATABUF_SIZE 0x0060
+#define AR_DATABUF_SIZE_MASK 0x00000FFF
+
#define AR_GTXTO 0x0064
#define AR_GTXTO_TIMEOUT_COUNTER 0x0000FFFF
#define AR_GTXTO_TIMEOUT_LIMIT 0xFFFF0000
@@ -160,9 +169,14 @@
#define AR_CST_TIMEOUT_LIMIT 0xFFFF0000
#define AR_CST_TIMEOUT_LIMIT_S 16
+#define AR_HP_RXDP 0x0074
+#define AR_LP_RXDP 0x0078
+
#define AR_ISR 0x0080
#define AR_ISR_RXOK 0x00000001
#define AR_ISR_RXDESC 0x00000002
+#define AR_ISR_HP_RXOK 0x00000001
+#define AR_ISR_LP_RXOK 0x00000002
#define AR_ISR_RXERR 0x00000004
#define AR_ISR_RXNOPKT 0x00000008
#define AR_ISR_RXEOL 0x00000010
@@ -232,7 +246,6 @@
#define AR_ISR_S5_TIMER_THRESH 0x0007FE00
#define AR_ISR_S5_TIM_TIMER 0x00000010
#define AR_ISR_S5_DTIM_TIMER 0x00000020
-#define AR_ISR_S5_S 0x00d8
#define AR_IMR_S5 0x00b8
#define AR_IMR_S5_TIM_TIMER 0x00000010
#define AR_IMR_S5_DTIM_TIMER 0x00000020
@@ -240,7 +253,6 @@
#define AR_ISR_S5_GENTIMER_TRIG_S 0
#define AR_ISR_S5_GENTIMER_THRESH 0xFF800000
#define AR_ISR_S5_GENTIMER_THRESH_S 16
-#define AR_ISR_S5_S 0x00d8
#define AR_IMR_S5_GENTIMER_TRIG 0x0000FF80
#define AR_IMR_S5_GENTIMER_TRIG_S 0
#define AR_IMR_S5_GENTIMER_THRESH 0xFF800000
@@ -249,6 +261,8 @@
#define AR_IMR 0x00a0
#define AR_IMR_RXOK 0x00000001
#define AR_IMR_RXDESC 0x00000002
+#define AR_IMR_RXOK_HP 0x00000001
+#define AR_IMR_RXOK_LP 0x00000002
#define AR_IMR_RXERR 0x00000004
#define AR_IMR_RXNOPKT 0x00000008
#define AR_IMR_RXEOL 0x00000010
@@ -332,10 +346,10 @@
#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
#define AR_ISR_S1_QCU_TXEOL_S 16
-#define AR_ISR_S2_S 0x00cc
-#define AR_ISR_S3_S 0x00d0
-#define AR_ISR_S4_S 0x00d4
-#define AR_ISR_S5_S 0x00d8
+#define AR_ISR_S2_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d0 : 0x00cc)
+#define AR_ISR_S3_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d4 : 0x00d0)
+#define AR_ISR_S4_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d8 : 0x00d4)
+#define AR_ISR_S5_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00dc : 0x00d8)
#define AR_DMADBG_0 0x00e0
#define AR_DMADBG_1 0x00e4
#define AR_DMADBG_2 0x00e8
@@ -369,6 +383,9 @@
#define AR_Q9_TXDP 0x0824
#define AR_QTXDP(_i) (AR_Q0_TXDP + ((_i)<<2))
+#define AR_Q_STATUS_RING_START 0x830
+#define AR_Q_STATUS_RING_END 0x834
+
#define AR_Q_TXE 0x0840
#define AR_Q_TXE_M 0x000003FF
@@ -461,6 +478,10 @@
#define AR_Q_RDYTIMESHDN 0x0a40
#define AR_Q_RDYTIMESHDN_M 0x000003FF
+/* MAC Descriptor CRC check */
+#define AR_Q_DESC_CRCCHK 0xa44
+/* Enable CRC check on the descriptor fetched from host */
+#define AR_Q_DESC_CRCCHK_EN 1
#define AR_NUM_DCU 10
#define AR_DCU_0 0x0001
@@ -679,7 +700,7 @@
#define AR_WA 0x4004
#define AR_WA_D3_L1_DISABLE (1 << 14)
-#define AR9285_WA_DEFAULT 0x004a05cb
+#define AR9285_WA_DEFAULT 0x004a050b
#define AR9280_WA_DEFAULT 0x0040073b
#define AR_WA_DEFAULT 0x0000073f
@@ -759,6 +780,8 @@
#define AR_SREV_VERSION_9271 0x140
#define AR_SREV_REVISION_9271_10 0
#define AR_SREV_REVISION_9271_11 1
+#define AR_SREV_VERSION_9300 0x1c0
+#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
#define AR_SREV_5416(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -844,6 +867,19 @@
#define AR_SREV_9271_11(_ah) \
(AR_SREV_9271(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11))
+#define AR_SREV_9300(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300))
+#define AR_SREV_9300_20(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9300_20))
+#define AR_SREV_9300_20_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9300) || \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9300_20)))
+
+#define AR_SREV_9285E_20(_ah) \
+ (AR_SREV_9285_12_OR_LATER(_ah) && \
+ ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
#define AR_RADIO_SREV_MAJOR 0xf0
#define AR_RAD5133_SREV_MAJOR 0xc0
@@ -940,6 +976,8 @@ enum {
#define AR928X_NUM_GPIO 10
#define AR9285_NUM_GPIO 12
#define AR9287_NUM_GPIO 11
+#define AR9271_NUM_GPIO 16
+#define AR9300_NUM_GPIO 17
#define AR_GPIO_IN_OUT 0x4048
#define AR_GPIO_IN_VAL 0x0FFFC000
@@ -950,19 +988,23 @@ enum {
#define AR9285_GPIO_IN_VAL_S 12
#define AR9287_GPIO_IN_VAL 0x003FF800
#define AR9287_GPIO_IN_VAL_S 11
+#define AR9271_GPIO_IN_VAL 0xFFFF0000
+#define AR9271_GPIO_IN_VAL_S 16
+#define AR9300_GPIO_IN_VAL 0x0001FFFF
+#define AR9300_GPIO_IN_VAL_S 0
-#define AR_GPIO_OE_OUT 0x404c
+#define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)
#define AR_GPIO_OE_OUT_DRV 0x3
#define AR_GPIO_OE_OUT_DRV_NO 0x0
#define AR_GPIO_OE_OUT_DRV_LOW 0x1
#define AR_GPIO_OE_OUT_DRV_HI 0x2
#define AR_GPIO_OE_OUT_DRV_ALL 0x3
-#define AR_GPIO_INTR_POL 0x4050
-#define AR_GPIO_INTR_POL_VAL 0x00001FFF
+#define AR_GPIO_INTR_POL (AR_SREV_9300_20_OR_LATER(ah) ? 0x4058 : 0x4050)
+#define AR_GPIO_INTR_POL_VAL 0x0001FFFF
#define AR_GPIO_INTR_POL_VAL_S 0
-#define AR_GPIO_INPUT_EN_VAL 0x4054
+#define AR_GPIO_INPUT_EN_VAL (AR_SREV_9300_20_OR_LATER(ah) ? 0x405c : 0x4054)
#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF 0x00000004
#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_S 2
#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF 0x00000008
@@ -980,13 +1022,13 @@ enum {
#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
#define AR_GPIO_JTAG_DISABLE 0x00020000
-#define AR_GPIO_INPUT_MUX1 0x4058
+#define AR_GPIO_INPUT_MUX1 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4060 : 0x4058)
#define AR_GPIO_INPUT_MUX1_BT_ACTIVE 0x000f0000
#define AR_GPIO_INPUT_MUX1_BT_ACTIVE_S 16
#define AR_GPIO_INPUT_MUX1_BT_PRIORITY 0x00000f00
#define AR_GPIO_INPUT_MUX1_BT_PRIORITY_S 8
-#define AR_GPIO_INPUT_MUX2 0x405c
+#define AR_GPIO_INPUT_MUX2 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4064 : 0x405c)
#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f
#define AR_GPIO_INPUT_MUX2_CLK25_S 0
#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0
@@ -994,13 +1036,13 @@ enum {
#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00
#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8
-#define AR_GPIO_OUTPUT_MUX1 0x4060
-#define AR_GPIO_OUTPUT_MUX2 0x4064
-#define AR_GPIO_OUTPUT_MUX3 0x4068
+#define AR_GPIO_OUTPUT_MUX1 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4068 : 0x4060)
+#define AR_GPIO_OUTPUT_MUX2 (AR_SREV_9300_20_OR_LATER(ah) ? 0x406c : 0x4064)
+#define AR_GPIO_OUTPUT_MUX3 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4070 : 0x4068)
-#define AR_INPUT_STATE 0x406c
+#define AR_INPUT_STATE (AR_SREV_9300_20_OR_LATER(ah) ? 0x4074 : 0x406c)
-#define AR_EEPROM_STATUS_DATA 0x407c
+#define AR_EEPROM_STATUS_DATA (AR_SREV_9300_20_OR_LATER(ah) ? 0x4084 : 0x407c)
#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff
#define AR_EEPROM_STATUS_DATA_VAL_S 0
#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000
@@ -1008,13 +1050,24 @@ enum {
#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000
#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000
-#define AR_OBS 0x4080
+#define AR_OBS (AR_SREV_9300_20_OR_LATER(ah) ? 0x4088 : 0x4080)
-#define AR_GPIO_PDPU 0x4088
+#define AR_GPIO_PDPU (AR_SREV_9300_20_OR_LATER(ah) ? 0x4090 : 0x4088)
-#define AR_PCIE_MSI 0x4094
+#define AR_PCIE_MSI (AR_SREV_9300_20_OR_LATER(ah) ? 0x40a4 : 0x4094)
#define AR_PCIE_MSI_ENABLE 0x00000001
+#define AR_INTR_PRIO_SYNC_ENABLE 0x40c4
+#define AR_INTR_PRIO_ASYNC_MASK 0x40c8
+#define AR_INTR_PRIO_SYNC_MASK 0x40cc
+#define AR_INTR_PRIO_ASYNC_ENABLE 0x40d4
+
+#define AR_RTC_9300_PLL_DIV 0x000003ff
+#define AR_RTC_9300_PLL_DIV_S 0
+#define AR_RTC_9300_PLL_REFDIV 0x00003C00
+#define AR_RTC_9300_PLL_REFDIV_S 10
+#define AR_RTC_9300_PLL_CLKSEL 0x0000C000
+#define AR_RTC_9300_PLL_CLKSEL_S 14
#define AR_RTC_9160_PLL_DIV 0x000003ff
#define AR_RTC_9160_PLL_DIV_S 0
@@ -1032,6 +1085,16 @@ enum {
#define AR_RTC_RC_COLD_RESET 0x00000004
#define AR_RTC_RC_WARM_RESET 0x00000008
+/* Crystal Control */
+#define AR_RTC_XTAL_CONTROL 0x7004
+
+/* Reg Control 0 */
+#define AR_RTC_REG_CONTROL0 0x7008
+
+/* Reg Control 1 */
+#define AR_RTC_REG_CONTROL1 0x700c
+#define AR_RTC_REG_CONTROL1_SWREG_PROGRAM 0x00000001
+
#define AR_RTC_PLL_CONTROL \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014)
@@ -1062,6 +1125,7 @@ enum {
#define AR_RTC_SLEEP_CLK \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048)
#define AR_RTC_FORCE_DERIVED_CLK 0x2
+#define AR_RTC_FORCE_SWREG_PRD 0x00000004
#define AR_RTC_FORCE_WAKE \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c)
@@ -1178,6 +1242,13 @@ enum {
#define AR9285_AN_RF2G4_DB2_4 0x00003800
#define AR9285_AN_RF2G4_DB2_4_S 11
+#define AR9285_RF2G5 0x7830
+#define AR9285_RF2G5_IC50TX 0xfffff8ff
+#define AR9285_RF2G5_IC50TX_SET 0x00000400
+#define AR9285_RF2G5_IC50TX_XE_SET 0x00000500
+#define AR9285_RF2G5_IC50TX_CLEAR 0x00000700
+#define AR9285_RF2G5_IC50TX_CLEAR_S 8
+
/* AR9271 : 0x7828, 0x782c different setting from AR9285 */
#define AR9271_AN_RF2G3_OB_cck 0x001C0000
#define AR9271_AN_RF2G3_OB_cck_S 18
@@ -1519,7 +1590,7 @@ enum {
#define AR_TSFOOR_THRESHOLD 0x813c
#define AR_TSFOOR_THRESHOLD_VAL 0x0000FFFF
-#define AR_PHY_ERR_EIFS_MASK 8144
+#define AR_PHY_ERR_EIFS_MASK 0x8144
#define AR_PHY_ERR_3 0x8168
#define AR_PHY_ERR_3_COUNT 0x00FFFFFF
@@ -1585,24 +1656,26 @@ enum {
#define AR_FIRST_NDP_TIMER 7
#define AR_NDP2_PERIOD 0x81a0
#define AR_NDP2_TIMER_MODE 0x81c0
-#define AR_NEXT_TBTT_TIMER 0x8200
-#define AR_NEXT_DMA_BEACON_ALERT 0x8204
-#define AR_NEXT_SWBA 0x8208
-#define AR_NEXT_CFP 0x8208
-#define AR_NEXT_HCF 0x820C
-#define AR_NEXT_TIM 0x8210
-#define AR_NEXT_DTIM 0x8214
-#define AR_NEXT_QUIET_TIMER 0x8218
-#define AR_NEXT_NDP_TIMER 0x821C
-
-#define AR_BEACON_PERIOD 0x8220
-#define AR_DMA_BEACON_PERIOD 0x8224
-#define AR_SWBA_PERIOD 0x8228
-#define AR_HCF_PERIOD 0x822C
-#define AR_TIM_PERIOD 0x8230
-#define AR_DTIM_PERIOD 0x8234
-#define AR_QUIET_PERIOD 0x8238
-#define AR_NDP_PERIOD 0x823C
+
+#define AR_GEN_TIMERS(_i) (0x8200 + ((_i) << 2))
+#define AR_NEXT_TBTT_TIMER AR_GEN_TIMERS(0)
+#define AR_NEXT_DMA_BEACON_ALERT AR_GEN_TIMERS(1)
+#define AR_NEXT_SWBA AR_GEN_TIMERS(2)
+#define AR_NEXT_CFP AR_GEN_TIMERS(2)
+#define AR_NEXT_HCF AR_GEN_TIMERS(3)
+#define AR_NEXT_TIM AR_GEN_TIMERS(4)
+#define AR_NEXT_DTIM AR_GEN_TIMERS(5)
+#define AR_NEXT_QUIET_TIMER AR_GEN_TIMERS(6)
+#define AR_NEXT_NDP_TIMER AR_GEN_TIMERS(7)
+
+#define AR_BEACON_PERIOD AR_GEN_TIMERS(8)
+#define AR_DMA_BEACON_PERIOD AR_GEN_TIMERS(9)
+#define AR_SWBA_PERIOD AR_GEN_TIMERS(10)
+#define AR_HCF_PERIOD AR_GEN_TIMERS(11)
+#define AR_TIM_PERIOD AR_GEN_TIMERS(12)
+#define AR_DTIM_PERIOD AR_GEN_TIMERS(13)
+#define AR_QUIET_PERIOD AR_GEN_TIMERS(14)
+#define AR_NDP_PERIOD AR_GEN_TIMERS(15)
#define AR_TIMER_MODE 0x8240
#define AR_TBTT_TIMER_EN 0x00000001
@@ -1716,4 +1789,32 @@ enum {
#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */
#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */
+#define AR_AGG_WEP_ENABLE_FIX 0x00000008 /* This allows the use of AR_AGG_WEP_ENABLE */
+#define AR_ADHOC_MCAST_KEYID_ENABLE 0x00000040 /* This bit enables the Multicast search
+ * based on both MAC Address and Key ID.
+ * If bit is 0, then Multicast search is
+ * based on MAC address only.
+ * For Merlin and above only.
+ */
+#define AR_AGG_WEP_ENABLE 0x00020000 /* This field enables the AGG_WEP
+ * feature; when it is enabled, AGG_WEP
+ * takes charge of the encryption
+ * interface of pcu_txsm.
+ */
+
+#define AR9300_SM_BASE 0xa200
+#define AR9002_PHY_AGC_CONTROL 0x9860
+#define AR9003_PHY_AGC_CONTROL AR9300_SM_BASE + 0xc4
+#define AR_PHY_AGC_CONTROL (AR_SREV_9300_20_OR_LATER(ah) ? AR9003_PHY_AGC_CONTROL : AR9002_PHY_AGC_CONTROL)
+#define AR_PHY_AGC_CONTROL_CAL 0x00000001 /* do internal calibration */
+#define AR_PHY_AGC_CONTROL_NF 0x00000002 /* do noise-floor calibration */
+#define AR_PHY_AGC_CONTROL_OFFSET_CAL 0x00000800 /* allow offset calibration */
+#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000 /* enable noise floor calibration to happen */
+#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000 /* allow tx filter calibration */
+#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000 /* don't update noise floor automatically */
+#define AR_PHY_AGC_CONTROL_EXT_NF_PWR_MEAS 0x00040000 /* extend noise floor power measurement */
+#define AR_PHY_AGC_CONTROL_CLC_SUCCESS 0x00080000 /* carrier leak calibration done */
+#define AR_PHY_AGC_CONTROL_YCOK_MAX 0x000003c0
+#define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6
+
#endif
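Note that several offsets above (AR_GPIO_OE_OUT, AR_GPIO_INTR_POL, AR_PCIE_MSI and friends) are no longer plain constants: they expand to a ternary on AR_SREV_9300_20_OR_LATER(ah), so they resolve per-chip at run time and implicitly require an `ah` pointer in the surrounding scope. A minimal usage sketch (the helper itself is hypothetical):

	static void example_gpio_drive(struct ath_hw *ah)
	{
		/* expands to offset 0x4050 on AR9300 2.0+, 0x404c on older chips */
		u32 oe = REG_READ(ah, AR_GPIO_OE_OUT);

		REG_WRITE(ah, AR_GPIO_OE_OUT, oe | AR_GPIO_OE_OUT_DRV_ALL);
	}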
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index 00c0e21..105ad40 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -220,7 +220,7 @@ static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
memset(&txctl, 0, sizeof(struct ath_tx_control));
txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]];
- txctl.frame_type = ps ? ATH9K_INT_PAUSE : ATH9K_INT_UNPAUSE;
+ txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
goto exit;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
new file mode 100644
index 0000000..e23172c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
+{
+ switch (wmi_cmd) {
+ case WMI_ECHO_CMDID:
+ return "WMI_ECHO_CMDID";
+ case WMI_ACCESS_MEMORY_CMDID:
+ return "WMI_ACCESS_MEMORY_CMDID";
+ case WMI_DISABLE_INTR_CMDID:
+ return "WMI_DISABLE_INTR_CMDID";
+ case WMI_ENABLE_INTR_CMDID:
+ return "WMI_ENABLE_INTR_CMDID";
+ case WMI_RX_LINK_CMDID:
+ return "WMI_RX_LINK_CMDID";
+ case WMI_ATH_INIT_CMDID:
+ return "WMI_ATH_INIT_CMDID";
+ case WMI_ABORT_TXQ_CMDID:
+ return "WMI_ABORT_TXQ_CMDID";
+ case WMI_STOP_TX_DMA_CMDID:
+ return "WMI_STOP_TX_DMA_CMDID";
+ case WMI_STOP_DMA_RECV_CMDID:
+ return "WMI_STOP_DMA_RECV_CMDID";
+ case WMI_ABORT_TX_DMA_CMDID:
+ return "WMI_ABORT_TX_DMA_CMDID";
+ case WMI_DRAIN_TXQ_CMDID:
+ return "WMI_DRAIN_TXQ_CMDID";
+ case WMI_DRAIN_TXQ_ALL_CMDID:
+ return "WMI_DRAIN_TXQ_ALL_CMDID";
+ case WMI_START_RECV_CMDID:
+ return "WMI_START_RECV_CMDID";
+ case WMI_STOP_RECV_CMDID:
+ return "WMI_STOP_RECV_CMDID";
+ case WMI_FLUSH_RECV_CMDID:
+ return "WMI_FLUSH_RECV_CMDID";
+ case WMI_SET_MODE_CMDID:
+ return "WMI_SET_MODE_CMDID";
+ case WMI_RESET_CMDID:
+ return "WMI_RESET_CMDID";
+ case WMI_NODE_CREATE_CMDID:
+ return "WMI_NODE_CREATE_CMDID";
+ case WMI_NODE_REMOVE_CMDID:
+ return "WMI_NODE_REMOVE_CMDID";
+ case WMI_VAP_REMOVE_CMDID:
+ return "WMI_VAP_REMOVE_CMDID";
+ case WMI_VAP_CREATE_CMDID:
+ return "WMI_VAP_CREATE_CMDID";
+ case WMI_BEACON_UPDATE_CMDID:
+ return "WMI_BEACON_UPDATE_CMDID";
+ case WMI_REG_READ_CMDID:
+ return "WMI_REG_READ_CMDID";
+ case WMI_REG_WRITE_CMDID:
+ return "WMI_REG_WRITE_CMDID";
+ case WMI_RC_STATE_CHANGE_CMDID:
+ return "WMI_RC_STATE_CHANGE_CMDID";
+ case WMI_RC_RATE_UPDATE_CMDID:
+ return "WMI_RC_RATE_UPDATE_CMDID";
+ case WMI_DEBUG_INFO_CMDID:
+ return "WMI_DEBUG_INFO_CMDID";
+ case WMI_HOST_ATTACH:
+ return "WMI_HOST_ATTACH";
+ case WMI_TARGET_IC_UPDATE_CMDID:
+ return "WMI_TARGET_IC_UPDATE_CMDID";
+ case WMI_TGT_STATS_CMDID:
+ return "WMI_TGT_STATS_CMDID";
+ case WMI_TX_AGGR_ENABLE_CMDID:
+ return "WMI_TX_AGGR_ENABLE_CMDID";
+ case WMI_TGT_DETACH_CMDID:
+ return "WMI_TGT_DETACH_CMDID";
+ case WMI_TGT_TXQ_ENABLE_CMDID:
+ return "WMI_TGT_TXQ_ENABLE_CMDID";
+ }
+
+ return "Bogus";
+}
+
+struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
+{
+ struct wmi *wmi;
+
+ wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
+ if (!wmi)
+ return NULL;
+
+ wmi->drv_priv = priv;
+ wmi->stopped = false;
+ mutex_init(&wmi->op_mutex);
+ mutex_init(&wmi->multi_write_mutex);
+ init_completion(&wmi->cmd_wait);
+
+ return wmi;
+}
+
+void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
+{
+ struct wmi *wmi = priv->wmi;
+
+ mutex_lock(&wmi->op_mutex);
+ wmi->stopped = true;
+ mutex_unlock(&wmi->op_mutex);
+
+ kfree(priv->wmi);
+}
+
+void ath9k_wmi_tasklet(unsigned long data)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct wmi_cmd_hdr *hdr;
+ struct wmi_swba *swba_hdr;
+ enum wmi_event_id event;
+ struct sk_buff *skb;
+ void *wmi_event;
+ unsigned long flags;
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+ __be32 txrate;
+#endif
+
+ spin_lock_irqsave(&priv->wmi->wmi_lock, flags);
+ skb = priv->wmi->wmi_skb;
+ spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
+
+ hdr = (struct wmi_cmd_hdr *) skb->data;
+ event = be16_to_cpu(hdr->command_id);
+ wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+
+ ath_print(common, ATH_DBG_WMI,
+ "WMI Event: 0x%x\n", event);
+
+ switch (event) {
+ case WMI_TGT_RDY_EVENTID:
+ break;
+ case WMI_SWBA_EVENTID:
+ swba_hdr = (struct wmi_swba *) wmi_event;
+ ath9k_htc_swba(priv, swba_hdr->beacon_pending);
+ break;
+ case WMI_FATAL_EVENTID:
+ break;
+ case WMI_TXTO_EVENTID:
+ break;
+ case WMI_BMISS_EVENTID:
+ break;
+ case WMI_WLAN_TXCOMP_EVENTID:
+ break;
+ case WMI_DELBA_EVENTID:
+ break;
+ case WMI_TXRATE_EVENTID:
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+ txrate = ((struct wmi_event_txrate *)wmi_event)->txrate;
+ priv->debug.txrate = be32_to_cpu(txrate);
+#endif
+ break;
+ default:
+ break;
+ }
+
+ kfree_skb(skb);
+}
+
+static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
+{
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+
+ if (wmi->cmd_rsp_buf != NULL && wmi->cmd_rsp_len != 0)
+ memcpy(wmi->cmd_rsp_buf, skb->data, wmi->cmd_rsp_len);
+
+ complete(&wmi->cmd_wait);
+}
+
+static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
+ enum htc_endpoint_id epid)
+{
+ struct wmi *wmi = (struct wmi *) priv;
+ struct wmi_cmd_hdr *hdr;
+ u16 cmd_id;
+
+ if (unlikely(wmi->stopped))
+ goto free_skb;
+
+ hdr = (struct wmi_cmd_hdr *) skb->data;
+ cmd_id = be16_to_cpu(hdr->command_id);
+
+ if (cmd_id & 0x1000) {
+ spin_lock(&wmi->wmi_lock);
+ wmi->wmi_skb = skb;
+ spin_unlock(&wmi->wmi_lock);
+ tasklet_schedule(&wmi->drv_priv->wmi_tasklet);
+ return;
+ }
+
+ /* Check if there has been a timeout. */
+ spin_lock(&wmi->wmi_lock);
+ if (cmd_id != wmi->last_cmd_id) {
+ spin_unlock(&wmi->wmi_lock);
+ goto free_skb;
+ }
+ spin_unlock(&wmi->wmi_lock);
+
+ /* WMI command response */
+ ath9k_wmi_rsp_callback(wmi, skb);
+
+free_skb:
+ kfree_skb(skb);
+}
+
+static void ath9k_wmi_ctrl_tx(void *priv, struct sk_buff *skb,
+ enum htc_endpoint_id epid, bool txok)
+{
+ kfree_skb(skb);
+}
+
+int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
+ enum htc_endpoint_id *wmi_ctrl_epid)
+{
+ struct htc_service_connreq connect;
+ int ret;
+
+ wmi->htc = htc;
+
+ memset(&connect, 0, sizeof(connect));
+
+ connect.ep_callbacks.priv = wmi;
+ connect.ep_callbacks.tx = ath9k_wmi_ctrl_tx;
+ connect.ep_callbacks.rx = ath9k_wmi_ctrl_rx;
+ connect.service_id = WMI_CONTROL_SVC;
+
+ ret = htc_connect_service(htc, &connect, &wmi->ctrl_epid);
+ if (ret)
+ return ret;
+
+ *wmi_ctrl_epid = wmi->ctrl_epid;
+
+ return 0;
+}
+
+static int ath9k_wmi_cmd_issue(struct wmi *wmi,
+ struct sk_buff *skb,
+ enum wmi_cmd_id cmd, u16 len)
+{
+ struct wmi_cmd_hdr *hdr;
+
+ hdr = (struct wmi_cmd_hdr *) skb_push(skb, sizeof(struct wmi_cmd_hdr));
+ hdr->command_id = cpu_to_be16(cmd);
+ hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);
+
+ return htc_send(wmi->htc, skb, wmi->ctrl_epid, NULL);
+}
+
+int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ u8 *cmd_buf, u32 cmd_len,
+ u8 *rsp_buf, u32 rsp_len,
+ u32 timeout)
+{
+ struct ath_hw *ah = wmi->drv_priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u16 headroom = sizeof(struct htc_frame_hdr) +
+ sizeof(struct wmi_cmd_hdr);
+ struct sk_buff *skb;
+ u8 *data;
+ int time_left, ret = 0;
+ unsigned long flags;
+
+ if (!wmi)
+ return -EINVAL;
+
+ if (wmi->drv_priv->op_flags & OP_UNPLUGGED)
+ return 0;
+
+ skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, headroom);
+
+ if (cmd_len != 0 && cmd_buf != NULL) {
+ data = (u8 *) skb_put(skb, cmd_len);
+ memcpy(data, cmd_buf, cmd_len);
+ }
+
+ mutex_lock(&wmi->op_mutex);
+
+ /* check if wmi stopped flag is set */
+ if (unlikely(wmi->stopped)) {
+ ret = -EPROTO;
+ goto out;
+ }
+
+ /* record the rsp buffer and length */
+ wmi->cmd_rsp_buf = rsp_buf;
+ wmi->cmd_rsp_len = rsp_len;
+
+ spin_lock_irqsave(&wmi->wmi_lock, flags);
+ wmi->last_cmd_id = cmd_id;
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+
+ ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len);
+ if (ret)
+ goto out;
+
+ time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
+ if (!time_left) {
+ ath_print(common, ATH_DBG_WMI,
+ "Timeout waiting for WMI command: %s\n",
+ wmi_cmd_to_name(cmd_id));
+ mutex_unlock(&wmi->op_mutex);
+ return -ETIMEDOUT;
+ }
+
+ mutex_unlock(&wmi->op_mutex);
+
+ return 0;
+
+out:
+ ath_print(common, ATH_DBG_WMI,
+ "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
+ mutex_unlock(&wmi->op_mutex);
+ kfree_skb(skb);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
new file mode 100644
index 0000000..765db5f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WMI_H
+#define WMI_H
+
+
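+/* Multi-byte fields shared with the target firmware are big-endian
+ * (__be16/__be32) on the wire. */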
+struct wmi_event_txrate {
+ __be32 txrate;
+ struct {
+ u8 rssi_thresh;
+ u8 per;
+ } rc_stats;
+} __packed;
+
+struct wmi_cmd_hdr {
+ __be16 command_id;
+ __be16 seq_no;
+} __packed;
+
+struct wmi_swba {
+ u8 beacon_pending;
+} __packed;
+
+enum wmi_cmd_id {
+ WMI_ECHO_CMDID = 0x0001,
+ WMI_ACCESS_MEMORY_CMDID,
+
+ /* Commands to Target */
+ WMI_DISABLE_INTR_CMDID,
+ WMI_ENABLE_INTR_CMDID,
+ WMI_RX_LINK_CMDID,
+ WMI_ATH_INIT_CMDID,
+ WMI_ABORT_TXQ_CMDID,
+ WMI_STOP_TX_DMA_CMDID,
+ WMI_STOP_DMA_RECV_CMDID,
+ WMI_ABORT_TX_DMA_CMDID,
+ WMI_DRAIN_TXQ_CMDID,
+ WMI_DRAIN_TXQ_ALL_CMDID,
+ WMI_START_RECV_CMDID,
+ WMI_STOP_RECV_CMDID,
+ WMI_FLUSH_RECV_CMDID,
+ WMI_SET_MODE_CMDID,
+ WMI_RESET_CMDID,
+ WMI_NODE_CREATE_CMDID,
+ WMI_NODE_REMOVE_CMDID,
+ WMI_VAP_REMOVE_CMDID,
+ WMI_VAP_CREATE_CMDID,
+ WMI_BEACON_UPDATE_CMDID,
+ WMI_REG_READ_CMDID,
+ WMI_REG_WRITE_CMDID,
+ WMI_RC_STATE_CHANGE_CMDID,
+ WMI_RC_RATE_UPDATE_CMDID,
+ WMI_DEBUG_INFO_CMDID,
+ WMI_HOST_ATTACH,
+ WMI_TARGET_IC_UPDATE_CMDID,
+ WMI_TGT_STATS_CMDID,
+ WMI_TX_AGGR_ENABLE_CMDID,
+ WMI_TGT_DETACH_CMDID,
+ WMI_TGT_TXQ_ENABLE_CMDID,
+};
+
+enum wmi_event_id {
+ WMI_TGT_RDY_EVENTID = 0x1001,
+ WMI_SWBA_EVENTID,
+ WMI_FATAL_EVENTID,
+ WMI_TXTO_EVENTID,
+ WMI_BMISS_EVENTID,
+ WMI_WLAN_TXCOMP_EVENTID,
+ WMI_DELBA_EVENTID,
+ WMI_TXRATE_EVENTID,
+};
+
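+/* Size of the multi_write batch buffer below (register writes queued up
+ * before being flushed to the target). */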
+#define MAX_CMD_NUMBER 62
+
+struct register_write {
+ __be32 reg;
+ __be32 val;
+};
+
+struct wmi {
+ struct ath9k_htc_priv *drv_priv;
+ struct htc_target *htc;
+ enum htc_endpoint_id ctrl_epid;
+ struct mutex op_mutex;
+ struct completion cmd_wait;
+ enum wmi_cmd_id last_cmd_id;
+ u16 tx_seq_id;
+ u8 *cmd_rsp_buf;
+ u32 cmd_rsp_len;
+ bool stopped;
+
+ struct sk_buff *wmi_skb;
+ spinlock_t wmi_lock;
+
+ atomic_t mwrite_cnt;
+ struct register_write multi_write[MAX_CMD_NUMBER];
+ u32 multi_write_idx;
+ struct mutex multi_write_mutex;
+};
+
+struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
+void ath9k_deinit_wmi(struct ath9k_htc_priv *priv);
+int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
+ enum htc_endpoint_id *wmi_ctrl_epid);
+int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ u8 *cmd_buf, u32 cmd_len,
+ u8 *rsp_buf, u32 rsp_len,
+ u32 timeout);
+void ath9k_wmi_tasklet(unsigned long data);
+
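+/*
+ * Both macros assume the caller has 'priv', an 'int ret' and a response
+ * variable named 'cmd_rsp' in scope. For illustration only, a call site
+ * looks roughly like:
+ *
+ *	int ret;
+ *	u32 cmd_rsp;
+ *
+ *	WMI_CMD(WMI_START_RECV_CMDID);
+ *	if (ret)
+ *		return ret;
+ */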
+#define WMI_CMD(_wmi_cmd) \
+ do { \
+ ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, NULL, 0, \
+ (u8 *) &cmd_rsp, \
+ sizeof(cmd_rsp), HZ*2); \
+ } while (0)
+
+#define WMI_CMD_BUF(_wmi_cmd, _buf) \
+ do { \
+ ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, \
+ (u8 *) _buf, sizeof(*_buf), \
+ (u8 *) &cmd_rsp, sizeof(cmd_rsp), HZ*2); \
+ } while (0)
+
+#endif /* WMI_H */
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 294b486..3db1917 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -15,10 +15,11 @@
*/
#include "ath9k.h"
+#include "ar9003_mac.h"
#define BITS_PER_BYTE 8
#define OFDM_PLCP_BITS 22
-#define HT_RC_2_MCS(_rc) ((_rc) & 0x0f)
+#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
#define L_STF 8
#define L_LTF 8
@@ -33,7 +34,7 @@
#define OFDM_SIFS_TIME 16
-static u32 bits_per_symbol[][2] = {
+static u16 bits_per_symbol[][2] = {
/* 20MHz 40MHz */
{ 26, 54 }, /* 0: BPSK */
{ 52, 108 }, /* 1: QPSK 1/2 */
@@ -43,14 +44,6 @@ static u32 bits_per_symbol[][2] = {
{ 208, 432 }, /* 5: 64-QAM 2/3 */
{ 234, 486 }, /* 6: 64-QAM 3/4 */
{ 260, 540 }, /* 7: 64-QAM 5/6 */
- { 52, 108 }, /* 8: BPSK */
- { 104, 216 }, /* 9: QPSK 1/2 */
- { 156, 324 }, /* 10: QPSK 3/4 */
- { 208, 432 }, /* 11: 16-QAM 1/2 */
- { 312, 648 }, /* 12: 16-QAM 3/4 */
- { 416, 864 }, /* 13: 64-QAM 2/3 */
- { 468, 972 }, /* 14: 64-QAM 3/4 */
- { 520, 1080 }, /* 15: 64-QAM 5/6 */
};
#define IS_HT_RATE(_rate) ((_rate) & 0x80)
@@ -59,40 +52,50 @@ static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid,
struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_txq *txq,
- struct list_head *bf_q,
- int txok, int sendbar);
+ struct ath_txq *txq, struct list_head *bf_q,
+ struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
- int txok);
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
+ struct ath_tx_status *ts, int txok);
+static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
int nbad, int txok, bool update_rc);
enum {
- MCS_DEFAULT,
+ MCS_HT20,
+ MCS_HT20_SGI,
MCS_HT40,
MCS_HT40_SGI,
};
-static int ath_max_4ms_framelen[3][16] = {
- [MCS_DEFAULT] = {
- 3216, 6434, 9650, 12868, 19304, 25740, 28956, 32180,
- 6430, 12860, 19300, 25736, 38600, 51472, 57890, 64320,
+static int ath_max_4ms_framelen[4][32] = {
+ [MCS_HT20] = {
+ 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
+ 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
+ 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
+ 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
+ },
+ [MCS_HT20_SGI] = {
+ 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
+ 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
+ 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
+ 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
},
[MCS_HT40] = {
- 6684, 13368, 20052, 26738, 40104, 53476, 60156, 66840,
- 13360, 26720, 40080, 53440, 80160, 106880, 120240, 133600,
+ 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
+ 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
+ 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
+ 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
},
[MCS_HT40_SGI] = {
- /* TODO: Only MCS 7 and 15 updated, recalculate the rest */
- 6684, 13368, 20052, 26738, 40104, 53476, 60156, 74200,
- 13360, 26720, 40080, 53440, 80160, 106880, 120240, 148400,
+ 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
+ 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
+ 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
+ 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
}
};
-
/*********************/
/* Aggregation logic */
/*********************/
@@ -223,6 +226,9 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
{
struct ath_buf *bf;
struct list_head bf_head;
+ struct ath_tx_status ts;
+
+ memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
for (;;) {
@@ -236,7 +242,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_update_baw(sc, tid, bf->bf_seqno);
spin_unlock(&txq->axq_lock);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
spin_lock(&txq->axq_lock);
}
@@ -259,25 +265,46 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}
-static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
+static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
- struct ath_buf *tbf;
+ struct ath_buf *bf = NULL;
spin_lock_bh(&sc->tx.txbuflock);
- if (WARN_ON(list_empty(&sc->tx.txbuf))) {
+
+ if (unlikely(list_empty(&sc->tx.txbuf))) {
spin_unlock_bh(&sc->tx.txbuflock);
return NULL;
}
- tbf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
- list_del(&tbf->list);
+
+ bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
+ list_del(&bf->list);
+
spin_unlock_bh(&sc->tx.txbuflock);
+ return bf;
+}
+
+static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
+{
+ spin_lock_bh(&sc->tx.txbuflock);
+ list_add_tail(&bf->list, &sc->tx.txbuf);
+ spin_unlock_bh(&sc->tx.txbuflock);
+}
+
+static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
+{
+ struct ath_buf *tbf;
+
+ tbf = ath_tx_get_buffer(sc);
+ if (WARN_ON(!tbf))
+ return NULL;
+
ATH_TXBUF_RESET(tbf);
tbf->aphy = bf->aphy;
tbf->bf_mpdu = bf->bf_mpdu;
tbf->bf_buf_addr = bf->bf_buf_addr;
- *(tbf->bf_desc) = *(bf->bf_desc);
+ memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
tbf->bf_state = bf->bf_state;
tbf->bf_dmacontext = bf->bf_dmacontext;
@@ -286,7 +313,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf, struct list_head *bf_q,
- int txok)
+ struct ath_tx_status *ts, int txok)
{
struct ath_node *an = NULL;
struct sk_buff *skb;
@@ -296,7 +323,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ieee80211_tx_info *tx_info;
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
- struct ath_desc *ds = bf_last->bf_desc;
struct list_head bf_head, bf_pending;
u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
u32 ba[WME_BA_BMP_SIZE >> 5];
@@ -325,10 +351,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
memset(ba, 0, WME_BA_BMP_SIZE >> 3);
if (isaggr && txok) {
- if (ATH_DS_TX_BA(ds)) {
- seq_st = ATH_DS_BA_SEQ(ds);
- memcpy(ba, ATH_DS_BA_BITMAP(ds),
- WME_BA_BMP_SIZE >> 3);
+ if (ts->ts_flags & ATH9K_TX_BA) {
+ seq_st = ts->ts_seqnum;
+ memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
} else {
/*
* AR5416 can become deaf/mute when BA
@@ -345,7 +370,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&bf_pending);
INIT_LIST_HEAD(&bf_head);
- nbad = ath_tx_num_badfrms(sc, bf, txok);
+ nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
while (bf) {
txfail = txpending = 0;
bf_next = bf->bf_next;
@@ -359,7 +384,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
acked_cnt++;
} else {
if (!(tid->state & AGGR_CLEANUP) &&
- ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
+ !bf_last->bf_tx_aborted) {
if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
ath_tx_set_retry(sc, txq, bf);
txpending = 1;
@@ -378,7 +403,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
}
- if (bf_next == NULL) {
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+ bf_next == NULL) {
/*
* Make sure the last desc is reclaimed if it
* is not a holding desc.
@@ -402,45 +428,53 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
spin_unlock_bh(&txq->axq_lock);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
- ath_tx_rc_status(bf, ds, nbad, txok, true);
+ ath_tx_rc_status(bf, ts, nbad, txok, true);
rc_update = false;
} else {
- ath_tx_rc_status(bf, ds, nbad, txok, false);
+ ath_tx_rc_status(bf, ts, nbad, txok, false);
}
- ath_tx_complete_buf(sc, bf, txq, &bf_head, !txfail, sendbar);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
+ !txfail, sendbar);
} else {
/* retry the un-acked ones */
- if (bf->bf_next == NULL && bf_last->bf_stale) {
- struct ath_buf *tbf;
-
- tbf = ath_clone_txbuf(sc, bf_last);
- /*
- * Update tx baw and complete the frame with
- * failed status if we run out of tx buf
- */
- if (!tbf) {
- spin_lock_bh(&txq->axq_lock);
- ath_tx_update_baw(sc, tid,
- bf->bf_seqno);
- spin_unlock_bh(&txq->axq_lock);
-
- bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, ds, nbad,
- 0, false);
- ath_tx_complete_buf(sc, bf, txq,
- &bf_head, 0, 0);
- break;
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
+ if (bf->bf_next == NULL && bf_last->bf_stale) {
+ struct ath_buf *tbf;
+
+ tbf = ath_clone_txbuf(sc, bf_last);
+ /*
+ * Update tx baw and complete the
+ * frame with failed status if we
+ * run out of tx buf.
+ */
+ if (!tbf) {
+ spin_lock_bh(&txq->axq_lock);
+ ath_tx_update_baw(sc, tid,
+ bf->bf_seqno);
+ spin_unlock_bh(&txq->axq_lock);
+
+ bf->bf_state.bf_type |=
+ BUF_XRETRY;
+ ath_tx_rc_status(bf, ts, nbad,
+ 0, false);
+ ath_tx_complete_buf(sc, bf, txq,
+ &bf_head,
+ ts, 0, 0);
+ break;
+ }
+
+ ath9k_hw_cleartxdesc(sc->sc_ah,
+ tbf->bf_desc);
+ list_add_tail(&tbf->list, &bf_head);
+ } else {
+ /*
+ * Clear descriptor status words for
+ * software retry
+ */
+ ath9k_hw_cleartxdesc(sc->sc_ah,
+ bf->bf_desc);
}
-
- ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc);
- list_add_tail(&tbf->list, &bf_head);
- } else {
- /*
- * Clear descriptor status words for
- * software retry
- */
- ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc);
}
/*
@@ -508,12 +542,13 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
break;
}
- if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
- modeidx = MCS_HT40_SGI;
- else if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
modeidx = MCS_HT40;
else
- modeidx = MCS_DEFAULT;
+ modeidx = MCS_HT20;
+
+ if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
+ modeidx++;
frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
max_4ms_framelen = min(max_4ms_framelen, frmlen);
@@ -558,7 +593,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
u32 nsymbits, nsymbols;
u16 minlen;
u8 flags, rix;
- int width, half_gi, ndelim, mindelim;
+ int width, streams, half_gi, ndelim, mindelim;
/* Select standard number of delimiters based on frame length alone */
ndelim = ATH_AGGR_GET_NDELIM(frmlen);
@@ -598,7 +633,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
if (nsymbols == 0)
nsymbols = 1;
- nsymbits = bits_per_symbol[rix][width];
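+ /* The table now covers a single spatial stream (MCS 0-7); scale by the
+ stream count for multi-stream rates. */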
+ streams = HT_RC_2_STREAMS(rix);
+ nsymbits = bits_per_symbol[rix % 8][width] * streams;
minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
if (frmlen < minlen) {
@@ -664,7 +700,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
bpad = PADBYTES(al_delta) + (ndelim << 2);
bf->bf_next = NULL;
- bf->bf_desc->ds_link = 0;
+ ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
/* link buffers of this frame to the aggregate */
ath_tx_addto_baw(sc, tid, bf);
@@ -672,7 +708,8 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
list_move_tail(&bf->list, bf_q);
if (bf_prev) {
bf_prev->bf_next = bf;
- bf_prev->bf_desc->ds_link = bf->bf_daddr;
+ ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
+ bf->bf_daddr);
}
bf_prev = bf;
@@ -752,8 +789,11 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
struct ath_node *an = (struct ath_node *)sta->drv_priv;
struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
+ struct ath_tx_status ts;
struct ath_buf *bf;
struct list_head bf_head;
+
+ memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
if (txtid->state & AGGR_CLEANUP)
@@ -780,7 +820,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
}
list_move_tail(&bf->list, &bf_head);
ath_tx_update_baw(sc, txtid, bf->bf_seqno);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
}
spin_unlock_bh(&txq->axq_lock);
@@ -849,7 +889,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_tx_queue_info qi;
- int qnum;
+ int qnum, i;
memset(&qi, 0, sizeof(qi));
qi.tqi_subtype = subtype;
@@ -873,11 +913,16 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
* The UAPSD queue is an exception, since we take a desc-
* based intr on the EOSP frames.
*/
- if (qtype == ATH9K_TX_QUEUE_UAPSD)
- qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
- else
- qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
- TXQ_FLAG_TXDESCINT_ENABLE;
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
+ TXQ_FLAG_TXERRINT_ENABLE;
+ } else {
+ if (qtype == ATH9K_TX_QUEUE_UAPSD)
+ qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
+ else
+ qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
+ TXQ_FLAG_TXDESCINT_ENABLE;
+ }
qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
if (qnum == -1) {
/*
@@ -904,6 +949,11 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
txq->axq_depth = 0;
txq->axq_tx_inprogress = false;
sc->tx.txqsetup |= 1<<qnum;
+
+ txq->txq_headidx = txq->txq_tailidx = 0;
+ for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
+ INIT_LIST_HEAD(&txq->txq_fifo[i]);
+ INIT_LIST_HEAD(&txq->txq_fifo_pending);
}
return &sc->tx.txq[qnum];
}
@@ -1028,45 +1078,63 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
struct ath_buf *bf, *lastbf;
struct list_head bf_head;
+ struct ath_tx_status ts;
+ memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
for (;;) {
spin_lock_bh(&txq->axq_lock);
- if (list_empty(&txq->axq_q)) {
- txq->axq_link = NULL;
- spin_unlock_bh(&txq->axq_lock);
- break;
- }
-
- bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
+ txq->txq_headidx = txq->txq_tailidx = 0;
+ spin_unlock_bh(&txq->axq_lock);
+ break;
+ } else {
+ bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
+ struct ath_buf, list);
+ }
+ } else {
+ if (list_empty(&txq->axq_q)) {
+ txq->axq_link = NULL;
+ spin_unlock_bh(&txq->axq_lock);
+ break;
+ }
+ bf = list_first_entry(&txq->axq_q, struct ath_buf,
+ list);
- if (bf->bf_stale) {
- list_del(&bf->list);
- spin_unlock_bh(&txq->axq_lock);
+ if (bf->bf_stale) {
+ list_del(&bf->list);
+ spin_unlock_bh(&txq->axq_lock);
- spin_lock_bh(&sc->tx.txbuflock);
- list_add_tail(&bf->list, &sc->tx.txbuf);
- spin_unlock_bh(&sc->tx.txbuflock);
- continue;
+ ath_tx_return_buffer(sc, bf);
+ continue;
+ }
}
lastbf = bf->bf_lastbf;
if (!retry_tx)
- lastbf->bf_desc->ds_txstat.ts_flags =
- ATH9K_TX_SW_ABORTED;
+ lastbf->bf_tx_aborted = true;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ list_cut_position(&bf_head,
+ &txq->txq_fifo[txq->txq_tailidx],
+ &lastbf->list);
+ INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
+ } else {
+ /* remove ath_buf's of the same mpdu from txq */
+ list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
+ }
- /* remove ath_buf's of the same mpdu from txq */
- list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
txq->axq_depth--;
spin_unlock_bh(&txq->axq_lock);
if (bf_isampdu(bf))
- ath_tx_complete_aggr(sc, txq, bf, &bf_head, 0);
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
else
- ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
}
spin_lock_bh(&txq->axq_lock);
@@ -1081,6 +1149,27 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
spin_unlock_bh(&txq->axq_lock);
}
}
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ spin_lock_bh(&txq->axq_lock);
+ while (!list_empty(&txq->txq_fifo_pending)) {
+ bf = list_first_entry(&txq->txq_fifo_pending,
+ struct ath_buf, list);
+ list_cut_position(&bf_head,
+ &txq->txq_fifo_pending,
+ &bf->bf_lastbf->list);
+ spin_unlock_bh(&txq->axq_lock);
+
+ if (bf_isampdu(bf))
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head,
+ &ts, 0);
+ else
+ ath_tx_complete_buf(sc, bf, txq, &bf_head,
+ &ts, 0, 0);
+ spin_lock_bh(&txq->axq_lock);
+ }
+ spin_unlock_bh(&txq->axq_lock);
+ }
}
void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
@@ -1218,44 +1307,47 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
bf = list_first_entry(head, struct ath_buf, list);
- list_splice_tail_init(head, &txq->axq_q);
- txq->axq_depth++;
-
ath_print(common, ATH_DBG_QUEUE,
"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
- if (txq->axq_link == NULL) {
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
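+ /* EDMA chips have a fixed-depth tx FIFO per queue; once it is full,
+ frames are parked on txq_fifo_pending until a slot completes. */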
+ if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
+ list_splice_tail_init(head, &txq->txq_fifo_pending);
+ return;
+ }
+ if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
+ ath_print(common, ATH_DBG_XMIT,
+ "Initializing tx fifo %d which "
+ "is non-empty\n",
+ txq->txq_headidx);
+ INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
+ list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
+ INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
ath_print(common, ATH_DBG_XMIT,
"TXDP[%u] = %llx (%p)\n",
txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
} else {
- *txq->axq_link = bf->bf_daddr;
- ath_print(common, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
- txq->axq_qnum, txq->axq_link,
- ito64(bf->bf_daddr), bf->bf_desc);
- }
- txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
- ath9k_hw_txstart(ah, txq->axq_qnum);
-}
+ list_splice_tail_init(head, &txq->axq_q);
-static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
-{
- struct ath_buf *bf = NULL;
-
- spin_lock_bh(&sc->tx.txbuflock);
-
- if (unlikely(list_empty(&sc->tx.txbuf))) {
- spin_unlock_bh(&sc->tx.txbuflock);
- return NULL;
+ if (txq->axq_link == NULL) {
+ ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
+ ath_print(common, ATH_DBG_XMIT,
+ "TXDP[%u] = %llx (%p)\n",
+ txq->axq_qnum, ito64(bf->bf_daddr),
+ bf->bf_desc);
+ } else {
+ *txq->axq_link = bf->bf_daddr;
+ ath_print(common, ATH_DBG_XMIT,
+ "link[%u] (%p)=%llx (%p)\n",
+ txq->axq_qnum, txq->axq_link,
+ ito64(bf->bf_daddr), bf->bf_desc);
+ }
+ ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
+ &txq->axq_link);
+ ath9k_hw_txstart(ah, txq->axq_qnum);
}
-
- bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
- list_del(&bf->list);
-
- spin_unlock_bh(&sc->tx.txbuflock);
-
- return bf;
+ txq->axq_depth++;
}
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
@@ -1402,8 +1494,7 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}
-static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
- struct ath_txq *txq)
+static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
int flags = 0;
@@ -1414,6 +1505,9 @@ static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
flags |= ATH9K_TXDESC_NOACK;
+ if (use_ldpc)
+ flags |= ATH9K_TXDESC_LDPC;
+
return flags;
}
@@ -1432,8 +1526,9 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
/* find number of symbols: PLCP + data */
+ streams = HT_RC_2_STREAMS(rix);
nbits = (pktlen << 3) + OFDM_PLCP_BITS;
- nsymbits = bits_per_symbol[rix][width];
+ nsymbits = bits_per_symbol[rix % 8][width] * streams;
nsymbols = (nbits + nsymbits - 1) / nsymbits;
if (!half_gi)
@@ -1442,7 +1537,6 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
duration = SYMBOL_TIME_HALFGI(nsymbols);
/* addup duration for legacy/ht training and signal fields */
- streams = HT_RC_2_STREAMS(rix);
duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
return duration;
@@ -1513,6 +1607,8 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
series[i].Rate = rix | 0x80;
series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
is_40, is_sgi, is_sp);
+ if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
+ series[i].RateFlags |= ATH9K_RATESERIES_STBC;
continue;
}
@@ -1565,15 +1661,16 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
int hdrlen;
__le16 fc;
int padpos, padsize;
+ bool use_ldpc = false;
tx_info->pad[0] = 0;
switch (txctl->frame_type) {
- case ATH9K_NOT_INTERNAL:
+ case ATH9K_IFT_NOT_INTERNAL:
break;
- case ATH9K_INT_PAUSE:
+ case ATH9K_IFT_PAUSE:
tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
/* fall through */
- case ATH9K_INT_UNPAUSE:
+ case ATH9K_IFT_UNPAUSE:
tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
break;
}
@@ -1591,10 +1688,13 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
bf->bf_frmlen -= padsize;
}
- if (conf_is_ht(&hw->conf))
+ if (conf_is_ht(&hw->conf)) {
bf->bf_state.bf_type |= BUF_HT;
+ if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
+ use_ldpc = true;
+ }
- bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
+ bf->bf_flags = setup_tx_flags(skb, use_ldpc);
bf->bf_keytype = get_hw_crypto_keytype(skb);
if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
@@ -1653,8 +1753,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
list_add_tail(&bf->list, &bf_head);
ds = bf->bf_desc;
- ds->ds_link = 0;
- ds->ds_data = bf->bf_buf_addr;
+ ath9k_hw_set_desc_link(ah, ds, 0);
ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
@@ -1663,7 +1762,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
skb->len, /* segment length */
true, /* first segment */
true, /* last segment */
- ds); /* first descriptor */
+ ds, /* first descriptor */
+ bf->bf_buf_addr,
+ txctl->txq->axq_qnum);
spin_lock_bh(&txctl->txq->axq_lock);
@@ -1732,9 +1833,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
}
spin_unlock_bh(&txq->axq_lock);
- spin_lock_bh(&sc->tx.txbuflock);
- list_add_tail(&bf->list, &sc->tx.txbuf);
- spin_unlock_bh(&sc->tx.txbuflock);
+ ath_tx_return_buffer(sc, bf);
return r;
}
@@ -1852,9 +1951,8 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_txq *txq,
- struct list_head *bf_q,
- int txok, int sendbar)
+ struct ath_txq *txq, struct list_head *bf_q,
+ struct ath_tx_status *ts, int txok, int sendbar)
{
struct sk_buff *skb = bf->bf_mpdu;
unsigned long flags;
@@ -1872,7 +1970,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
ath_tx_complete(sc, skb, bf->aphy, tx_flags);
- ath_debug_stat_tx(sc, txq, bf);
+ ath_debug_stat_tx(sc, txq, bf, ts);
/*
* Return the list of ath_buf of this mpdu to free queue
@@ -1883,23 +1981,21 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
}
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
- int txok)
+ struct ath_tx_status *ts, int txok)
{
- struct ath_buf *bf_last = bf->bf_lastbf;
- struct ath_desc *ds = bf_last->bf_desc;
u16 seq_st = 0;
u32 ba[WME_BA_BMP_SIZE >> 5];
int ba_index;
int nbad = 0;
int isaggr = 0;
- if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
+ if (bf->bf_tx_aborted)
return 0;
isaggr = bf_isaggr(bf);
if (isaggr) {
- seq_st = ATH_DS_BA_SEQ(ds);
- memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
+ seq_st = ts->ts_seqnum;
+ memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
}
while (bf) {
@@ -1913,7 +2009,7 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
return nbad;
}
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
+static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
int nbad, int txok, bool update_rc)
{
struct sk_buff *skb = bf->bf_mpdu;
@@ -1923,24 +2019,24 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
u8 i, tx_rateindex;
if (txok)
- tx_info->status.ack_signal = ds->ds_txstat.ts_rssi;
+ tx_info->status.ack_signal = ts->ts_rssi;
- tx_rateindex = ds->ds_txstat.ts_rateindex;
+ tx_rateindex = ts->ts_rateindex;
WARN_ON(tx_rateindex >= hw->max_rates);
- if (update_rc)
- tx_info->pad[0] |= ATH_TX_INFO_UPDATE_RC;
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
+ if (ts->ts_status & ATH9K_TXERR_FILT)
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
+ tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
- if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
+ if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
(bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
if (ieee80211_is_data(hdr->frame_control)) {
- if (ds->ds_txstat.ts_flags &
+ if (ts->ts_flags &
(ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
- if ((ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) ||
- (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO))
+ if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
+ (ts->ts_status & ATH9K_TXERR_FIFO))
tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
tx_info->status.ampdu_len = bf->bf_nframes;
tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
@@ -1978,6 +2074,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
struct ath_buf *bf, *lastbf, *bf_held = NULL;
struct list_head bf_head;
struct ath_desc *ds;
+ struct ath_tx_status ts;
int txok;
int status;
@@ -2017,7 +2114,8 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
lastbf = bf->bf_lastbf;
ds = lastbf->bf_desc;
- status = ath9k_hw_txprocdesc(ah, ds);
+ memset(&ts, 0, sizeof(ts));
+ status = ath9k_hw_txprocdesc(ah, ds, &ts);
if (status == -EINPROGRESS) {
spin_unlock_bh(&txq->axq_lock);
break;
@@ -2028,7 +2126,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
* can disable RX.
*/
if (bf->bf_isnullfunc &&
- (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
+ (ts.ts_status & ATH9K_TX_ACKED)) {
if ((sc->ps_flags & PS_ENABLED))
ath9k_enable_ps(sc);
else
@@ -2047,31 +2145,30 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
&txq->axq_q, lastbf->list.prev);
txq->axq_depth--;
- txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
+ txok = !(ts.ts_status & ATH9K_TXERR_MASK);
txq->axq_tx_inprogress = false;
+ if (bf_held)
+ list_del(&bf_held->list);
spin_unlock_bh(&txq->axq_lock);
- if (bf_held) {
- spin_lock_bh(&sc->tx.txbuflock);
- list_move_tail(&bf_held->list, &sc->tx.txbuf);
- spin_unlock_bh(&sc->tx.txbuflock);
- }
+ if (bf_held)
+ ath_tx_return_buffer(sc, bf_held);
if (!bf_isampdu(bf)) {
/*
* This frame is sent out as a single frame.
* Use hardware retry status for this frame.
*/
- bf->bf_retries = ds->ds_txstat.ts_longretry;
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
+ bf->bf_retries = ts.ts_longretry;
+ if (ts.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, ds, 0, txok, true);
+ ath_tx_rc_status(bf, &ts, 0, txok, true);
}
if (bf_isampdu(bf))
- ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok);
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
else
- ath_tx_complete_buf(sc, bf, txq, &bf_head, txok, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
ath_wake_mac80211_queue(sc, txq);
@@ -2133,10 +2230,121 @@ void ath_tx_tasklet(struct ath_softc *sc)
}
}
+void ath_tx_edma_tasklet(struct ath_softc *sc)
+{
+ struct ath_tx_status txs;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_txq *txq;
+ struct ath_buf *bf, *lastbf;
+ struct list_head bf_head;
+ int status;
+ int txok;
+
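+ /* Drain the tx status ring; each entry carries its queue id, so
+ completions for all tx queues are handled in this one loop. */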
+ for (;;) {
+ status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
+ if (status == -EINPROGRESS)
+ break;
+ if (status == -EIO) {
+ ath_print(common, ATH_DBG_XMIT,
+ "Error processing tx status\n");
+ break;
+ }
+
+ /* Skip beacon completions */
+ if (txs.qid == sc->beacon.beaconq)
+ continue;
+
+ txq = &sc->tx.txq[txs.qid];
+
+ spin_lock_bh(&txq->axq_lock);
+ if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
+ spin_unlock_bh(&txq->axq_lock);
+ return;
+ }
+
+ bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
+ struct ath_buf, list);
+ lastbf = bf->bf_lastbf;
+
+ INIT_LIST_HEAD(&bf_head);
+ list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
+ &lastbf->list);
+ INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
+ txq->axq_depth--;
+ txq->axq_tx_inprogress = false;
+ spin_unlock_bh(&txq->axq_lock);
+
+ txok = !(txs.ts_status & ATH9K_TXERR_MASK);
+
+ if (!bf_isampdu(bf)) {
+ bf->bf_retries = txs.ts_longretry;
+ if (txs.ts_status & ATH9K_TXERR_XRETRY)
+ bf->bf_state.bf_type |= BUF_XRETRY;
+ ath_tx_rc_status(bf, &txs, 0, txok, true);
+ }
+
+ if (bf_isampdu(bf))
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
+ else
+ ath_tx_complete_buf(sc, bf, txq, &bf_head,
+ &txs, txok, 0);
+
+ ath_wake_mac80211_queue(sc, txq);
+
+ spin_lock_bh(&txq->axq_lock);
+ if (!list_empty(&txq->txq_fifo_pending)) {
+ INIT_LIST_HEAD(&bf_head);
+ bf = list_first_entry(&txq->txq_fifo_pending,
+ struct ath_buf, list);
+ list_cut_position(&bf_head, &txq->txq_fifo_pending,
+ &bf->bf_lastbf->list);
+ ath_tx_txqaddbuf(sc, txq, &bf_head);
+ } else if (sc->sc_flags & SC_OP_TXAGGR)
+ ath_txq_schedule(sc, txq);
+ spin_unlock_bh(&txq->axq_lock);
+ }
+}
+
/*****************/
/* Init, Cleanup */
/*****************/
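+/* Allocate the DMA-coherent status ring that EDMA hardware fills with tx
+ * completion descriptors. */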
+static int ath_txstatus_setup(struct ath_softc *sc, int size)
+{
+ struct ath_descdma *dd = &sc->txsdma;
+ u8 txs_len = sc->sc_ah->caps.txs_len;
+
+ dd->dd_desc_len = size * txs_len;
+ dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
+ &dd->dd_desc_paddr, GFP_KERNEL);
+ if (!dd->dd_desc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int ath_tx_edma_init(struct ath_softc *sc)
+{
+ int err;
+
+ err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
+ if (!err)
+ ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
+ sc->txsdma.dd_desc_paddr,
+ ATH_TXSTATUS_RING_SIZE);
+
+ return err;
+}
+
+static void ath_tx_edma_cleanup(struct ath_softc *sc)
+{
+ struct ath_descdma *dd = &sc->txsdma;
+
+ dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
+ dd->dd_desc_paddr);
+}
+
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -2145,7 +2353,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
spin_lock_init(&sc->tx.txbuflock);
error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
- "tx", nbufs, 1);
+ "tx", nbufs, 1, 1);
if (error != 0) {
ath_print(common, ATH_DBG_FATAL,
"Failed to allocate tx descriptors: %d\n", error);
@@ -2153,7 +2361,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
}
error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
- "beacon", ATH_BCBUF, 1);
+ "beacon", ATH_BCBUF, 1, 1);
if (error != 0) {
ath_print(common, ATH_DBG_FATAL,
"Failed to allocate beacon descriptors: %d\n", error);
@@ -2162,6 +2370,12 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ error = ath_tx_edma_init(sc);
+ if (error)
+ goto err;
+ }
+
err:
if (error != 0)
ath_tx_cleanup(sc);
@@ -2176,6 +2390,9 @@ void ath_tx_cleanup(struct ath_softc *sc)
if (sc->tx.txdma.dd_desc_len != 0)
ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_tx_edma_cleanup(sc);
}
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
index 8263633..873bf52 100644
--- a/drivers/net/wireless/ath/debug.h
+++ b/drivers/net/wireless/ath/debug.h
@@ -59,6 +59,7 @@ enum ATH_DEBUG {
ATH_DBG_PS = 0x00000800,
ATH_DBG_HWTIMER = 0x00001000,
ATH_DBG_BTCOEX = 0x00002000,
+ ATH_DBG_WMI = 0x00004000,
ATH_DBG_ANY = 0xffffffff
};
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index ecc9eb0..a8f81ea 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -19,8 +19,8 @@
#include "ath.h"
#include "reg.h"
-#define REG_READ common->ops->read
-#define REG_WRITE common->ops->write
+#define REG_READ (common->ops->read)
+#define REG_WRITE (common->ops->write)
/**
* ath_hw_set_bssid_mask - filter out bssids we listen
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 00489c4..3f4244f 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -50,6 +50,7 @@
#define ATH9K_5GHZ_ALL ATH9K_5GHZ_5150_5350, \
ATH9K_5GHZ_5470_5850
+
/* This one skips what we call "mid band" */
#define ATH9K_5GHZ_NO_MIDBAND ATH9K_5GHZ_5150_5350, \
ATH9K_5GHZ_5725_5850
@@ -332,7 +333,6 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
ath_reg_apply_active_scan_flags(wiphy, initiator);
break;
}
- return;
}
int ath_reg_notifier_apply(struct wiphy *wiphy,
@@ -360,7 +360,7 @@ EXPORT_SYMBOL(ath_reg_notifier_apply);
static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg)
{
- u16 rd = ath_regd_get_eepromRD(reg);
+ u16 rd = ath_regd_get_eepromRD(reg);
int i;
if (rd & COUNTRY_ERD_FLAG) {
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 3edbbcf..c8f7090 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -865,7 +865,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
/* low bit of first byte of destination tells us if broadcast */
tx_update_descriptor(priv, *(skb->data) & 0x01, len + 18, buff, TX_PACKET_TYPE_DATA);
- dev->trans_start = jiffies;
dev->stats.tx_bytes += len;
spin_unlock_irqrestore(&priv->irqlock, flags);
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index b8807fb..3a003e6 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -104,6 +104,7 @@
#define B43_MMIO_MACFILTER_CONTROL 0x420
#define B43_MMIO_MACFILTER_DATA 0x422
#define B43_MMIO_RCMTA_COUNT 0x43C
+#define B43_MMIO_PSM_PHY_HDR 0x492
#define B43_MMIO_RADIO_HWENABLED_LO 0x49A
#define B43_MMIO_GPIO_CONTROL 0x49C
#define B43_MMIO_GPIO_MASK 0x49E
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 9a374ef..7965b70 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4349,11 +4349,10 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
b43_set_phytxctl_defaults(dev);
/* Minimum Contention Window */
- if (phy->type == B43_PHYTYPE_B) {
+ if (phy->type == B43_PHYTYPE_B)
b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0x1F);
- } else {
+ else
b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0xF);
- }
/* Maximum Contention Window */
b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF);
@@ -4572,6 +4571,23 @@ static void b43_op_sw_scan_complete_notifier(struct ieee80211_hw *hw)
mutex_unlock(&wl->mutex);
}
+static int b43_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct b43_wl *wl = hw_to_b43_wl(hw);
+ struct b43_wldev *dev = wl->current_dev;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (idx != 0)
+ return -ENOENT;
+
+ survey->channel = conf->channel;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = dev->stats.link_noise;
+
+ return 0;
+}
+
static const struct ieee80211_ops b43_hw_ops = {
.tx = b43_op_tx,
.conf_tx = b43_op_conf_tx,
@@ -4591,6 +4607,7 @@ static const struct ieee80211_ops b43_hw_ops = {
.sta_notify = b43_op_sta_notify,
.sw_scan_start = b43_op_sw_scan_start_notifier,
.sw_scan_complete = b43_op_sw_scan_complete_notifier,
+ .get_survey = b43_op_get_survey,
.rfkill_poll = b43_rfkill_poll,
};
@@ -4906,8 +4923,7 @@ static int b43_wireless_init(struct ssb_device *dev)
/* fill hw info */
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM;
+ IEEE80211_HW_SIGNAL_DBM;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 9c7cd28..3d6b337 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -73,6 +73,22 @@ static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
u16 value, u8 core, bool off);
static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
u16 value, u8 core);
+static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel);
+
+static inline bool b43_empty_chanspec(struct b43_chanspec *chanspec)
+{
+ return !chanspec->channel && !chanspec->sideband &&
+ !chanspec->b_width && !chanspec->b_freq;
+}
+
+static inline bool b43_eq_chanspecs(struct b43_chanspec *chanspec1,
+ struct b43_chanspec *chanspec2)
+{
+ return (chanspec1->channel == chanspec2->channel &&
+ chanspec1->sideband == chanspec2->sideband &&
+ chanspec1->b_width == chanspec2->b_width &&
+ chanspec1->b_freq == chanspec2->b_freq);
+}
void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
{//TODO
@@ -89,34 +105,44 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
}
static void b43_chantab_radio_upload(struct b43_wldev *dev,
- const struct b43_nphy_channeltab_entry *e)
-{
- b43_radio_write16(dev, B2055_PLL_REF, e->radio_pll_ref);
- b43_radio_write16(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
- b43_radio_write16(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
- b43_radio_write16(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
- b43_radio_write16(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
- b43_radio_write16(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
- b43_radio_write16(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
- b43_radio_write16(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
- b43_radio_write16(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
- b43_radio_write16(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
- b43_radio_write16(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
- b43_radio_write16(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
- b43_radio_write16(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
- b43_radio_write16(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
- b43_radio_write16(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
- b43_radio_write16(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
- b43_radio_write16(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
- b43_radio_write16(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
- b43_radio_write16(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
- b43_radio_write16(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
- b43_radio_write16(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
- b43_radio_write16(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
+ const struct b43_nphy_channeltab_entry_rev2 *e)
+{
+ b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref);
+ b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
+ b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
+ b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
+ b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
+ b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
+ b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
+ b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
+ b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
+ b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
+ b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
+ b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
+ b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
+ b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
+ b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
+ b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
+ b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
}
static void b43_chantab_phy_upload(struct b43_wldev *dev,
- const struct b43_nphy_channeltab_entry *e)
+ const struct b43_phy_n_sfo_cfg *e)
{
b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a);
b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2);
@@ -131,34 +157,20 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
//TODO
}
-/* Tune the hardware to a new channel. */
-static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
-{
- const struct b43_nphy_channeltab_entry *tabent;
- tabent = b43_nphy_get_chantabent(dev, channel);
- if (!tabent)
- return -ESRCH;
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */
+static void b43_radio_2055_setup(struct b43_wldev *dev,
+ const struct b43_nphy_channeltab_entry_rev2 *e)
+{
+ B43_WARN_ON(dev->phy.rev >= 3);
- //FIXME enable/disable band select upper20 in RXCTL
- if (0 /*FIXME 5Ghz*/)
- b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, 0x20);
- else
- b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, 0x50);
- b43_chantab_radio_upload(dev, tabent);
+ b43_chantab_radio_upload(dev, e);
udelay(50);
- b43_radio_write16(dev, B2055_VCO_CAL10, 5);
- b43_radio_write16(dev, B2055_VCO_CAL10, 45);
- b43_radio_write16(dev, B2055_VCO_CAL10, 65);
+ b43_radio_write(dev, B2055_VCO_CAL10, 0x05);
+ b43_radio_write(dev, B2055_VCO_CAL10, 0x45);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ b43_radio_write(dev, B2055_VCO_CAL10, 0x65);
udelay(300);
- if (0 /*FIXME 5Ghz*/)
- b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
- else
- b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
- b43_chantab_phy_upload(dev, tabent);
- b43_nphy_tx_power_fix(dev);
-
- return 0;
}
static void b43_radio_init2055_pre(struct b43_wldev *dev)
@@ -174,52 +186,64 @@ static void b43_radio_init2055_pre(struct b43_wldev *dev)
static void b43_radio_init2055_post(struct b43_wldev *dev)
{
+ struct b43_phy_n *nphy = dev->phy.n;
struct ssb_sprom *sprom = &(dev->dev->bus->sprom);
struct ssb_boardinfo *binfo = &(dev->dev->bus->boardinfo);
int i;
u16 val;
+ bool workaround = false;
+
+ if (sprom->revision < 4)
+ workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM ||
+ binfo->type != 0x46D ||
+ binfo->rev < 0x41);
+ else
+ workaround = ((sprom->boardflags_hi & B43_BFH_NOPA) == 0);
b43_radio_mask(dev, B2055_MASTER1, 0xFFF3);
- msleep(1);
- if ((sprom->revision != 4) ||
- !(sprom->boardflags_hi & B43_BFH_RSSIINV)) {
- if ((binfo->vendor != PCI_VENDOR_ID_BROADCOM) ||
- (binfo->type != 0x46D) ||
- (binfo->rev < 0x41)) {
- b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
- b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
- msleep(1);
- }
+ if (workaround) {
+ b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
+ b43_radio_mask(dev, B2055_C2_RX_BB_REG, 0x7F);
}
- b43_radio_maskset(dev, B2055_RRCCAL_NOPTSEL, 0x3F, 0x2C);
- msleep(1);
- b43_radio_write16(dev, B2055_CAL_MISC, 0x3C);
- msleep(1);
+ b43_radio_maskset(dev, B2055_RRCCAL_NOPTSEL, 0xFFC0, 0x2C);
+ b43_radio_write(dev, B2055_CAL_MISC, 0x3C);
b43_radio_mask(dev, B2055_CAL_MISC, 0xFFBE);
- msleep(1);
b43_radio_set(dev, B2055_CAL_LPOCTL, 0x80);
- msleep(1);
b43_radio_set(dev, B2055_CAL_MISC, 0x1);
msleep(1);
b43_radio_set(dev, B2055_CAL_MISC, 0x40);
- msleep(1);
- for (i = 0; i < 100; i++) {
- val = b43_radio_read16(dev, B2055_CAL_COUT2);
- if (val & 0x80)
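+ /* Poll for calibration completion; 'i' doubles as a timeout flag and is
+ cleared on success. */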
+ for (i = 0; i < 200; i++) {
+ val = b43_radio_read(dev, B2055_CAL_COUT2);
+ if (val & 0x80) {
+ i = 0;
break;
+ }
udelay(10);
}
- msleep(1);
+ if (i)
+ b43err(dev->wl, "radio post init timeout\n");
b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
- msleep(1);
nphy_channel_switch(dev, dev->phy.channel);
- b43_radio_write16(dev, B2055_C1_RX_BB_LPF, 0x9);
- b43_radio_write16(dev, B2055_C2_RX_BB_LPF, 0x9);
- b43_radio_write16(dev, B2055_C1_RX_BB_MIDACHP, 0x83);
- b43_radio_write16(dev, B2055_C2_RX_BB_MIDACHP, 0x83);
+ b43_radio_write(dev, B2055_C1_RX_BB_LPF, 0x9);
+ b43_radio_write(dev, B2055_C2_RX_BB_LPF, 0x9);
+ b43_radio_write(dev, B2055_C1_RX_BB_MIDACHP, 0x83);
+ b43_radio_write(dev, B2055_C2_RX_BB_MIDACHP, 0x83);
+ b43_radio_maskset(dev, B2055_C1_LNA_GAINBST, 0xFFF8, 0x6);
+ b43_radio_maskset(dev, B2055_C2_LNA_GAINBST, 0xFFF8, 0x6);
+ if (!nphy->gain_boost) {
+ b43_radio_set(dev, B2055_C1_RX_RFSPC1, 0x2);
+ b43_radio_set(dev, B2055_C2_RX_RFSPC1, 0x2);
+ } else {
+ b43_radio_mask(dev, B2055_C1_RX_RFSPC1, 0xFFFD);
+ b43_radio_mask(dev, B2055_C2_RX_RFSPC1, 0xFFFD);
+ }
+ udelay(2);
}
-/* Initialize a Broadcom 2055 N-radio */
+/*
+ * Initialize a Broadcom 2055 N-radio
+ * http://bcm-v4.sipsolutions.net/802.11/Radio/2055/Init
+ */
static void b43_radio_init2055(struct b43_wldev *dev)
{
b43_radio_init2055_pre(dev);
@@ -230,16 +254,15 @@ static void b43_radio_init2055(struct b43_wldev *dev)
b43_radio_init2055_post(dev);
}
-void b43_nphy_radio_turn_on(struct b43_wldev *dev)
+/*
+ * Initialize a Broadcom 2056 N-radio
+ * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
+ */
+static void b43_radio_init2056(struct b43_wldev *dev)
{
- b43_radio_init2055(dev);
+ /* TODO */
}
-void b43_nphy_radio_turn_off(struct b43_wldev *dev)
-{
- b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
- ~B43_NPHY_RFCTL_CMD_EN);
-}
/*
* Upload the N-PHY tables.
@@ -647,6 +670,41 @@ static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
+static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
+{
+ if (dev->phy.rev >= 3) {
+ if (!init)
+ return;
+ if (0 /* FIXME */) {
+ b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211);
+ b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222);
+ b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144);
+ b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188);
+ }
+ } else {
+ b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0);
+ b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0);
+
+ ssb_chipco_gpio_control(&dev->dev->bus->chipco, 0xFC00,
+ 0xFC00);
+ b43_write32(dev, B43_MMIO_MACCTL,
+ b43_read32(dev, B43_MMIO_MACCTL) &
+ ~B43_MACCTL_GPOUTSMSK);
+ b43_write16(dev, B43_MMIO_GPIO_MASK,
+ b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00);
+ b43_write16(dev, B43_MMIO_GPIO_CONTROL,
+ b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00);
+
+ if (init) {
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
+ }
+ }
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
{
@@ -723,7 +781,7 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
- unsigned int channel;
+ u8 channel = nphy->radio_chanspec.channel;
int tone[2] = { 57, 58 };
u32 noise[2] = { 0x3FF, 0x3FF };
@@ -732,8 +790,6 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1);
- /* FIXME: channel = radio_chanspec */
-
if (nphy->gband_spurwar_en) {
/* TODO: N PHY Adjust Analog Pfbw (7) */
if (channel == 11 && dev->phy.is_40mhz)
@@ -779,6 +835,62 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
b43_nphy_stay_in_carrier_search(dev, 0);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
+static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u8 i;
+ s16 tmp;
+ u16 data[4];
+ s16 gain[2];
+ u16 minmax[2];
+ u16 lna_gain[4] = { -2, 10, 19, 25 };
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (nphy->gain_boost) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ gain[0] = 6;
+ gain[1] = 6;
+ } else {
+ tmp = 40370 - 315 * nphy->radio_chanspec.channel;
+ gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1));
+ tmp = 23242 - 224 * nphy->radio_chanspec.channel;
+ gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1));
+ }
+ } else {
+ gain[0] = 0;
+ gain[1] = 0;
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (nphy->elna_gain_config) {
+ data[0] = 19 + gain[i];
+ data[1] = 25 + gain[i];
+ data[2] = 25 + gain[i];
+ data[3] = 25 + gain[i];
+ } else {
+ data[0] = lna_gain[0] + gain[i];
+ data[1] = lna_gain[1] + gain[i];
+ data[2] = lna_gain[2] + gain[i];
+ data[3] = lna_gain[3] + gain[i];
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(10, 8), 4, data);
+
+ minmax[i] = 23 + gain[i];
+ }
+
+ b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN,
+ minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT);
+ b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN,
+ minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev)
{
@@ -863,7 +975,7 @@ static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
(code << 8 | 0x7C));
- /* TODO: b43_nphy_adjust_lna_gain_table(dev); */
+ b43_nphy_adjust_lna_gain_table(dev);
if (nphy->elna_gain_config) {
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
@@ -1970,12 +2082,12 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
u16 *rssical_phy_regs = NULL;
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- if (!nphy->rssical_chanspec_2G)
+ if (b43_empty_chanspec(&nphy->rssical_chanspec_2G))
return;
rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
} else {
- if (!nphy->rssical_chanspec_5G)
+ if (b43_empty_chanspec(&nphy->rssical_chanspec_5G))
return;
rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
@@ -2395,7 +2507,7 @@ static void b43_nphy_save_cal(struct b43_wldev *dev)
struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
u16 *txcal_radio_regs = NULL;
- u8 *iqcal_chanspec;
+ struct b43_chanspec *iqcal_chanspec;
u16 *table = NULL;
if (nphy->hang_avoid)
@@ -2451,12 +2563,12 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- if (nphy->iqcal_chanspec_2G == 0)
+ if (b43_empty_chanspec(&nphy->iqcal_chanspec_2G))
return;
table = nphy->cal_cache.txcal_coeffs_2G;
loft = &nphy->cal_cache.txcal_coeffs_2G[5];
} else {
- if (nphy->iqcal_chanspec_5G == 0)
+ if (b43_empty_chanspec(&nphy->iqcal_chanspec_5G))
return;
table = nphy->cal_cache.txcal_coeffs_5G;
loft = &nphy->cal_cache.txcal_coeffs_5G[5];
@@ -2689,7 +2801,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
}
b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
buffer);
- b43_ntab_write_bulk(dev, B43_NTAB16(15, 101), 2,
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 101), 2,
buffer);
b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
buffer);
@@ -2701,8 +2813,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
nphy->txiqlocal_bestc);
nphy->txiqlocal_coeffsvalid = true;
- /* TODO: Set nphy->txiqlocal_chanspec to
- the current channel */
+ nphy->txiqlocal_chanspec = nphy->radio_chanspec;
} else {
length = 11;
if (dev->phy.rev < 3)
@@ -2737,7 +2848,8 @@ static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
u16 buffer[7];
bool equal = true;
- if (!nphy->txiqlocal_coeffsvalid || 1 /* FIXME */)
+ if (!nphy->txiqlocal_coeffsvalid ||
+ b43_eq_chanspecs(&nphy->txiqlocal_chanspec, &nphy->radio_chanspec))
return;
b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
@@ -3092,9 +3204,11 @@ int b43_phy_initn(struct b43_wldev *dev)
do_rssi_cal = false;
if (phy->rev >= 3) {
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
- do_rssi_cal = (nphy->rssical_chanspec_2G == 0);
+ do_rssi_cal =
+ b43_empty_chanspec(&nphy->rssical_chanspec_2G);
else
- do_rssi_cal = (nphy->rssical_chanspec_5G == 0);
+ do_rssi_cal =
+ b43_empty_chanspec(&nphy->rssical_chanspec_5G);
if (do_rssi_cal)
b43_nphy_rssi_cal(dev);
@@ -3106,9 +3220,9 @@ int b43_phy_initn(struct b43_wldev *dev)
if (!((nphy->measure_hold & 0x6) != 0)) {
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
- do_cal = (nphy->iqcal_chanspec_2G == 0);
+ do_cal = b43_empty_chanspec(&nphy->iqcal_chanspec_2G);
else
- do_cal = (nphy->iqcal_chanspec_5G == 0);
+ do_cal = b43_empty_chanspec(&nphy->iqcal_chanspec_5G);
if (nphy->mute)
do_cal = false;
@@ -3117,7 +3231,7 @@ int b43_phy_initn(struct b43_wldev *dev)
target = b43_nphy_get_tx_gains(dev);
if (nphy->antsel_type == 2)
- ;/*TODO NPHY Superswitch Init with argument 1*/
+ b43_nphy_superswitch_init(dev, true);
if (nphy->perical != 2) {
b43_nphy_rssi_cal(dev);
if (phy->rev >= 3) {
@@ -3155,6 +3269,133 @@ int b43_phy_initn(struct b43_wldev *dev)
return 0;
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
+static void b43_nphy_chanspec_setup(struct b43_wldev *dev,
+ const struct b43_phy_n_sfo_cfg *e,
+ struct b43_chanspec chanspec)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 tmp;
+ u32 tmp32;
+
+ tmp = b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ;
+ if (chanspec.b_freq == 1 && tmp == 0) {
+ tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
+ b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
+ b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000);
+ b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
+ b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
+ } else if (chanspec.b_freq == 1) {
+ b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
+ tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
+ b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
+ b43_phy_mask(dev, B43_PHY_B_BBCFG, (u16)~0xC000);
+ b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
+ }
+
+ b43_chantab_phy_upload(dev, e);
+
+ tmp = chanspec.channel;
+ if (chanspec.b_freq == 1)
+ tmp |= 0x0100;
+ if (chanspec.b_width == 3)
+ tmp |= 0x0200;
+ b43_shm_write16(dev, B43_SHM_SHARED, 0xA0, tmp);
+
+ if (nphy->radio_chanspec.channel == 14) {
+ b43_nphy_classifier(dev, 2, 0);
+ b43_phy_set(dev, B43_PHY_B_TEST, 0x0800);
+ } else {
+ b43_nphy_classifier(dev, 2, 2);
+ if (chanspec.b_freq == 2)
+ b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840);
+ }
+
+ if (nphy->txpwrctrl)
+ b43_nphy_tx_power_fix(dev);
+
+ if (dev->phy.rev < 3)
+ b43_nphy_adjust_lna_gain_table(dev);
+
+ b43_nphy_tx_lp_fbw(dev);
+
+ if (dev->phy.rev >= 3 && 0) {
+ /* TODO */
+ }
+
+ b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830);
+
+ if (phy->rev >= 3)
+ b43_nphy_spur_workaround(dev);
+}
+
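The 16-bit word that b43_nphy_chanspec_setup() writes to shared memory offset 0xA0 packs the channel number into the low byte plus two flag bits. A worked example for channel 36 on 5 GHz with a 40 MHz chanspec (b_freq == 1, b_width == 3):

	tmp  = 36;		/* 0x0024 - channel number */
	tmp |= 0x0100;		/* 5 GHz band flag */
	tmp |= 0x0200;		/* 40 MHz width flag */
	/* b43_shm_write16(dev, B43_SHM_SHARED, 0xA0, tmp) then stores 0x0324 */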
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */
+static int b43_nphy_set_chanspec(struct b43_wldev *dev,
+ struct b43_chanspec chanspec)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ const struct b43_nphy_channeltab_entry_rev2 *tabent_r2;
+ const struct b43_nphy_channeltab_entry_rev3 *tabent_r3;
+
+ u8 tmp;
+ u8 channel = chanspec.channel;
+
+ if (dev->phy.rev >= 3) {
+ /* TODO */
+ tabent_r3 = NULL;
+ if (!tabent_r3)
+ return -ESRCH;
+ } else {
+ tabent_r2 = b43_nphy_get_chantabent_rev2(dev, channel);
+ if (!tabent_r2)
+ return -ESRCH;
+ }
+
+ nphy->radio_chanspec = chanspec;
+
+ if (chanspec.b_width != nphy->b_width)
+ ; /* TODO: BMAC BW Set (chanspec.b_width) */
+
+ /* TODO: use defines */
+ if (chanspec.b_width == 3) {
+ if (chanspec.sideband == 2)
+ b43_phy_set(dev, B43_NPHY_RXCTL,
+ B43_NPHY_RXCTL_BSELU20);
+ else
+ b43_phy_mask(dev, B43_NPHY_RXCTL,
+ ~B43_NPHY_RXCTL_BSELU20);
+ }
+
+ if (dev->phy.rev >= 3) {
+ tmp = (chanspec.b_freq == 1) ? 4 : 0;
+ b43_radio_maskset(dev, 0x08, 0xFFFB, tmp);
+ /* TODO: PHY Radio2056 Setup (dev, tabent_r3); */
+ b43_nphy_chanspec_setup(dev, &(tabent_r3->phy_regs), chanspec);
+ } else {
+ tmp = (chanspec.b_freq == 1) ? 0x0020 : 0x0050;
+ b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp);
+ b43_radio_2055_setup(dev, tabent_r2);
+ b43_nphy_chanspec_setup(dev, &(tabent_r2->phy_regs), chanspec);
+ }
+
+ return 0;
+}
+
+/* Tune the hardware to a new channel */
+static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ struct b43_chanspec chanspec;
+ chanspec = nphy->radio_chanspec;
+ chanspec.channel = channel;
+
+ return b43_nphy_set_chanspec(dev, chanspec);
+}
+
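nphy_channel_switch() only swaps the channel number and reuses every other field of the cached chanspec, so band or width changes have to go through b43_nphy_set_chanspec() with a fully populated struct. A hypothetical caller, purely to show the error contract (-ESRCH means no channel table entry was found):

	static int example_hop_to_channel_11(struct b43_wldev *dev)
	{
		int err;

		/* Only the channel number changes; band, width and sideband
		 * stay as cached in dev->phy.n->radio_chanspec. */
		err = nphy_channel_switch(dev, 11);
		if (err == -ESRCH)
			b43err(dev->wl, "No channel table entry for channel 11\n");
		return err;
	}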
static int b43_nphy_op_allocate(struct b43_wldev *dev)
{
struct b43_phy_n *nphy;
@@ -3243,9 +3484,43 @@ static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
}
+/* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
bool blocked)
-{//TODO
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
+ b43err(dev->wl, "MAC not suspended\n");
+
+ if (blocked) {
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ ~B43_NPHY_RFCTL_CMD_CHIP0PU);
+ if (dev->phy.rev >= 3) {
+ b43_radio_mask(dev, 0x09, ~0x2);
+
+ b43_radio_write(dev, 0x204D, 0);
+ b43_radio_write(dev, 0x2053, 0);
+ b43_radio_write(dev, 0x2058, 0);
+ b43_radio_write(dev, 0x205E, 0);
+ b43_radio_mask(dev, 0x2062, ~0xF0);
+ b43_radio_write(dev, 0x2064, 0);
+
+ b43_radio_write(dev, 0x304D, 0);
+ b43_radio_write(dev, 0x3053, 0);
+ b43_radio_write(dev, 0x3058, 0);
+ b43_radio_write(dev, 0x305E, 0);
+ b43_radio_mask(dev, 0x3062, ~0xF0);
+ b43_radio_write(dev, 0x3064, 0);
+ }
+ } else {
+ if (dev->phy.rev >= 3) {
+ b43_radio_init2056(dev);
+ b43_nphy_set_chanspec(dev, nphy->radio_chanspec);
+ } else {
+ b43_radio_init2055(dev);
+ }
+ }
}
static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 403aad3..8b6d570 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -711,6 +711,8 @@
#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */
#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */
+#define B43_PHY_B_BBCFG B43_PHY_N_BMODE(0x001) /* BB config */
+#define B43_PHY_B_TEST B43_PHY_N_BMODE(0x00A)
/* Broadcom 2055 radio registers */
@@ -924,6 +926,13 @@
struct b43_wldev;
+struct b43_chanspec {
+ u8 channel;
+ u8 sideband;
+ u8 b_width;
+ u8 b_freq;
+};
+
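Earlier hunks test and compare these chanspecs with b43_empty_chanspec() and b43_eq_chanspecs(), whose definitions are not part of this diff; presumably they are simple field-wise helpers along these lines (a sketch under that assumption, not the patch's actual code):

	static inline bool b43_empty_chanspec(struct b43_chanspec *chanspec)
	{
		return !chanspec->channel && !chanspec->sideband &&
		       !chanspec->b_width && !chanspec->b_freq;
	}

	static inline bool b43_eq_chanspecs(struct b43_chanspec *chanspec1,
					    struct b43_chanspec *chanspec2)
	{
		return (chanspec1->channel == chanspec2->channel &&
			chanspec1->sideband == chanspec2->sideband &&
			chanspec1->b_width == chanspec2->b_width &&
			chanspec1->b_freq == chanspec2->b_freq);
	}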
struct b43_phy_n_iq_comp {
s16 a0;
s16 b0;
@@ -975,7 +984,8 @@ struct b43_phy_n {
u16 papd_epsilon_offset[2];
s32 preamble_override;
u32 bb_mult_save;
- u16 radio_chanspec;
+ u8 b_width;
+ struct b43_chanspec radio_chanspec;
bool gain_boost;
bool elna_gain_config;
@@ -991,6 +1001,7 @@ struct b43_phy_n {
u16 txiqlocal_bestc[11];
bool txiqlocal_coeffsvalid;
struct b43_phy_n_txpwrindex txpwrindex[2];
+ struct b43_chanspec txiqlocal_chanspec;
u8 txrx_chain;
u16 tx_rx_cal_phy_saveregs[11];
@@ -1006,12 +1017,12 @@ struct b43_phy_n {
bool gband_spurwar_en;
bool ipa2g_on;
- u8 iqcal_chanspec_2G;
- u8 rssical_chanspec_2G;
+ struct b43_chanspec iqcal_chanspec_2G;
+ struct b43_chanspec rssical_chanspec_2G;
bool ipa5g_on;
- u8 iqcal_chanspec_5G;
- u8 rssical_chanspec_5G;
+ struct b43_chanspec iqcal_chanspec_5G;
+ struct b43_chanspec rssical_chanspec_5G;
struct b43_phy_n_rssical_cache rssical_cache;
struct b43_phy_n_cal_cache cal_cache;
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index a00d509..d96e870 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -318,14 +318,14 @@ void b2055_upload_inittab(struct b43_wldev *dev,
.radio_c2_tx_mxbgtrim = r21
#define PHYREGS(r0, r1, r2, r3, r4, r5) \
- .phy_bw1a = r0, \
- .phy_bw2 = r1, \
- .phy_bw3 = r2, \
- .phy_bw4 = r3, \
- .phy_bw5 = r4, \
- .phy_bw6 = r5
-
-static const struct b43_nphy_channeltab_entry b43_nphy_channeltab[] = {
+ .phy_regs.phy_bw1a = r0, \
+ .phy_regs.phy_bw2 = r1, \
+ .phy_regs.phy_bw3 = r2, \
+ .phy_regs.phy_bw4 = r3, \
+ .phy_regs.phy_bw5 = r4, \
+ .phy_regs.phy_bw6 = r5
+
+static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab[] = {
{ .channel = 184,
.freq = 4920, /* MHz */
.unk2 = 3280,
@@ -1320,10 +1320,10 @@ static const struct b43_nphy_channeltab_entry b43_nphy_channeltab[] = {
},
};
-const struct b43_nphy_channeltab_entry *
-b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel)
+const struct b43_nphy_channeltab_entry_rev2 *
+b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel)
{
- const struct b43_nphy_channeltab_entry *e;
+ const struct b43_nphy_channeltab_entry_rev2 *e;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(b43_nphy_channeltab); i++) {
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 9c1c6ec..8fc1da9 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -4,9 +4,22 @@
#include <linux/types.h>
-struct b43_nphy_channeltab_entry {
+struct b43_phy_n_sfo_cfg {
+ u16 phy_bw1a;
+ u16 phy_bw2;
+ u16 phy_bw3;
+ u16 phy_bw4;
+ u16 phy_bw5;
+ u16 phy_bw6;
+};
+
+struct b43_nphy_channeltab_entry_rev2 {
/* The channel number */
u8 channel;
+ /* The channel frequency in MHz */
+ u16 freq;
+ /* An unknown value */
+ u16 unk2;
/* Radio register values on channelswitch */
u8 radio_pll_ref;
u8 radio_rf_pllmod0;
@@ -31,16 +44,18 @@ struct b43_nphy_channeltab_entry {
u8 radio_c2_tx_pgapadtn;
u8 radio_c2_tx_mxbgtrim;
/* PHY register values on channelswitch */
- u16 phy_bw1a;
- u16 phy_bw2;
- u16 phy_bw3;
- u16 phy_bw4;
- u16 phy_bw5;
- u16 phy_bw6;
+ struct b43_phy_n_sfo_cfg phy_regs;
+};
+
+struct b43_nphy_channeltab_entry_rev3 {
+ /* The channel number */
+ u8 channel;
/* The channel frequency in MHz */
u16 freq;
- /* An unknown value */
- u16 unk2;
+ /* Radio register values on channelswitch */
+ /* TODO */
+ /* PHY register values on channelswitch */
+ struct b43_phy_n_sfo_cfg phy_regs;
};
@@ -77,8 +92,8 @@ void b2055_upload_inittab(struct b43_wldev *dev,
/* Get the NPHY Channel Switch Table entry for a channel number.
* Returns NULL on failure to find an entry. */
-const struct b43_nphy_channeltab_entry *
-b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel);
+const struct b43_nphy_channeltab_entry_rev2 *
+b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel);
/* The N-PHY tables. */
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index eda0652..e6b0528 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -610,7 +610,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
}
/* Link quality statistics */
- status.noise = dev->stats.link_noise;
if ((chanstat & B43_RX_CHAN_PHYTYPE) == B43_PHYTYPE_N) {
// s8 rssi = max(rxhdr->power0, rxhdr->power1);
//TODO: Find out what the rssi value is (dBm or percentage?)
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index bb2dd93..1713f5f 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3482,6 +3482,23 @@ static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw,
return 0;
}
+static int b43legacy_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
+ struct b43legacy_wldev *dev = wl->current_dev;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (idx != 0)
+ return -ENOENT;
+
+ survey->channel = conf->channel;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = dev->stats.link_noise;
+
+ return 0;
+}
+
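The get_survey callback is index driven: the stack keeps calling it with increasing idx until the driver returns -ENOENT, and the 'filled' bitmask tells the caller which fields are valid (here only the noise figure). A hypothetical consumer loop, purely to illustrate that contract:

	static void example_dump_survey(struct ieee80211_hw *hw,
					const struct ieee80211_ops *ops)
	{
		struct survey_info survey;
		int idx;

		for (idx = 0; ops->get_survey(hw, idx, &survey) == 0; idx++) {
			if (survey.filled & SURVEY_INFO_NOISE_DBM)
				printk(KERN_DEBUG "survey %d: noise %d dBm\n",
				       idx, survey.noise);
		}
	}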
static const struct ieee80211_ops b43legacy_hw_ops = {
.tx = b43legacy_op_tx,
.conf_tx = b43legacy_op_conf_tx,
@@ -3494,6 +3511,7 @@ static const struct ieee80211_ops b43legacy_hw_ops = {
.start = b43legacy_op_start,
.stop = b43legacy_op_stop,
.set_tim = b43legacy_op_beacon_set_tim,
+ .get_survey = b43legacy_op_get_survey,
.rfkill_poll = b43legacy_rfkill_poll,
};
@@ -3769,8 +3787,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
/* fill hw info */
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM;
+ IEEE80211_HW_SIGNAL_DBM;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 9c8882d..7d177d9 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -548,7 +548,6 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
(phystat0 & B43legacy_RX_PHYST0_OFDM),
(phystat0 & B43legacy_RX_PHYST0_GAINCTL),
(phystat3 & B43legacy_RX_PHYST3_TRSTATE));
- status.noise = dev->stats.link_noise;
/* change to support A PHY */
if (phystat0 & B43legacy_RX_PHYST0_OFDM)
status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false);
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index f4c5612..e0b3e8d 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -355,8 +355,7 @@ static struct hostap_bss_info *__hostap_add_bss(local_info_t *local, u8 *bssid,
list_del(&bss->list);
local->num_bss_info--;
} else {
- bss = (struct hostap_bss_info *)
- kmalloc(sizeof(*bss), GFP_ATOMIC);
+ bss = kmalloc(sizeof(*bss), GFP_ATOMIC);
if (bss == NULL)
return NULL;
}
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 7e72ac1..231dbd7 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -349,7 +349,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off,
default:
policy_txt = "unknown";
break;
- };
+ }
p += sprintf(p, "MAC policy: %s\n", policy_txt);
p += sprintf(p, "MAC entries: %u\n", ap->mac_restrictions.entries);
p += sprintf(p, "MAC list:\n");
diff --git a/drivers/net/wireless/hostap/hostap_download.c b/drivers/net/wireless/hostap/hostap_download.c
index 89d3849..e73bf73 100644
--- a/drivers/net/wireless/hostap/hostap_download.c
+++ b/drivers/net/wireless/hostap/hostap_download.c
@@ -744,7 +744,7 @@ static int prism2_download(local_info_t *local,
local->dev->name, param->dl_cmd);
ret = -EINVAL;
break;
- };
+ }
out:
if (ret == 0 && dl &&
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 9a08230..a85e43a 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -3039,8 +3039,7 @@ static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p)
p->length > 1024 || !p->pointer)
return -EINVAL;
- param = (struct prism2_download_param *)
- kmalloc(p->length, GFP_KERNEL);
+ param = kmalloc(p->length, GFP_KERNEL);
if (param == NULL)
return -ENOMEM;
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 2b05fe5..0bd4dfa 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -2141,7 +2141,7 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
DECLARE_SSID_BUF(ssid);
IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
- "disassociated: '%s' %pM \n",
+ "disassociated: '%s' %pM\n",
print_ssid(ssid, priv->essid, priv->essid_len),
priv->bssid);
@@ -3240,7 +3240,6 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
txq->next);
}
- return;
}
static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
@@ -3286,7 +3285,7 @@ static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
if (inta & IPW2100_INTA_PARITY_ERROR) {
printk(KERN_ERR DRV_NAME
- ": ***** PARITY ERROR INTERRUPT !!!! \n");
+ ": ***** PARITY ERROR INTERRUPT !!!!\n");
priv->inta_other++;
write_register(dev, IPW_REG_INTA, IPW2100_INTA_PARITY_ERROR);
}
@@ -6103,7 +6102,7 @@ static const struct net_device_ops ipw2100_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-/* Look into using netdev destructor to shutdown ieee80211? */
+/* Look into using netdev destructor to shutdown libipw? */
static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
void __iomem * base_addr,
@@ -6113,7 +6112,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
struct ipw2100_priv *priv;
struct net_device *dev;
- dev = alloc_ieee80211(sizeof(struct ipw2100_priv), 0);
+ dev = alloc_libipw(sizeof(struct ipw2100_priv), 0);
if (!dev)
return NULL;
priv = libipw_priv(dev);
@@ -6426,7 +6425,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
sysfs_remove_group(&pci_dev->dev.kobj,
&ipw2100_attribute_group);
- free_ieee80211(dev, 0);
+ free_libipw(dev, 0);
pci_set_drvdata(pci_dev, NULL);
}
@@ -6484,10 +6483,10 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
if (dev->base_addr)
iounmap((void __iomem *)dev->base_addr);
- /* wiphy_unregister needs to be here, before free_ieee80211 */
+ /* wiphy_unregister needs to be here, before free_libipw */
wiphy_unregister(priv->ieee->wdev.wiphy);
kfree(priv->ieee->bg_band.channels);
- free_ieee80211(dev, 0);
+ free_libipw(dev, 0);
}
pci_release_regions(pci_dev);
@@ -6754,7 +6753,7 @@ static int ipw2100_wx_set_freq(struct net_device *dev,
err = -EOPNOTSUPP;
goto done;
} else { /* Set the channel */
- IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
+ IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
err = ipw2100_set_channel(priv, fwrq->m, 0);
}
@@ -6783,7 +6782,7 @@ static int ipw2100_wx_get_freq(struct net_device *dev,
else
wrqu->freq.m = 0;
- IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
+ IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
return 0;
}
@@ -6795,7 +6794,7 @@ static int ipw2100_wx_set_mode(struct net_device *dev,
struct ipw2100_priv *priv = libipw_priv(dev);
int err = 0;
- IPW_DEBUG_WX("SET Mode -> %d \n", wrqu->mode);
+ IPW_DEBUG_WX("SET Mode -> %d\n", wrqu->mode);
if (wrqu->mode == priv->ieee->iw_mode)
return 0;
@@ -7150,7 +7149,7 @@ static int ipw2100_wx_set_nick(struct net_device *dev,
memset(priv->nick, 0, sizeof(priv->nick));
memcpy(priv->nick, extra, wrqu->data.length);
- IPW_DEBUG_WX("SET Nickname -> %s \n", priv->nick);
+ IPW_DEBUG_WX("SET Nickname -> %s\n", priv->nick);
return 0;
}
@@ -7169,7 +7168,7 @@ static int ipw2100_wx_get_nick(struct net_device *dev,
memcpy(extra, priv->nick, wrqu->data.length);
wrqu->data.flags = 1; /* active */
- IPW_DEBUG_WX("GET Nickname -> %s \n", extra);
+ IPW_DEBUG_WX("GET Nickname -> %s\n", extra);
return 0;
}
@@ -7208,7 +7207,7 @@ static int ipw2100_wx_set_rate(struct net_device *dev,
err = ipw2100_set_tx_rates(priv, rate, 0);
- IPW_DEBUG_WX("SET Rate -> %04X \n", rate);
+ IPW_DEBUG_WX("SET Rate -> %04X\n", rate);
done:
mutex_unlock(&priv->action_mutex);
return err;
@@ -7259,7 +7258,7 @@ static int ipw2100_wx_get_rate(struct net_device *dev,
wrqu->bitrate.value = 0;
}
- IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
+ IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
done:
mutex_unlock(&priv->action_mutex);
@@ -7295,7 +7294,7 @@ static int ipw2100_wx_set_rts(struct net_device *dev,
err = ipw2100_set_rts_threshold(priv, value);
- IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X \n", value);
+ IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X\n", value);
done:
mutex_unlock(&priv->action_mutex);
return err;
@@ -7317,7 +7316,7 @@ static int ipw2100_wx_get_rts(struct net_device *dev,
/* If RTS is set to the default value, then it is disabled */
wrqu->rts.disabled = (priv->rts_threshold & RTS_DISABLED) ? 1 : 0;
- IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X \n", wrqu->rts.value);
+ IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X\n", wrqu->rts.value);
return 0;
}
@@ -7356,7 +7355,7 @@ static int ipw2100_wx_set_txpow(struct net_device *dev,
err = ipw2100_set_tx_power(priv, value);
- IPW_DEBUG_WX("SET TX Power -> %d \n", value);
+ IPW_DEBUG_WX("SET TX Power -> %d\n", value);
done:
mutex_unlock(&priv->action_mutex);
@@ -7385,7 +7384,7 @@ static int ipw2100_wx_get_txpow(struct net_device *dev,
wrqu->txpower.flags = IW_TXPOW_DBM;
- IPW_DEBUG_WX("GET TX Power -> %d \n", wrqu->txpower.value);
+ IPW_DEBUG_WX("GET TX Power -> %d\n", wrqu->txpower.value);
return 0;
}
@@ -7415,7 +7414,7 @@ static int ipw2100_wx_set_frag(struct net_device *dev,
priv->frag_threshold = priv->ieee->fts;
}
- IPW_DEBUG_WX("SET Frag Threshold -> %d \n", priv->ieee->fts);
+ IPW_DEBUG_WX("SET Frag Threshold -> %d\n", priv->ieee->fts);
return 0;
}
@@ -7433,7 +7432,7 @@ static int ipw2100_wx_get_frag(struct net_device *dev,
wrqu->frag.fixed = 0; /* no auto select */
wrqu->frag.disabled = (priv->frag_threshold & FRAG_DISABLED) ? 1 : 0;
- IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
+ IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
return 0;
}
@@ -7459,14 +7458,14 @@ static int ipw2100_wx_set_retry(struct net_device *dev,
if (wrqu->retry.flags & IW_RETRY_SHORT) {
err = ipw2100_set_short_retry(priv, wrqu->retry.value);
- IPW_DEBUG_WX("SET Short Retry Limit -> %d \n",
+ IPW_DEBUG_WX("SET Short Retry Limit -> %d\n",
wrqu->retry.value);
goto done;
}
if (wrqu->retry.flags & IW_RETRY_LONG) {
err = ipw2100_set_long_retry(priv, wrqu->retry.value);
- IPW_DEBUG_WX("SET Long Retry Limit -> %d \n",
+ IPW_DEBUG_WX("SET Long Retry Limit -> %d\n",
wrqu->retry.value);
goto done;
}
@@ -7475,7 +7474,7 @@ static int ipw2100_wx_set_retry(struct net_device *dev,
if (!err)
err = ipw2100_set_long_retry(priv, wrqu->retry.value);
- IPW_DEBUG_WX("SET Both Retry Limits -> %d \n", wrqu->retry.value);
+ IPW_DEBUG_WX("SET Both Retry Limits -> %d\n", wrqu->retry.value);
done:
mutex_unlock(&priv->action_mutex);
@@ -7509,7 +7508,7 @@ static int ipw2100_wx_get_retry(struct net_device *dev,
wrqu->retry.value = priv->short_retry_limit;
}
- IPW_DEBUG_WX("GET Retry -> %d \n", wrqu->retry.value);
+ IPW_DEBUG_WX("GET Retry -> %d\n", wrqu->retry.value);
return 0;
}
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 8d72e3d..3aa3bb1 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -459,7 +459,7 @@ static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
{
u32 word;
_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
- IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
+ IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
word = _ipw_read32(priv, IPW_INDIRECT_DATA);
return (word >> ((reg & 0x3) * 8)) & 0xff;
}
@@ -473,7 +473,7 @@ static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
value = _ipw_read32(priv, IPW_INDIRECT_DATA);
- IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
+ IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
return value;
}
@@ -2349,16 +2349,25 @@ static void ipw_bg_adapter_restart(struct work_struct *work)
mutex_unlock(&priv->mutex);
}
-#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
+static void ipw_abort_scan(struct ipw_priv *priv);
+
+#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
static void ipw_scan_check(void *data)
{
struct ipw_priv *priv = data;
- if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
+
+ if (priv->status & STATUS_SCAN_ABORTING) {
IPW_DEBUG_SCAN("Scan completion watchdog resetting "
"adapter after (%dms).\n",
jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
queue_work(priv->workqueue, &priv->adapter_restart);
+ } else if (priv->status & STATUS_SCANNING) {
+ IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
+ "after (%dms).\n",
+ jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
+ ipw_abort_scan(priv);
+ queue_delayed_work(priv->workqueue, &priv->scan_check, HZ);
}
}
@@ -2598,8 +2607,6 @@ static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
/* the eeprom requires some time to complete the operation */
udelay(p->eeprom_delay);
-
- return;
}
/* perform a chip select operation */
@@ -2739,7 +2746,7 @@ static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
static int ipw_fw_dma_enable(struct ipw_priv *priv)
{ /* start dma engine but no transfers yet */
- IPW_DEBUG_FW(">> : \n");
+ IPW_DEBUG_FW(">> :\n");
/* Start the dma */
ipw_fw_dma_reset_command_blocks(priv);
@@ -2747,7 +2754,7 @@ static int ipw_fw_dma_enable(struct ipw_priv *priv)
/* Write CB base address */
ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
- IPW_DEBUG_FW("<< : \n");
+ IPW_DEBUG_FW("<< :\n");
return 0;
}
@@ -2762,7 +2769,7 @@ static void ipw_fw_dma_abort(struct ipw_priv *priv)
ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
priv->sram_desc.last_cb_index = 0;
- IPW_DEBUG_FW("<< \n");
+ IPW_DEBUG_FW("<<\n");
}
static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
@@ -2813,29 +2820,29 @@ static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
IPW_DEBUG_FW(">> :\n");
address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
- IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
+ IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
/* Read the DMA Controlor register */
register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
- IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
+ IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
/* Print the CB values */
cb_fields_address = address;
register_value = ipw_read_reg32(priv, cb_fields_address);
- IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
+ IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
cb_fields_address += sizeof(u32);
register_value = ipw_read_reg32(priv, cb_fields_address);
- IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
+ IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
cb_fields_address += sizeof(u32);
register_value = ipw_read_reg32(priv, cb_fields_address);
- IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
+ IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
register_value);
cb_fields_address += sizeof(u32);
register_value = ipw_read_reg32(priv, cb_fields_address);
- IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
+ IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
IPW_DEBUG_FW(">> :\n");
}
@@ -2851,7 +2858,7 @@ static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
sizeof(struct command_block);
- IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
+ IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
current_cb_index, current_cb_address);
IPW_DEBUG_FW(">> :\n");
@@ -2910,7 +2917,7 @@ static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
int ret, i;
u32 size;
- IPW_DEBUG_FW(">> \n");
+ IPW_DEBUG_FW(">>\n");
IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
nr, dest_address, len);
@@ -2927,7 +2934,7 @@ static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
IPW_DEBUG_FW_INFO(": Added new cb\n");
}
- IPW_DEBUG_FW("<< \n");
+ IPW_DEBUG_FW("<<\n");
return 0;
}
@@ -2936,7 +2943,7 @@ static int ipw_fw_dma_wait(struct ipw_priv *priv)
u32 current_index = 0, previous_index;
u32 watchdog = 0;
- IPW_DEBUG_FW(">> : \n");
+ IPW_DEBUG_FW(">> :\n");
current_index = ipw_fw_dma_command_block_index(priv);
IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
@@ -2965,7 +2972,7 @@ static int ipw_fw_dma_wait(struct ipw_priv *priv)
ipw_set_bit(priv, IPW_RESET_REG,
IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
- IPW_DEBUG_FW("<< dmaWaitSync \n");
+ IPW_DEBUG_FW("<< dmaWaitSync\n");
return 0;
}
@@ -3026,7 +3033,7 @@ static int ipw_stop_master(struct ipw_priv *priv)
{
int rc;
- IPW_DEBUG_TRACE(">> \n");
+ IPW_DEBUG_TRACE(">>\n");
/* stop master. typical delay - 0 */
ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
@@ -3045,7 +3052,7 @@ static int ipw_stop_master(struct ipw_priv *priv)
static void ipw_arc_release(struct ipw_priv *priv)
{
- IPW_DEBUG_TRACE(">> \n");
+ IPW_DEBUG_TRACE(">>\n");
mdelay(5);
ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
@@ -3067,7 +3074,7 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
image = (__le16 *) data;
- IPW_DEBUG_TRACE(">> \n");
+ IPW_DEBUG_TRACE(">>\n");
rc = ipw_stop_master(priv);
@@ -3181,7 +3188,7 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
void **virts;
dma_addr_t *phys;
- IPW_DEBUG_TRACE("<< : \n");
+ IPW_DEBUG_TRACE("<< :\n");
virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
GFP_KERNEL);
@@ -4482,7 +4489,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
case CMAS_ASSOCIATED:{
IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
IPW_DL_ASSOC,
- "associated: '%s' %pM \n",
+ "associated: '%s' %pM\n",
print_ssid(ssid, priv->essid,
priv->essid_len),
priv->bssid);
@@ -4563,7 +4570,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
IPW_DL_ASSOC,
"deauthenticated: '%s' "
"%pM"
- ": (0x%04X) - %s \n",
+ ": (0x%04X) - %s\n",
print_ssid(ssid,
priv->
essid,
@@ -4614,7 +4621,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
IPW_DL_ASSOC,
- "disassociated: '%s' %pM \n",
+ "disassociated: '%s' %pM\n",
print_ssid(ssid, priv->essid,
priv->essid_len),
priv->bssid);
@@ -4652,7 +4659,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
switch (auth->state) {
case CMAS_AUTHENTICATED:
IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
- "authenticated: '%s' %pM \n",
+ "authenticated: '%s' %pM\n",
print_ssid(ssid, priv->essid,
priv->essid_len),
priv->bssid);
@@ -6925,7 +6932,7 @@ static u8 ipw_qos_current_mode(struct ipw_priv * priv)
} else {
mode = priv->ieee->mode;
}
- IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
+ IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
return mode;
}
@@ -6965,7 +6972,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
&def_parameters_OFDM, size);
if ((network->qos_data.active == 1) && (active_network == 1)) {
- IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
+ IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
schedule_work(&priv->qos_activate);
}
@@ -7542,7 +7549,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
return err;
}
- IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n",
+ IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM\n",
print_ssid(ssid, priv->essid, priv->essid_len),
priv->bssid);
@@ -8793,7 +8800,7 @@ static int ipw_wx_set_freq(struct net_device *dev,
}
}
- IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
+ IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
mutex_lock(&priv->mutex);
ret = ipw_set_channel(priv, channel);
mutex_unlock(&priv->mutex);
@@ -8835,7 +8842,7 @@ static int ipw_wx_get_freq(struct net_device *dev,
wrqu->freq.m = 0;
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
+ IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
return 0;
}
@@ -9230,7 +9237,7 @@ static int ipw_wx_get_sens(struct net_device *dev,
wrqu->sens.value = priv->roaming_threshold;
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
+ IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
return 0;
@@ -9358,7 +9365,7 @@ static int ipw_wx_get_rate(struct net_device *dev,
wrqu->bitrate.value = priv->last_rate;
wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
+ IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
return 0;
}
@@ -9381,7 +9388,7 @@ static int ipw_wx_set_rts(struct net_device *dev,
ipw_send_rts_threshold(priv, priv->rts_threshold);
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
+ IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
return 0;
}
@@ -9395,7 +9402,7 @@ static int ipw_wx_get_rts(struct net_device *dev,
wrqu->rts.fixed = 0; /* no auto select */
wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
+ IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
return 0;
}
@@ -9445,7 +9452,7 @@ static int ipw_wx_get_txpow(struct net_device *dev,
wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET TX Power -> %s %d \n",
+ IPW_DEBUG_WX("GET TX Power -> %s %d\n",
wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
return 0;
@@ -9471,7 +9478,7 @@ static int ipw_wx_set_frag(struct net_device *dev,
ipw_send_frag_threshold(priv, wrqu->frag.value);
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
+ IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
return 0;
}
@@ -9485,7 +9492,7 @@ static int ipw_wx_get_frag(struct net_device *dev,
wrqu->frag.fixed = 0; /* no auto select */
wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
+ IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
return 0;
}
@@ -9549,7 +9556,7 @@ static int ipw_wx_get_retry(struct net_device *dev,
}
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
+ IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
return 0;
}
@@ -9996,49 +10003,48 @@ static int ipw_wx_sw_reset(struct net_device *dev,
}
/* Rebase the WE IOCTLs to zero for the handler array */
-#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
static iw_handler ipw_wx_handlers[] = {
- IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname,
- IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
- IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
- IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
- IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
- IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
- IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
- IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
- IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
- IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
- IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
- IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
- IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
- IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
- IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
- IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
- IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
- IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
- IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
- IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
- IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
- IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
- IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
- IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
- IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
- IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
- IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
- IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
- IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
- IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
- IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
- IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
- IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
- IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
- IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
- IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
- IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
- IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
- IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
- IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
- IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
+ IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
+ IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
+ IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
+ IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
+ IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
+ IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
+ IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
+ IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
+ IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
+ IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
+ IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
+ IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
+ IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
+ IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
+ IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
+ IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
+ IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
+ IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
+ IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
+ IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
+ IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
+ IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
+ IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
+ IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
+ IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
+ IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
+ IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
+ IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
+ IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
+ IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+ IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
+ IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
+ IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
+ IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
+ IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
+ IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
+ IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
+ IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
+ IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
+ IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
};
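The removed IW_IOCTL() macro rebased each ioctl number to a zero-based array slot by subtracting SIOCSIWCOMMIT; IW_HANDLER() from include/net/iw_handler.h does the equivalent rebasing in one place (SIOCSIWCOMMIT is the first wireless ioctl). Roughly, treating the exact spelling as an assumption rather than a quote of the header:

	#define IW_IOCTL_IDX(cmd)	((cmd) - SIOCIWFIRST)
	#define IW_HANDLER(id, func)	[IW_IOCTL_IDX(id)] = func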
enum {
@@ -11667,7 +11673,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
if (priv->prom_net_dev)
return -EPERM;
- priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv), 1);
+ priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
if (priv->prom_net_dev == NULL)
return -ENOMEM;
@@ -11686,7 +11692,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
rc = register_netdev(priv->prom_net_dev);
if (rc) {
- free_ieee80211(priv->prom_net_dev, 1);
+ free_libipw(priv->prom_net_dev, 1);
priv->prom_net_dev = NULL;
return rc;
}
@@ -11700,7 +11706,7 @@ static void ipw_prom_free(struct ipw_priv *priv)
return;
unregister_netdev(priv->prom_net_dev);
- free_ieee80211(priv->prom_net_dev, 1);
+ free_libipw(priv->prom_net_dev, 1);
priv->prom_net_dev = NULL;
}
@@ -11728,7 +11734,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
struct ipw_priv *priv;
int i;
- net_dev = alloc_ieee80211(sizeof(struct ipw_priv), 0);
+ net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
if (net_dev == NULL) {
err = -ENOMEM;
goto out;
@@ -11748,7 +11754,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
mutex_init(&priv->mutex);
if (pci_enable_device(pdev)) {
err = -ENODEV;
- goto out_free_ieee80211;
+ goto out_free_libipw;
}
pci_set_master(pdev);
@@ -11875,8 +11881,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
out_pci_disable_device:
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
- out_free_ieee80211:
- free_ieee80211(priv->net_dev, 0);
+ out_free_libipw:
+ free_libipw(priv->net_dev, 0);
out:
return err;
}
@@ -11943,11 +11949,11 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
- /* wiphy_unregister needs to be here, before free_ieee80211 */
+ /* wiphy_unregister needs to be here, before free_libipw */
wiphy_unregister(priv->ieee->wdev.wiphy);
kfree(priv->ieee->a_band.channels);
kfree(priv->ieee->bg_band.channels);
- free_ieee80211(priv->net_dev, 0);
+ free_libipw(priv->net_dev, 0);
free_firmware();
}
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index a6d5e42..284b0e4 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -64,7 +64,7 @@
extern u32 libipw_debug_level;
#define LIBIPW_DEBUG(level, fmt, args...) \
do { if (libipw_debug_level & (level)) \
- printk(KERN_DEBUG "ieee80211: %c %s " fmt, \
+ printk(KERN_DEBUG "libipw: %c %s " fmt, \
in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
static inline bool libipw_ratelimit_debug(u32 level)
{
@@ -116,8 +116,8 @@ static inline bool libipw_ratelimit_debug(u32 level)
#define LIBIPW_DL_RX (1<<9)
#define LIBIPW_DL_QOS (1<<31)
-#define LIBIPW_ERROR(f, a...) printk(KERN_ERR "ieee80211: " f, ## a)
-#define LIBIPW_WARNING(f, a...) printk(KERN_WARNING "ieee80211: " f, ## a)
+#define LIBIPW_ERROR(f, a...) printk(KERN_ERR "libipw: " f, ## a)
+#define LIBIPW_WARNING(f, a...) printk(KERN_WARNING "libipw: " f, ## a)
#define LIBIPW_DEBUG_INFO(f, a...) LIBIPW_DEBUG(LIBIPW_DL_INFO, f, ## a)
#define LIBIPW_DEBUG_WX(f, a...) LIBIPW_DEBUG(LIBIPW_DL_WX, f, ## a)
@@ -905,7 +905,7 @@ struct libipw_device {
struct libipw_reassoc_request * req);
/* This must be the last item so that it points to the data
- * allocated beyond this structure by alloc_ieee80211 */
+ * allocated beyond this structure by alloc_libipw */
u8 priv[0];
};
@@ -1017,9 +1017,9 @@ static inline int libipw_is_cck_rate(u8 rate)
return 0;
}
-/* ieee80211.c */
-extern void free_ieee80211(struct net_device *dev, int monitor);
-extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor);
+/* libipw.c */
+extern void free_libipw(struct net_device *dev, int monitor);
+extern struct net_device *alloc_libipw(int sizeof_priv, int monitor);
extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
extern void libipw_networks_age(struct libipw_device *ieee,
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index 2fa5586..5596540 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -53,7 +53,7 @@
#include "libipw.h"
#define DRV_DESCRIPTION "802.11 data/management/control stack"
-#define DRV_NAME "ieee80211"
+#define DRV_NAME "libipw"
#define DRV_VERSION LIBIPW_VERSION
#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>"
@@ -140,7 +140,7 @@ int libipw_change_mtu(struct net_device *dev, int new_mtu)
}
EXPORT_SYMBOL(libipw_change_mtu);
-struct net_device *alloc_ieee80211(int sizeof_priv, int monitor)
+struct net_device *alloc_libipw(int sizeof_priv, int monitor)
{
struct libipw_device *ieee;
struct net_device *dev;
@@ -222,8 +222,9 @@ failed_free_netdev:
failed:
return NULL;
}
+EXPORT_SYMBOL(alloc_libipw);
-void free_ieee80211(struct net_device *dev, int monitor)
+void free_libipw(struct net_device *dev, int monitor)
{
struct libipw_device *ieee = netdev_priv(dev);
@@ -237,6 +238,7 @@ void free_ieee80211(struct net_device *dev, int monitor)
free_netdev(dev);
}
+EXPORT_SYMBOL(free_libipw);
#ifdef CONFIG_LIBIPW_DEBUG
@@ -291,7 +293,7 @@ static int __init libipw_init(void)
struct proc_dir_entry *e;
libipw_debug_level = debug;
- libipw_proc = proc_mkdir(DRV_NAME, init_net.proc_net);
+ libipw_proc = proc_mkdir("ieee80211", init_net.proc_net);
if (libipw_proc == NULL) {
LIBIPW_ERROR("Unable to create " DRV_NAME
" proc directory\n");
@@ -331,6 +333,3 @@ MODULE_PARM_DESC(debug, "debug output mask");
module_exit(libipw_exit);
module_init(libipw_init);
-
-EXPORT_SYMBOL(alloc_ieee80211);
-EXPORT_SYMBOL(free_ieee80211);
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 39a34da..0de1b18 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -918,7 +918,6 @@ void libipw_rx_any(struct libipw_device *ieee,
drop_free:
dev_kfree_skb_irq(skb);
ieee->dev->stats.rx_dropped++;
- return;
}
#define MGMT_FRAME_FIXED_PART_LENGTH 0x24
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 4e378fa..7c72353 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -9,7 +9,10 @@ CFLAGS_iwl-devtrace.o := -I$(src)
# AGN
obj-$(CONFIG_IWLAGN) += iwlagn.o
-iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o
+iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o
+iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o
+iwlagn-objs += iwl-agn-lib.o
+iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
@@ -19,5 +22,6 @@ iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
# 3945
obj-$(CONFIG_IWL3945) += iwl3945.o
iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
+iwl3945-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-3945-debugfs.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 3bf2e6e..6be2992 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -42,9 +42,11 @@
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-sta.h"
+#include "iwl-agn.h"
#include "iwl-helpers.h"
-#include "iwl-5000-hw.h"
+#include "iwl-agn-hw.h"
#include "iwl-agn-led.h"
+#include "iwl-agn-debugfs.h"
/* Highest firmware API version supported */
#define IWL1000_UCODE_API_MAX 3
@@ -117,7 +119,7 @@ static struct iwl_sensitivity_ranges iwl1000_sensitivity = {
static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
{
if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
- priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
priv->cfg->num_of_queues =
priv->cfg->mod_params->num_of_queues;
@@ -125,13 +127,13 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->num_of_queues *
- sizeof(struct iwl5000_scd_bc_tbl);
+ sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWL5000_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
- priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
+ priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
priv->hw_params.max_bsm_size = 0;
priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
@@ -161,25 +163,25 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
static struct iwl_lib_ops iwl1000_lib = {
.set_hw_params = iwl1000_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
- .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
- .txq_set_sched = iwl5000_txq_set_sched,
- .txq_agg_enable = iwl5000_txq_agg_enable,
- .txq_agg_disable = iwl5000_txq_agg_disable,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
- .rx_handler_setup = iwl5000_rx_handler_setup,
- .setup_deferred_work = iwl5000_setup_deferred_work,
- .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
- .load_ucode = iwl5000_load_ucode,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
+ .load_ucode = iwlagn_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
.dump_fh = iwl_dump_fh,
- .init_alive_start = iwl5000_init_alive_start,
- .alive_notify = iwl5000_alive_notify,
- .send_tx_power = iwl5000_send_tx_power,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.apm_ops = {
.init = iwl_apm_init,
@@ -189,40 +191,47 @@ static struct iwl_lib_ops iwl1000_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
- EEPROM_5000_REG_BAND_1_CHANNELS,
- EEPROM_5000_REG_BAND_2_CHANNELS,
- EEPROM_5000_REG_BAND_3_CHANNELS,
- EEPROM_5000_REG_BAND_4_CHANNELS,
- EEPROM_5000_REG_BAND_5_CHANNELS,
- EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl5000_eeprom_calib_version,
- .query_addr = iwl5000_eeprom_query_addr,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
.config_ap = iwl_config_ap,
.temp_ops = {
- .temperature = iwl5000_temperature,
+ .temperature = iwlagn_temperature,
.set_ct_kill = iwl1000_set_ct_threshold,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+ .check_plcp_health = iwl_good_plcp_health,
+ .check_ack_health = iwl_good_ack_health,
};
static const struct iwl_ops iwl1000_ops = {
- .ucode = &iwl5000_ucode,
.lib = &iwl1000_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl5000_hcmd_utils,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
struct iwl_cfg iwl1000_bgn_cfg = {
- .name = "1000 Series BGN",
+ .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
.fw_name_pre = IWL1000_FW_PRE,
.ucode_api_max = IWL1000_UCODE_API_MAX,
.ucode_api_min = IWL1000_UCODE_API_MIN,
@@ -230,10 +239,10 @@ struct iwl_cfg iwl1000_bgn_cfg = {
.ops = &iwl1000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_1000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_A,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -248,10 +257,15 @@ struct iwl_cfg iwl1000_bgn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 128,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl1000_bg_cfg = {
- .name = "1000 Series BG",
+ .name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
.fw_name_pre = IWL1000_FW_PRE,
.ucode_api_max = IWL1000_UCODE_API_MAX,
.ucode_api_min = IWL1000_UCODE_API_MIN,
@@ -259,10 +273,10 @@ struct iwl_cfg iwl1000_bg_cfg = {
.ops = &iwl1000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_1000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_A,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -270,12 +284,16 @@ struct iwl_cfg iwl1000_bg_cfg = {
.use_bsm = false,
.max_ll_items = OTP_MAX_LL_ITEMS_1000,
.shadow_ram_support = false,
- .ht_greenfield_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 128,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
new file mode 100644
index 0000000..6a9c64a
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
@@ -0,0 +1,500 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "iwl-3945-debugfs.h"
+
+ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
+ sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
+ ssize_t ret;
+ struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+ struct iwl39_statistics_rx_non_phy *general, *accum_general;
+ struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics shown here are based on the last statistics
+ * notification from the uCode and might not reflect the
+ * current uCode activity.
+ */
+ ofdm = &priv->_3945.statistics.rx.ofdm;
+ cck = &priv->_3945.statistics.rx.cck;
+ general = &priv->_3945.statistics.rx.general;
+ accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
+ accum_cck = &priv->_3945.accum_statistics.rx.cck;
+ accum_general = &priv->_3945.accum_statistics.rx.general;
+ delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
+ delta_cck = &priv->_3945.delta_statistics.rx.cck;
+ delta_general = &priv->_3945.delta_statistics.rx.general;
+ max_ofdm = &priv->_3945.max_delta.rx.ofdm;
+ max_cck = &priv->_3945.max_delta.rx.cck;
+ max_general = &priv->_3945.max_delta.rx.general;
+
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - OFDM:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
+ accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "overrun_err:",
+ le32_to_cpu(ofdm->overrun_err),
+ accum_ofdm->overrun_err, delta_ofdm->overrun_err,
+ max_ofdm->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "early_overrun_err:",
+ le32_to_cpu(ofdm->early_overrun_err),
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_good:", le32_to_cpu(ofdm->crc32_good),
+ accum_ofdm->crc32_good, delta_ofdm->crc32_good,
+ max_ofdm->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
+ le32_to_cpu(ofdm->false_alarm_cnt),
+ accum_ofdm->false_alarm_cnt,
+ delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_sync_err_cnt:",
+ le32_to_cpu(ofdm->fina_sync_err_cnt),
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sfd_timeout:",
+ le32_to_cpu(ofdm->sfd_timeout),
+ accum_ofdm->sfd_timeout,
+ delta_ofdm->sfd_timeout,
+ max_ofdm->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_timeout:",
+ le32_to_cpu(ofdm->fina_timeout),
+ accum_ofdm->fina_timeout,
+ delta_ofdm->fina_timeout,
+ max_ofdm->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "unresponded_rts:",
+ le32_to_cpu(ofdm->unresponded_rts),
+ accum_ofdm->unresponded_rts,
+ delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sent_ack_cnt:",
+ le32_to_cpu(ofdm->sent_ack_cnt),
+ accum_ofdm->sent_ack_cnt,
+ delta_ofdm->sent_ack_cnt,
+ max_ofdm->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sent_cts_cnt:",
+ le32_to_cpu(ofdm->sent_cts_cnt),
+ accum_ofdm->sent_cts_cnt,
+ delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current "
+ "accumulative delta max\n",
+ "Statistics_Rx - CCK:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "overrun_err:",
+ le32_to_cpu(cck->overrun_err),
+ accum_cck->overrun_err,
+ delta_cck->overrun_err, max_cck->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "early_overrun_err:",
+ le32_to_cpu(cck->early_overrun_err),
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err,
+ max_cck->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good,
+ max_cck->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "false_alarm_cnt:",
+ le32_to_cpu(cck->false_alarm_cnt),
+ accum_cck->false_alarm_cnt,
+ delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_sync_err_cnt:",
+ le32_to_cpu(cck->fina_sync_err_cnt),
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt,
+ max_cck->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sfd_timeout:",
+ le32_to_cpu(cck->sfd_timeout),
+ accum_cck->sfd_timeout,
+ delta_cck->sfd_timeout, max_cck->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_timeout:",
+ le32_to_cpu(cck->fina_timeout),
+ accum_cck->fina_timeout,
+ delta_cck->fina_timeout, max_cck->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "unresponded_rts:",
+ le32_to_cpu(cck->unresponded_rts),
+ accum_cck->unresponded_rts,
+ delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(cck->rxe_frame_limit_overrun),
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sent_ack_cnt:",
+ le32_to_cpu(cck->sent_ack_cnt),
+ accum_cck->sent_ack_cnt,
+ delta_cck->sent_ack_cnt,
+ max_cck->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sent_cts_cnt:",
+ le32_to_cpu(cck->sent_cts_cnt),
+ accum_cck->sent_cts_cnt,
+ delta_cck->sent_cts_cnt,
+ max_cck->sent_cts_cnt);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current "
+ "accumulative delta max\n",
+ "Statistics_Rx - GENERAL:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bogus_cts:",
+ le32_to_cpu(general->bogus_cts),
+ accum_general->bogus_cts,
+ delta_general->bogus_cts, max_general->bogus_cts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bogus_ack:",
+ le32_to_cpu(general->bogus_ack),
+ accum_general->bogus_ack,
+ delta_general->bogus_ack, max_general->bogus_ack);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "non_bssid_frames:",
+ le32_to_cpu(general->non_bssid_frames),
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "filtered_frames:",
+ le32_to_cpu(general->filtered_frames),
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "non_channel_beacons:",
+ le32_to_cpu(general->non_channel_beacons),
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
+ ssize_t ret;
+ struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Cannot allocate buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics information displayed here is based on
+ * the last statistics notification from the uCode and
+ * might not reflect the current uCode activity.
+ */
+ tx = &priv->_3945.statistics.tx;
+ accum_tx = &priv->_3945.accum_statistics.tx;
+ delta_tx = &priv->_3945.delta_statistics.tx;
+ max_tx = &priv->_3945.max_delta.tx;
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current "
+ "accumulative delta max\n",
+ "Statistics_Tx:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "preamble:",
+ le32_to_cpu(tx->preamble_cnt),
+ accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rx_detected_cnt:",
+ le32_to_cpu(tx->rx_detected_cnt),
+ accum_tx->rx_detected_cnt,
+ delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bt_prio_defer_cnt:",
+ le32_to_cpu(tx->bt_prio_defer_cnt),
+ accum_tx->bt_prio_defer_cnt,
+ delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bt_prio_kill_cnt:",
+ le32_to_cpu(tx->bt_prio_kill_cnt),
+ accum_tx->bt_prio_kill_cnt,
+ delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "few_bytes_cnt:",
+ le32_to_cpu(tx->few_bytes_cnt),
+ accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ack_timeout:",
+ le32_to_cpu(tx->ack_timeout),
+ accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "expected_ack_cnt:",
+ le32_to_cpu(tx->expected_ack_cnt),
+ accum_tx->expected_ack_cnt,
+ delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "actual_ack_cnt:",
+ le32_to_cpu(tx->actual_ack_cnt),
+ accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt,
+ max_tx->actual_ack_cnt);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t iwl3945_ucode_general_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
+ ssize_t ret;
+ struct iwl39_statistics_general *general, *accum_general;
+ struct iwl39_statistics_general *delta_general, *max_general;
+ struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Cannot allocate buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics information displayed here is based on
+ * the last statistics notification from the uCode and
+ * might not reflect the current uCode activity.
+ */
+ general = &priv->_3945.statistics.general;
+ dbg = &priv->_3945.statistics.general.dbg;
+ div = &priv->_3945.statistics.general.div;
+ accum_general = &priv->_3945.accum_statistics.general;
+ delta_general = &priv->_3945.delta_statistics.general;
+ max_general = &priv->_3945.max_delta.general;
+ accum_dbg = &priv->_3945.accum_statistics.general.dbg;
+ delta_dbg = &priv->_3945.delta_statistics.general.dbg;
+ max_dbg = &priv->_3945.max_delta.general.dbg;
+ accum_div = &priv->_3945.accum_statistics.general.div;
+ delta_div = &priv->_3945.delta_statistics.general.div;
+ max_div = &priv->_3945.max_delta.general.div;
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current "
+ "accumulative delta max\n",
+ "Statistics_General:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "burst_check:",
+ le32_to_cpu(dbg->burst_check),
+ accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "burst_count:",
+ le32_to_cpu(dbg->burst_count),
+ accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sleep_time:",
+ le32_to_cpu(general->sleep_time),
+ accum_general->sleep_time,
+ delta_general->sleep_time, max_general->sleep_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "slots_out:",
+ le32_to_cpu(general->slots_out),
+ accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "slots_idle:",
+ le32_to_cpu(general->slots_idle),
+ accum_general->slots_idle,
+ delta_general->slots_idle, max_general->slots_idle);
+ pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
+ le32_to_cpu(general->ttl_timestamp));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
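All three read handlers above follow the same debugfs pattern: allocate a scratch buffer sized from the statistics structures, accumulate formatted lines with scnprintf(), copy the result to userspace with simple_read_from_buffer(), and free the buffer. A minimal sketch of how one of these handlers could be exposed as a debugfs file is shown below; the names iwl3945_stats_open and "ucode_rx_stats" are illustrative only — in the driver the handlers are reached through the debugfs_ops hooks wired up in iwl3945_lib later in this patch.

/* Sketch only, not part of this patch. */
static int iwl3945_stats_open(struct inode *inode, struct file *file)
{
	/* debugfs_create_file() stored priv in i_private; the read
	 * handlers expect it in file->private_data. */
	file->private_data = inode->i_private;
	return 0;
}

static const struct file_operations iwl3945_rx_stats_fops = {
	.read = iwl3945_ucode_rx_stats_read,
	.open = iwl3945_stats_open,
};

/* e.g.: debugfs_create_file("ucode_rx_stats", S_IRUSR, parent_dir,
 *                           priv, &iwl3945_rx_stats_fops); */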
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h
new file mode 100644
index 0000000..70809c5
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h
@@ -0,0 +1,60 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t iwl3945_ucode_general_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos);
+#else
+static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ return 0;
+}
+static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ return 0;
+}
+static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 3a876a8..91bcb4e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -71,13 +71,11 @@
#include "iwl-eeprom.h"
-/* Time constants */
-#define SHORT_SLOT_TIME 9
-#define LONG_SLOT_TIME 20
-
/* RSSI to dBm */
#define IWL39_RSSI_OFFSET 95
+#define IWL_DEFAULT_TX_POWER 0x0F
+
/*
* EEPROM related constants, enums, and structures.
*/
@@ -228,7 +226,6 @@ struct iwl3945_eeprom {
/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
#define IWL39_NUM_QUEUES 5
-#define IWL_NUM_SCAN_RATES (2)
#define IWL_DEFAULT_TX_RETRY 15
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 902c4d4..8e84a08 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -330,16 +330,25 @@ static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
}
-static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
{
- struct iwl3945_rs_sta *rs_sta = priv_sta;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+ struct ieee80211_hw *hw = priv->hw;
+ struct ieee80211_conf *conf = &priv->hw->conf;
+ struct iwl3945_sta_priv *psta;
+ struct iwl3945_rs_sta *rs_sta;
+ struct ieee80211_supported_band *sband;
int i;
- IWL_DEBUG_RATE(priv, "enter\n");
+ IWL_DEBUG_INFO(priv, "enter\n");
+ if (sta_id == priv->hw_params.bcast_sta_id)
+ goto out;
- spin_lock_init(&rs_sta->lock);
+ psta = (struct iwl3945_sta_priv *) sta->drv_priv;
+ rs_sta = &psta->rs_sta;
+ sband = hw->wiphy->bands[conf->channel->band];
rs_sta->priv = priv;
@@ -352,9 +361,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
rs_sta->last_flush = jiffies;
rs_sta->flush_time = IWL_RATE_FLUSH;
rs_sta->last_tx_packets = 0;
- rs_sta->ibss_sta_added = 0;
- init_timer(&rs_sta->rate_scale_flush);
rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
@@ -373,16 +380,18 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
}
}
- priv->sta_supp_rates = sta->supp_rates[sband->band];
+ priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
/* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */
if (sband->band == IEEE80211_BAND_5GHZ) {
rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
- priv->sta_supp_rates = priv->sta_supp_rates <<
+ priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
IWL_FIRST_OFDM_RATE;
}
+out:
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- IWL_DEBUG_RATE(priv, "leave\n");
+ IWL_DEBUG_INFO(priv, "leave\n");
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -406,6 +415,9 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
rs_sta = &psta->rs_sta;
+ spin_lock_init(&rs_sta->lock);
+ init_timer(&rs_sta->rate_scale_flush);
+
IWL_DEBUG_RATE(priv, "leave\n");
return rs_sta;
@@ -414,13 +426,14 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
void *priv_sta)
{
- struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
- struct iwl3945_rs_sta *rs_sta = &psta->rs_sta;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
+ struct iwl3945_rs_sta *rs_sta = priv_sta;
- IWL_DEBUG_RATE(priv, "enter\n");
+ /*
+ * Be careful not to use any members of iwl3945_rs_sta (like trying
+ * to use iwl_priv to print out debugging) since it may not be fully
+ * initialized at this point.
+ */
del_timer_sync(&rs_sta->rate_scale_flush);
- IWL_DEBUG_RATE(priv, "leave\n");
}
@@ -459,6 +472,13 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
return;
}
+ /* Treat uninitialized rate scaling data the same as non-existent. */
+ if (!rs_sta->priv) {
+ IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
+ return;
+ }
+
+
rs_sta->tx_packets++;
scale_rate_index = first_index;
@@ -525,8 +545,6 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
spin_unlock_irqrestore(&rs_sta->lock, flags);
IWL_DEBUG_RATE(priv, "leave\n");
-
- return;
}
static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
@@ -626,14 +644,19 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
u32 fail_count;
s8 scale_action = 0;
unsigned long flags;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0;
s8 max_rate_idx = -1;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+ struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
IWL_DEBUG_RATE(priv, "enter\n");
+ /* Treat uninitialized rate scaling data the same as non-existent. */
+ if (rs_sta && !rs_sta->priv) {
+ IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
+ priv_sta = NULL;
+ }
+
if (rate_control_send_low(sta, priv_sta, txrc))
return;
@@ -651,20 +674,6 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
if (sband->band == IEEE80211_BAND_5GHZ)
rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
- !rs_sta->ibss_sta_added) {
- u8 sta_id = iwl_find_station(priv, hdr->addr1);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
- hdr->addr1);
- sta_id = iwl_add_station(priv, hdr->addr1, false,
- CMD_ASYNC, NULL);
- }
- if (sta_id != IWL_INVALID_STATION)
- rs_sta->ibss_sta_added = 1;
- }
-
spin_lock_irqsave(&rs_sta->lock, flags);
/* for recent assoc, choose best rate regarding
@@ -884,12 +893,22 @@ static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
}
#endif
+/*
+ * Initialization of rate scaling information is done by the driver
+ * after the station is added. Since mac80211 calls this function
+ * before a station is added, we ignore it.
+ */
+static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta)
+{
+}
+
static struct rate_control_ops rs_ops = {
.module = NULL,
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,
- .rate_init = rs_rate_init,
+ .rate_init = rs_rate_init_stub,
.alloc = rs_alloc,
.free = rs_free,
.alloc_sta = rs_alloc_sta,
@@ -900,7 +919,6 @@ static struct rate_control_ops rs_ops = {
#endif
};
-
void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
{
struct iwl_priv *priv = hw->priv;
@@ -917,6 +935,7 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
sta = ieee80211_find_sta(priv->vif,
priv->stations[sta_id].sta.sta.addr);
if (!sta) {
+ IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
rcu_read_unlock();
return;
}
@@ -947,7 +966,7 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
spin_unlock_irqrestore(&rs_sta->lock, flags);
- rssi = priv->last_rx_rssi;
+ rssi = priv->_3945.last_rx_rssi;
if (rssi == 0)
rssi = IWL_MIN_RSSI_VAL;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 0728054..068f7f8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -50,6 +50,7 @@
#include "iwl-helpers.h"
#include "iwl-led.h"
#include "iwl-3945-led.h"
+#include "iwl-3945-debugfs.h"
#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -192,12 +193,12 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
}
#ifdef CONFIG_IWLWIFI_DEBUG
-#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
static const char *iwl3945_get_tx_fail_reason(u32 status)
{
switch (status & TX_STATUS_MSK) {
- case TX_STATUS_SUCCESS:
+ case TX_3945_STATUS_SUCCESS:
return "SUCCESS";
TX_STATUS_ENTRY(SHORT_LIMIT);
TX_STATUS_ENTRY(LONG_LIMIT);
@@ -243,7 +244,7 @@ int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
next_rate = IWL_RATE_6M_INDEX;
break;
case IEEE80211_BAND_2GHZ:
- if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) &&
+ if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
iwl_is_associated(priv)) {
if (rate == IWL_RATE_11M_INDEX)
next_rate = IWL_RATE_5M_INDEX;
@@ -293,7 +294,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
* iwl3945_rx_reply_tx - Handle Tx response
*/
static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+ struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -351,18 +352,143 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
* RX handler implementations
*
*****************************************************************************/
+#ifdef CONFIG_IWLWIFI_DEBUG
+/*
+ * Based on the assumption that all statistics counters are 32-bit (DWORD).
+ * FIXME: This function is for debugging only; it does not handle
+ * counter roll-over.
+ */
+static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
+ __le32 *stats)
+{
+ int i;
+ __le32 *prev_stats;
+ u32 *accum_stats;
+ u32 *delta, *max_delta;
+
+ prev_stats = (__le32 *)&priv->_3945.statistics;
+ accum_stats = (u32 *)&priv->_3945.accum_statistics;
+ delta = (u32 *)&priv->_3945.delta_statistics;
+ max_delta = (u32 *)&priv->_3945.max_delta;
+
+ for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
+ i += sizeof(__le32), stats++, prev_stats++, delta++,
+ max_delta++, accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta = (le32_to_cpu(*stats) -
+ le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
+
+ /* reset accumulative statistics for "no-counter" type statistics */
+ priv->_3945.accum_statistics.general.temperature =
+ priv->_3945.statistics.general.temperature;
+ priv->_3945.accum_statistics.general.ttl_timestamp =
+ priv->_3945.statistics.general.ttl_timestamp;
+}
+#endif
+
+/**
+ * iwl3945_good_plcp_health - checks the plcp error rate
+ *
+ * When the plcp error rate exceeds the threshold, reset the radio
+ * to improve throughput.
+ */
+static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
+{
+ bool rc = true;
+ struct iwl3945_notif_statistics current_stat;
+ int combined_plcp_delta;
+ unsigned int plcp_msec;
+ unsigned long plcp_received_jiffies;
+
+ memcpy(&current_stat, pkt->u.raw, sizeof(struct
+ iwl3945_notif_statistics));
+ /*
+ * check for plcp_err and trigger radio reset if it exceeds
+ * the plcp error threshold plcp_delta.
+ */
+ plcp_received_jiffies = jiffies;
+ plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
+ (long) priv->plcp_jiffies);
+ priv->plcp_jiffies = plcp_received_jiffies;
+ /*
+ * check to make sure plcp_msec is not 0 to prevent division
+ * by zero.
+ */
+ if (plcp_msec) {
+ combined_plcp_delta =
+ (le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
+ le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
+
+ if ((combined_plcp_delta > 0) &&
+ ((combined_plcp_delta * 100) / plcp_msec) >
+ priv->cfg->plcp_delta_threshold) {
+ /*
+ * If plcp_err exceeds the threshold, the following
+ * data is printed in CSV format:
+ * Text: plcp_err exceeded %d,
+ * Received ofdm.plcp_err,
+ * Current ofdm.plcp_err,
+ * combined_plcp_delta,
+ * plcp_msec
+ */
+ IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
+ "%u, %d, %u mSecs\n",
+ priv->cfg->plcp_delta_threshold,
+ le32_to_cpu(current_stat.rx.ofdm.plcp_err),
+ combined_plcp_delta, plcp_msec);
+ /*
+ * Reset the RF radio due to the high plcp
+ * error rate
+ */
+ rc = false;
+ }
+ }
+ return rc;
+}
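The health check above normalizes the number of new PLCP errors to a per-100-ms rate before comparing it against cfg->plcp_delta_threshold: for example, 30 new OFDM PLCP errors seen 150 ms after the previous statistics notification give (30 * 100) / 150 = 20. A stand-alone sketch of just that comparison (illustrative only, not part of the driver) is:

/* Returns true while the PLCP error rate, expressed as errors per
 * 100 ms, stays at or below the configured threshold. */
static bool plcp_rate_ok(int delta_err, unsigned int msec, u32 threshold)
{
	if (!msec || delta_err <= 0)
		return true;
	return ((u32)delta_err * 100) / msec <= threshold;
}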
void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
(int)sizeof(struct iwl3945_notif_statistics),
le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+#ifdef CONFIG_IWLWIFI_DEBUG
+ iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
+#endif
+ iwl_recover_from_statistics(priv, pkt);
+
+ memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
+}
+
+void iwl3945_reply_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ __le32 *flag = (__le32 *)&pkt->u.raw;
- memcpy(&priv->statistics_39, pkt->u.raw, sizeof(priv->statistics_39));
+ if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
+#ifdef CONFIG_IWLWIFI_DEBUG
+ memset(&priv->_3945.accum_statistics, 0,
+ sizeof(struct iwl3945_notif_statistics));
+ memset(&priv->_3945.delta_statistics, 0,
+ sizeof(struct iwl3945_notif_statistics));
+ memset(&priv->_3945.max_delta, 0,
+ sizeof(struct iwl3945_notif_statistics));
+#endif
+ IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
+ }
+ iwl3945_hw_rx_statistics(priv, rxb);
}
+
/******************************************************************************
*
* Misc. internal state and helper functions
@@ -487,7 +613,7 @@ static void _iwl3945_dbg_report_frame(struct iwl_priv *priv,
* but you can hack it to show more, if you'd like to. */
if (dataframe)
IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
- "len=%u, rssi=%d, chnl=%d, rate=%d, \n",
+ "len=%u, rssi=%d, chnl=%d, rate=%d,\n",
title, le16_to_cpu(fc), header->addr1[5],
length, rssi, channel, rate);
else {
@@ -549,7 +675,6 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
u16 len = le16_to_cpu(rx_hdr->len);
struct sk_buff *skb;
- int ret;
__le16 fc = hdr->frame_control;
/* We received data from the HW, so stop the watchdog */
@@ -566,9 +691,9 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
return;
}
- skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
+ skb = dev_alloc_skb(128);
if (!skb) {
- IWL_ERR(priv, "alloc_skb failed\n");
+ IWL_ERR(priv, "dev_alloc_skb failed\n");
return;
}
@@ -577,37 +702,13 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
(struct ieee80211_hdr *)rxb_addr(rxb),
le32_to_cpu(rx_end->status), stats);
- skb_reserve(skb, IWL_LINK_HDR_MAX);
skb_add_rx_frag(skb, 0, rxb->page,
(void *)rx_hdr->payload - (void *)pkt, len);
- /* mac80211 currently doesn't support paged SKB. Convert it to
- * linear SKB for management frame and data frame requires
- * software decryption or software defragementation. */
- if (ieee80211_is_mgmt(fc) ||
- ieee80211_has_protected(fc) ||
- ieee80211_has_morefrags(fc) ||
- le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
- ret = skb_linearize(skb);
- else
- ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
- 0 : -ENOMEM;
-
- if (ret) {
- kfree_skb(skb);
- goto out;
- }
-
- /*
- * XXX: We cannot touch the page and its virtual memory (pkt) after
- * here. It might have already been freed by the above skb change.
- */
-
iwl_update_stats(priv, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(priv->hw, skb);
- out:
priv->alloc_rxb_page--;
rxb->page = NULL;
}
@@ -623,9 +724,8 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
- int snr;
- u16 rx_stats_sig_avg = le16_to_cpu(rx_stats->sig_avg);
- u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff);
+ u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
+ u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
u8 network_packet;
rx_status.flag = 0;
@@ -663,53 +763,29 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
/* Convert 3945's rssi indicator to dBm */
rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
- /* Set default noise value to -127 */
- if (priv->last_rx_noise == 0)
- priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-
- /* 3945 provides noise info for OFDM frames only.
- * sig_avg and noise_diff are measured by the 3945's digital signal
- * processor (DSP), and indicate linear levels of signal level and
- * distortion/noise within the packet preamble after
- * automatic gain control (AGC). sig_avg should stay fairly
- * constant if the radio's AGC is working well.
- * Since these values are linear (not dB or dBm), linear
- * signal-to-noise ratio (SNR) is (sig_avg / noise_diff).
- * Convert linear SNR to dB SNR, then subtract that from rssi dBm
- * to obtain noise level in dBm.
- * Calculate rx_status.signal (quality indicator in %) based on SNR. */
- if (rx_stats_noise_diff) {
- snr = rx_stats_sig_avg / rx_stats_noise_diff;
- rx_status.noise = rx_status.signal -
- iwl3945_calc_db_from_ratio(snr);
- } else {
- rx_status.noise = priv->last_rx_noise;
- }
-
-
- IWL_DEBUG_STATS(priv, "Rssi %d noise %d sig_avg %d noise_diff %d\n",
- rx_status.signal, rx_status.noise,
- rx_stats_sig_avg, rx_stats_noise_diff);
+ IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
+ rx_status.signal, rx_stats_sig_avg,
+ rx_stats_noise_diff);
header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
network_packet = iwl3945_is_network_packet(priv, header);
- IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n",
+ IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
network_packet ? '*' : ' ',
le16_to_cpu(rx_hdr->channel),
rx_status.signal, rx_status.signal,
- rx_status.noise, rx_status.rate_idx);
+ rx_status.rate_idx);
/* Set "1" to report good data frames in groups of 100 */
iwl3945_dbg_report_frame(priv, pkt, header, 1);
iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header);
if (network_packet) {
- priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp);
- priv->last_tsf = le64_to_cpu(rx_end->timestamp);
- priv->last_rx_rssi = rx_status.signal;
- priv->last_rx_noise = rx_status.noise;
+ priv->_3945.last_beacon_time =
+ le32_to_cpu(rx_end->beacon_timestamp);
+ priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
+ priv->_3945.last_rx_rssi = rx_status.signal;
}
iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
@@ -871,7 +947,8 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
}
-u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
+static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id,
+ u16 tx_rate, u8 flags)
{
unsigned long flags_spin;
struct iwl_station_entry *station;
@@ -957,7 +1034,7 @@ static int iwl3945_tx_reset(struct iwl_priv *priv)
iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
iwl_write_direct32(priv, FH39_TSSR_CBB_BASE,
- priv->shared_phys);
+ priv->_3945.shared_phys);
iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
@@ -1049,7 +1126,7 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
- IWL_DEBUG_INFO(priv, "RTP type \n");
+ IWL_DEBUG_INFO(priv, "RTP type\n");
else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
@@ -1607,7 +1684,7 @@ static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
int power;
/* Get this chnlgrp's rate-to-max/clip-powers table */
- clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers;
+ clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
/* Get this channel's rate-to-current-power settings table */
power_info = ch_info->power_info;
@@ -1701,6 +1778,11 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
int ref_temp;
int temperature = priv->temperature;
+ if (priv->disable_tx_power_cal ||
+ test_bit(STATUS_SCANNING, &priv->status)) {
+ /* do not perform tx power calibration */
+ return 0;
+ }
/* set up new Tx power info for each and every channel, 2.4 and 5.x */
for (i = 0; i < priv->channel_count; i++) {
ch_info = &priv->channel_info[i];
@@ -1733,7 +1815,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
}
/* Get this chnlgrp's rate-to-max/clip-powers table */
- clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers;
+ clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
/* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
for (scan_tbl_index = 0;
@@ -1911,6 +1993,8 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
"configuration (%d).\n", rc);
return rc;
}
+ iwl_clear_ucode_stations(priv);
+ iwl_restore_stations(priv);
}
IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -1941,7 +2025,10 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
- iwl_clear_stations_table(priv);
+ if (!new_assoc) {
+ iwl_clear_ucode_stations(priv);
+ iwl_restore_stations(priv);
+ }
/* If we issue a new RXON command which required a tune then we must
* send a new TXPOWER command or we won't be able to Tx any frames */
@@ -1951,19 +2038,6 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
return rc;
}
- /* Add the broadcast address so we can send broadcast frames */
- priv->cfg->ops->lib->add_bcast_station(priv);
-
- /* If we have set the ASSOC_MSK and we are in BSS mode then
- * add the IWL_AP_ID to the station rate table */
- if (iwl_is_associated(priv) &&
- (priv->iw_mode == NL80211_IFTYPE_STATION))
- if (iwl_add_station(priv, priv->active_rxon.bssid_addr,
- true, CMD_SYNC, NULL) == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Error adding AP address for transmit\n");
- return -EIO;
- }
-
/* Init the hardware's rate fallback order based on the band */
rc = iwl3945_init_hw_rate_table(priv);
if (rc) {
@@ -1998,13 +2072,13 @@ void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
reschedule:
queue_delayed_work(priv->workqueue,
- &priv->thermal_periodic, REG_RECALIB_PERIOD * HZ);
+ &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
}
static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
{
struct iwl_priv *priv = container_of(work, struct iwl_priv,
- thermal_periodic.work);
+ _3945.thermal_periodic.work);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@@ -2140,7 +2214,7 @@ static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
* power peaks, without too much distortion (clipping).
*/
/* we'll fill in this array with h/w max power levels */
- clip_pwrs = (s8 *) priv->clip39_groups[i].clip_powers;
+ clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
/* divide factory saturation power by 2 to find -3dB level */
satur_pwr = (s8) (group->saturation_power >> 1);
@@ -2224,7 +2298,7 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
/* Get this chnlgrp's rate->max/clip-powers table */
- clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers;
+ clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
/* calculate power index *adjustment* value according to
* diff between current temperature and factory temperature */
@@ -2332,7 +2406,7 @@ int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
int txq_id = txq->q.id;
- struct iwl3945_shared *shared_data = priv->shared_virt;
+ struct iwl3945_shared *shared_data = priv->_3945.shared_virt;
shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
@@ -2385,6 +2459,30 @@ static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
return (u16)sizeof(struct iwl3945_addsta_cmd);
}
+static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add)
+{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ int ret;
+
+ if (add) {
+ ret = iwl_add_bssid_station(priv, vif->bss_conf.bssid, false,
+ &vif_priv->ibss_bssid_sta_id);
+ if (ret)
+ return ret;
+
+ iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
+ (priv->band == IEEE80211_BAND_5GHZ) ?
+ IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
+ CMD_ASYNC);
+ iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
+
+ return 0;
+ }
+
+ return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
+ vif->bss_conf.bssid);
+}
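manage_ibss_station replaces the ad-hoc ibss_sta_added bookkeeping removed from the rate scaler earlier in this patch; the shared iwlcore code invokes the op when an IBSS interface joins or leaves a BSS. A sketch of such a call site (hypothetical — the real caller lives in the common code, not in this hunk) might look like:

/* Hypothetical helper showing how the op could be invoked. */
static int iwl_update_ibss_station(struct iwl_priv *priv,
				   struct ieee80211_vif *vif, bool joined)
{
	if (!priv->cfg->ops->lib->manage_ibss_station)
		return 0;
	return priv->cfg->ops->lib->manage_ibss_station(priv, vif, joined);
}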
/**
* iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
@@ -2432,7 +2530,7 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
/* If an OFDM rate is used, have it fall back to the
* 1M CCK rates */
- if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) &&
+ if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
iwl_is_associated(priv)) {
index = IWL_FIRST_CCK_RATE;
@@ -2471,12 +2569,12 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
memset((void *)&priv->hw_params, 0,
sizeof(struct iwl_hw_params));
- priv->shared_virt = dma_alloc_coherent(&priv->pci_dev->dev,
- sizeof(struct iwl3945_shared),
- &priv->shared_phys, GFP_KERNEL);
- if (!priv->shared_virt) {
+ priv->_3945.shared_virt =
+ dma_alloc_coherent(&priv->pci_dev->dev,
+ sizeof(struct iwl3945_shared),
+ &priv->_3945.shared_phys, GFP_KERNEL);
+ if (!priv->_3945.shared_virt) {
IWL_ERR(priv, "failed to allocate pci memory\n");
- mutex_unlock(&priv->mutex);
return -ENOMEM;
}
@@ -2537,13 +2635,13 @@ void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
{
- INIT_DELAYED_WORK(&priv->thermal_periodic,
+ INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
iwl3945_bg_reg_txpower_periodic);
}
void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
{
- cancel_delayed_work(&priv->thermal_periodic);
+ cancel_delayed_work(&priv->_3945.thermal_periodic);
}
/* check contents of special bootstrap uCode SRAM */
@@ -2714,48 +2812,10 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
return 0;
}
-#define IWL3945_UCODE_GET(item) \
-static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode,\
- u32 api_ver) \
-{ \
- return le32_to_cpu(ucode->u.v1.item); \
-}
-
-static u32 iwl3945_ucode_get_header_size(u32 api_ver)
-{
- return UCODE_HEADER_SIZE(1);
-}
-static u32 iwl3945_ucode_get_build(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- return 0;
-}
-static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- return (u8 *) ucode->u.v1.data;
-}
-
-IWL3945_UCODE_GET(inst_size);
-IWL3945_UCODE_GET(data_size);
-IWL3945_UCODE_GET(init_size);
-IWL3945_UCODE_GET(init_data_size);
-IWL3945_UCODE_GET(boot_size);
-
static struct iwl_hcmd_ops iwl3945_hcmd = {
.rxon_assoc = iwl3945_send_rxon_assoc,
.commit_rxon = iwl3945_commit_rxon,
-};
-
-static struct iwl_ucode_ops iwl3945_ucode = {
- .get_header_size = iwl3945_ucode_get_header_size,
- .get_build = iwl3945_ucode_get_build,
- .get_inst_size = iwl3945_ucode_get_inst_size,
- .get_data_size = iwl3945_ucode_get_data_size,
- .get_init_size = iwl3945_ucode_get_init_size,
- .get_init_data_size = iwl3945_ucode_get_init_data_size,
- .get_boot_size = iwl3945_ucode_get_boot_size,
- .get_data = iwl3945_ucode_get_data,
+ .send_bt_config = iwl_send_bt_config,
};
static struct iwl_lib_ops iwl3945_lib = {
@@ -2791,17 +2851,24 @@ static struct iwl_lib_ops iwl3945_lib = {
.post_associate = iwl3945_post_associate,
.isr = iwl_isr_legacy,
.config_ap = iwl3945_config_ap,
- .add_bcast_station = iwl3945_add_bcast_station,
+ .manage_ibss_station = iwl3945_manage_ibss_station,
+ .check_plcp_health = iwl3945_good_plcp_health,
+
+ .debugfs_ops = {
+ .rx_stats_read = iwl3945_ucode_rx_stats_read,
+ .tx_stats_read = iwl3945_ucode_tx_stats_read,
+ .general_stats_read = iwl3945_ucode_general_stats_read,
+ },
};
static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
.get_hcmd_size = iwl3945_get_hcmd_size,
.build_addsta_hcmd = iwl3945_build_addsta_hcmd,
.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
+ .request_scan = iwl3945_request_scan,
};
static const struct iwl_ops iwl3945_ops = {
- .ucode = &iwl3945_ucode,
.lib = &iwl3945_lib,
.hcmd = &iwl3945_hcmd,
.utils = &iwl3945_hcmd_utils,
@@ -2826,7 +2893,10 @@ static struct iwl_cfg iwl3945_bg_cfg = {
.ht_greenfield_support = false,
.led_compensation = 64,
.broken_powersave = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .tx_power_by_driver = true,
};
static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2844,7 +2914,10 @@ static struct iwl_cfg iwl3945_abg_cfg = {
.ht_greenfield_support = false,
.led_compensation = 64,
.broken_powersave = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .tx_power_by_driver = true,
};
DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 452dfd5..bb2aeeb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -95,7 +95,6 @@ struct iwl3945_rs_sta {
u8 tgg;
u8 flush_pending;
u8 start_rate;
- u8 ibss_sta_added;
struct timer_list rate_scale_flush;
struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -107,7 +106,12 @@ struct iwl3945_rs_sta {
};
+/*
+ * The common struct MUST be first because it is shared between
+ * 3945 and agn!
+ */
struct iwl3945_sta_priv {
+ struct iwl_station_priv_common common;
struct iwl3945_rs_sta rs_sta;
};
@@ -212,13 +216,6 @@ extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
char **buf, bool display);
extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
-/*
- * Currently used by iwl-3945-rs... look at restructuring so that it doesn't
- * call this... todo... fix that.
-*/
-extern u8 iwl3945_sync_station(struct iwl_priv *priv, int sta_id,
- u16 tx_rate, u8 flags);
-
/******************************************************************************
*
* Functions implemented in iwl-[34]*.c which are forward declared here
@@ -265,10 +262,14 @@ extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
+void iwl3945_reply_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
extern void iwl3945_disable_events(struct iwl_priv *priv);
extern int iwl4965_get_temperature(const struct iwl_priv *priv);
-extern void iwl3945_post_associate(struct iwl_priv *priv);
-extern void iwl3945_config_ap(struct iwl_priv *priv);
+extern void iwl3945_post_associate(struct iwl_priv *priv,
+ struct ieee80211_vif *vif);
+extern void iwl3945_config_ap(struct iwl_priv *priv,
+ struct ieee80211_vif *vif);
/**
* iwl3945_hw_find_station - Find station id for a given BSSID
@@ -287,14 +288,15 @@ extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
-extern u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id,
- u16 tx_rate, u8 flags);
extern const struct iwl_channel_info *iwl3945_get_channel_info(
const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
+/* scanning */
+void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
+
/* Requires full declaration of iwl_priv before including */
#include "iwl-io.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index 67ef562..cd4b61a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -81,26 +81,6 @@
*/
#define IWL49_FIRST_AMPDU_QUEUE 7
-/* Time constants */
-#define SHORT_SLOT_TIME 9
-#define LONG_SLOT_TIME 20
-
-/* RSSI to dBm */
-#define IWL49_RSSI_OFFSET 44
-
-
-/* PCI registers */
-#define PCI_CFG_RETRY_TIMEOUT 0x041
-
-/* PCI register values */
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
-
-#define IWL_NUM_SCAN_RATES (2)
-
-#define IWL_DEFAULT_TX_RETRY 15
-
-
/* Sizes and addresses for instruction and data memory (SRAM) in
* 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
@@ -393,10 +373,6 @@ static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
* location(s) in command (struct iwl4965_txpowertable_cmd).
*/
-/* Limit range of txpower output target to be between these values */
-#define IWL_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm = 1 milliwatt */
-#define IWL_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */
-
/**
* When MIMO is used (2 transmitters operating simultaneously), driver should
* limit each transmitter to deliver a max of 3 dB below the regulatory limit
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 8972166..d3afdda 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -46,6 +46,8 @@
#include "iwl-calib.h"
#include "iwl-sta.h"
#include "iwl-agn-led.h"
+#include "iwl-agn.h"
+#include "iwl-agn-debugfs.h"
static int iwl4965_send_tx_power(struct iwl_priv *priv);
static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -60,14 +62,6 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
-
-/* module parameters */
-static struct iwl_mod_params iwl4965_mod_params = {
- .amsdu_size_8K = 1,
- .restart_fw = 1,
- /* the rest are 0 by default */
-};
-
/* check contents of special bootstrap uCode SRAM */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
@@ -417,7 +411,7 @@ static void iwl4965_gain_computation(struct iwl_priv *priv,
sizeof(cmd), &cmd);
if (ret)
IWL_DEBUG_CALIB(priv, "fail sending cmd "
- "REPLY_PHY_CALIBRATION_CMD \n");
+ "REPLY_PHY_CALIBRATION_CMD\n");
/* TODO we might want recalculate
* rx_chain in rxon cmd */
@@ -502,14 +496,14 @@ static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
-static const u16 default_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_AC3,
- IWL_TX_FIFO_AC2,
- IWL_TX_FIFO_AC1,
- IWL_TX_FIFO_AC0,
+static const s8 default_queue_to_tx_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
IWL49_CMD_FIFO_NUM,
- IWL_TX_FIFO_HCCA_1,
- IWL_TX_FIFO_HCCA_2
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
};
static int iwl4965_alive_notify(struct iwl_priv *priv)
@@ -589,9 +583,15 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
/* reset to 0 to enable all the queue first */
priv->txq_ctx_active_msk = 0;
/* Map each Tx/cmd queue to its corresponding fifo */
+ BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
int ac = default_queue_to_tx_fifo[i];
+
iwl_txq_ctx_activate(priv, i);
+
+ if (ac == IWL_TX_FIFO_UNUSED)
+ continue;
+
iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
}
@@ -1613,19 +1613,19 @@ static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
/* get absolute value */
if (temp_diff < 0) {
- IWL_DEBUG_POWER(priv, "Getting cooler, delta %d, \n", temp_diff);
+ IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
temp_diff = -temp_diff;
} else if (temp_diff == 0)
- IWL_DEBUG_POWER(priv, "Same temp, \n");
+ IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
else
- IWL_DEBUG_POWER(priv, "Getting warmer, delta %d, \n", temp_diff);
+ IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
- IWL_DEBUG_POWER(priv, "Thermal txpower calib not needed\n");
+ IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
return 0;
}
- IWL_DEBUG_POWER(priv, "Thermal txpower calib needed\n");
+ IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
return 1;
}
@@ -1874,7 +1874,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
info->flags |= iwl_tx_status_to_mac80211(status);
- iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
+ iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
/* FIXME: code repetition end */
IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
@@ -1953,6 +1953,60 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
return 0;
}
+static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
+{
+ int i;
+ int start = 0;
+ int ret = IWL_INVALID_STATION;
+ unsigned long flags;
+
+ if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
+ (priv->iw_mode == NL80211_IFTYPE_AP))
+ start = IWL_STA_ID;
+
+ if (is_broadcast_ether_addr(addr))
+ return priv->hw_params.bcast_sta_id;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ for (i = start; i < priv->hw_params.max_stations; i++)
+ if (priv->stations[i].used &&
+ (!compare_ether_addr(priv->stations[i].sta.sta.addr,
+ addr))) {
+ ret = i;
+ goto out;
+ }
+
+ IWL_DEBUG_ASSOC_LIMIT(priv, "cannot find STA %pM, total %d\n",
+ addr, priv->num_stations);
+
+ out:
+ /*
+ * It may be possible that more commands interacting with stations
+ * arrive before we completed processing the adding of
+ * station
+ */
+ if (ret != IWL_INVALID_STATION &&
+ (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
+ ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
+ (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
+ IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
+ ret);
+ ret = IWL_INVALID_STATION;
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return ret;
+}
+
+static int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
+{
+ if (priv->iw_mode == NL80211_IFTYPE_STATION) {
+ return IWL_AP_ID;
+ } else {
+ u8 *da = ieee80211_get_DA(hdr);
+ return iwl_find_station(priv, da);
+ }
+}
+
/**
* iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
*/
@@ -2014,7 +2068,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
"%d index %d\n", scd_ssn , index);
- freed = iwl_tx_queue_reclaim(priv, txq_id, index);
+ freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
if (qc)
iwl_free_tfds_in_queue(priv, sta_id,
tid, freed);
@@ -2031,7 +2085,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
} else {
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags |= iwl_tx_status_to_mac80211(status);
- iwl_hwrate_to_tx_control(priv,
+ iwlagn_hwrate_to_tx_control(priv,
le32_to_cpu(tx_resp->rate_n_flags),
info);
@@ -2042,7 +2096,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
le32_to_cpu(tx_resp->rate_n_flags),
tx_resp->failure_frame);
- freed = iwl_tx_queue_reclaim(priv, txq_id, index);
+ freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
if (qc && likely(sta_id != IWL_INVALID_STATION))
iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
else if (sta_id == IWL_INVALID_STATION)
@@ -2053,10 +2107,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
iwl_wake_queue(priv, txq_id);
}
if (qc && likely(sta_id != IWL_INVALID_STATION))
- iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+ iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
- if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
- IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
+ iwl_check_abort_status(priv, tx_resp->frame_count, status);
}
static int iwl4965_calc_rssi(struct iwl_priv *priv,
@@ -2090,7 +2143,7 @@ static int iwl4965_calc_rssi(struct iwl_priv *priv,
/* dBm = max_rssi dB - agc dB - constant.
* Higher AGC (higher radio gain) means lower signal. */
- return max_rssi - agc - IWL49_RSSI_OFFSET;
+ return max_rssi - agc - IWLAGN_RSSI_OFFSET;
}
@@ -2098,7 +2151,7 @@ static int iwl4965_calc_rssi(struct iwl_priv *priv,
static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
{
/* Legacy Rx frames */
- priv->rx_handlers[REPLY_RX] = iwl_rx_reply_rx;
+ priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx;
/* Tx response */
priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
}
@@ -2113,50 +2166,13 @@ static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
cancel_work_sync(&priv->txpower_work);
}
-#define IWL4965_UCODE_GET(item) \
-static u32 iwl4965_ucode_get_##item(const struct iwl_ucode_header *ucode,\
- u32 api_ver) \
-{ \
- return le32_to_cpu(ucode->u.v1.item); \
-}
-
-static u32 iwl4965_ucode_get_header_size(u32 api_ver)
-{
- return UCODE_HEADER_SIZE(1);
-}
-static u32 iwl4965_ucode_get_build(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- return 0;
-}
-static u8 *iwl4965_ucode_get_data(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- return (u8 *) ucode->u.v1.data;
-}
-
-IWL4965_UCODE_GET(inst_size);
-IWL4965_UCODE_GET(data_size);
-IWL4965_UCODE_GET(init_size);
-IWL4965_UCODE_GET(init_data_size);
-IWL4965_UCODE_GET(boot_size);
-
static struct iwl_hcmd_ops iwl4965_hcmd = {
.rxon_assoc = iwl4965_send_rxon_assoc,
.commit_rxon = iwl_commit_rxon,
.set_rxon_chain = iwl_set_rxon_chain,
+ .send_bt_config = iwl_send_bt_config,
};
-static struct iwl_ucode_ops iwl4965_ucode = {
- .get_header_size = iwl4965_ucode_get_header_size,
- .get_build = iwl4965_ucode_get_build,
- .get_inst_size = iwl4965_ucode_get_inst_size,
- .get_data_size = iwl4965_ucode_get_data_size,
- .get_init_size = iwl4965_ucode_get_init_size,
- .get_init_data_size = iwl4965_ucode_get_init_data_size,
- .get_boot_size = iwl4965_ucode_get_boot_size,
- .get_data = iwl4965_ucode_get_data,
-};
static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
.get_hcmd_size = iwl4965_get_hcmd_size,
.build_addsta_hcmd = iwl4965_build_addsta_hcmd,
@@ -2164,6 +2180,7 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
.gain_computation = iwl4965_gain_computation,
.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
.calc_rssi = iwl4965_calc_rssi,
+ .request_scan = iwlagn_request_scan,
};
static struct iwl_lib_ops iwl4965_lib = {
@@ -2184,6 +2201,7 @@ static struct iwl_lib_ops iwl4965_lib = {
.load_ucode = iwl4965_load_bsm,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_fh = iwl_dump_fh,
.set_channel_switch = iwl4965_hw_channel_switch,
.apm_ops = {
.init = iwl_apm_init,
@@ -2216,11 +2234,16 @@ static struct iwl_lib_ops iwl4965_lib = {
.temperature = iwl4965_temperature_calib,
.set_ct_kill = iwl4965_set_ct_threshold,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .check_plcp_health = iwl_good_plcp_health,
};
static const struct iwl_ops iwl4965_ops = {
- .ucode = &iwl4965_ucode,
.lib = &iwl4965_lib,
.hcmd = &iwl4965_hcmd,
.utils = &iwl4965_hcmd_utils,
@@ -2228,7 +2251,7 @@ static const struct iwl_ops iwl4965_ops = {
};
struct iwl_cfg iwl4965_agn_cfg = {
- .name = "4965AGN",
+ .name = "Intel(R) Wireless WiFi Link 4965AGN",
.fw_name_pre = IWL4965_FW_PRE,
.ucode_api_max = IWL4965_UCODE_API_MAX,
.ucode_api_min = IWL4965_UCODE_API_MIN,
@@ -2239,7 +2262,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
.ops = &iwl4965_ops,
.num_of_queues = IWL49_NUM_QUEUES,
.num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
- .mod_params = &iwl4965_mod_params,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_AB,
.valid_rx_ant = ANT_ABC,
.pll_cfg_val = 0,
@@ -2251,27 +2274,20 @@ struct iwl_cfg iwl4965_agn_cfg = {
.led_compensation = 61,
.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .temperature_kelvin = true,
+ .max_event_log_size = 512,
+ .tx_power_by_driver = true,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ /*
+ * Force use of chains B and C for scan RX on 5 GHz band
+ * because the device has off-channel reception on chain A.
+ */
+ .scan_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
};
/* Module firmware */
MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
-module_param_named(antenna, iwl4965_mod_params.antenna, int, S_IRUGO);
-MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
-module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
-module_param_named(
- disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, S_IRUGO);
-MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
-
-module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
-MODULE_PARM_DESC(queues_num, "number of hw queues.");
-/* 11n */
-module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
-MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
-module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
- int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
-
-module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");
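With the 4965-specific parameters above removed, the config's .mod_params pointer now refers to the shared iwlagn_mod_params (see the earlier hunk in this file). A rough sketch of the module_param_named()/MODULE_PARM_DESC() pattern being consolidated is shown below; the struct and field names are illustrative assumptions, not the definitions this patch presumably adds elsewhere in the series:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* hypothetical shared parameter block, mirroring the removed 4965 set */
struct example_mod_params {
	int sw_crypto;		/* 1: use software crypto instead of hardware */
	int disable_11n;	/* 1: disable 802.11n support */
	int amsdu_size_8K;	/* 1: allow 8K A-MSDU frames */
	int restart_fw;		/* 1: restart firmware after an error */
};

static struct example_mod_params example_mod_params = {
	.restart_fw = 1,	/* remaining fields default to 0 */
};

module_param_named(swcrypto, example_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "use software crypto (default 0 [hardware])");

module_param_named(fw_restart, example_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");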
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 714e032..146e643 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -68,25 +68,6 @@
#ifndef __iwl_5000_hw_h__
#define __iwl_5000_hw_h__
-#define IWL50_RTC_INST_LOWER_BOUND (0x000000)
-#define IWL50_RTC_INST_UPPER_BOUND (0x020000)
-
-#define IWL50_RTC_DATA_LOWER_BOUND (0x800000)
-#define IWL50_RTC_DATA_UPPER_BOUND (0x80C000)
-
-#define IWL50_RTC_INST_SIZE (IWL50_RTC_INST_UPPER_BOUND - \
- IWL50_RTC_INST_LOWER_BOUND)
-#define IWL50_RTC_DATA_SIZE (IWL50_RTC_DATA_UPPER_BOUND - \
- IWL50_RTC_DATA_LOWER_BOUND)
-
-/* EEPROM */
-#define IWL_5000_EEPROM_IMG_SIZE 2048
-
-#define IWL50_CMD_FIFO_NUM 7
-#define IWL50_NUM_QUEUES 20
-#define IWL50_NUM_AMPDU_QUEUES 10
-#define IWL50_FIRST_AMPDU_QUEUE 10
-
/* 5150 only */
#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
@@ -103,19 +84,5 @@ static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
}
-/* Fixed (non-configurable) rx data from phy */
-
-/**
- * struct iwl5000_scd_bc_tbl - scheduler byte count table
- *	(the base physical address of iwl5000_shared is provided
- *	to SCD_DRAM_BASE_ADDR)
- * @tfd_offset: bits 0-12 - tx command byte count
- *		bits 12-16 - station index
- */
-struct iwl5000_scd_bc_tbl {
- __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
-} __attribute__ ((packed));
-
-
#endif /* __iwl_5000_hw_h__ */
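The byte-count table removed from this header (one __le16 slot per TFD, low bits carrying the frame byte count and upper bits the station index) reappears in the shared agn code as struct iwlagn_scd_bc_tbl. A hedged sketch of how a single entry is packed, mirroring the (len & 0xFFF) | (sta_id << 12) encoding in the iwl5000_txq_update_byte_cnt_tbl() code removed later in this patch:

#include <linux/types.h>
#include <asm/byteorder.h>

/* illustrative helper: low 12 bits = byte count, upper bits = station id */
static __le16 example_pack_bc_entry(u16 len, u8 sta_id)
{
	return cpu_to_le16((len & 0xFFF) | ((u16)sta_id << 12));
}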
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index e476acb..a28af7e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -19,6 +19,7 @@
* file called LICENSE.
*
* Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
@@ -43,9 +44,11 @@
#include "iwl-io.h"
#include "iwl-sta.h"
#include "iwl-helpers.h"
+#include "iwl-agn.h"
#include "iwl-agn-led.h"
+#include "iwl-agn-hw.h"
#include "iwl-5000-hw.h"
-#include "iwl-6000-hw.h"
+#include "iwl-agn-debugfs.h"
/* Highest firmware API version supported */
#define IWL5000_UCODE_API_MAX 2
@@ -63,18 +66,8 @@
#define _IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode"
#define IWL5150_MODULE_FIRMWARE(api) _IWL5150_MODULE_FIRMWARE(api)
-static const u16 iwl5000_default_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_AC3,
- IWL_TX_FIFO_AC2,
- IWL_TX_FIFO_AC1,
- IWL_TX_FIFO_AC0,
- IWL50_CMD_FIFO_NUM,
- IWL_TX_FIFO_HCCA_1,
- IWL_TX_FIFO_HCCA_2
-};
-
/* NIC configuration for 5000 series */
-void iwl5000_nic_config(struct iwl_priv *priv)
+static void iwl5000_nic_config(struct iwl_priv *priv)
{
unsigned long flags;
u16 radio_cfg;
@@ -107,162 +100,6 @@ void iwl5000_nic_config(struct iwl_priv *priv)
spin_unlock_irqrestore(&priv->lock, flags);
}
-
-/*
- * EEPROM
- */
-static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
-{
- u16 offset = 0;
-
- if ((address & INDIRECT_ADDRESS) == 0)
- return address;
-
- switch (address & INDIRECT_TYPE_MSK) {
- case INDIRECT_HOST:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST);
- break;
- case INDIRECT_GENERAL:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_GENERAL);
- break;
- case INDIRECT_REGULATORY:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY);
- break;
- case INDIRECT_CALIBRATION:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION);
- break;
- case INDIRECT_PROCESS_ADJST:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_PROCESS_ADJST);
- break;
- case INDIRECT_OTHERS:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS);
- break;
- default:
- IWL_ERR(priv, "illegal indirect type: 0x%X\n",
- address & INDIRECT_TYPE_MSK);
- break;
- }
-
- /* translate the offset from words to bytes */
- return (address & ADDRESS_MSK) + (offset << 1);
-}
-
-u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv)
-{
- struct iwl_eeprom_calib_hdr {
- u8 version;
- u8 pa_type;
- u16 voltage;
- } *hdr;
-
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
- EEPROM_5000_CALIB_ALL);
- return hdr->version;
-
-}
-
-static void iwl5000_gain_computation(struct iwl_priv *priv,
- u32 average_noise[NUM_RX_CHAINS],
- u16 min_average_noise_antenna_i,
- u32 min_average_noise,
- u8 default_chain)
-{
- int i;
- s32 delta_g;
- struct iwl_chain_noise_data *data = &priv->chain_noise_data;
-
- /*
- * Find Gain Code for the chains based on "default chain"
- */
- for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
- if ((data->disconn_array[i])) {
- data->delta_gain_code[i] = 0;
- continue;
- }
-
- delta_g = (priv->cfg->chain_noise_scale *
- ((s32)average_noise[default_chain] -
- (s32)average_noise[i])) / 1500;
-
- /* bound gain by 2 bits value max, 3rd bit is sign */
- data->delta_gain_code[i] =
- min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
-
- if (delta_g < 0)
- /*
- * set negative sign ...
- * note to Intel developers: This is uCode API format,
- * not the format of any internal device registers.
- * Do not change this format for e.g. 6050 or similar
- * devices. Change format only if more resolution
- * (i.e. more than 2 bits magnitude) is needed.
- */
- data->delta_gain_code[i] |= (1 << 2);
- }
-
- IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
- data->delta_gain_code[1], data->delta_gain_code[2]);
-
- if (!data->radio_write) {
- struct iwl_calib_chain_noise_gain_cmd cmd;
-
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
- cmd.hdr.first_group = 0;
- cmd.hdr.groups_num = 1;
- cmd.hdr.data_valid = 1;
- cmd.delta_gain_1 = data->delta_gain_code[1];
- cmd.delta_gain_2 = data->delta_gain_code[2];
- iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd, NULL);
-
- data->radio_write = 1;
- data->state = IWL_CHAIN_NOISE_CALIBRATED;
- }
-
- data->chain_noise_a = 0;
- data->chain_noise_b = 0;
- data->chain_noise_c = 0;
- data->chain_signal_a = 0;
- data->chain_signal_b = 0;
- data->chain_signal_c = 0;
- data->beacon_count = 0;
-}
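/*
 * A minimal sketch of the delta-gain encoding described in the removed
 * comment above: a 3-bit field whose low two bits carry the clamped
 * magnitude and whose bit 2 is the sign.  The maximum code value below
 * is an assumption standing in for CHAIN_NOISE_MAX_DELTA_GAIN_CODE.
 */
static u8 example_delta_gain_code(s32 delta_g)
{
	const s32 max_code = 3;	/* assumed CHAIN_NOISE_MAX_DELTA_GAIN_CODE */
	u8 code = min_t(s32, abs(delta_g), max_code);

	if (delta_g < 0)
		code |= 1 << 2;	/* negative delta: set the sign bit */

	return code;		/* e.g. delta_g == -2 gives 2 | (1 << 2) == 6 */
}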
-
-static void iwl5000_chain_noise_reset(struct iwl_priv *priv)
-{
- struct iwl_chain_noise_data *data = &priv->chain_noise_data;
- int ret;
-
- if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
- struct iwl_calib_chain_noise_reset_cmd cmd;
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
- cmd.hdr.first_group = 0;
- cmd.hdr.groups_num = 1;
- cmd.hdr.data_valid = 1;
- ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd);
- if (ret)
- IWL_ERR(priv,
- "Could not send REPLY_PHY_CALIBRATION_CMD\n");
- data->state = IWL_CHAIN_NOISE_ACCUMULATE;
- IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
- }
-}
-
-void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
- __le32 *tx_flags)
-{
- if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
- (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
- *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
- else
- *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
-}
-
static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
.min_nrg_cck = 95,
.max_nrg_cck = 0, /* not used, set to 0 */
@@ -314,14 +151,6 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
.nrg_th_cca = 62,
};
-const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
- size_t offset)
-{
- u32 address = eeprom_indirect_address(priv, offset);
- BUG_ON(address >= priv->cfg->eeprom_size);
- return &priv->eeprom[address];
-}
-
static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
{
const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
@@ -337,356 +166,10 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
}
-/*
- * Calibration
- */
-static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
-{
- struct iwl_calib_xtal_freq_cmd cmd;
- __le16 *xtal_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
-
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
- cmd.hdr.first_group = 0;
- cmd.hdr.groups_num = 1;
- cmd.hdr.data_valid = 1;
- cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
- cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
- return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
- (u8 *)&cmd, sizeof(cmd));
-}
-
-static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
-{
- struct iwl_calib_cfg_cmd calib_cfg_cmd;
- struct iwl_host_cmd cmd = {
- .id = CALIBRATION_CFG_CMD,
- .len = sizeof(struct iwl_calib_cfg_cmd),
- .data = &calib_cfg_cmd,
- };
-
- memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
- calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
- calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
- calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
- calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
-
- return iwl_send_cmd(priv, &cmd);
-}
-
-static void iwl5000_rx_calib_result(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
- int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- int index;
-
- /* reduce the size of the length field itself */
- len -= 4;
-
- /* Define the order in which the results will be sent to the runtime
- * uCode. iwl_send_calib_results sends them in a row according to their
- * index. We sort them here */
- switch (hdr->op_code) {
- case IWL_PHY_CALIBRATE_DC_CMD:
- index = IWL_CALIB_DC;
- break;
- case IWL_PHY_CALIBRATE_LO_CMD:
- index = IWL_CALIB_LO;
- break;
- case IWL_PHY_CALIBRATE_TX_IQ_CMD:
- index = IWL_CALIB_TX_IQ;
- break;
- case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
- index = IWL_CALIB_TX_IQ_PERD;
- break;
- case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
- index = IWL_CALIB_BASE_BAND;
- break;
- default:
- IWL_ERR(priv, "Unknown calibration notification %d\n",
- hdr->op_code);
- return;
- }
- iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
-}
-
-static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
- queue_work(priv->workqueue, &priv->restart);
-}
-
-/*
- * ucode
- */
-static int iwl5000_load_section(struct iwl_priv *priv, const char *name,
- struct fw_desc *image, u32 dst_addr)
-{
- dma_addr_t phy_addr = image->p_addr;
- u32 byte_cnt = image->len;
- int ret;
-
- priv->ucode_write_complete = 0;
-
- iwl_write_direct32(priv,
- FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
-
- iwl_write_direct32(priv,
- FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
-
- iwl_write_direct32(priv,
- FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
- phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
-
- iwl_write_direct32(priv,
- FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
- (iwl_get_dma_hi_addr(phy_addr)
- << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
-
- iwl_write_direct32(priv,
- FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
- 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
- 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
- FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
-
- iwl_write_direct32(priv,
- FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
-
- IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
- priv->ucode_write_complete, 5 * HZ);
- if (ret == -ERESTARTSYS) {
- IWL_ERR(priv, "Could not load the %s uCode section due "
- "to interrupt\n", name);
- return ret;
- }
- if (!ret) {
- IWL_ERR(priv, "Could not load the %s uCode section\n",
- name);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int iwl5000_load_given_ucode(struct iwl_priv *priv,
- struct fw_desc *inst_image,
- struct fw_desc *data_image)
-{
- int ret = 0;
-
- ret = iwl5000_load_section(priv, "INST", inst_image,
- IWL50_RTC_INST_LOWER_BOUND);
- if (ret)
- return ret;
-
- return iwl5000_load_section(priv, "DATA", data_image,
- IWL50_RTC_DATA_LOWER_BOUND);
-}
-
-int iwl5000_load_ucode(struct iwl_priv *priv)
-{
- int ret = 0;
-
- /* check whether init ucode should be loaded, or rather runtime ucode */
- if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
- IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
- ret = iwl5000_load_given_ucode(priv,
- &priv->ucode_init, &priv->ucode_init_data);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
- priv->ucode_type = UCODE_INIT;
- }
- } else {
- IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
- "Loading runtime ucode...\n");
- ret = iwl5000_load_given_ucode(priv,
- &priv->ucode_code, &priv->ucode_data);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
- priv->ucode_type = UCODE_RT;
- }
- }
-
- return ret;
-}
-
-void iwl5000_init_alive_start(struct iwl_priv *priv)
-{
- int ret = 0;
-
- /* Check alive response for "valid" sign from uCode */
- if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
- goto restart;
- }
-
- /* initialize uCode was loaded... verify inst image.
- * This is a paranoid check, because we would not have gotten the
- * "initialize" alive if code weren't properly loaded. */
- if (iwl_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
- goto restart;
- }
-
- iwl_clear_stations_table(priv);
- ret = priv->cfg->ops->lib->alive_notify(priv);
- if (ret) {
- IWL_WARN(priv,
- "Could not complete ALIVE transition: %d\n", ret);
- goto restart;
- }
-
- iwl5000_send_calib_cfg(priv);
- return;
-
-restart:
- /* real restart (first load init_ucode) */
- queue_work(priv->workqueue, &priv->restart);
-}
-
-static void iwl5000_set_wr_ptrs(struct iwl_priv *priv,
- int txq_id, u32 index)
-{
- iwl_write_direct32(priv, HBUS_TARG_WRPTR,
- (index & 0xff) | (txq_id << 8));
- iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-static void iwl5000_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry)
-{
- int txq_id = txq->q.id;
- int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
-
- iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
- (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
- (tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) |
- (1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) |
- IWL50_SCD_QUEUE_STTS_REG_MSK);
-
- txq->sched_retry = scd_retry;
-
- IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
- active ? "Activate" : "Deactivate",
- scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
-}
-
-int iwl5000_alive_notify(struct iwl_priv *priv)
-{
- u32 a;
- unsigned long flags;
- int i, chan;
- u32 reg_val;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
- a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
- for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
- a += 4)
- iwl_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
- a += 4)
- iwl_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr +
- IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
- iwl_write_targ_mem(priv, a, 0);
-
- iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
- priv->scd_bc_tbls.dma >> 10);
-
- /* Enable DMA channel */
- for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
- iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
- /* Update FH chicken bits */
- reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
- iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
- reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
- iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
- IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
- iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);
-
- /* initiate the queues */
- for (i = 0; i < priv->hw_params.max_txq_num; i++) {
- iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0);
- iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) +
- sizeof(u32),
- ((SCD_WIN_SIZE <<
- IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
- ((SCD_FRAME_LIMIT <<
- IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
- }
-
- iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
- IWL_MASK(0, priv->hw_params.max_txq_num));
-
- /* Activate all Tx DMA/FIFO channels */
- priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
-
- iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
-
- /* make sure all queue are not stopped */
- memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
- for (i = 0; i < 4; i++)
- atomic_set(&priv->queue_stop_count[i], 0);
-
- /* reset to 0 to enable all the queue first */
- priv->txq_ctx_active_msk = 0;
- /* map qos queues to fifos one-to-one */
- for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
- int ac = iwl5000_default_queue_to_tx_fifo[i];
- iwl_txq_ctx_activate(priv, i);
- iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
- }
-
- /*
- * TODO - need to initialize these queues and map them to FIFOs
- * in the loop above, not only mark them as active. We do this
- * because we want the first aggregation queue to be queue #10,
- * but do not use 8 or 9 otherwise yet.
- */
- iwl_txq_ctx_activate(priv, 7);
- iwl_txq_ctx_activate(priv, 8);
- iwl_txq_ctx_activate(priv, 9);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
-
- iwl_send_wimax_coex(priv);
-
- iwl5000_set_Xtal_calib(priv);
- iwl_send_calib_results(priv);
-
- return 0;
-}
-
-int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
+static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
{
if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
- priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
priv->cfg->num_of_queues =
priv->cfg->mod_params->num_of_queues;
@@ -694,13 +177,13 @@ int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->num_of_queues *
- sizeof(struct iwl5000_scd_bc_tbl);
+ sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWL5000_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
- priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
+ priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
priv->hw_params.max_bsm_size = 0;
priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
@@ -717,571 +200,61 @@ int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
- case CSR_HW_REV_TYPE_5150:
- priv->hw_params.sens = &iwl5150_sensitivity;
- priv->hw_params.calib_init_cfg =
- BIT(IWL_CALIB_DC) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
-
- break;
- default:
- priv->hw_params.sens = &iwl5000_sensitivity;
- priv->hw_params.calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_TX_IQ_PERD) |
- BIT(IWL_CALIB_BASE_BAND);
- break;
- }
-
- return 0;
-}
-
-/**
- * iwl5000_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt)
-{
- struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
- int write_ptr = txq->q.write_ptr;
- int txq_id = txq->q.id;
- u8 sec_ctl = 0;
- u8 sta_id = 0;
- u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
- __le16 bc_ent;
-
- WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
- if (txq_id != IWL_CMD_QUEUE_NUM) {
- sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
- sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
-
- switch (sec_ctl & TX_CMD_SEC_MSK) {
- case TX_CMD_SEC_CCM:
- len += CCMP_MIC_LEN;
- break;
- case TX_CMD_SEC_TKIP:
- len += TKIP_ICV_LEN;
- break;
- case TX_CMD_SEC_WEP:
- len += WEP_IV_LEN + WEP_ICV_LEN;
- break;
- }
- }
-
- bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
-
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
- if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
-}
-
-void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
- int txq_id = txq->q.id;
- int read_ptr = txq->q.read_ptr;
- u8 sta_id = 0;
- __le16 bc_ent;
-
- WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
-
- if (txq_id != IWL_CMD_QUEUE_NUM)
- sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
-
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
- scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
-
- if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
-}
-
-static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
- u16 txq_id)
-{
- u32 tbl_dw_addr;
- u32 tbl_dw;
- u16 scd_q2ratid;
-
- scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
-
- tbl_dw_addr = priv->scd_base_addr +
- IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
-
- tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
-
- if (txq_id & 0x1)
- tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
- else
- tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
-
- iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
-
- return 0;
-}
-static void iwl5000_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
-{
- /* Simply stop the queue, but don't change any configuration;
- * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
- iwl_write_prph(priv,
- IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
- (0 << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
- (1 << IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
-int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
- int tx_fifo, int sta_id, int tid, u16 ssn_idx)
-{
- unsigned long flags;
- u16 ra_tid;
-
- if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
- <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL50_FIRST_AMPDU_QUEUE,
- IWL50_FIRST_AMPDU_QUEUE +
- priv->cfg->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- ra_tid = BUILD_RAxTID(sta_id, tid);
-
- /* Modify device's station table to Tx this TID */
- iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Stop this Tx queue before configuring it */
- iwl5000_tx_queue_stop_scheduler(priv, txq_id);
-
- /* Map receiver-address / traffic-ID to this queue */
- iwl5000_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
-
- /* Set this queue as a chain-building queue */
- iwl_set_bits_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, (1<<txq_id));
-
- /* enable aggregations for the queue */
- iwl_set_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1<<txq_id));
-
- /* Place first TFD at index corresponding to start sequence number.
- * Assumes that ssn_idx is valid (!= 0xFFF) */
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- /* Set up Tx window size and frame limit for this queue */
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWL50_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
- sizeof(u32),
- ((SCD_WIN_SIZE <<
- IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
- ((SCD_FRAME_LIMIT <<
- IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
-
- iwl_set_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
-
- /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
- iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
-
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw_params.sens = &iwl5000_sensitivity;
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_TX_IQ_PERD) |
+ BIT(IWL_CALIB_BASE_BAND);
return 0;
}
-int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo)
+static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
{
- if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
- <= txq_id)) {
- IWL_ERR(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL50_FIRST_AMPDU_QUEUE,
- IWL50_FIRST_AMPDU_QUEUE +
- priv->cfg->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- iwl5000_tx_queue_stop_scheduler(priv, txq_id);
-
- iwl_clear_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id));
-
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- /* assumes that ssn_idx is valid (!= 0xFFF) */
- iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- iwl_clear_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
- iwl_txq_ctx_deactivate(priv, txq_id);
- iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
-
- return 0;
-}
-
-u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
-{
- u16 size = (u16)sizeof(struct iwl_addsta_cmd);
- struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
- memcpy(addsta, cmd, size);
- /* reserved in 5000 */
- addsta->rate_n_flags = cpu_to_le16(0);
- return size;
-}
-
-
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- * must be called under priv->lock and mac access
- */
-void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask)
-{
- iwl_write_prph(priv, IWL50_SCD_TXFACT, mask);
-}
-
-
-static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
-{
- return le32_to_cpup((__le32 *)&tx_resp->status +
- tx_resp->frame_count) & MAX_SN;
-}
-
-static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwl5000_tx_resp *tx_resp,
- int txq_id, u16 start_idx)
-{
- u16 status;
- struct agg_tx_status *frame_status = &tx_resp->status;
- struct ieee80211_tx_info *info = NULL;
- struct ieee80211_hdr *hdr = NULL;
- u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
- int i, sh, idx;
- u16 seq;
-
- if (agg->wait_for_ba)
- IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
-
- agg->frame_count = tx_resp->frame_count;
- agg->start_idx = start_idx;
- agg->rate_n_flags = rate_n_flags;
- agg->bitmap = 0;
-
- /* # frames attempted by Tx command */
- if (agg->frame_count == 1) {
- /* Only one frame was attempted; no block-ack will arrive */
- status = le16_to_cpu(frame_status[0].status);
- idx = start_idx;
-
- /* FIXME: code repetition */
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
- agg->frame_count, agg->start_idx, idx);
-
- info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- info->flags |= iwl_tx_status_to_mac80211(status);
- iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
-
- /* FIXME: code repetition end */
-
- IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
- status & 0xff, tx_resp->failure_frame);
- IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
-
- agg->wait_for_ba = 0;
- } else {
- /* Two or more frames were attempted; expect block-ack */
- u64 bitmap = 0;
- int start = agg->start_idx;
-
- /* Construct bit-map of pending frames within Tx window */
- for (i = 0; i < agg->frame_count; i++) {
- u16 sc;
- status = le16_to_cpu(frame_status[i].status);
- seq = le16_to_cpu(frame_status[i].sequence);
- idx = SEQ_TO_INDEX(seq);
- txq_id = SEQ_TO_QUEUE(seq);
-
- if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
- AGG_TX_STATE_ABORT_MSK))
- continue;
+ if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
+ priv->cfg->num_of_queues =
+ priv->cfg->mod_params->num_of_queues;
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
- agg->frame_count, txq_id, idx);
+ priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
+ priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
+ priv->hw_params.scd_bc_tbls_size =
+ priv->cfg->num_of_queues *
+ sizeof(struct iwlagn_scd_bc_tbl);
+ priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+ priv->hw_params.max_stations = IWL5000_STATION_COUNT;
+ priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
- hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
- if (!hdr) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't point to valid skb"
- " idx=%d, txq_id=%d\n", idx, txq_id);
- return -1;
- }
+ priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
- sc = le16_to_cpu(hdr->seq_ctrl);
- if (idx != (SEQ_TO_SN(sc) & 0xff)) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't match seq control"
- " idx=%d, seq_idx=%d, seq=%d\n",
- idx, SEQ_TO_SN(sc),
- hdr->seq_ctrl);
- return -1;
- }
+ priv->hw_params.max_bsm_size = 0;
+ priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ BIT(IEEE80211_BAND_5GHZ);
+ priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
- IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
- i, idx, SEQ_TO_SN(sc));
+ priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+ priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
- sh = idx - start;
- if (sh > 64) {
- sh = (start - idx) + 0xff;
- bitmap = bitmap << sh;
- sh = 0;
- start = idx;
- } else if (sh < -64)
- sh = 0xff - (start - idx);
- else if (sh < 0) {
- sh = start - idx;
- start = idx;
- bitmap = bitmap << sh;
- sh = 0;
- }
- bitmap |= 1ULL << sh;
- IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
- start, (unsigned long long)bitmap);
- }
+ if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
+ priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
- agg->bitmap = bitmap;
- agg->start_idx = start;
- IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
- agg->frame_count, agg->start_idx,
- (unsigned long long)agg->bitmap);
+ /* Set initial sensitivity parameters */
+ /* Set initial calibration set */
+ priv->hw_params.sens = &iwl5150_sensitivity;
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_DC) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_BASE_BAND);
- if (bitmap)
- agg->wait_for_ba = 1;
- }
return 0;
}
-static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct ieee80211_tx_info *info;
- struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
- u32 status = le16_to_cpu(tx_resp->status.status);
- int tid;
- int sta_id;
- int freed;
-
- if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
- "is out of range [0-%d] %d %d\n", txq_id,
- index, txq->q.n_bd, txq->q.write_ptr,
- txq->q.read_ptr);
- return;
- }
-
- info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
- memset(&info->status, 0, sizeof(info->status));
-
- tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
- sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;
-
- if (txq->sched_retry) {
- const u32 scd_ssn = iwl5000_get_scd_ssn(tx_resp);
- struct iwl_ht_agg *agg = NULL;
-
- agg = &priv->stations[sta_id].tid[tid].agg;
-
- iwl5000_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
-
- /* check if BAR is needed */
- if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
- info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-
- if (txq->q.read_ptr != (scd_ssn & 0xff)) {
- index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
- IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
- "scd_ssn=%d idx=%d txq=%d swq=%d\n",
- scd_ssn , index, txq_id, txq->swq_id);
-
- freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if (priv->mac80211_registered &&
- (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
- if (agg->state == IWL_AGG_OFF)
- iwl_wake_queue(priv, txq_id);
- else
- iwl_wake_queue(priv, txq->swq_id);
- }
- }
- } else {
- BUG_ON(txq_id != txq->swq_id);
-
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags |= iwl_tx_status_to_mac80211(status);
- iwl_hwrate_to_tx_control(priv,
- le32_to_cpu(tx_resp->rate_n_flags),
- info);
-
- IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
- "0x%x retries %d\n",
- txq_id,
- iwl_get_tx_fail_reason(status), status,
- le32_to_cpu(tx_resp->rate_n_flags),
- tx_resp->failure_frame);
-
- freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if (priv->mac80211_registered &&
- (iwl_queue_space(&txq->q) > txq->q.low_mark))
- iwl_wake_queue(priv, txq_id);
- }
-
- iwl_txq_check_empty(priv, sta_id, tid, txq_id);
-
- if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
- IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
-}
-
-/* Currently 5000 is the superset of everything */
-u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len)
-{
- return len;
-}
-
-void iwl5000_setup_deferred_work(struct iwl_priv *priv)
-{
- /* in 5000 the tx power calibration is done in uCode */
- priv->disable_tx_power_cal = 1;
-}
-
-void iwl5000_rx_handler_setup(struct iwl_priv *priv)
-{
- /* init calibration handlers */
- priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
- iwl5000_rx_calib_result;
- priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
- iwl5000_rx_calib_complete;
- priv->rx_handlers[REPLY_TX] = iwl5000_rx_reply_tx;
-}
-
-
-int iwl5000_hw_valid_rtc_data_addr(u32 addr)
-{
- return (addr >= IWL50_RTC_DATA_LOWER_BOUND) &&
- (addr < IWL50_RTC_DATA_UPPER_BOUND);
-}
-
-static int iwl5000_send_rxon_assoc(struct iwl_priv *priv)
-{
- int ret = 0;
- struct iwl5000_rxon_assoc_cmd rxon_assoc;
- const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
- const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
-
- if ((rxon1->flags == rxon2->flags) &&
- (rxon1->filter_flags == rxon2->filter_flags) &&
- (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
- (rxon1->ofdm_ht_single_stream_basic_rates ==
- rxon2->ofdm_ht_single_stream_basic_rates) &&
- (rxon1->ofdm_ht_dual_stream_basic_rates ==
- rxon2->ofdm_ht_dual_stream_basic_rates) &&
- (rxon1->ofdm_ht_triple_stream_basic_rates ==
- rxon2->ofdm_ht_triple_stream_basic_rates) &&
- (rxon1->acquisition_data == rxon2->acquisition_data) &&
- (rxon1->rx_chain == rxon2->rx_chain) &&
- (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
- IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
- return 0;
- }
-
- rxon_assoc.flags = priv->staging_rxon.flags;
- rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
- rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
- rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
- rxon_assoc.reserved1 = 0;
- rxon_assoc.reserved2 = 0;
- rxon_assoc.reserved3 = 0;
- rxon_assoc.ofdm_ht_single_stream_basic_rates =
- priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
- rxon_assoc.ofdm_ht_dual_stream_basic_rates =
- priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
- rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
- rxon_assoc.ofdm_ht_triple_stream_basic_rates =
- priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
- rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
-
- ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
- sizeof(rxon_assoc), &rxon_assoc, NULL);
- if (ret)
- return ret;
-
- return ret;
-}
-int iwl5000_send_tx_power(struct iwl_priv *priv)
-{
- struct iwl5000_tx_power_dbm_cmd tx_power_cmd;
- u8 tx_ant_cfg_cmd;
-
- /* half dBm need to multiply */
- tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
-
- if (priv->tx_power_lmt_in_half_dbm &&
- priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
- /*
- * For newer devices that use the enhanced/extended tx power table
- * in EEPROM, the entries are in half-dBm units.  The driver must
- * convert to dBm before reporting to mac80211, which can lose
- * 1/2 dBm of resolution; since the driver rounds up before
- * reporting, the result could end up 1/2 dBm above the regulatory
- * limit.  Check for that here: if "tx_power_user_lmt" is higher
- * than the EEPROM value (in half-dBm format), lower the tx power
- * to the EEPROM value.
- */
- tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
- }
- tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
- tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;
-
- if (IWL_UCODE_API(priv->ucode_ver) == 1)
- tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
- else
- tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
-
- return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
- sizeof(tx_power_cmd), &tx_power_cmd,
- NULL);
-}
-
-void iwl5000_temperature(struct iwl_priv *priv)
-{
- /* store temperature from statistics (in Celsius) */
- priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
- iwl_tt_handler(priv);
-}
-
static void iwl5150_temperature(struct iwl_priv *priv)
{
u32 vt = 0;
@@ -1294,100 +267,6 @@ static void iwl5150_temperature(struct iwl_priv *priv)
iwl_tt_handler(priv);
}
-/* Calc max signal level (dBm) among 3 possible receivers */
-int iwl5000_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp)
-{
- /* data from PHY/DSP regarding signal strength, etc.,
- * contents are always there, not configurable by host
- */
- struct iwl5000_non_cfg_phy *ncphy =
- (struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
- u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
- u8 agc;
-
- val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
- agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;
-
- /* Find max rssi among 3 possible receivers.
- * These values are measured by the digital signal processor (DSP).
- * They should stay fairly constant even as the signal strength varies,
- * if the radio's automatic gain control (AGC) is working right.
- * AGC value (see below) will provide the "interesting" info.
- */
- val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
- rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
- rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
- val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
- rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;
-
- max_rssi = max_t(u32, rssi_a, rssi_b);
- max_rssi = max_t(u32, max_rssi, rssi_c);
-
- IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
- rssi_a, rssi_b, rssi_c, max_rssi, agc);
-
- /* dBm = max_rssi dB - agc dB - constant.
- * Higher AGC (higher radio gain) means lower signal. */
- return max_rssi - agc - IWL49_RSSI_OFFSET;
-}
-
-static int iwl5000_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
-{
- struct iwl_tx_ant_config_cmd tx_ant_cmd = {
- .valid = cpu_to_le32(valid_tx_ant),
- };
-
- if (IWL_UCODE_API(priv->ucode_ver) > 1) {
- IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
- return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD,
- sizeof(struct iwl_tx_ant_config_cmd),
- &tx_ant_cmd);
- } else {
- IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
- return -EOPNOTSUPP;
- }
-}
-
-
-#define IWL5000_UCODE_GET(item) \
-static u32 iwl5000_ucode_get_##item(const struct iwl_ucode_header *ucode,\
- u32 api_ver) \
-{ \
- if (api_ver <= 2) \
- return le32_to_cpu(ucode->u.v1.item); \
- return le32_to_cpu(ucode->u.v2.item); \
-}
-
-static u32 iwl5000_ucode_get_header_size(u32 api_ver)
-{
- if (api_ver <= 2)
- return UCODE_HEADER_SIZE(1);
- return UCODE_HEADER_SIZE(2);
-}
-
-static u32 iwl5000_ucode_get_build(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- if (api_ver <= 2)
- return 0;
- return le32_to_cpu(ucode->u.v2.build);
-}
-
-static u8 *iwl5000_ucode_get_data(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- if (api_ver <= 2)
- return (u8 *) ucode->u.v1.data;
- return (u8 *) ucode->u.v2.data;
-}
-
-IWL5000_UCODE_GET(inst_size);
-IWL5000_UCODE_GET(data_size);
-IWL5000_UCODE_GET(init_size);
-IWL5000_UCODE_GET(init_data_size);
-IWL5000_UCODE_GET(boot_size);
-
static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
{
struct iwl5000_channel_switch_cmd cmd;
@@ -1420,54 +299,27 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
return iwl_send_cmd_sync(priv, &hcmd);
}
-struct iwl_hcmd_ops iwl5000_hcmd = {
- .rxon_assoc = iwl5000_send_rxon_assoc,
- .commit_rxon = iwl_commit_rxon,
- .set_rxon_chain = iwl_set_rxon_chain,
- .set_tx_ant = iwl5000_send_tx_ant_config,
-};
-
-struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
- .get_hcmd_size = iwl5000_get_hcmd_size,
- .build_addsta_hcmd = iwl5000_build_addsta_hcmd,
- .gain_computation = iwl5000_gain_computation,
- .chain_noise_reset = iwl5000_chain_noise_reset,
- .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
- .calc_rssi = iwl5000_calc_rssi,
-};
-
-struct iwl_ucode_ops iwl5000_ucode = {
- .get_header_size = iwl5000_ucode_get_header_size,
- .get_build = iwl5000_ucode_get_build,
- .get_inst_size = iwl5000_ucode_get_inst_size,
- .get_data_size = iwl5000_ucode_get_data_size,
- .get_init_size = iwl5000_ucode_get_init_size,
- .get_init_data_size = iwl5000_ucode_get_init_data_size,
- .get_boot_size = iwl5000_ucode_get_boot_size,
- .get_data = iwl5000_ucode_get_data,
-};
-
-struct iwl_lib_ops iwl5000_lib = {
+static struct iwl_lib_ops iwl5000_lib = {
.set_hw_params = iwl5000_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
- .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
- .txq_set_sched = iwl5000_txq_set_sched,
- .txq_agg_enable = iwl5000_txq_agg_enable,
- .txq_agg_disable = iwl5000_txq_agg_disable,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
- .rx_handler_setup = iwl5000_rx_handler_setup,
- .setup_deferred_work = iwl5000_setup_deferred_work,
- .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
.dump_fh = iwl_dump_fh,
- .load_ucode = iwl5000_load_ucode,
- .init_alive_start = iwl5000_init_alive_start,
- .alive_notify = iwl5000_alive_notify,
- .send_tx_power = iwl5000_send_tx_power,
+ .load_ucode = iwlagn_load_ucode,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl5000_hw_channel_switch,
.apm_ops = {
@@ -1478,50 +330,58 @@ struct iwl_lib_ops iwl5000_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
- EEPROM_5000_REG_BAND_1_CHANNELS,
- EEPROM_5000_REG_BAND_2_CHANNELS,
- EEPROM_5000_REG_BAND_3_CHANNELS,
- EEPROM_5000_REG_BAND_4_CHANNELS,
- EEPROM_5000_REG_BAND_5_CHANNELS,
- EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl5000_eeprom_calib_version,
- .query_addr = iwl5000_eeprom_query_addr,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
.config_ap = iwl_config_ap,
.temp_ops = {
- .temperature = iwl5000_temperature,
+ .temperature = iwlagn_temperature,
.set_ct_kill = iwl5000_set_ct_threshold,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+ .check_plcp_health = iwl_good_plcp_health,
+ .check_ack_health = iwl_good_ack_health,
};
static struct iwl_lib_ops iwl5150_lib = {
- .set_hw_params = iwl5000_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
- .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
- .txq_set_sched = iwl5000_txq_set_sched,
- .txq_agg_enable = iwl5000_txq_agg_enable,
- .txq_agg_disable = iwl5000_txq_agg_disable,
+ .set_hw_params = iwl5150_hw_set_hw_params,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
- .rx_handler_setup = iwl5000_rx_handler_setup,
- .setup_deferred_work = iwl5000_setup_deferred_work,
- .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
- .load_ucode = iwl5000_load_ucode,
- .init_alive_start = iwl5000_init_alive_start,
- .alive_notify = iwl5000_alive_notify,
- .send_tx_power = iwl5000_send_tx_power,
+ .load_ucode = iwlagn_load_ucode,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl5000_hw_channel_switch,
.apm_ops = {
@@ -1532,19 +392,19 @@ static struct iwl_lib_ops iwl5150_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
- EEPROM_5000_REG_BAND_1_CHANNELS,
- EEPROM_5000_REG_BAND_2_CHANNELS,
- EEPROM_5000_REG_BAND_3_CHANNELS,
- EEPROM_5000_REG_BAND_4_CHANNELS,
- EEPROM_5000_REG_BAND_5_CHANNELS,
- EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl5000_eeprom_calib_version,
- .query_addr = iwl5000_eeprom_query_addr,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
@@ -1553,45 +413,44 @@ static struct iwl_lib_ops iwl5150_lib = {
.temperature = iwl5150_temperature,
.set_ct_kill = iwl5150_set_ct_threshold,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+ .check_plcp_health = iwl_good_plcp_health,
+ .check_ack_health = iwl_good_ack_health,
};
static const struct iwl_ops iwl5000_ops = {
- .ucode = &iwl5000_ucode,
.lib = &iwl5000_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl5000_hcmd_utils,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
static const struct iwl_ops iwl5150_ops = {
- .ucode = &iwl5000_ucode,
.lib = &iwl5150_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl5000_hcmd_utils,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
-struct iwl_mod_params iwl50_mod_params = {
- .amsdu_size_8K = 1,
- .restart_fw = 1,
- /* the rest are 0 by default */
-};
-
-
struct iwl_cfg iwl5300_agn_cfg = {
- .name = "5300AGN",
+ .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
.ops = &iwl5000_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_ABC,
.valid_rx_ant = ANT_ABC,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1603,21 +462,26 @@ struct iwl_cfg iwl5300_agn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5100_bgn_cfg = {
- .name = "5100BGN",
+ .name = "Intel(R) WiFi Link 5100 BGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.sku = IWL_SKU_G|IWL_SKU_N,
.ops = &iwl5000_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_B,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1629,21 +493,26 @@ struct iwl_cfg iwl5100_bgn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5100_abg_cfg = {
- .name = "5100ABG",
+ .name = "Intel(R) WiFi Link 5100 ABG",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G,
.ops = &iwl5000_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_B,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1653,21 +522,26 @@ struct iwl_cfg iwl5100_abg_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5100_agn_cfg = {
- .name = "5100AGN",
+ .name = "Intel(R) WiFi Link 5100 AGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
.ops = &iwl5000_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_B,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1679,21 +553,26 @@ struct iwl_cfg iwl5100_agn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5350_agn_cfg = {
- .name = "5350AGN",
+ .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
.ops = &iwl5000_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_ABC,
.valid_rx_ant = ANT_ABC,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1705,21 +584,26 @@ struct iwl_cfg iwl5350_agn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5150_agn_cfg = {
- .name = "5150AGN",
+ .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
.fw_name_pre = IWL5150_FW_PRE,
.ucode_api_max = IWL5150_UCODE_API_MAX,
.ucode_api_min = IWL5150_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
.ops = &iwl5150_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_A,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1731,21 +615,26 @@ struct iwl_cfg iwl5150_agn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5150_abg_cfg = {
- .name = "5150ABG",
+ .name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
.fw_name_pre = IWL5150_FW_PRE,
.ucode_api_max = IWL5150_UCODE_API_MAX,
.ucode_api_min = IWL5150_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G,
.ops = &iwl5150_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_A,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1755,20 +644,12 @@ struct iwl_cfg iwl5150_abg_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
-
-module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, S_IRUGO);
-MODULE_PARM_DESC(swcrypto50,
- "using software crypto engine (default 0 [hardware])\n");
-module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, S_IRUGO);
-MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series");
-module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, S_IRUGO);
-MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality");
-module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K,
- int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series");
-module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 92b3e64..9fbf54c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -42,18 +42,22 @@
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-sta.h"
+#include "iwl-agn.h"
#include "iwl-helpers.h"
-#include "iwl-5000-hw.h"
+#include "iwl-agn-hw.h"
#include "iwl-6000-hw.h"
#include "iwl-agn-led.h"
+#include "iwl-agn-debugfs.h"
/* Highest firmware API version supported */
#define IWL6000_UCODE_API_MAX 4
#define IWL6050_UCODE_API_MAX 4
+#define IWL6000G2_UCODE_API_MAX 4
/* Lowest firmware API version supported */
#define IWL6000_UCODE_API_MIN 4
#define IWL6050_UCODE_API_MIN 4
+#define IWL6000G2_UCODE_API_MIN 4
#define IWL6000_FW_PRE "iwlwifi-6000-"
#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
@@ -63,6 +67,11 @@
#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api)
+#define IWL6000G2A_FW_PRE "iwlwifi-6000g2a-"
+#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode"
+#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api)
+
+
static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
{
/* want Celsius */
@@ -136,7 +145,7 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
{
if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
- priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
priv->cfg->num_of_queues =
priv->cfg->mod_params->num_of_queues;
@@ -144,7 +153,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->num_of_queues *
- sizeof(struct iwl5000_scd_bc_tbl);
+ sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWL5000_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
@@ -168,24 +177,56 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
/* Set initial sensitivity parameters */
/* Set initial calibration set */
priv->hw_params.sens = &iwl6000_sensitivity;
- switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
- case CSR_HW_REV_TYPE_6x50:
- priv->hw_params.calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_DC) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
-
- break;
- default:
- priv->hw_params.calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
- break;
- }
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_BASE_BAND);
+
+ return 0;
+}
+
+static int iwl6050_hw_set_hw_params(struct iwl_priv *priv)
+{
+ if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
+ priv->cfg->num_of_queues =
+ priv->cfg->mod_params->num_of_queues;
+
+ priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
+ priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
+ priv->hw_params.scd_bc_tbls_size =
+ priv->cfg->num_of_queues *
+ sizeof(struct iwlagn_scd_bc_tbl);
+ priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+ priv->hw_params.max_stations = IWL5000_STATION_COUNT;
+ priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
+
+ priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
+
+ priv->hw_params.max_bsm_size = 0;
+ priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ BIT(IEEE80211_BAND_5GHZ);
+ priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
+
+ priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+ priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+
+ if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
+ priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
+
+ /* Set initial sensitivity parameters */
+ /* Set initial calibration set */
+ priv->hw_params.sens = &iwl6000_sensitivity;
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_DC) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_BASE_BAND);
return 0;
}
@@ -225,25 +266,25 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
static struct iwl_lib_ops iwl6000_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
- .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
- .txq_set_sched = iwl5000_txq_set_sched,
- .txq_agg_enable = iwl5000_txq_agg_enable,
- .txq_agg_disable = iwl5000_txq_agg_disable,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
- .rx_handler_setup = iwl5000_rx_handler_setup,
- .setup_deferred_work = iwl5000_setup_deferred_work,
- .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
- .load_ucode = iwl5000_load_ucode,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
+ .load_ucode = iwlagn_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
.dump_fh = iwl_dump_fh,
- .init_alive_start = iwl5000_init_alive_start,
- .alive_notify = iwl5000_alive_notify,
- .send_tx_power = iwl5000_send_tx_power,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl6000_hw_channel_switch,
.apm_ops = {
@@ -254,60 +295,67 @@ static struct iwl_lib_ops iwl6000_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
- EEPROM_5000_REG_BAND_1_CHANNELS,
- EEPROM_5000_REG_BAND_2_CHANNELS,
- EEPROM_5000_REG_BAND_3_CHANNELS,
- EEPROM_5000_REG_BAND_4_CHANNELS,
- EEPROM_5000_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl5000_eeprom_calib_version,
- .query_addr = iwl5000_eeprom_query_addr,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
.config_ap = iwl_config_ap,
.temp_ops = {
- .temperature = iwl5000_temperature,
+ .temperature = iwlagn_temperature,
.set_ct_kill = iwl6000_set_ct_threshold,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+ .check_plcp_health = iwl_good_plcp_health,
+ .check_ack_health = iwl_good_ack_health,
};
static const struct iwl_ops iwl6000_ops = {
- .ucode = &iwl5000_ucode,
.lib = &iwl6000_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl5000_hcmd_utils,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
static struct iwl_lib_ops iwl6050_lib = {
- .set_hw_params = iwl6000_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
- .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
- .txq_set_sched = iwl5000_txq_set_sched,
- .txq_agg_enable = iwl5000_txq_agg_enable,
- .txq_agg_disable = iwl5000_txq_agg_disable,
+ .set_hw_params = iwl6050_hw_set_hw_params,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
- .rx_handler_setup = iwl5000_rx_handler_setup,
- .setup_deferred_work = iwl5000_setup_deferred_work,
- .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
- .load_ucode = iwl5000_load_ucode,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
+ .load_ucode = iwlagn_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
.dump_fh = iwl_dump_fh,
- .init_alive_start = iwl5000_init_alive_start,
- .alive_notify = iwl5000_alive_notify,
- .send_tx_power = iwl5000_send_tx_power,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl6000_hw_channel_switch,
.apm_ops = {
@@ -318,45 +366,90 @@ static struct iwl_lib_ops iwl6050_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
- EEPROM_5000_REG_BAND_1_CHANNELS,
- EEPROM_5000_REG_BAND_2_CHANNELS,
- EEPROM_5000_REG_BAND_3_CHANNELS,
- EEPROM_5000_REG_BAND_4_CHANNELS,
- EEPROM_5000_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl5000_eeprom_calib_version,
- .query_addr = iwl5000_eeprom_query_addr,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
.config_ap = iwl_config_ap,
.temp_ops = {
- .temperature = iwl5000_temperature,
+ .temperature = iwlagn_temperature,
.set_ct_kill = iwl6000_set_ct_threshold,
.set_calib_version = iwl6050_set_calib_version,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+ .check_plcp_health = iwl_good_plcp_health,
+ .check_ack_health = iwl_good_ack_health,
};
static const struct iwl_ops iwl6050_ops = {
- .ucode = &iwl5000_ucode,
.lib = &iwl6050_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl5000_hcmd_utils,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
+
+struct iwl_cfg iwl6000g2a_2agn_cfg = {
+ .name = "6000 Series 2x2 AGN Gen2a",
+ .fw_name_pre = IWL6000G2A_FW_PRE,
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX,
+ .ucode_api_min = IWL6000G2_UCODE_API_MIN,
+ .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
+ .ops = &iwl6000_ops,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
+ .valid_tx_ant = ANT_AB,
+ .valid_rx_ant = ANT_AB,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .pa_type = IWL_PA_SYSTEM,
+ .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+ .shadow_ram_support = true,
+ .ht_greenfield_support = true,
+ .led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .supports_idle = true,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+};
+
/*
* "i": Internal configuration, use internal Power Amplifier
*/
struct iwl_cfg iwl6000i_2agn_cfg = {
- .name = "6000 Series 2x2 AGN",
+ .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -364,10 +457,10 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_BC,
.valid_rx_ant = ANT_BC,
.pll_cfg_val = 0,
@@ -385,10 +478,15 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl6000i_2abg_cfg = {
- .name = "6000 Series 2x2 ABG",
+ .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -396,10 +494,10 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_BC,
.valid_rx_ant = ANT_BC,
.pll_cfg_val = 0,
@@ -408,7 +506,6 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.pa_type = IWL_PA_INTERNAL,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
- .ht_greenfield_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
@@ -416,10 +513,15 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl6000i_2bg_cfg = {
- .name = "6000 Series 2x2 BG",
+ .name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -427,10 +529,10 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_BC,
.valid_rx_ant = ANT_BC,
.pll_cfg_val = 0,
@@ -439,7 +541,6 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.pa_type = IWL_PA_INTERNAL,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
- .ht_greenfield_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
@@ -447,10 +548,15 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl6050_2agn_cfg = {
- .name = "6050 Series 2x2 AGN",
+ .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
.fw_name_pre = IWL6050_FW_PRE,
.ucode_api_max = IWL6050_UCODE_API_MAX,
.ucode_api_min = IWL6050_UCODE_API_MIN,
@@ -458,10 +564,10 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.ops = &iwl6050_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_AB,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = 0,
@@ -479,10 +585,15 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1500,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl6050_2abg_cfg = {
- .name = "6050 Series 2x2 ABG",
+ .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
.fw_name_pre = IWL6050_FW_PRE,
.ucode_api_max = IWL6050_UCODE_API_MAX,
.ucode_api_min = IWL6050_UCODE_API_MIN,
@@ -490,10 +601,10 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.ops = &iwl6050_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_AB,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = 0,
@@ -502,7 +613,6 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.pa_type = IWL_PA_SYSTEM,
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
.shadow_ram_support = true,
- .ht_greenfield_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
@@ -510,10 +620,15 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1500,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl6000_3agn_cfg = {
- .name = "6000 Series 3x3 AGN",
+ .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -521,10 +636,10 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_ABC,
.valid_rx_ant = ANT_ABC,
.pll_cfg_val = 0,
@@ -542,7 +657,13 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
new file mode 100644
index 0000000..48c023b
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -0,0 +1,850 @@
+/******************************************************************************
+*
+* GPL LICENSE SUMMARY
+*
+* Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+* USA
+*
+* The full GNU General Public License is included in this distribution
+* in the file called LICENSE.GPL.
+*
+* Contact Information:
+* Intel Linux Wireless <ilw@linux.intel.com>
+* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*****************************************************************************/
+
+#include "iwl-agn-debugfs.h"
+
+ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+ {
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct statistics_rx_phy) * 40 +
+ sizeof(struct statistics_rx_non_phy) * 40 +
+ sizeof(struct statistics_rx_ht_phy) * 40 + 400;
+ ssize_t ret;
+ struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+ struct statistics_rx_non_phy *general, *accum_general;
+ struct statistics_rx_non_phy *delta_general, *max_general;
+ struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * the statistics information displayed here is based on
+ * the last statistics notification from uCode
+ * and might not reflect the current uCode activity
+ */
+ ofdm = &priv->statistics.rx.ofdm;
+ cck = &priv->statistics.rx.cck;
+ general = &priv->statistics.rx.general;
+ ht = &priv->statistics.rx.ofdm_ht;
+ accum_ofdm = &priv->accum_statistics.rx.ofdm;
+ accum_cck = &priv->accum_statistics.rx.cck;
+ accum_general = &priv->accum_statistics.rx.general;
+ accum_ht = &priv->accum_statistics.rx.ofdm_ht;
+ delta_ofdm = &priv->delta_statistics.rx.ofdm;
+ delta_cck = &priv->delta_statistics.rx.cck;
+ delta_general = &priv->delta_statistics.rx.general;
+ delta_ht = &priv->delta_statistics.rx.ofdm_ht;
+ max_ofdm = &priv->max_delta.rx.ofdm;
+ max_cck = &priv->max_delta.rx.cck;
+ max_general = &priv->max_delta.rx.general;
+ max_ht = &priv->max_delta.rx.ofdm_ht;
+
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - OFDM:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
+ accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "overrun_err:",
+ le32_to_cpu(ofdm->overrun_err),
+ accum_ofdm->overrun_err, delta_ofdm->overrun_err,
+ max_ofdm->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "early_overrun_err:",
+ le32_to_cpu(ofdm->early_overrun_err),
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_good:", le32_to_cpu(ofdm->crc32_good),
+ accum_ofdm->crc32_good, delta_ofdm->crc32_good,
+ max_ofdm->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
+ le32_to_cpu(ofdm->false_alarm_cnt),
+ accum_ofdm->false_alarm_cnt,
+ delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_sync_err_cnt:",
+ le32_to_cpu(ofdm->fina_sync_err_cnt),
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
+ le32_to_cpu(ofdm->sfd_timeout),
+ accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
+ max_ofdm->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
+ le32_to_cpu(ofdm->fina_timeout),
+ accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
+ max_ofdm->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "unresponded_rts:",
+ le32_to_cpu(ofdm->unresponded_rts),
+ accum_ofdm->unresponded_rts,
+ delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
+ le32_to_cpu(ofdm->sent_ack_cnt),
+ accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
+ max_ofdm->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
+ le32_to_cpu(ofdm->sent_cts_cnt),
+ accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
+ max_ofdm->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sent_ba_rsp_cnt:",
+ le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+ accum_ofdm->sent_ba_rsp_cnt,
+ delta_ofdm->sent_ba_rsp_cnt,
+ max_ofdm->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "dsp_self_kill:",
+ le32_to_cpu(ofdm->dsp_self_kill),
+ accum_ofdm->dsp_self_kill,
+ delta_ofdm->dsp_self_kill,
+ max_ofdm->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "mh_format_err:",
+ le32_to_cpu(ofdm->mh_format_err),
+ accum_ofdm->mh_format_err,
+ delta_ofdm->mh_format_err,
+ max_ofdm->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "re_acq_main_rssi_sum:",
+ le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+ accum_ofdm->re_acq_main_rssi_sum,
+ delta_ofdm->re_acq_main_rssi_sum,
+ max_ofdm->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - CCK:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "overrun_err:",
+ le32_to_cpu(cck->overrun_err),
+ accum_cck->overrun_err, delta_cck->overrun_err,
+ max_cck->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "early_overrun_err:",
+ le32_to_cpu(cck->early_overrun_err),
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err,
+ max_cck->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good, max_cck->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "false_alarm_cnt:",
+ le32_to_cpu(cck->false_alarm_cnt),
+ accum_cck->false_alarm_cnt,
+ delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_sync_err_cnt:",
+ le32_to_cpu(cck->fina_sync_err_cnt),
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt,
+ max_cck->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sfd_timeout:",
+ le32_to_cpu(cck->sfd_timeout),
+ accum_cck->sfd_timeout, delta_cck->sfd_timeout,
+ max_cck->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
+ le32_to_cpu(cck->fina_timeout),
+ accum_cck->fina_timeout, delta_cck->fina_timeout,
+ max_cck->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "unresponded_rts:",
+ le32_to_cpu(cck->unresponded_rts),
+ accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(cck->rxe_frame_limit_overrun),
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
+ le32_to_cpu(cck->sent_ack_cnt),
+ accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
+ max_cck->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
+ le32_to_cpu(cck->sent_cts_cnt),
+ accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
+ max_cck->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ba_rsp_cnt:",
+ le32_to_cpu(cck->sent_ba_rsp_cnt),
+ accum_cck->sent_ba_rsp_cnt,
+ delta_cck->sent_ba_rsp_cnt,
+ max_cck->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "dsp_self_kill:",
+ le32_to_cpu(cck->dsp_self_kill),
+ accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
+ max_cck->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "mh_format_err:",
+ le32_to_cpu(cck->mh_format_err),
+ accum_cck->mh_format_err, delta_cck->mh_format_err,
+ max_cck->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "re_acq_main_rssi_sum:",
+ le32_to_cpu(cck->re_acq_main_rssi_sum),
+ accum_cck->re_acq_main_rssi_sum,
+ delta_cck->re_acq_main_rssi_sum,
+ max_cck->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - GENERAL:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bogus_cts:",
+ le32_to_cpu(general->bogus_cts),
+ accum_general->bogus_cts, delta_general->bogus_cts,
+ max_general->bogus_cts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bogus_ack:",
+ le32_to_cpu(general->bogus_ack),
+ accum_general->bogus_ack, delta_general->bogus_ack,
+ max_general->bogus_ack);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "non_bssid_frames:",
+ le32_to_cpu(general->non_bssid_frames),
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "filtered_frames:",
+ le32_to_cpu(general->filtered_frames),
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "non_channel_beacons:",
+ le32_to_cpu(general->non_channel_beacons),
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "channel_beacons:",
+ le32_to_cpu(general->channel_beacons),
+ accum_general->channel_beacons,
+ delta_general->channel_beacons,
+ max_general->channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "num_missed_bcon:",
+ le32_to_cpu(general->num_missed_bcon),
+ accum_general->num_missed_bcon,
+ delta_general->num_missed_bcon,
+ max_general->num_missed_bcon);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "adc_rx_saturation_time:",
+ le32_to_cpu(general->adc_rx_saturation_time),
+ accum_general->adc_rx_saturation_time,
+ delta_general->adc_rx_saturation_time,
+ max_general->adc_rx_saturation_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ina_detect_search_tm:",
+ le32_to_cpu(general->ina_detection_search_time),
+ accum_general->ina_detection_search_time,
+ delta_general->ina_detection_search_time,
+ max_general->ina_detection_search_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_silence_rssi_a:",
+ le32_to_cpu(general->beacon_silence_rssi_a),
+ accum_general->beacon_silence_rssi_a,
+ delta_general->beacon_silence_rssi_a,
+ max_general->beacon_silence_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_silence_rssi_b:",
+ le32_to_cpu(general->beacon_silence_rssi_b),
+ accum_general->beacon_silence_rssi_b,
+ delta_general->beacon_silence_rssi_b,
+ max_general->beacon_silence_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_silence_rssi_c:",
+ le32_to_cpu(general->beacon_silence_rssi_c),
+ accum_general->beacon_silence_rssi_c,
+ delta_general->beacon_silence_rssi_c,
+ max_general->beacon_silence_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "interference_data_flag:",
+ le32_to_cpu(general->interference_data_flag),
+ accum_general->interference_data_flag,
+ delta_general->interference_data_flag,
+ max_general->interference_data_flag);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "channel_load:",
+ le32_to_cpu(general->channel_load),
+ accum_general->channel_load,
+ delta_general->channel_load,
+ max_general->channel_load);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "dsp_false_alarms:",
+ le32_to_cpu(general->dsp_false_alarms),
+ accum_general->dsp_false_alarms,
+ delta_general->dsp_false_alarms,
+ max_general->dsp_false_alarms);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_rssi_a:",
+ le32_to_cpu(general->beacon_rssi_a),
+ accum_general->beacon_rssi_a,
+ delta_general->beacon_rssi_a,
+ max_general->beacon_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_rssi_b:",
+ le32_to_cpu(general->beacon_rssi_b),
+ accum_general->beacon_rssi_b,
+ delta_general->beacon_rssi_b,
+ max_general->beacon_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_rssi_c:",
+ le32_to_cpu(general->beacon_rssi_c),
+ accum_general->beacon_rssi_c,
+ delta_general->beacon_rssi_c,
+ max_general->beacon_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_energy_a:",
+ le32_to_cpu(general->beacon_energy_a),
+ accum_general->beacon_energy_a,
+ delta_general->beacon_energy_a,
+ max_general->beacon_energy_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_energy_b:",
+ le32_to_cpu(general->beacon_energy_b),
+ accum_general->beacon_energy_b,
+ delta_general->beacon_energy_b,
+ max_general->beacon_energy_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_energy_c:",
+ le32_to_cpu(general->beacon_energy_c),
+ accum_general->beacon_energy_c,
+ delta_general->beacon_energy_c,
+ max_general->beacon_energy_c);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - OFDM_HT:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "plcp_err:",
+ le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+ delta_ht->plcp_err, max_ht->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "overrun_err:",
+ le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+ delta_ht->overrun_err, max_ht->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "early_overrun_err:",
+ le32_to_cpu(ht->early_overrun_err),
+ accum_ht->early_overrun_err,
+ delta_ht->early_overrun_err,
+ max_ht->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_good:",
+ le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+ delta_ht->crc32_good, max_ht->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_err:",
+ le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+ delta_ht->crc32_err, max_ht->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "mh_format_err:",
+ le32_to_cpu(ht->mh_format_err),
+ accum_ht->mh_format_err,
+ delta_ht->mh_format_err, max_ht->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg_crc32_good:",
+ le32_to_cpu(ht->agg_crc32_good),
+ accum_ht->agg_crc32_good,
+ delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg_mpdu_cnt:",
+ le32_to_cpu(ht->agg_mpdu_cnt),
+ accum_ht->agg_mpdu_cnt,
+ delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg_cnt:",
+ le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+ delta_ht->agg_cnt, max_ht->agg_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "unsupport_mcs:",
+ le32_to_cpu(ht->unsupport_mcs),
+ accum_ht->unsupport_mcs,
+ delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t iwl_ucode_tx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
+ ssize_t ret;
+ struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /* the statistics information displayed here is based on
+ * the last statistics notification from uCode
+ * and might not reflect the current uCode activity
+ */
+ tx = &priv->statistics.tx;
+ accum_tx = &priv->accum_statistics.tx;
+ delta_tx = &priv->delta_statistics.tx;
+ max_tx = &priv->max_delta.tx;
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Tx:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "preamble:",
+ le32_to_cpu(tx->preamble_cnt),
+ accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rx_detected_cnt:",
+ le32_to_cpu(tx->rx_detected_cnt),
+ accum_tx->rx_detected_cnt,
+ delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bt_prio_defer_cnt:",
+ le32_to_cpu(tx->bt_prio_defer_cnt),
+ accum_tx->bt_prio_defer_cnt,
+ delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bt_prio_kill_cnt:",
+ le32_to_cpu(tx->bt_prio_kill_cnt),
+ accum_tx->bt_prio_kill_cnt,
+ delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "few_bytes_cnt:",
+ le32_to_cpu(tx->few_bytes_cnt),
+ accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ack_timeout:",
+ le32_to_cpu(tx->ack_timeout),
+ accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "expected_ack_cnt:",
+ le32_to_cpu(tx->expected_ack_cnt),
+ accum_tx->expected_ack_cnt,
+ delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "actual_ack_cnt:",
+ le32_to_cpu(tx->actual_ack_cnt),
+ accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt,
+ max_tx->actual_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "dump_msdu_cnt:",
+ le32_to_cpu(tx->dump_msdu_cnt),
+ accum_tx->dump_msdu_cnt,
+ delta_tx->dump_msdu_cnt,
+ max_tx->dump_msdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "abort_nxt_frame_mismatch:",
+ le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+ accum_tx->burst_abort_next_frame_mismatch_cnt,
+ delta_tx->burst_abort_next_frame_mismatch_cnt,
+ max_tx->burst_abort_next_frame_mismatch_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "abort_missing_nxt_frame:",
+ le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+ accum_tx->burst_abort_missing_next_frame_cnt,
+ delta_tx->burst_abort_missing_next_frame_cnt,
+ max_tx->burst_abort_missing_next_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "cts_timeout_collision:",
+ le32_to_cpu(tx->cts_timeout_collision),
+ accum_tx->cts_timeout_collision,
+ delta_tx->cts_timeout_collision,
+ max_tx->cts_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ack_ba_timeout_collision:",
+ le32_to_cpu(tx->ack_or_ba_timeout_collision),
+ accum_tx->ack_or_ba_timeout_collision,
+ delta_tx->ack_or_ba_timeout_collision,
+ max_tx->ack_or_ba_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg ba_timeout:",
+ le32_to_cpu(tx->agg.ba_timeout),
+ accum_tx->agg.ba_timeout,
+ delta_tx->agg.ba_timeout,
+ max_tx->agg.ba_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg ba_resched_frames:",
+ le32_to_cpu(tx->agg.ba_reschedule_frames),
+ accum_tx->agg.ba_reschedule_frames,
+ delta_tx->agg.ba_reschedule_frames,
+ max_tx->agg.ba_reschedule_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg scd_query_agg_frame:",
+ le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+ accum_tx->agg.scd_query_agg_frame_cnt,
+ delta_tx->agg.scd_query_agg_frame_cnt,
+ max_tx->agg.scd_query_agg_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg scd_query_no_agg:",
+ le32_to_cpu(tx->agg.scd_query_no_agg),
+ accum_tx->agg.scd_query_no_agg,
+ delta_tx->agg.scd_query_no_agg,
+ max_tx->agg.scd_query_no_agg);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg scd_query_agg:",
+ le32_to_cpu(tx->agg.scd_query_agg),
+ accum_tx->agg.scd_query_agg,
+ delta_tx->agg.scd_query_agg,
+ max_tx->agg.scd_query_agg);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg scd_query_mismatch:",
+ le32_to_cpu(tx->agg.scd_query_mismatch),
+ accum_tx->agg.scd_query_mismatch,
+ delta_tx->agg.scd_query_mismatch,
+ max_tx->agg.scd_query_mismatch);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg frame_not_ready:",
+ le32_to_cpu(tx->agg.frame_not_ready),
+ accum_tx->agg.frame_not_ready,
+ delta_tx->agg.frame_not_ready,
+ max_tx->agg.frame_not_ready);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg underrun:",
+ le32_to_cpu(tx->agg.underrun),
+ accum_tx->agg.underrun,
+ delta_tx->agg.underrun, max_tx->agg.underrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg bt_prio_kill:",
+ le32_to_cpu(tx->agg.bt_prio_kill),
+ accum_tx->agg.bt_prio_kill,
+ delta_tx->agg.bt_prio_kill,
+ max_tx->agg.bt_prio_kill);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg rx_ba_rsp_cnt:",
+ le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+ accum_tx->agg.rx_ba_rsp_cnt,
+ delta_tx->agg.rx_ba_rsp_cnt,
+ max_tx->agg.rx_ba_rsp_cnt);
+
+ if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "tx power: (1/2 dB step)\n");
+ if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tantenna A: 0x%X\n",
+ tx->tx_power.ant_a);
+ if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tantenna B: 0x%X\n",
+ tx->tx_power.ant_b);
+ if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tantenna C: 0x%X\n",
+ tx->tx_power.ant_c);
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct statistics_general) * 10 + 300;
+ ssize_t ret;
+ struct statistics_general *general, *accum_general;
+ struct statistics_general *delta_general, *max_general;
+ struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct statistics_div *div, *accum_div, *delta_div, *max_div;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /* the statistics information displayed here is based on
+ * the last statistics notification from uCode
+ * and might not reflect the current uCode activity
+ */
+ general = &priv->statistics.general;
+ dbg = &priv->statistics.general.dbg;
+ div = &priv->statistics.general.div;
+ accum_general = &priv->accum_statistics.general;
+ delta_general = &priv->delta_statistics.general;
+ max_general = &priv->max_delta.general;
+ accum_dbg = &priv->accum_statistics.general.dbg;
+ delta_dbg = &priv->delta_statistics.general.dbg;
+ max_dbg = &priv->max_delta.general.dbg;
+ accum_div = &priv->accum_statistics.general.div;
+ delta_div = &priv->delta_statistics.general.div;
+ max_div = &priv->max_delta.general.div;
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_General:");
+ pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u\n",
+ "temperature:",
+ le32_to_cpu(general->temperature));
+ pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u\n",
+ "temperature_m:",
+ le32_to_cpu(general->temperature_m));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "burst_check:",
+ le32_to_cpu(dbg->burst_check),
+ accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "burst_count:",
+ le32_to_cpu(dbg->burst_count),
+ accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sleep_time:",
+ le32_to_cpu(general->sleep_time),
+ accum_general->sleep_time,
+ delta_general->sleep_time, max_general->sleep_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "slots_out:",
+ le32_to_cpu(general->slots_out),
+ accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "slots_idle:",
+ le32_to_cpu(general->slots_idle),
+ accum_general->slots_idle,
+ delta_general->slots_idle, max_general->slots_idle);
+ pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
+ le32_to_cpu(general->ttl_timestamp));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rx_enable_counter:",
+ le32_to_cpu(general->rx_enable_counter),
+ accum_general->rx_enable_counter,
+ delta_general->rx_enable_counter,
+ max_general->rx_enable_counter);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "num_of_sos_states:",
+ le32_to_cpu(general->num_of_sos_states),
+ accum_general->num_of_sos_states,
+ delta_general->num_of_sos_states,
+ max_general->num_of_sos_states);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
new file mode 100644
index 0000000..59b1f25
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
@@ -0,0 +1,56 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t iwl_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+#else
+static ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+static ssize_t iwl_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+static ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
new file mode 100644
index 0000000..44ef5d9
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -0,0 +1,276 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-agn.h"
+
+static int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
+{
+ int ret = 0;
+ struct iwl5000_rxon_assoc_cmd rxon_assoc;
+ const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
+ const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
+
+ if ((rxon1->flags == rxon2->flags) &&
+ (rxon1->filter_flags == rxon2->filter_flags) &&
+ (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
+ (rxon1->ofdm_ht_single_stream_basic_rates ==
+ rxon2->ofdm_ht_single_stream_basic_rates) &&
+ (rxon1->ofdm_ht_dual_stream_basic_rates ==
+ rxon2->ofdm_ht_dual_stream_basic_rates) &&
+ (rxon1->ofdm_ht_triple_stream_basic_rates ==
+ rxon2->ofdm_ht_triple_stream_basic_rates) &&
+ (rxon1->acquisition_data == rxon2->acquisition_data) &&
+ (rxon1->rx_chain == rxon2->rx_chain) &&
+ (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
+ IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
+ return 0;
+ }
+
+ rxon_assoc.flags = priv->staging_rxon.flags;
+ rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
+ rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
+ rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
+ rxon_assoc.reserved1 = 0;
+ rxon_assoc.reserved2 = 0;
+ rxon_assoc.reserved3 = 0;
+ rxon_assoc.ofdm_ht_single_stream_basic_rates =
+ priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
+ rxon_assoc.ofdm_ht_dual_stream_basic_rates =
+ priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
+ rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
+ rxon_assoc.ofdm_ht_triple_stream_basic_rates =
+ priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
+ rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
+
+ ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
+ sizeof(rxon_assoc), &rxon_assoc, NULL);
+
+ return ret;
+}
+
+static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
+{
+ struct iwl_tx_ant_config_cmd tx_ant_cmd = {
+ .valid = cpu_to_le32(valid_tx_ant),
+ };
+
+ if (IWL_UCODE_API(priv->ucode_ver) > 1) {
+ IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
+ return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD,
+ sizeof(struct iwl_tx_ant_config_cmd),
+ &tx_ant_cmd);
+ } else {
+ IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Currently this is the superset of everything */
+static u16 iwlagn_get_hcmd_size(u8 cmd_id, u16 len)
+{
+ return len;
+}
+
+static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
+{
+ u16 size = (u16)sizeof(struct iwl_addsta_cmd);
+ struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
+ memcpy(addsta, cmd, size);
+ /* reserved in 5000 */
+ addsta->rate_n_flags = cpu_to_le16(0);
+ return size;
+}
+
+static void iwlagn_gain_computation(struct iwl_priv *priv,
+ u32 average_noise[NUM_RX_CHAINS],
+ u16 min_average_noise_antenna_i,
+ u32 min_average_noise,
+ u8 default_chain)
+{
+ int i;
+ s32 delta_g;
+ struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+
+ /*
+ * Find Gain Code for the chains based on "default chain"
+ */
+ for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
+ if ((data->disconn_array[i])) {
+ data->delta_gain_code[i] = 0;
+ continue;
+ }
+
+ delta_g = (priv->cfg->chain_noise_scale *
+ ((s32)average_noise[default_chain] -
+ (s32)average_noise[i])) / 1500;
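+ /*
+ * Illustrative numbers (not from the original code): with a
+ * chain_noise_scale of 1000, average_noise[default_chain] = 1030 and
+ * average_noise[i] = 1000, delta_g = 1000 * (1030 - 1000) / 1500 = 20,
+ * which is then clamped to CHAIN_NOISE_MAX_DELTA_GAIN_CODE below.
+ */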
+
+ /* bound the gain to a 2-bit magnitude; the 3rd bit is the sign */
+ data->delta_gain_code[i] =
+ min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+
+ if (delta_g < 0)
+ /*
+ * set negative sign ...
+ * note to Intel developers: This is uCode API format,
+ * not the format of any internal device registers.
+ * Do not change this format for e.g. 6050 or similar
+ * devices. Change format only if more resolution
+ * (i.e. more than 2 bits magnitude) is needed.
+ */
+ data->delta_gain_code[i] |= (1 << 2);
+ }
+
+ IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
+ data->delta_gain_code[1], data->delta_gain_code[2]);
+
+ if (!data->radio_write) {
+ struct iwl_calib_chain_noise_gain_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
+ cmd.hdr.first_group = 0;
+ cmd.hdr.groups_num = 1;
+ cmd.hdr.data_valid = 1;
+ cmd.delta_gain_1 = data->delta_gain_code[1];
+ cmd.delta_gain_2 = data->delta_gain_code[2];
+ iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
+ sizeof(cmd), &cmd, NULL);
+
+ data->radio_write = 1;
+ data->state = IWL_CHAIN_NOISE_CALIBRATED;
+ }
+
+ data->chain_noise_a = 0;
+ data->chain_noise_b = 0;
+ data->chain_noise_c = 0;
+ data->chain_signal_a = 0;
+ data->chain_signal_b = 0;
+ data->chain_signal_c = 0;
+ data->beacon_count = 0;
+}
+
+static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
+{
+ struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+ int ret;
+
+ if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
+ struct iwl_calib_chain_noise_reset_cmd cmd;
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
+ cmd.hdr.first_group = 0;
+ cmd.hdr.groups_num = 1;
+ cmd.hdr.data_valid = 1;
+ ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
+ sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(priv,
+ "Could not send REPLY_PHY_CALIBRATION_CMD\n");
+ data->state = IWL_CHAIN_NOISE_ACCUMULATE;
+ IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
+ }
+}
+
+static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
+ __le32 *tx_flags)
+{
+ if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
+ (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
+ *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
+ else
+ *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
+}
+
+/* Calc max signal level (dBm) among 3 possible receivers */
+static int iwlagn_calc_rssi(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *rx_resp)
+{
+ /* data from PHY/DSP regarding signal strength, etc.,
+ * contents are always there, not configurable by host
+ */
+ struct iwl5000_non_cfg_phy *ncphy =
+ (struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+ u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
+ u8 agc;
+
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
+ agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;
+
+ /* Find max rssi among 3 possible receivers.
+ * These values are measured by the digital signal processor (DSP).
+ * They should stay fairly constant even as the signal strength varies,
+ * if the radio's automatic gain control (AGC) is working right.
+ * AGC value (see below) will provide the "interesting" info.
+ */
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
+ rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
+ rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
+ rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;
+
+ max_rssi = max_t(u32, rssi_a, rssi_b);
+ max_rssi = max_t(u32, max_rssi, rssi_c);
+
+ IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+ rssi_a, rssi_b, rssi_c, max_rssi, agc);
+
+ /* dBm = max_rssi dB - agc dB - constant.
+ * Higher AGC (higher radio gain) means lower signal. */
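+ /* Worked example with made-up values: max_rssi = 80 and agc = 40 give
+ * 80 - 40 - 44 (IWLAGN_RSSI_OFFSET) = -4 dBm; only the arithmetic, not
+ * the numbers, reflects real hardware readings. */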
+ return max_rssi - agc - IWLAGN_RSSI_OFFSET;
+}
+
+struct iwl_hcmd_ops iwlagn_hcmd = {
+ .rxon_assoc = iwlagn_send_rxon_assoc,
+ .commit_rxon = iwl_commit_rxon,
+ .set_rxon_chain = iwl_set_rxon_chain,
+ .set_tx_ant = iwlagn_send_tx_ant_config,
+ .send_bt_config = iwl_send_bt_config,
+};
+
+struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
+ .get_hcmd_size = iwlagn_get_hcmd_size,
+ .build_addsta_hcmd = iwlagn_build_addsta_hcmd,
+ .gain_computation = iwlagn_gain_computation,
+ .chain_noise_reset = iwlagn_chain_noise_reset,
+ .rts_tx_cmd_flag = iwlagn_rts_tx_cmd_flag,
+ .calc_rssi = iwlagn_calc_rssi,
+ .request_scan = iwlagn_request_scan,
+};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
new file mode 100644
index 0000000..f9a3fbb
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -0,0 +1,118 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (iwl-agn-hw.h) only for hardware-related definitions.
+ */
+
+#ifndef __iwl_agn_hw_h__
+#define __iwl_agn_hw_h__
+
+#define IWLAGN_RTC_INST_LOWER_BOUND (0x000000)
+#define IWLAGN_RTC_INST_UPPER_BOUND (0x020000)
+
+#define IWLAGN_RTC_DATA_LOWER_BOUND (0x800000)
+#define IWLAGN_RTC_DATA_UPPER_BOUND (0x80C000)
+
+#define IWLAGN_RTC_INST_SIZE (IWLAGN_RTC_INST_UPPER_BOUND - \
+ IWLAGN_RTC_INST_LOWER_BOUND)
+#define IWLAGN_RTC_DATA_SIZE (IWLAGN_RTC_DATA_UPPER_BOUND - \
+ IWLAGN_RTC_DATA_LOWER_BOUND)
+
+/* RSSI to dBm */
+#define IWLAGN_RSSI_OFFSET 44
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+#define IWLAGN_DEFAULT_TX_RETRY 15
+
+/* Limit range of txpower output target to be between these values */
+#define IWLAGN_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
+#define IWLAGN_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */
+
+/* EEPROM */
+#define IWLAGN_EEPROM_IMG_SIZE 2048
+
+#define IWLAGN_CMD_FIFO_NUM 7
+#define IWLAGN_NUM_QUEUES 20
+#define IWLAGN_NUM_AMPDU_QUEUES 10
+#define IWLAGN_FIRST_AMPDU_QUEUE 10
+
+/* Fixed (non-configurable) rx data from phy */
+
+/**
+ * struct iwlagn_scd_bc_tbl - scheduler byte count table
+ * base physical address provided by SCD_DRAM_BASE_ADDR
+ * @tfd_offset: 0-12 - tx command byte count
+ * 12-16 - station index
+ */
+struct iwlagn_scd_bc_tbl {
+ __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+} __attribute__ ((packed));
+
+
+#endif /* __iwl_agn_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
new file mode 100644
index 0000000..a273e37
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
@@ -0,0 +1,307 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <net/mac80211.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-agn.h"
+#include "iwl-helpers.h"
+
+#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
+
+/* Free dram table */
+void iwl_free_isr_ict(struct iwl_priv *priv)
+{
+ if (priv->_agn.ict_tbl_vir) {
+ dma_free_coherent(&priv->pci_dev->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ priv->_agn.ict_tbl_vir,
+ priv->_agn.ict_tbl_dma);
+ priv->_agn.ict_tbl_vir = NULL;
+ }
+}
+
+
+/* Allocate the DRAM shared table; it is PAGE_SIZE aligned.
+ * Also reset all data related to the ICT table interrupt.
+ */
+int iwl_alloc_isr_ict(struct iwl_priv *priv)
+{
+
+ if (priv->cfg->use_isr_legacy)
+ return 0;
+ /* allocate shared data table */
+ priv->_agn.ict_tbl_vir =
+ dma_alloc_coherent(&priv->pci_dev->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ &priv->_agn.ict_tbl_dma, GFP_KERNEL);
+ if (!priv->_agn.ict_tbl_vir)
+ return -ENOMEM;
+
+ /* align table to a PAGE_SIZE boundary */
+ priv->_agn.aligned_ict_tbl_dma = ALIGN(priv->_agn.ict_tbl_dma, PAGE_SIZE);
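+ /* e.g. (hypothetical addresses) a table at DMA address 0x1234800 with a
+ * 4 KiB PAGE_SIZE is rounded up to 0x1235000; the same offset is applied
+ * to the virtual address below. */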
+
+ IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
+ (unsigned long long)priv->_agn.ict_tbl_dma,
+ (unsigned long long)priv->_agn.aligned_ict_tbl_dma,
+ (int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma));
+
+ priv->_agn.ict_tbl = priv->_agn.ict_tbl_vir +
+ (priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma);
+
+ IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
+ priv->_agn.ict_tbl, priv->_agn.ict_tbl_vir,
+ (int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma));
+
+ /* reset table and index to all 0 */
+ memset(priv->_agn.ict_tbl_vir, 0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
+ priv->_agn.ict_index = 0;
+
+ /* add periodic RX interrupt */
+ priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
+ return 0;
+}
+
+/* Device is going up: inform it about using the ICT interrupt table,
+ * and also tell the driver to start using the ICT interrupt.
+ */
+int iwl_reset_ict(struct iwl_priv *priv)
+{
+ u32 val;
+ unsigned long flags;
+
+ if (!priv->_agn.ict_tbl_vir)
+ return 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl_disable_interrupts(priv);
+
+ memset(&priv->_agn.ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
+
+ val = priv->_agn.aligned_ict_tbl_dma >> PAGE_SHIFT;
+
+ val |= CSR_DRAM_INT_TBL_ENABLE;
+ val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+
+ IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
+ "aligned dma address %Lx\n",
+ val, (unsigned long long)priv->_agn.aligned_ict_tbl_dma);
+
+ iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
+ priv->_agn.use_ict = true;
+ priv->_agn.ict_index = 0;
+ iwl_write32(priv, CSR_INT, priv->inta_mask);
+ iwl_enable_interrupts(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+/* Device is going down; disable ICT interrupt usage */
+void iwl_disable_ict(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->_agn.use_ict = false;
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static irqreturn_t iwl_isr(int irq, void *data)
+{
+ struct iwl_priv *priv = data;
+ u32 inta, inta_mask;
+ unsigned long flags;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ u32 inta_fh;
+#endif
+ if (!priv)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here. */
+ inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
+ iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(priv, CSR_INT);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!inta) {
+ IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
+ goto none;
+ }
+
+ if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+ /* Hardware disappeared. It might have already raised
+ * an interrupt */
+ IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ goto unplugged;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
+ inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+ IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
+ "fh 0x%08x\n", inta, inta_mask, inta_fh);
+ }
+#endif
+
+ priv->_agn.inta |= inta;
+ /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta))
+ tasklet_schedule(&priv->irq_tasklet);
+ else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
+ iwl_enable_interrupts(priv);
+
+ unplugged:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+
+ none:
+ /* re-enable interrupts here since we don't have anything to service. */
+ /* Only re-enable if disabled by irq and no tasklet was scheduled. */
+ if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
+ iwl_enable_interrupts(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_NONE;
+}
+
+/* Interrupt handler using the ICT table. With this handler the driver stops
+ * using the INTA register to get the device's interrupts, since reading that
+ * register is expensive. Instead, the device writes its interrupts into the
+ * ICT DRAM table, increments the index and then fires an interrupt to the
+ * driver. The driver ORs all ICT table entries from the current index up to
+ * the first entry with a 0 value; the result is the interrupt that needs to
+ * be serviced. The driver then clears the entries and updates the index.
+ */
+irqreturn_t iwl_isr_ict(int irq, void *data)
+{
+ struct iwl_priv *priv = data;
+ u32 inta, inta_mask;
+ u32 val = 0;
+ unsigned long flags;
+
+ if (!priv)
+ return IRQ_NONE;
+
+ /* dram interrupt table not set yet,
+ * use legacy interrupt.
+ */
+ if (!priv->_agn.use_ict)
+ return iwl_isr(irq, data);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here.
+ */
+ inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
+ iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!priv->_agn.ict_tbl[priv->_agn.ict_index]) {
+ IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
+ goto none;
+ }
+
+ /* read all entries that are not 0, starting at ict_index */
+ while (priv->_agn.ict_tbl[priv->_agn.ict_index]) {
+
+ val |= le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]);
+ IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
+ priv->_agn.ict_index,
+ le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]));
+ priv->_agn.ict_tbl[priv->_agn.ict_index] = 0;
+ priv->_agn.ict_index = iwl_queue_inc_wrap(priv->_agn.ict_index,
+ ICT_COUNT);
+
+ }
+
+ /* We should not get this value, just ignore it. */
+ if (val == 0xffffffff)
+ val = 0;
+
+ /*
+ * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
+ * (bit 15 before shifting it to 31) to clear when using interrupt
+ * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
+ * so we use them to decide on the real state of the Rx bit.
+ * In other words, bit 15 is set if bit 18 or bit 19 is set.
+ */
+ if (val & 0xC0000)
+ val |= 0x8000;
+
+ inta = (0xff & val) | ((0xff00 & val) << 16);
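+ /* The ICT entry packs CSR_INT bits 0-7 into its low byte and bits 24-31
+ * into its high byte; e.g. an (illustrative) val of 0x8002 expands to
+ * inta = 0x80000002 via the expression above. */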
+ IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
+ inta, inta_mask, val);
+
+ inta &= priv->inta_mask;
+ priv->_agn.inta |= inta;
+
+ /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta))
+ tasklet_schedule(&priv->irq_tasklet);
+ else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) {
+ /* Allow interrupt if was disabled by this handler and
+ * no tasklet was schedules, We should not enable interrupt,
+ * tasklet will enable it.
+ */
+ iwl_enable_interrupts(priv);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+
+ none:
+ /* re-enable interrupts here since we don't have anything to service.
+ * Only re-enable if disabled by irq.
+ */
+ if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
+ iwl_enable_interrupts(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_NONE;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
new file mode 100644
index 0000000..1004cfc
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -0,0 +1,1530 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-agn-hw.h"
+#include "iwl-agn.h"
+#include "iwl-sta.h"
+
+static inline u32 iwlagn_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
+{
+ return le32_to_cpup((__le32 *)&tx_resp->status +
+ tx_resp->frame_count) & MAX_SN;
+}
+
+static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
+ struct iwl_ht_agg *agg,
+ struct iwl5000_tx_resp *tx_resp,
+ int txq_id, u16 start_idx)
+{
+ u16 status;
+ struct agg_tx_status *frame_status = &tx_resp->status;
+ struct ieee80211_tx_info *info = NULL;
+ struct ieee80211_hdr *hdr = NULL;
+ u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
+ int i, sh, idx;
+ u16 seq;
+
+ if (agg->wait_for_ba)
+ IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
+
+ agg->frame_count = tx_resp->frame_count;
+ agg->start_idx = start_idx;
+ agg->rate_n_flags = rate_n_flags;
+ agg->bitmap = 0;
+
+ /* # frames attempted by Tx command */
+ if (agg->frame_count == 1) {
+ /* Only one frame was attempted; no block-ack will arrive */
+ status = le16_to_cpu(frame_status[0].status);
+ idx = start_idx;
+
+ /* FIXME: code repetition */
+ IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
+ agg->frame_count, agg->start_idx, idx);
+
+ info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+ info->flags |= iwl_tx_status_to_mac80211(status);
+ iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
+
+ /* FIXME: code repetition end */
+
+ IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
+ status & 0xff, tx_resp->failure_frame);
+ IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
+
+ agg->wait_for_ba = 0;
+ } else {
+ /* Two or more frames were attempted; expect block-ack */
+ u64 bitmap = 0;
+ int start = agg->start_idx;
+
+ /* Construct bit-map of pending frames within Tx window */
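+ /* Each attempted frame sets one bit relative to the start of the Tx
+ * window; e.g. with (hypothetical) start = 5 and idx = 7, sh = 2 and
+ * bit 2 of the bitmap is set. The window start is adjusted below when
+ * the index wraps or precedes the current start. */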
+ for (i = 0; i < agg->frame_count; i++) {
+ u16 sc;
+ status = le16_to_cpu(frame_status[i].status);
+ seq = le16_to_cpu(frame_status[i].sequence);
+ idx = SEQ_TO_INDEX(seq);
+ txq_id = SEQ_TO_QUEUE(seq);
+
+ if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
+ AGG_TX_STATE_ABORT_MSK))
+ continue;
+
+ IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
+ agg->frame_count, txq_id, idx);
+
+ hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
+ if (!hdr) {
+ IWL_ERR(priv,
+ "BUG_ON idx doesn't point to valid skb"
+ " idx=%d, txq_id=%d\n", idx, txq_id);
+ return -1;
+ }
+
+ sc = le16_to_cpu(hdr->seq_ctrl);
+ if (idx != (SEQ_TO_SN(sc) & 0xff)) {
+ IWL_ERR(priv,
+ "BUG_ON idx doesn't match seq control"
+ " idx=%d, seq_idx=%d, seq=%d\n",
+ idx, SEQ_TO_SN(sc),
+ hdr->seq_ctrl);
+ return -1;
+ }
+
+ IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
+ i, idx, SEQ_TO_SN(sc));
+
+ sh = idx - start;
+ if (sh > 64) {
+ sh = (start - idx) + 0xff;
+ bitmap = bitmap << sh;
+ sh = 0;
+ start = idx;
+ } else if (sh < -64)
+ sh = 0xff - (start - idx);
+ else if (sh < 0) {
+ sh = start - idx;
+ start = idx;
+ bitmap = bitmap << sh;
+ sh = 0;
+ }
+ bitmap |= 1ULL << sh;
+ IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
+ start, (unsigned long long)bitmap);
+ }
+
+ agg->bitmap = bitmap;
+ agg->start_idx = start;
+ IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
+ agg->frame_count, agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ if (bitmap)
+ agg->wait_for_ba = 1;
+ }
+ return 0;
+}
+
+void iwl_check_abort_status(struct iwl_priv *priv,
+ u8 frame_count, u32 status)
+{
+ if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+ IWL_ERR(priv, "TODO: Implement Tx flush command!!!\n");
+ }
+}
+
+static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int index = SEQ_TO_INDEX(sequence);
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct ieee80211_tx_info *info;
+ struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ u32 status = le16_to_cpu(tx_resp->status.status);
+ int tid;
+ int sta_id;
+ int freed;
+
+ if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
+ IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
+ "is out of range [0-%d] %d %d\n", txq_id,
+ index, txq->q.n_bd, txq->q.write_ptr,
+ txq->q.read_ptr);
+ return;
+ }
+
+ info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
+ memset(&info->status, 0, sizeof(info->status));
+
+ tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
+ sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;
+
+ if (txq->sched_retry) {
+ const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
+ struct iwl_ht_agg *agg = NULL;
+
+ agg = &priv->stations[sta_id].tid[tid].agg;
+
+ iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
+
+ /* check if BAR is needed */
+ if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
+ info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+
+ if (txq->q.read_ptr != (scd_ssn & 0xff)) {
+ index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
+ IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
+ "scd_ssn=%d idx=%d txq=%d swq=%d\n",
+ scd_ssn, index, txq_id, txq->swq_id);
+
+ freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+
+ if (priv->mac80211_registered &&
+ (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
+ (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
+ if (agg->state == IWL_AGG_OFF)
+ iwl_wake_queue(priv, txq_id);
+ else
+ iwl_wake_queue(priv, txq->swq_id);
+ }
+ }
+ } else {
+ BUG_ON(txq_id != txq->swq_id);
+
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ info->flags |= iwl_tx_status_to_mac80211(status);
+ iwlagn_hwrate_to_tx_control(priv,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ info);
+
+ IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
+ "0x%x retries %d\n",
+ txq_id,
+ iwl_get_tx_fail_reason(status), status,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ tx_resp->failure_frame);
+
+ freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+
+ if (priv->mac80211_registered &&
+ (iwl_queue_space(&txq->q) > txq->q.low_mark))
+ iwl_wake_queue(priv, txq_id);
+ }
+
+ iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
+
+ iwl_check_abort_status(priv, tx_resp->frame_count, status);
+}
+
+void iwlagn_rx_handler_setup(struct iwl_priv *priv)
+{
+ /* init calibration handlers */
+ priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
+ iwlagn_rx_calib_result;
+ priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
+ iwlagn_rx_calib_complete;
+ priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
+}
+
+void iwlagn_setup_deferred_work(struct iwl_priv *priv)
+{
+ /* in agn, the tx power calibration is done in uCode */
+ priv->disable_tx_power_cal = 1;
+}
+
+int iwlagn_hw_valid_rtc_data_addr(u32 addr)
+{
+ return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
+ (addr < IWLAGN_RTC_DATA_UPPER_BOUND);
+}
+
+int iwlagn_send_tx_power(struct iwl_priv *priv)
+{
+ struct iwl5000_tx_power_dbm_cmd tx_power_cmd;
+ u8 tx_ant_cfg_cmd;
+
+ /* the command takes half-dBm units, so multiply by 2 */
+ tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
+
+ if (priv->tx_power_lmt_in_half_dbm &&
+ priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
+ /*
+ * For the newer devices, which use the enhanced/extended tx power
+ * table in EEPROM, the format is in half dBm. The driver needs to
+ * convert it to dBm before reporting to mac80211, which may lose
+ * 1/2 dBm of resolution. The driver rounds up before reporting, but
+ * that could push the tx power 1/2 dBm over the regulatory limit.
+ * Check here: if "tx_power_user_lmt" is higher than the EEPROM value
+ * (in half-dBm format), lower the tx power based on the EEPROM.
+ */
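+ /* e.g. (hypothetical values) a 14 dBm user limit becomes a global_lmt
+ * of 28 half-dBm; if the EEPROM limit were 27 half-dBm, the lower
+ * EEPROM value is used instead. */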
+ tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
+ }
+ tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
+ tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;
+
+ if (IWL_UCODE_API(priv->ucode_ver) == 1)
+ tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
+ else
+ tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
+
+ return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
+ sizeof(tx_power_cmd), &tx_power_cmd,
+ NULL);
+}
+
+void iwlagn_temperature(struct iwl_priv *priv)
+{
+ /* store temperature from statistics (in Celsius) */
+ priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
+ iwl_tt_handler(priv);
+}
+
+u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
+{
+ struct iwl_eeprom_calib_hdr {
+ u8 version;
+ u8 pa_type;
+ u16 voltage;
+ } *hdr;
+
+ hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
+ EEPROM_CALIB_ALL);
+ return hdr->version;
+
+}
+
+/*
+ * EEPROM
+ */
+static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
+{
+ u16 offset = 0;
+
+ if ((address & INDIRECT_ADDRESS) == 0)
+ return address;
+
+ switch (address & INDIRECT_TYPE_MSK) {
+ case INDIRECT_HOST:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
+ break;
+ case INDIRECT_GENERAL:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
+ break;
+ case INDIRECT_REGULATORY:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
+ break;
+ case INDIRECT_CALIBRATION:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
+ break;
+ case INDIRECT_PROCESS_ADJST:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
+ break;
+ case INDIRECT_OTHERS:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
+ break;
+ default:
+ IWL_ERR(priv, "illegal indirect type: 0x%X\n",
+ address & INDIRECT_TYPE_MSK);
+ break;
+ }
+
+ /* translate the offset from words to bytes */
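+ /* The link offsets stored in the EEPROM are 16-bit word offsets, hence
+ * the << 1; e.g. a (hypothetical) link offset of 0x40 words places the
+ * indirect block at byte offset 0x80. */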
+ return (address & ADDRESS_MSK) + (offset << 1);
+}
+
+const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
+ size_t offset)
+{
+ u32 address = eeprom_indirect_address(priv, offset);
+ BUG_ON(address >= priv->cfg->eeprom_size);
+ return &priv->eeprom[address];
+}
+
+struct iwl_mod_params iwlagn_mod_params = {
+ .amsdu_size_8K = 1,
+ .restart_fw = 1,
+ /* the rest are 0 by default */
+};
+
+void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ unsigned long flags;
+ int i;
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __iwl_free_pages(priv, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ rxq->queue[i] = NULL;
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+ u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
+
+ if (!priv->cfg->use_isr_legacy)
+ rb_timeout = RX_RB_TIMEOUT;
+
+ if (priv->cfg->mod_params->amsdu_size_8K)
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+ /* Stop Rx DMA */
+ iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+ /* Reset driver's Rx queue write index */
+ iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
+ iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ (u32)(rxq->dma_addr >> 8));
+
+ /* Tell device where in DRAM to update its Rx status */
+ iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+ * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
+ * the credit mechanism in 5000 HW RX FIFO
+ * Direct rx interrupts to hosts
+ * Rx buffer size 4 or 8k
+ * RB timeout 0x10
+ * 256 RBDs
+ */
+ iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+ rb_size |
+ (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+ (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+ return 0;
+}
+
+int iwlagn_hw_nic_init(struct iwl_priv *priv)
+{
+ unsigned long flags;
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ int ret;
+
+ /* nic_init */
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->cfg->ops->lib->apm_ops.init(priv);
+
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
+
+ priv->cfg->ops->lib->apm_ops.config(priv);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ if (!rxq->bd) {
+ ret = iwl_rx_queue_alloc(priv);
+ if (ret) {
+ IWL_ERR(priv, "Unable to initialize Rx queue\n");
+ return -ENOMEM;
+ }
+ } else
+ iwlagn_rx_queue_reset(priv, rxq);
+
+ iwlagn_rx_replenish(priv);
+
+ iwlagn_rx_init(priv, rxq);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rxq->need_update = 1;
+ iwl_rx_queue_update_write_ptr(priv, rxq);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (!priv->txq) {
+ ret = iwlagn_txq_ctx_alloc(priv);
+ if (ret)
+ return ret;
+ } else
+ iwlagn_txq_ctx_reset(priv);
+
+ set_bit(STATUS_INIT, &priv->status);
+
+ return 0;
+}
+
+/**
+ * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
+ dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32)(dma_addr >> 8));
+}
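+/* Note: the RBD pointer is the DMA address in 256-byte units; this matches
+ * the 256-byte alignment enforced on rx pages in iwlagn_rx_allocate(). */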
+
+/**
+ * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
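+ *
+ * The device's write pointer is only updated once the write index has
+ * advanced past a multiple of 8 (see the write_actual check at the end).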
+ */
+void iwlagn_rx_queue_restock(struct iwl_priv *priv)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+ /* The overwritten rxb must be a used one */
+ rxb = rxq->queue[rxq->write];
+ BUG_ON(rxb && rxb->page);
+
+ /* Get next free Rx buffer, remove from free list */
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
+ rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(priv->workqueue, &priv->rx_replenish);
+
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ iwl_rx_queue_update_write_ptr(priv, rxq);
+ }
+}
+
+/**
+ * iwlagn_rx_allocate - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via iwl_rx_queue_restock.
+ * This is called as a scheduled work item (except during initialization).
+ */
+void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ struct page *page;
+ unsigned long flags;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (priv->hw_params.rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+ "order: %d\n",
+ priv->hw_params.rx_page_order);
+
+ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+ net_ratelimit())
+ IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
+ priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, priv->hw_params.rx_page_order);
+ return;
+ }
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ BUG_ON(rxb->page);
+ rxb->page = page;
+ /* Get physical address of the RB */
+ rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ priv->alloc_rxb_page++;
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+}
+
+void iwlagn_rx_replenish(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ iwlagn_rx_allocate(priv, GFP_KERNEL);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ iwlagn_rx_queue_restock(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+void iwlagn_rx_replenish_now(struct iwl_priv *priv)
+{
+ iwlagn_rx_allocate(priv, GFP_ATOMIC);
+
+ iwlagn_rx_queue_restock(priv);
+}
+
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
+ * This free routine walks the list of POOL entries and, if the SKB is set to
+ * non-NULL, it is unmapped and freed.
+ */
+void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ int i;
+ for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __iwl_free_pages(priv, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ }
+
+ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->dma_addr);
+ dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->bd = NULL;
+ rxq->rb_stts = NULL;
+}
+
+int iwlagn_rxq_stop(struct iwl_priv *priv)
+{
+
+ /* stop Rx DMA */
+ iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+
+ return 0;
+}
+
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+{
+ int idx = 0;
+ int band_offset = 0;
+
+ /* HT rate format: mac80211 wants an MCS number, which is just LSB */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = (rate_n_flags & 0xff);
+ return idx;
+ /* Legacy rate format, search for match in table */
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ band_offset = IWL_FIRST_OFDM_RATE;
+ for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
+ if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
+ return idx - band_offset;
+ }
+
+ return -1;
+}
+
+/* Calc max signal level (dBm) among 3 possible receivers */
+static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *rx_resp)
+{
+ return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+/**
+ * iwlagn_dbg_report_frame - dump frame to syslog during debug sessions
+ *
+ * You may hack this function to show different aspects of received frames,
+ * including selective frame dumps.
+ * group100 parameter selects whether to show 1 out of 100 good data frames.
+ * All beacon and probe response frames are printed.
+ */
+static void iwlagn_dbg_report_frame(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *phy_res, u16 length,
+ struct ieee80211_hdr *header, int group100)
+{
+ u32 to_us;
+ u32 print_summary = 0;
+ u32 print_dump = 0; /* set to 1 to dump all frames' contents */
+ u32 hundred = 0;
+ u32 dataframe = 0;
+ __le16 fc;
+ u16 seq_ctl;
+ u16 channel;
+ u16 phy_flags;
+ u32 rate_n_flags;
+ u32 tsf_low;
+ int rssi;
+
+ if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX)))
+ return;
+
+ /* MAC header */
+ fc = header->frame_control;
+ seq_ctl = le16_to_cpu(header->seq_ctrl);
+
+ /* metadata */
+ channel = le16_to_cpu(phy_res->channel);
+ phy_flags = le16_to_cpu(phy_res->phy_flags);
+ rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+ /* signal statistics */
+ rssi = iwlagn_calc_rssi(priv, phy_res);
+ tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;
+
+ to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
+
+ /* if data frame is to us and all is good,
+ * (optionally) print summary for only 1 out of every 100 */
+ if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
+ cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
+ dataframe = 1;
+ if (!group100)
+ print_summary = 1; /* print each frame */
+ else if (priv->framecnt_to_us < 100) {
+ priv->framecnt_to_us++;
+ print_summary = 0;
+ } else {
+ priv->framecnt_to_us = 0;
+ print_summary = 1;
+ hundred = 1;
+ }
+ } else {
+ /* print summary for all other frames */
+ print_summary = 1;
+ }
+
+ if (print_summary) {
+ char *title;
+ int rate_idx;
+ u32 bitrate;
+
+ if (hundred)
+ title = "100Frames";
+ else if (ieee80211_has_retry(fc))
+ title = "Retry";
+ else if (ieee80211_is_assoc_resp(fc))
+ title = "AscRsp";
+ else if (ieee80211_is_reassoc_resp(fc))
+ title = "RasRsp";
+ else if (ieee80211_is_probe_resp(fc)) {
+ title = "PrbRsp";
+ print_dump = 1; /* dump frame contents */
+ } else if (ieee80211_is_beacon(fc)) {
+ title = "Beacon";
+ print_dump = 1; /* dump frame contents */
+ } else if (ieee80211_is_atim(fc))
+ title = "ATIM";
+ else if (ieee80211_is_auth(fc))
+ title = "Auth";
+ else if (ieee80211_is_deauth(fc))
+ title = "DeAuth";
+ else if (ieee80211_is_disassoc(fc))
+ title = "DisAssoc";
+ else
+ title = "Frame";
+
+ rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
+ if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
+ bitrate = 0;
+ WARN_ON_ONCE(1);
+ } else {
+ bitrate = iwl_rates[rate_idx].ieee / 2;
+ }
+
+ /* print frame summary.
+ * MAC addresses show just the last byte (for brevity),
+ * but you can hack it to show more, if you'd like to. */
+ if (dataframe)
+ IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
+ "len=%u, rssi=%d, chnl=%d, rate=%u,\n",
+ title, le16_to_cpu(fc), header->addr1[5],
+ length, rssi, channel, bitrate);
+ else {
+ /* src/dst addresses assume managed mode */
+ IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
+ "len=%u, rssi=%d, tim=%lu usec, "
+ "phy=0x%02x, chnl=%d\n",
+ title, le16_to_cpu(fc), header->addr1[5],
+ header->addr3[5], length, rssi,
+ tsf_low - priv->scan_start_tsf,
+ phy_flags, channel);
+ }
+ }
+ if (print_dump)
+ iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
+}
+#endif
+
+static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
+{
+ u32 decrypt_out = 0;
+
+ if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+ RX_RES_STATUS_STATION_FOUND)
+ decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
+ RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+ decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+ /* packet was not encrypted */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_NONE)
+ return decrypt_out;
+
+ /* packet was encrypted with unknown alg */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_ERR)
+ return decrypt_out;
+
+ /* decryption was not done in HW */
+ if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+ RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+ return decrypt_out;
+
+ switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ /* alg is CCM: check MIC only */
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+ /* Bad MIC */
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+ break;
+
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+ /* Bad TTAK */
+ decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+ break;
+ }
+ /* fall through if TTAK OK */
+ default:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+ break;
+ }
+
+ IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
+ decrypt_in, decrypt_out);
+
+ return decrypt_out;
+}
+
+static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
+ struct ieee80211_hdr *hdr,
+ u16 len,
+ u32 ampdu_status,
+ struct iwl_rx_mem_buffer *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct sk_buff *skb;
+ __le16 fc = hdr->frame_control;
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!priv->is_open)) {
+ IWL_DEBUG_DROP_LIMIT(priv,
+ "Dropping packet while interface is not open.\n");
+ return;
+ }
+
+ /* In case of HW accelerated crypto and bad decryption, drop */
+ if (!priv->cfg->mod_params->sw_crypto &&
+ iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
+ return;
+
+ skb = dev_alloc_skb(128);
+ if (!skb) {
+ IWL_ERR(priv, "dev_alloc_skb failed\n");
+ return;
+ }
+
+ skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+ iwl_update_stats(priv, false, fc, len);
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx(priv->hw, skb);
+ priv->alloc_rxb_page--;
+ rxb->page = NULL;
+}
+
+/* Called for REPLY_RX (legacy ABG frames), or
+ * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
+void iwlagn_rx_reply_rx(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct ieee80211_hdr *header;
+ struct ieee80211_rx_status rx_status;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_phy_res *phy_res;
+ __le32 rx_pkt_status;
+ struct iwl4965_rx_mpdu_res_start *amsdu;
+ u32 len;
+ u32 ampdu_status;
+ u32 rate_n_flags;
+
+ /**
+ * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
+ * REPLY_RX: physical layer info is in this buffer
+ * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
+ * command and cached in priv->last_phy_res
+ *
+ * Here we set up local variables depending on which command is
+ * received.
+ */
+ if (pkt->hdr.cmd == REPLY_RX) {
+ phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+ + phy_res->cfg_phy_cnt);
+
+ len = le16_to_cpu(phy_res->byte_count);
+ rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
+ phy_res->cfg_phy_cnt + len);
+ ampdu_status = le32_to_cpu(rx_pkt_status);
+ } else {
+ if (!priv->_agn.last_phy_res_valid) {
+ IWL_ERR(priv, "MPDU frame without cached PHY data\n");
+ return;
+ }
+ phy_res = &priv->_agn.last_phy_res;
+ amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+ len = le16_to_cpu(amsdu->byte_count);
+ rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
+ ampdu_status = iwlagn_translate_rx_status(priv,
+ le32_to_cpu(rx_pkt_status));
+ }
+
+ if (unlikely(phy_res->cfg_phy_cnt > 20)) {
+ IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
+ phy_res->cfg_phy_cnt);
+ return;
+ }
+
+ if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
+ le32_to_cpu(rx_pkt_status));
+ return;
+ }
+
+ /* This will be used in several places later */
+ rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+ /* rx_status carries information about the packet to mac80211 */
+ rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
+ rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.rate_idx =
+ iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+ rx_status.flag = 0;
+
+ /* TSF isn't reliable. In order to allow a smooth user experience,
+ * this workaround does not propagate it to mac80211 */
+ /*rx_status.flag |= RX_FLAG_TSFT;*/
+
+ priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+ /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+ rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ /* Set "1" to report good data frames in groups of 100 */
+ if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX))
+ iwlagn_dbg_report_frame(priv, phy_res, len, header, 1);
+#endif
+ iwl_dbg_log_rx_data_frame(priv, len, header);
+ IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
+ rx_status.signal, (unsigned long long)rx_status.mactime);
+
+ /*
+ * "antenna number"
+ *
+ * It seems that the antenna field in the phy flags value
+ * is actually a bit field. This is undefined by radiotap;
+ * it wants an actual antenna number, but I always get "7"
+ * for most legacy frames I receive, indicating that the
+ * same frame was received on all three RX chains.
+ *
+ * I think this field should be removed in favor of a
+ * new 802.11n radiotap field "RX chains" that is defined
+ * as a bitmask.
+ */
+ rx_status.antenna =
+ (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
+ >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+ /* set the preamble flag if appropriate */
+ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ /* Set up the HT phy flags */
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ rx_status.flag |= RX_FLAG_HT;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ rx_status.flag |= RX_FLAG_40MHZ;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status.flag |= RX_FLAG_SHORT_GI;
+
+ iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
+ rxb, &rx_status);
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
+ * This will be used later in iwlagn_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
+void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ priv->_agn.last_phy_res_valid = true;
+ memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
+ sizeof(struct iwl_rx_phy_res));
+}
+
+static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ struct iwl_scan_channel *scan_ch)
+{
+ const struct ieee80211_supported_band *sband;
+ const struct iwl_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int i, added = 0;
+ u16 channel = 0;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband) {
+ IWL_ERR(priv, "invalid band\n");
+ return added;
+ }
+
+ active_dwell = iwl_get_active_dwell_time(priv, band, 0);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
+
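+ /* Keep the passive dwell strictly longer than the active dwell;
+ * the uCode appears to require this (hence the adjustment below). */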
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ /* Only scan a single channel; that is enough to reset the RF.
+ * Pick the first valid channel that is not currently in use. */
+ if (band == IEEE80211_BAND_5GHZ) {
+ for (i = 14; i < priv->channel_count; i++) {
+ if (priv->channel_info[i].channel !=
+ le16_to_cpu(priv->staging_rxon.channel)) {
+ channel = priv->channel_info[i].channel;
+ ch_info = iwl_get_channel_info(priv,
+ band, channel);
+ if (is_channel_valid(ch_info))
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < 14; i++) {
+ if (priv->channel_info[i].channel !=
+ le16_to_cpu(priv->staging_rxon.channel)) {
+ channel =
+ priv->channel_info[i].channel;
+ ch_info = iwl_get_channel_info(priv,
+ band, channel);
+ if (is_channel_valid(ch_info))
+ break;
+ }
+ }
+ }
+ if (channel) {
+ scan_ch->channel = cpu_to_le16(channel);
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+ added++;
+ } else
+ IWL_ERR(priv, "no valid channel found\n");
+ return added;
+}
+
+static int iwl_get_channels_for_scan(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ u8 is_active, u8 n_probes,
+ struct iwl_scan_channel *scan_ch)
+{
+ struct ieee80211_channel *chan;
+ const struct ieee80211_supported_band *sband;
+ const struct iwl_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added, i;
+ u16 channel;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband)
+ return 0;
+
+ active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
+ chan = priv->scan_request->channels[i];
+
+ if (chan->band != band)
+ continue;
+
+ channel = ieee80211_frequency_to_channel(chan->center_freq);
+ scan_ch->channel = cpu_to_le16(channel);
+
+ ch_info = iwl_get_channel_info(priv, band, channel);
+ if (!is_channel_valid(ch_info)) {
+ IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
+ channel);
+ continue;
+ }
+
+ if (!is_active || is_channel_passive(ch_info) ||
+ (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ else
+ scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+ if (n_probes)
+ scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
+
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+ IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
+ channel, le32_to_cpu(scan_ch->type),
+ (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+ "ACTIVE" : "PASSIVE",
+ (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+ active_dwell : passive_dwell);
+
+ scan_ch++;
+ added++;
+ }
+
+ IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
+ return added;
+}
+
+void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
+{
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_SCAN_CMD,
+ .len = sizeof(struct iwl_scan_cmd),
+ .flags = CMD_SIZE_HUGE,
+ };
+ struct iwl_scan_cmd *scan;
+ struct ieee80211_conf *conf = NULL;
+ u32 rate_flags = 0;
+ u16 cmd_len;
+ u16 rx_chain = 0;
+ enum ieee80211_band band;
+ u8 n_probes = 0;
+ u8 rx_ant = priv->hw_params.valid_rx_ant;
+ u8 rate;
+ bool is_active = false;
+ int chan_mod;
+ u8 active_chains;
+
+ conf = ieee80211_get_hw_conf(priv->hw);
+
+ cancel_delayed_work(&priv->scan_check);
+
+ if (!iwl_is_ready(priv)) {
+ IWL_WARN(priv, "request scan called when driver not ready.\n");
+ goto done;
+ }
+
+ /* Make sure the scan wasn't canceled before this queued work
+ * was given the chance to run... */
+ if (!test_bit(STATUS_SCANNING, &priv->status))
+ goto done;
+
+ /* This should never be called or scheduled if there is currently
+ * a scan active in the hardware. */
+ if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+ IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
+ "Ignoring second request.\n");
+ goto done;
+ }
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
+ goto done;
+ }
+
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
+ goto done;
+ }
+
+ if (iwl_is_rfkill(priv)) {
+ IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
+ goto done;
+ }
+
+ if (!test_bit(STATUS_READY, &priv->status)) {
+ IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n");
+ goto done;
+ }
+
+ if (!priv->scan_cmd) {
+ priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
+ IWL_MAX_SCAN_SIZE, GFP_KERNEL);
+ if (!priv->scan_cmd) {
+ IWL_DEBUG_SCAN(priv,
+ "fail to allocate memory for scan\n");
+ goto done;
+ }
+ }
+ scan = priv->scan_cmd;
+ memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
+
+ scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
+ scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
+
+ if (iwl_is_associated(priv)) {
+ u16 interval = 0;
+ u32 extra;
+ u32 suspend_time = 100;
+ u32 scan_suspend_time = 100;
+ unsigned long flags;
+
+ IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
+ spin_lock_irqsave(&priv->lock, flags);
+ interval = vif ? vif->bss_conf.beacon_int : 0;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ scan->suspend_time = 0;
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ if (!interval)
+ interval = suspend_time;
+
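+ /* Pack suspend_time for the uCode: number of whole beacon intervals
+ * in the upper bits (<< 22), the remainder scaled by 1024 in the
+ * lower bits (presumably TU expressed in usec). */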
+ extra = (suspend_time / interval) << 22;
+ scan_suspend_time = (extra |
+ ((suspend_time % interval) * 1024));
+ scan->suspend_time = cpu_to_le32(scan_suspend_time);
+ IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
+ scan_suspend_time, interval);
+ }
+
+ if (priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+ } else if (priv->scan_request->n_ssids) {
+ int i, p = 0;
+ IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+ for (i = 0; i < priv->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!priv->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ priv->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ priv->scan_request->ssids[i].ssid,
+ priv->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
+
+ scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+ scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
+ scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ switch (priv->scan_band) {
+ case IEEE80211_BAND_2GHZ:
+ scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+ chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK)
+ >> RXON_FLG_CHANNEL_MODE_POS;
+ if (chan_mod == CHANNEL_MODE_PURE_40) {
+ rate = IWL_RATE_6M_PLCP;
+ } else {
+ rate = IWL_RATE_1M_PLCP;
+ rate_flags = RATE_MCS_CCK_MSK;
+ }
+ scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rate = IWL_RATE_6M_PLCP;
+ /*
+ * If active scanning is requested but a certain channel is
+ * marked passive, we can do active scanning if we detect
+ * transmissions.
+ *
+ * There is an issue with some firmware versions that triggers
+ * a sysassert on a "good CRC threshold" of zero (== disabled),
+ * on a radar channel even though this means that we should NOT
+ * send probes.
+ *
+ * The "good CRC threshold" is the number of frames that we
+ * need to receive during our dwell time on a channel before
+ * sending out probes -- setting this to a huge value will
+ * mean we never reach it, but at the same time work around
+ * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
+ * here instead of IWL_GOOD_CRC_TH_DISABLED.
+ */
+ scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+ IWL_GOOD_CRC_TH_NEVER;
+ break;
+ default:
+ IWL_WARN(priv, "Invalid scan band count\n");
+ goto done;
+ }
+
+ band = priv->scan_band;
+
+ if (priv->cfg->scan_antennas[band])
+ rx_ant = priv->cfg->scan_antennas[band];
+
+ priv->scan_tx_ant[band] =
+ iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band]);
+ rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
+ scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
+
+ /* In power save mode use one chain, otherwise use all chains */
+ if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+ /* rx_ant has been set to all valid chains previously */
+ active_chains = rx_ant &
+ ((u8)(priv->chain_noise_data.active_chains));
+ if (!active_chains)
+ active_chains = rx_ant;
+
+ IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
+ priv->chain_noise_data.active_chains);
+
+ rx_ant = first_antenna(active_chains);
+ }
+ /* MIMO is not used here, but value is required */
+ rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+ scan->rx_chain = cpu_to_le16(rx_chain);
+ if (!priv->is_internal_short_scan) {
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ priv->scan_request->ie,
+ priv->scan_request->ie_len,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ } else {
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ NULL, 0,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+
+ }
+ scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+ scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
+ RXON_FILTER_BCON_AWARE_MSK);
+
+ if (priv->is_internal_short_scan) {
+ scan->channel_count =
+ iwl_get_single_channel_for_scan(priv, vif, band,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ } else {
+ scan->channel_count =
+ iwl_get_channels_for_scan(priv, vif, band,
+ is_active, n_probes,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ }
+ if (scan->channel_count == 0) {
+ IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
+ goto done;
+ }
+
+ cmd.len += le16_to_cpu(scan->tx_cmd.len) +
+ scan->channel_count * sizeof(struct iwl_scan_channel);
+ cmd.data = scan;
+ scan->len = cpu_to_le16(cmd.len);
+
+ set_bit(STATUS_SCAN_HW, &priv->status);
+ if (iwl_send_cmd_sync(priv, &cmd))
+ goto done;
+
+ queue_delayed_work(priv->workqueue, &priv->scan_check,
+ IWL_SCAN_CHECK_WATCHDOG);
+
+ return;
+
+ done:
+ /* Cannot perform scan. Make sure we clear the scanning
+ * bits from status so the next scan request can be performed.
+ * If we don't clear the scanning status bits here, all
+ * subsequent scan requests will fail.
+ */
+ clear_bit(STATUS_SCAN_HW, &priv->status);
+ clear_bit(STATUS_SCANNING, &priv->status);
+ /* inform mac80211 scan aborted */
+ queue_work(priv->workqueue, &priv->scan_completed);
+}
+
+int iwlagn_manage_ibss_station(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add)
+{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ if (add)
+ return iwl_add_bssid_station(priv, vif->bss_conf.bssid, true,
+ &vif_priv->ibss_bssid_sta_id);
+ return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
+ vif->bss_conf.bssid);
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 1460116..cf4a95b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -295,11 +295,11 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
return tl->total;
}
-static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
+static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
struct iwl_lq_sta *lq_data, u8 tid,
struct ieee80211_sta *sta)
{
- int ret;
+ int ret = -EAGAIN;
if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
@@ -313,29 +313,29 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
*/
IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
tid);
- ret = ieee80211_stop_tx_ba_session(sta, tid,
+ ieee80211_stop_tx_ba_session(sta, tid,
WLAN_BACK_INITIATOR);
}
- }
+ } else
+ IWL_ERR(priv, "Fail finding valid aggregation tid: %d\n", tid);
+ return ret;
}
static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
struct iwl_lq_sta *lq_data,
struct ieee80211_sta *sta)
{
- if ((tid < TID_MAX_LOAD_COUNT))
- rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
- else if (tid == IWL_AGG_ALL_TID)
- for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++)
- rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
- if (priv->cfg->use_rts_for_ht) {
- /*
- * switch to RTS/CTS if it is the prefer protection method
- * for HT traffic
- */
- IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n");
- priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
- iwlcore_commit_rxon(priv);
+ if ((tid < TID_MAX_LOAD_COUNT) &&
+ !rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta)) {
+ if (priv->cfg->use_rts_for_ht) {
+ /*
+ * switch to RTS/CTS if it is the preferred protection
+ * method for HT traffic
+ */
+ IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n");
+ priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
+ iwlcore_commit_rxon(priv);
+ }
}
}
@@ -611,10 +611,6 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
struct ieee80211_hdr *hdr,
enum iwl_table_type rate_type)
{
- if (hdr && is_multicast_ether_addr(hdr->addr1) &&
- lq_sta->active_rate_basic)
- return lq_sta->active_rate_basic;
-
if (is_legacy(rate_type)) {
return lq_sta->active_legacy_rate;
} else {
@@ -775,6 +771,15 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (!lq_sta) {
+ IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
+ return;
+ } else if (!lq_sta->drv) {
+ IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
+ return;
+ }
+
if (!ieee80211_is_data(hdr->frame_control) ||
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
@@ -784,10 +789,6 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
- !lq_sta->ibss_sta_added)
- return;
-
/*
* Ignore this Tx frame response if its initial rate doesn't match
* that of latest Link Quality command. There may be stragglers
@@ -833,7 +834,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
lq_sta->missed_rate_counter++;
if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
lq_sta->missed_rate_counter = 0;
- iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
+ iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
}
/* Regardless, ignore this status info for outdated rate */
return;
@@ -867,14 +868,14 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
&rs_index);
rs_collect_tx_data(curr_tbl, rs_index,
- info->status.ampdu_ack_len,
- info->status.ampdu_ack_map);
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len);
/* Update success/fail counts if not searching for new mode */
if (lq_sta->stay_in_tbl) {
- lq_sta->total_success += info->status.ampdu_ack_map;
- lq_sta->total_failed += (info->status.ampdu_ack_len -
- info->status.ampdu_ack_map);
+ lq_sta->total_success += info->status.ampdu_ack_len;
+ lq_sta->total_failed += (info->status.ampdu_len -
+ info->status.ampdu_ack_len);
}
} else {
/*
@@ -1913,7 +1914,7 @@ static u32 rs_update_rate_tbl(struct iwl_priv *priv,
/* Update uCode's rate table. */
rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
rs_fill_link_cmd(priv, lq_sta, rate);
- iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
+ iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
return rate;
}
@@ -2002,7 +2003,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
/* rates available for this association, and for modulation mode */
rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
- IWL_DEBUG_RATE(priv, "mask 0x%04X \n", rate_mask);
+ IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
/* mask with station rate restriction */
if (is_legacy(tbl->lq_type)) {
@@ -2077,10 +2078,12 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
}
/* Else we have enough samples; calculate estimate of
* actual average throughput */
-
- /* Sanity-check TPT calculations */
- BUG_ON(window->average_tpt != ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128));
+ if (window->average_tpt != ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128)) {
+ IWL_ERR(priv, "expected_tpt should have been calculated by now\n");
+ window->average_tpt = ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128);
+ }
/* If we are searching for better modulation mode, check success. */
if (lq_sta->search_better_tbl &&
@@ -2289,7 +2292,7 @@ lq_update:
IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n",
tbl->current_rate, index);
rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
- iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
+ iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
} else
done_search = 1;
}
@@ -2334,11 +2337,22 @@ out:
tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
i = index;
lq_sta->last_txrate_idx = i;
-
- return;
}
-
+/**
+ * rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values. These will be replaced later
+ * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
+ * rc80211_simple.
+ *
+ * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
+ * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
+ * which requires station table entry to exist).
+ */
static void rs_initialize_lq(struct iwl_priv *priv,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta,
@@ -2357,10 +2371,6 @@ static void rs_initialize_lq(struct iwl_priv *priv,
i = lq_sta->last_txrate_idx;
- if ((lq_sta->lq.sta_id == 0xff) &&
- (priv->iw_mode == NL80211_IFTYPE_ADHOC))
- goto out;
-
valid_tx_ant = priv->hw_params.valid_tx_ant;
if (!lq_sta->search_better_tbl)
@@ -2388,7 +2398,8 @@ static void rs_initialize_lq(struct iwl_priv *priv,
tbl->current_rate = rate;
rs_set_expected_tpt_table(lq_sta, tbl);
rs_fill_link_cmd(NULL, lq_sta, rate);
- iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
+ priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
+ iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_SYNC, true);
out:
return;
}
@@ -2399,10 +2410,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
struct sk_buff *skb = txrc->skb;
struct ieee80211_supported_band *sband = txrc->sband;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
- struct ieee80211_conf *conf = &priv->hw->conf;
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_lq_sta *lq_sta = priv_sta;
int rate_idx;
@@ -2420,30 +2428,18 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
lq_sta->max_rate_idx = -1;
}
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (lq_sta && !lq_sta->drv) {
+ IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
+ priv_sta = NULL;
+ }
+
/* Send management frames and NO_ACK data using lowest rate. */
if (rate_control_send_low(sta, priv_sta, txrc))
return;
rate_idx = lq_sta->last_txrate_idx;
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
- !lq_sta->ibss_sta_added) {
- u8 sta_id = iwl_find_station(priv, hdr->addr1);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
- hdr->addr1);
- sta_id = iwl_add_station(priv, hdr->addr1,
- false, CMD_ASYNC, ht_cap);
- }
- if ((sta_id != IWL_INVALID_STATION)) {
- lq_sta->lq.sta_id = sta_id;
- lq_sta->lq.rs_table[0].rate_n_flags = 0;
- lq_sta->ibss_sta_added = 1;
- rs_initialize_lq(priv, conf, sta, lq_sta);
- }
- }
-
if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
rate_idx -= IWL_FIRST_OFDM_RATE;
/* 6M and 9M shared same MCS index */
@@ -2493,16 +2489,25 @@ static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
return lq_sta;
}
-static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
{
int i, j;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+ struct ieee80211_hw *hw = priv->hw;
struct ieee80211_conf *conf = &priv->hw->conf;
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct iwl_lq_sta *lq_sta = priv_sta;
+ struct iwl_station_priv *sta_priv;
+ struct iwl_lq_sta *lq_sta;
+ struct ieee80211_supported_band *sband;
+
+ sta_priv = (struct iwl_station_priv *) sta->drv_priv;
+ lq_sta = &sta_priv->lq_sta;
+ sband = hw->wiphy->bands[conf->channel->band];
- lq_sta->lq.sta_id = 0xff;
+
+ lq_sta->lq.sta_id = sta_id;
for (j = 0; j < LQ_SIZE; j++)
for (i = 0; i < IWL_RATE_COUNT; i++)
@@ -2514,39 +2519,18 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
- IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init ***\n");
+ IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
+ sta_id);
/* TODO: what is a good starting rate for STA? About middle? Maybe not
* the lowest or the highest rate.. Could consider using RSSI from
* previous packets? Need to have IEEE 802.1X auth succeed immediately
* after assoc.. */
- lq_sta->ibss_sta_added = 0;
- if (priv->iw_mode == NL80211_IFTYPE_AP) {
- u8 sta_id = iwl_find_station(priv,
- sta->addr);
-
- /* for IBSS the call are from tasklet */
- IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr);
- sta_id = iwl_add_station(priv, sta->addr, false,
- CMD_ASYNC, ht_cap);
- }
- if ((sta_id != IWL_INVALID_STATION)) {
- lq_sta->lq.sta_id = sta_id;
- lq_sta->lq.rs_table[0].rate_n_flags = 0;
- }
- /* FIXME: this is w/a remove it later */
- priv->assoc_station_added = 1;
- }
-
lq_sta->is_dup = 0;
lq_sta->max_rate_idx = -1;
lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
lq_sta->is_green = rs_use_green(sta, &priv->current_ht_config);
lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
- lq_sta->active_rate_basic = priv->active_rate_basic;
lq_sta->band = priv->band;
/*
* active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
@@ -2574,8 +2558,17 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
lq_sta->active_mimo3_rate);
/* These values will be overridden later */
- lq_sta->lq.general_params.single_stream_ant_msk = ANT_A;
- lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+ lq_sta->lq.general_params.single_stream_ant_msk =
+ first_antenna(priv->hw_params.valid_tx_ant);
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant &
+ ~first_antenna(priv->hw_params.valid_tx_ant);
+ if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
+ lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant;
+ }
/* as default allow aggregation for all tids */
lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
@@ -2794,7 +2787,7 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
if (lq_sta->dbg_fixed_rate) {
rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
- iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC);
+ iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
}
return count;
@@ -2950,12 +2943,6 @@ static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
desc += sprintf(buff+desc,
"Bit Rate= %d Mb/s\n",
iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
- desc += sprintf(buff+desc,
- "Signal Level= %d dBm\tNoise Level= %d dBm\n",
- priv->last_rx_rssi, priv->last_rx_noise);
- desc += sprintf(buff+desc,
- "Tsf= 0x%llx\tBeacon time= 0x%08X\n",
- priv->last_tsf, priv->last_beacon_time);
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
return ret;
@@ -2995,12 +2982,21 @@ static void rs_remove_debugfs(void *priv, void *priv_sta)
}
#endif
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta)
+{
+}
static struct rate_control_ops rs_ops = {
.module = NULL,
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,
- .rate_init = rs_rate_init,
+ .rate_init = rs_rate_init_stub,
.alloc = rs_alloc,
.free = rs_free,
.alloc_sta = rs_alloc_sta,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index e719239..8292f6d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -403,7 +403,6 @@ struct iwl_lq_sta {
u8 is_green;
u8 is_dup;
enum ieee80211_band band;
- u8 ibss_sta_added;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
u32 supp_rates;
@@ -411,7 +410,6 @@ struct iwl_lq_sta {
u16 active_siso_rate;
u16 active_mimo2_rate;
u16 active_mimo3_rate;
- u16 active_rate_basic;
s8 max_rate_idx; /* Max rate set by user */
u8 missed_rate_counter;
@@ -479,6 +477,12 @@ static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
*/
extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+/* Initialize station's rate scaling information after adding station */
+extern void iwl_rs_rate_init(struct iwl_priv *priv,
+ struct ieee80211_sta *sta, u8 sta_id);
+extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
+ struct ieee80211_sta *sta, u8 sta_id);
+
/**
* iwl_rate_control_register - Register the rate control algorithm callbacks
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
new file mode 100644
index 0000000..c402bfc
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -0,0 +1,1340 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-agn-hw.h"
+#include "iwl-agn.h"
+
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following numbers, which we get from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ * VO 0
+ * VI 1
+ * BE 2
+ * BK 3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
+ * mapping.
+ */
+
+static const u8 tid_to_ac[] = {
+ /* this matches the mac80211 numbers */
+ 2, 3, 3, 2, 1, 1, 0, 0
+};
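+/* i.e. TIDs 0 and 3 map to BE, 1 and 2 to BK, 4 and 5 to VI, 6 and 7 to VO. */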
+
+static const u8 ac_to_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
+};
+
+static inline int get_fifo_from_ac(u8 ac)
+{
+ return ac_to_fifo[ac];
+}
+
+static inline int get_ac_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return tid_to_ac[tid];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+static inline int get_fifo_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return get_fifo_from_ac(tid_to_ac[tid]);
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+/**
+ * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt)
+{
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+ int write_ptr = txq->q.write_ptr;
+ int txq_id = txq->q.id;
+ u8 sec_ctl = 0;
+ u8 sta_id = 0;
+ u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+ __le16 bc_ent;
+
+ WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != IWL_CMD_QUEUE_NUM) {
+ sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
+ sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
+
+ switch (sec_ctl & TX_CMD_SEC_MSK) {
+ case TX_CMD_SEC_CCM:
+ len += CCMP_MIC_LEN;
+ break;
+ case TX_CMD_SEC_TKIP:
+ len += TKIP_ICV_LEN;
+ break;
+ case TX_CMD_SEC_WEP:
+ len += WEP_IV_LEN + WEP_ICV_LEN;
+ break;
+ }
+ }
+
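+ /* Byte-count entry: 12-bit length in the low bits, station id in the top 4 bits. */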
+ bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
+
+ scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
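+ /* The first TFD_QUEUE_SIZE_BC_DUP entries are mirrored past the end
+ * of the table, presumably so the scheduler can read a contiguous
+ * window across the wrap point. */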
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].
+ tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
+}
+
+void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq)
+{
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+ int txq_id = txq->q.id;
+ int read_ptr = txq->q.read_ptr;
+ u8 sta_id = 0;
+ __le16 bc_ent;
+
+ WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != IWL_CMD_QUEUE_NUM)
+ sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
+
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
+ scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].
+ tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+}
+
+static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
+ u16 txq_id)
+{
+ u32 tbl_dw_addr;
+ u32 tbl_dw;
+ u16 scd_q2ratid;
+
+ scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+ tbl_dw_addr = priv->scd_base_addr +
+ IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+
+ tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
+
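+ /* Each translate-table dword maps two queues: odd queue ids use the
+ * upper 16 bits, even queue ids the lower 16 bits. */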
+ if (txq_id & 0x1)
+ tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+ else
+ tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+ iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
+
+ return 0;
+}
+
+static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
+{
+ /* Simply stop the queue, but don't change any configuration;
+ * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+ iwl_write_prph(priv,
+ IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
+ (0 << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
+ (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
+ int txq_id, u32 index)
+{
+ iwl_write_direct32(priv, HBUS_TARG_WRPTR,
+ (index & 0xff) | (txq_id << 8));
+ iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(txq_id), index);
+}
+
+void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ int tx_fifo_id, int scd_retry)
+{
+ int txq_id = txq->q.id;
+ int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
+
+ iwl_write_prph(priv, IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
+ (active << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (tx_fifo_id << IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF) |
+ (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL) |
+ IWLAGN_SCD_QUEUE_STTS_REG_MSK);
+
+ txq->sched_retry = scd_retry;
+
+ IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
+ active ? "Activate" : "Deactivate",
+ scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
+}
+
+int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
+ int tx_fifo, int sta_id, int tid, u16 ssn_idx)
+{
+ unsigned long flags;
+ u16 ra_tid;
+
+ if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
+ <= txq_id)) {
+ IWL_WARN(priv,
+ "queue number out of range: %d, must be %d to %d\n",
+ txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
+ IWLAGN_FIRST_AMPDU_QUEUE +
+ priv->cfg->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ ra_tid = BUILD_RAxTID(sta_id, tid);
+
+ /* Modify device's station table to Tx this TID */
+ iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Stop this Tx queue before configuring it */
+ iwlagn_tx_queue_stop_scheduler(priv, txq_id);
+
+ /* Map receiver-address / traffic-ID to this queue */
+ iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
+
+ /* Set this queue as a chain-building queue */
+ iwl_set_bits_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, (1<<txq_id));
+
+ /* enable aggregations for the queue */
+ iwl_set_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1<<txq_id));
+
+ /* Place first TFD at index corresponding to start sequence number.
+ * Assumes that ssn_idx is valid (!= 0xFFF) */
+ priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
+
+ /* Set up Tx window size and frame limit for this queue */
+ iwl_write_targ_mem(priv, priv->scd_base_addr +
+ IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
+ sizeof(u32),
+ ((SCD_WIN_SIZE <<
+ IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+ IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+ ((SCD_FRAME_LIMIT <<
+ IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+
+ iwl_set_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
+
+ /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+ iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
+ u16 ssn_idx, u8 tx_fifo)
+{
+ if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
+ <= txq_id)) {
+ IWL_ERR(priv,
+ "queue number out of range: %d, must be %d to %d\n",
+ txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
+ IWLAGN_FIRST_AMPDU_QUEUE +
+ priv->cfg->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ iwlagn_tx_queue_stop_scheduler(priv, txq_id);
+
+ iwl_clear_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1 << txq_id));
+
+ priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ /* assumes that ssn_idx is valid (!= 0xFFF) */
+ iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
+
+ iwl_clear_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
+ iwl_txq_ctx_deactivate(priv, txq_id);
+ iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
+
+ return 0;
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask.
+ * must be called under priv->lock and mac access
+ */
+void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
+{
+ iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask);
+}
+
+static inline int get_queue_from_ac(u16 ac)
+{
+ return ac;
+}
+
+/*
+ * handle build REPLY_TX command notification.
+ */
+static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr,
+ u8 std_id)
+{
+ __le16 fc = hdr->frame_control;
+ __le32 tx_flags = tx_cmd->tx_flags;
+
+ tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ tx_flags |= TX_CMD_FLG_ACK_MSK;
+ if (ieee80211_is_mgmt(fc))
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (ieee80211_is_probe_resp(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+ tx_flags |= TX_CMD_FLG_TSF_MSK;
+ } else {
+ tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ if (ieee80211_is_back_req(fc))
+ tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+
+ tx_cmd->sta_id = std_id;
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ } else {
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
+
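+ /* If RTS or CTS protection is in use, ask for full-TXOP protection. */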
+ if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
+ tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+ tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+ else
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+ } else {
+ tx_cmd->timeout.pm_frame_timeout = 0;
+ }
+
+ tx_cmd->driver_txop = 0;
+ tx_cmd->tx_flags = tx_flags;
+ tx_cmd->next_frame_len = 0;
+}
+
+#define RTS_DFAULT_RETRY_LIMIT 60
+
+static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ __le16 fc)
+{
+ u32 rate_flags;
+ int rate_idx;
+ u8 rts_retry_limit;
+ u8 data_retry_limit;
+ u8 rate_plcp;
+
+ /* Set retry limit on DATA packets and Probe Responses */
+ if (ieee80211_is_probe_resp(fc))
+ data_retry_limit = 3;
+ else
+ data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
+ tx_cmd->data_retry_limit = data_retry_limit;
+
+ /* Set retry limit on RTS packets */
+ rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
+ if (data_retry_limit < rts_retry_limit)
+ rts_retry_limit = data_retry_limit;
+ tx_cmd->rts_retry_limit = rts_retry_limit;
+
+ /* DATA packets will use the uCode station table for rate/antenna
+ * selection */
+ if (ieee80211_is_data(fc)) {
+ tx_cmd->initial_rate_index = 0;
+ tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+ return;
+ }
+
+ /**
+ * If the current TX rate stored in mac80211 has the MCS bit set, it's
+ * not really a TX rate. Thus, we use the lowest supported rate for
+ * this band. Also use the lowest supported rate if the stored rate
+ * index is invalid.
+ */
+ rate_idx = info->control.rates[0].idx;
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
+ (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
+ rate_idx = rate_lowest_index(&priv->bands[info->band],
+ info->control.sta);
+ /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx += IWL_FIRST_OFDM_RATE;
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = iwl_rates[rate_idx].plcp;
+ /* Zero out flags for this packet */
+ rate_flags = 0;
+
+ /* Set CCK flag as needed */
+ if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ /* Set up RTS and CTS flags for certain packets */
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
+ tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Set up antennas */
+ priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
+ rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
+
+ /* Set the rate in the TX cmd */
+ tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
+}
+
+static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd,
+ struct sk_buff *skb_frag,
+ int sta_id)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ switch (keyconf->alg) {
+ case ALG_CCMP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+ IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
+ break;
+
+ case ALG_TKIP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+ ieee80211_get_tkip_key(keyconf, skb_frag,
+ IEEE80211_TKIP_P2_KEY, tx_cmd->key);
+ IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
+ break;
+
+ case ALG_WEP:
+ tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
+ (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+
+ if (keyconf->keylen == WEP_KEY_LEN_128)
+ tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+
+ memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+ IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
+ "with key %d\n", keyconf->keyidx);
+ break;
+
+ default:
+ IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
+ break;
+ }
+}
+
+/*
+ * start REPLY_TX command process
+ */
+int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = info->control.sta;
+ struct iwl_station_priv *sta_priv = NULL;
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_tx_cmd *tx_cmd;
+ int swq_id, txq_id;
+ dma_addr_t phys_addr;
+ dma_addr_t txcmd_phys;
+ dma_addr_t scratch_phys;
+ u16 len, len_org, firstlen, secondlen;
+ u16 seq_number = 0;
+ __le16 fc;
+ u8 hdr_len;
+ u8 sta_id;
+ u8 wait_write_ptr = 0;
+ u8 tid = 0;
+ u8 *qc = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (iwl_is_rfkill(priv)) {
+ IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
+ goto drop_unlock;
+ }
+
+ fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (ieee80211_is_auth(fc))
+ IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
+ else if (ieee80211_is_assoc_req(fc))
+ IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
+ else if (ieee80211_is_reassoc_req(fc))
+ IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
+#endif
+
+ hdr_len = ieee80211_hdrlen(fc);
+
+ /* Find index into station table for destination station */
+ if (!info->control.sta)
+ sta_id = priv->hw_params.bcast_sta_id;
+ else
+ sta_id = iwl_sta_id(info->control.sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+ hdr->addr1);
+ goto drop_unlock;
+ }
+
+ IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
+
+ if (sta)
+ sta_priv = (void *)sta->drv_priv;
+
+ if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
+ sta_priv->asleep) {
+ WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
+ /*
+ * This sends an asynchronous command to the device,
+ * but we can rely on it being processed before the
+ * next frame is processed -- and the next frame to
+ * this station is the one that will consume this
+ * counter.
+ * For now set the counter to just 1 since we do not
+ * support uAPSD yet.
+ */
+ iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
+ }
+
+ txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (unlikely(tid >= MAX_TID_COUNT))
+ goto drop_unlock;
+ seq_number = priv->stations[sta_id].tid[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl = hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ seq_number += 0x10;
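+ /* The low 4 bits of seq_ctrl hold the fragment number, so the
+ * driver's per-TID sequence counter advances in steps of 0x10. */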
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
+ txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+ }
+ }
+
+ txq = &priv->txq[txq_id];
+ swq_id = txq->swq_id;
+ q = &txq->q;
+
+ if (unlikely(iwl_queue_space(q) < q->high_mark))
+ goto drop_unlock;
+
+ if (ieee80211_is_data_qos(fc))
+ priv->stations[sta_id].tid[tid].tfds_in_queue++;
+
+ /* Set up driver data for this TFD */
+ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
+ txq->txb[q->write_ptr].skb[0] = skb;
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_cmd = txq->cmd[q->write_ptr];
+ out_meta = &txq->meta[q->write_ptr];
+ tx_cmd = &out_cmd->cmd.tx;
+ memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+ memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
+
+ /*
+ * Set up the Tx-command (not MAC!) header.
+ * Store the chosen Tx queue and TFD index within the sequence field;
+ * after Tx, uCode's Tx response will return this value so driver can
+ * locate the frame within the tx queue and do post-tx processing.
+ */
+ out_cmd->hdr.cmd = REPLY_TX;
+ out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(q->write_ptr)));
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+
+ /* Total # bytes to be transmitted */
+ len = (u16)skb->len;
+ tx_cmd->len = cpu_to_le16(len);
+
+ if (info->control.hw_key)
+ iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+
+ /* TODO need this for burst mode later on */
+ iwlagn_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
+ iwl_dbg_log_tx_data_frame(priv, len, hdr);
+
+ iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
+
+ iwl_update_stats(priv, true, fc, len);
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len = sizeof(struct iwl_tx_cmd) +
+ sizeof(struct iwl_cmd_header) + hdr_len;
+
+ len_org = len;
+ firstlen = len = (len + 3) & ~3;
+
+ if (len_org != len)
+ len_org = 1;
+ else
+ len_org = 0;
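+ /* From here on, len_org is reused as a flag: 1 if dword alignment added padding, 0 otherwise. */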
+
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (len_org)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys = pci_map_single(priv->pci_dev,
+ &out_cmd->hdr, len,
+ PCI_DMA_BIDIRECTIONAL);
+ pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ pci_unmap_len_set(out_meta, len, len);
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ txcmd_phys, len, 1, 0);
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ if (qc)
+ priv->stations[sta_id].tid[tid].seq_number = seq_number;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ secondlen = len = skb->len - hdr_len;
+ if (len) {
+ phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
+ len, PCI_DMA_TODEVICE);
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ phys_addr, len,
+ 0, 0);
+ }
+
+ scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+ offsetof(struct iwl_tx_cmd, scratch);
+
+ len = sizeof(struct iwl_tx_cmd) +
+ sizeof(struct iwl_cmd_header) + hdr_len;
+ /* take back ownership of DMA buffer to enable update */
+ pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
+ len, PCI_DMA_BIDIRECTIONAL);
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
+ IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
+ le16_to_cpu(out_cmd->hdr.sequence));
+ IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
+ iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
+ le16_to_cpu(tx_cmd->len));
+
+ pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
+ len, PCI_DMA_BIDIRECTIONAL);
+
+ trace_iwlwifi_dev_tx(priv,
+ &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+ sizeof(struct iwl_tfd),
+ &out_cmd->hdr, firstlen,
+ skb->data + hdr_len, secondlen);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+ iwl_txq_update_write_ptr(priv, txq);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually,
+ * regardless of the value of ret. "ret" only indicates
+ * whether or not we should update the write pointer.
+ */
+
+ /* avoid atomic ops if it isn't an associated client */
+ if (sta_priv && sta_priv->client)
+ atomic_inc(&sta_priv->pending_frames);
+
+ if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
+ if (wait_write_ptr) {
+ spin_lock_irqsave(&priv->lock, flags);
+ txq->need_update = 1;
+ iwl_txq_update_write_ptr(priv, txq);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ } else {
+ iwl_stop_queue(priv, txq->swq_id);
+ }
+ }
+
+ return 0;
+
+drop_unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return -1;
+}
+
+static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
+ struct iwl_dma_ptr *ptr, size_t size)
+{
+ ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
+ GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
+ struct iwl_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
+
+/**
+ * iwlagn_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
+{
+ int txq_id;
+
+ /* Tx queues */
+ if (priv->txq) {
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ if (txq_id == IWL_CMD_QUEUE_NUM)
+ iwl_cmd_queue_free(priv);
+ else
+ iwl_tx_queue_free(priv, txq_id);
+ }
+ iwlagn_free_dma_ptr(priv, &priv->kw);
+
+ iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
+
+ /* free tx queue structure */
+ iwl_free_txq_mem(priv);
+}
+
+/**
+ * iwlagn_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
+ *
+ * @param priv
+ * @return error code
+ */
+int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
+{
+ int ret;
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ /* Free all tx/cmd queues and keep-warm buffer */
+ iwlagn_hw_txq_ctx_free(priv);
+
+ ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
+ priv->hw_params.scd_bc_tbls_size);
+ if (ret) {
+ IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
+ goto error_bc_tbls;
+ }
+ /* Alloc keep-warm buffer */
+ ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
+ if (ret) {
+ IWL_ERR(priv, "Keep Warm allocation failed\n");
+ goto error_kw;
+ }
+
+ /* allocate tx queue structure */
+ ret = iwl_alloc_txq_mem(priv);
+ if (ret)
+ goto error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4) */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+ slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
+ txq_id);
+ if (ret) {
+ IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return ret;
+
+ error:
+ iwlagn_hw_txq_ctx_free(priv);
+ iwlagn_free_dma_ptr(priv, &priv->kw);
+ error_kw:
+ iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
+ error_bc_tbls:
+ return ret;
+}
+
+void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
+{
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4) */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+ slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
+ }
+}
+
+/**
+ * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
+ */
+void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
+{
+ int ch;
+ unsigned long flags;
+
+ /* Turn off all Tx DMA fifos */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
+ iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+ 1000);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because queues 0-6 should already
+ * be in use as EDCA AC (0-3), Command (4) and reserved (5, 6).
+ */
+static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
+{
+ int txq_id;
+
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
+ return txq_id;
+ return -1;
+}
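
Because test_and_set_bit() tests and claims the bit in one atomic step, two concurrent ADDBA requests cannot pick the same aggregation queue. A hedged usage sketch (error handling trimmed; the matching clear of the bit on teardown happens elsewhere in the driver):

	int txq_id = iwlagn_txq_ctx_activate_free(priv);

	if (txq_id == -1)	/* every queue already marked active */
		return -ENXIO;
	/* ... enable aggregation on txq_id; when the session ends, the
	 * corresponding bit in priv->txq_ctx_active_msk is cleared again */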
+
+int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ int sta_id;
+ int tx_fifo;
+ int txq_id;
+ int ret;
+ unsigned long flags;
+ struct iwl_tid_data *tid_data;
+
+ tx_fifo = get_fifo_from_tid(tid);
+ if (unlikely(tx_fifo < 0))
+ return tx_fifo;
+
+ IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
+ __func__, sta->addr, tid);
+
+ sta_id = iwl_sta_id(sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Start AGG on invalid station\n");
+ return -ENXIO;
+ }
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
+ IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
+ return -ENXIO;
+ }
+
+ txq_id = iwlagn_txq_ctx_activate_free(priv);
+ if (txq_id == -1) {
+ IWL_ERR(priv, "No free aggregation queue available\n");
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ tid_data = &priv->stations[sta_id].tid[tid];
+ *ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->agg.txq_id = txq_id;
+ priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(get_ac_from_tid(tid), txq_id);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
+ sta_id, tid, *ssn);
+ if (ret)
+ return ret;
+
+ if (tid_data->tfds_in_queue == 0) {
+ IWL_DEBUG_HT(priv, "HW queue is empty\n");
+ tid_data->agg.state = IWL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ } else {
+ IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
+ tid_data->tfds_in_queue);
+ tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+ return ret;
+}
+
+int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ int tx_fifo_id, txq_id, sta_id, ssn = -1;
+ struct iwl_tid_data *tid_data;
+ int write_ptr, read_ptr;
+ unsigned long flags;
+
+ tx_fifo_id = get_fifo_from_tid(tid);
+ if (unlikely(tx_fifo_id < 0))
+ return tx_fifo_id;
+
+ sta_id = iwl_sta_id(sta);
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ if (priv->stations[sta_id].tid[tid].agg.state ==
+ IWL_EMPTYING_HW_QUEUE_ADDBA) {
+ IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+ return 0;
+ }
+
+ if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
+ IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
+
+ tid_data = &priv->stations[sta_id].tid[tid];
+ ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+ txq_id = tid_data->agg.txq_id;
+ write_ptr = priv->txq[txq_id].q.write_ptr;
+ read_ptr = priv->txq[txq_id].q.read_ptr;
+
+ /* The queue is not empty */
+ if (write_ptr != read_ptr) {
+ IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
+ priv->stations[sta_id].tid[tid].agg.state =
+ IWL_EMPTYING_HW_QUEUE_DELBA;
+ return 0;
+ }
+
+ IWL_DEBUG_HT(priv, "HW queue is empty\n");
+ priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ /*
+	 * The only reason this call can fail is a queue number out of range,
+	 * which can happen if uCode is reloaded and all the station
+	 * information is lost. If it is outside the range, there is no need
+	 * to deactivate the uCode queue; just return "success" to allow
+	 * mac80211 to clean up its own data.
+ */
+ priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
+ tx_fifo_id);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+ return 0;
+}
+
+int iwlagn_txq_check_empty(struct iwl_priv *priv,
+ int sta_id, u8 tid, int txq_id)
+{
+ struct iwl_queue *q = &priv->txq[txq_id].q;
+ u8 *addr = priv->stations[sta_id].sta.sta.addr;
+ struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
+
+ switch (priv->stations[sta_id].tid[tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_DELBA:
+		/*
+		 * We are reclaiming the last packet of the
+		 * aggregated HW queue.
+		 */
+ if ((txq_id == tid_data->agg.txq_id) &&
+ (q->read_ptr == q->write_ptr)) {
+ u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+ int tx_fifo = get_fifo_from_tid(tid);
+ IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
+ priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
+ ssn, tx_fifo);
+ tid_data->agg.state = IWL_AGG_OFF;
+ ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
+ }
+ break;
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /* We are reclaiming the last packet of the queue */
+ if (tid_data->tfds_in_queue == 0) {
+ IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
+ tid_data->agg.state = IWL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
+ }
+ break;
+ }
+ return 0;
+}
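
Taken together, iwlagn_tx_agg_start(), iwlagn_tx_agg_stop() and iwlagn_txq_check_empty() implement a small per-TID state machine; a sketch of the transitions, using the driver's own state names:

/*
 * IWL_AGG_OFF --ADDBA, HW queue busy--> IWL_EMPTYING_HW_QUEUE_ADDBA --drained--> IWL_AGG_ON
 * IWL_AGG_OFF --ADDBA, HW queue empty----------------------------------------> IWL_AGG_ON
 * IWL_AGG_ON  --DELBA, HW queue busy--> IWL_EMPTYING_HW_QUEUE_DELBA --drained--> IWL_AGG_OFF
 * IWL_AGG_ON  --DELBA, HW queue empty----------------------------------------> IWL_AGG_OFF
 */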
+
+static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_sta *sta;
+ struct iwl_station_priv *sta_priv;
+
+ sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+ if (sta) {
+ sta_priv = (void *)sta->drv_priv;
+ /* avoid atomic ops if this isn't a client */
+ if (sta_priv->client &&
+ atomic_dec_return(&sta_priv->pending_frames) == 0)
+ ieee80211_sta_block_awake(priv->hw, sta, false);
+ }
+
+ ieee80211_tx_status_irqsafe(priv->hw, skb);
+}
+
+int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ struct iwl_tx_info *tx_info;
+ int nfreed = 0;
+ struct ieee80211_hdr *hdr;
+
+ if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
+ IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
+ "is out of range [0-%d] %d %d.\n", txq_id,
+ index, q->n_bd, q->write_ptr, q->read_ptr);
+ return 0;
+ }
+
+ for (index = iwl_queue_inc_wrap(index, q->n_bd);
+ q->read_ptr != index;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ tx_info = &txq->txb[txq->q.read_ptr];
+ iwlagn_tx_status(priv, tx_info->skb[0]);
+
+ hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
+ if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+ nfreed++;
+ tx_info->skb[0] = NULL;
+
+ if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
+ priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
+
+ priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+ }
+ return nfreed;
+}
+
+/**
+ * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
+ *
+ * Go through block-ack's bitmap of ACK'd frames, update driver's record of
+ * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
+ */
+static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_ht_agg *agg,
+ struct iwl_compressed_ba_resp *ba_resp)
+
+{
+ int i, sh, ack;
+ u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+ u64 bitmap;
+ int successes = 0;
+ struct ieee80211_tx_info *info;
+
+ if (unlikely(!agg->wait_for_ba)) {
+ IWL_ERR(priv, "Received BA when not expected\n");
+ return -EINVAL;
+ }
+
+ /* Mark that the expected block-ack response arrived */
+ agg->wait_for_ba = 0;
+ IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
+
+ /* Calculate shift to align block-ack bits with our Tx window bits */
+ sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
+	if (sh < 0) /* something is wrong with the indices */
+ sh += 0x100;
+
+ /* don't use 64-bit values for now */
+ bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
+
+ if (agg->frame_count > (64 - sh)) {
+ IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
+ return -1;
+ }
+
+ /* check for success or failure according to the
+ * transmitted bitmap and block-ack bitmap */
+ bitmap &= agg->bitmap;
+
+ /* For each frame attempted in aggregation,
+ * update driver's record of tx frame's status. */
+ for (i = 0; i < agg->frame_count ; i++) {
+ ack = bitmap & (1ULL << i);
+ successes += !!ack;
+ IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
+ ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
+ agg->start_idx + i);
+ }
+
+ info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
+ memset(&info->status, 0, sizeof(info->status));
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ info->status.ampdu_ack_len = successes;
+ info->status.ampdu_ack_map = bitmap;
+ info->status.ampdu_len = agg->frame_count;
+ iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
+
+ IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
+
+ return 0;
+}
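
A worked example of the alignment above, with made-up numbers: if the Tx window starts at agg->start_idx = 5 and SEQ_TO_INDEX(seq_ctl >> 4) = 3, then sh = 2 and the device's bitmap is shifted right by two so that bit i refers to frame start_idx + i:

	/* Illustrative values only, not driver state */
	u64 ba_bitmap = 0x2f;			/* device acked bits 0,1,2,3,5 */
	int sh = 5 - 3;				/* agg->start_idx - SEQ_TO_INDEX(...) */
	u64 aligned = ba_bitmap >> sh;		/* bit i now maps to frame 5 + i */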
+
+/**
+ * iwlagn_hwrate_to_tx_control - translate ucode response to mac80211
+ * tx status control values
+ */
+void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->control.rates[0];
+
+ info->antenna_sel_tx =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ r->flags |= IEEE80211_TX_RC_MCS;
+ if (rate_n_flags & RATE_MCS_GF_MSK)
+ r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ r->flags |= IEEE80211_TX_RC_DUP_DATA;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ r->flags |= IEEE80211_TX_RC_SHORT_GI;
+ r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+/**
+ * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
+ *
+ * Handles block-acknowledge notification from device, which reports success
+ * of frames sent via aggregation.
+ */
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+ struct iwl_tx_queue *txq = NULL;
+ struct iwl_ht_agg *agg;
+ int index;
+ int sta_id;
+ int tid;
+
+ /* "flow" corresponds to Tx queue */
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+ /* "ssn" is start of block-ack Tx window, corresponds to index
+ * (in Tx queue's circular buffer) of first TFD/frame in window */
+ u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+ if (scd_flow >= priv->hw_params.max_txq_num) {
+ IWL_ERR(priv,
+ "BUG_ON scd_flow is bigger than number of queues\n");
+ return;
+ }
+
+ txq = &priv->txq[scd_flow];
+ sta_id = ba_resp->sta_id;
+ tid = ba_resp->tid;
+ agg = &priv->stations[sta_id].tid[tid].agg;
+
+ /* Find index just before block-ack window */
+ index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
+
+ /* TODO: Need to get this copy more safely - now good for debug */
+
+ IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
+ "sta_id = %d\n",
+ agg->wait_for_ba,
+ (u8 *) &ba_resp->sta_addr_lo32,
+ ba_resp->sta_id);
+ IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
+ "%d, scd_ssn = %d\n",
+ ba_resp->tid,
+ ba_resp->seq_ctl,
+ (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+ ba_resp->scd_flow,
+ ba_resp->scd_ssn);
+ IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
+ agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ /* Update driver's record of ACK vs. not for each frame in window */
+ iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);
+
+ /* Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway). */
+ if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+ /* calculate mac80211 ampdu sw queue to wake */
+ int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+
+ if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
+ priv->mac80211_registered &&
+ (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
+ iwl_wake_queue(priv, txq->swq_id);
+
+ iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
+ }
+}
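
For example (illustrative numbers), if the BA reports scd_ssn = 10 on a 256-entry queue, the "index just before the window" is 9, and iwlagn_tx_queue_reclaim() advances read_ptr until it reaches 10, freeing every TFD in front of the block-ack window:

	/* Sketch with made-up values */
	u16 ssn = 10;
	int n_bd = 256;
	int index = (ssn - 1) & (n_bd - 1);	/* iwl_queue_dec_wrap(ssn & 0xff, n_bd) == 9 */
	/* the reclaim loop then frees TFDs read_ptr..index and leaves
	 * read_ptr == ssn */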
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
new file mode 100644
index 0000000..637286c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -0,0 +1,425 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-agn-hw.h"
+#include "iwl-agn.h"
+
+static const s8 iwlagn_default_queue_to_tx_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
+ IWLAGN_CMD_FIFO_NUM,
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
+};
+
+static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
+ {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
+ 0, COEX_UNASSOC_IDLE_FLAGS},
+ {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
+ 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
+ {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
+ 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
+ {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
+ 0, COEX_CALIBRATION_FLAGS},
+ {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
+ 0, COEX_PERIODIC_CALIBRATION_FLAGS},
+ {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
+ 0, COEX_CONNECTION_ESTAB_FLAGS},
+ {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
+ 0, COEX_ASSOCIATED_IDLE_FLAGS},
+ {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
+ 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
+ {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
+ 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
+ {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
+ 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
+ {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
+ {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
+ {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
+ 0, COEX_STAND_ALONE_DEBUG_FLAGS},
+ {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
+ 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
+ {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
+ {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
+};
+
+/*
+ * ucode
+ */
+static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
+ struct fw_desc *image, u32 dst_addr)
+{
+ dma_addr_t phy_addr = image->p_addr;
+ u32 byte_cnt = image->len;
+ int ret;
+
+ priv->ucode_write_complete = 0;
+
+ iwl_write_direct32(priv,
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+
+ iwl_write_direct32(priv,
+ FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
+
+ iwl_write_direct32(priv,
+ FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+ phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+
+ iwl_write_direct32(priv,
+ FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+ (iwl_get_dma_hi_addr(phy_addr)
+ << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+
+ iwl_write_direct32(priv,
+ FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
+ FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+
+ iwl_write_direct32(priv,
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+
+ IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
+ ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+ priv->ucode_write_complete, 5 * HZ);
+ if (ret == -ERESTARTSYS) {
+ IWL_ERR(priv, "Could not load the %s uCode section due "
+ "to interrupt\n", name);
+ return ret;
+ }
+ if (!ret) {
+ IWL_ERR(priv, "Could not load the %s uCode section\n",
+ name);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
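
The return-value handling above follows the usual wait_event_interruptible_timeout() convention: a negative value means the wait was interrupted by a signal, zero means the 5 second timeout expired before ucode_write_complete was set, and a positive value is the number of jiffies remaining on success. A condensed sketch of that pattern:

	long remaining = wait_event_interruptible_timeout(wq, condition, 5 * HZ);

	if (remaining == -ERESTARTSYS)
		return remaining;	/* interrupted by a signal */
	if (remaining == 0)
		return -ETIMEDOUT;	/* condition never became true */
	/* remaining > 0: condition met with 'remaining' jiffies to spare */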
+
+static int iwlagn_load_given_ucode(struct iwl_priv *priv,
+ struct fw_desc *inst_image,
+ struct fw_desc *data_image)
+{
+ int ret = 0;
+
+ ret = iwlagn_load_section(priv, "INST", inst_image,
+ IWLAGN_RTC_INST_LOWER_BOUND);
+ if (ret)
+ return ret;
+
+ return iwlagn_load_section(priv, "DATA", data_image,
+ IWLAGN_RTC_DATA_LOWER_BOUND);
+}
+
+int iwlagn_load_ucode(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ /* check whether init ucode should be loaded, or rather runtime ucode */
+ if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
+ IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
+ ret = iwlagn_load_given_ucode(priv,
+ &priv->ucode_init, &priv->ucode_init_data);
+ if (!ret) {
+ IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
+ priv->ucode_type = UCODE_INIT;
+ }
+ } else {
+ IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
+ "Loading runtime ucode...\n");
+ ret = iwlagn_load_given_ucode(priv,
+ &priv->ucode_code, &priv->ucode_data);
+ if (!ret) {
+ IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
+ priv->ucode_type = UCODE_RT;
+ }
+ }
+
+ return ret;
+}
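
Putting the pieces together, the two-stage boot flow implied by this function and the calibration handlers below looks roughly like this (a sketch, not literal driver code):

/*
 * ucode_type == UCODE_NONE  -> load ucode_init / ucode_init_data
 *   -> "initialize" alive   -> send calibration config
 *   -> calibration results  -> iwlagn_rx_calib_complete() queues a restart
 * ucode_type == UCODE_INIT  -> load ucode_code / ucode_data (runtime image)
 * ucode_type == UCODE_RT    -> normal operation
 */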
+
+/*
+ * Calibration
+ */
+static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
+{
+ struct iwl_calib_xtal_freq_cmd cmd;
+ __le16 *xtal_calib =
+ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
+
+ cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
+ cmd.hdr.first_group = 0;
+ cmd.hdr.groups_num = 1;
+ cmd.hdr.data_valid = 1;
+ cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
+ cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
+ return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
+ (u8 *)&cmd, sizeof(cmd));
+}
+
+static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
+{
+ struct iwl_calib_cfg_cmd calib_cfg_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = CALIBRATION_CFG_CMD,
+ .len = sizeof(struct iwl_calib_cfg_cmd),
+ .data = &calib_cfg_cmd,
+ };
+
+ memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
+ calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
+ calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
+ calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
+ calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
+
+ return iwl_send_cmd(priv, &cmd);
+}
+
+void iwlagn_rx_calib_result(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
+ int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ int index;
+
+	/* subtract the size of the length field itself */
+ len -= 4;
+
+ /* Define the order in which the results will be sent to the runtime
+	 * uCode. iwl_send_calib_results sends them in order, according to
+	 * their index. We sort them here.
+ */
+ switch (hdr->op_code) {
+ case IWL_PHY_CALIBRATE_DC_CMD:
+ index = IWL_CALIB_DC;
+ break;
+ case IWL_PHY_CALIBRATE_LO_CMD:
+ index = IWL_CALIB_LO;
+ break;
+ case IWL_PHY_CALIBRATE_TX_IQ_CMD:
+ index = IWL_CALIB_TX_IQ;
+ break;
+ case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
+ index = IWL_CALIB_TX_IQ_PERD;
+ break;
+ case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
+ index = IWL_CALIB_BASE_BAND;
+ break;
+ default:
+ IWL_ERR(priv, "Unknown calibration notification %d\n",
+ hdr->op_code);
+ return;
+ }
+ iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
+}
+
+void iwlagn_rx_calib_complete(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
+ queue_work(priv->workqueue, &priv->restart);
+}
+
+void iwlagn_init_alive_start(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ /* Check alive response for "valid" sign from uCode */
+ if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
+ goto restart;
+ }
+
+ /* initialize uCode was loaded... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "initialize" alive if code weren't properly loaded. */
+ if (iwl_verify_ucode(priv)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
+ goto restart;
+ }
+
+ ret = priv->cfg->ops->lib->alive_notify(priv);
+ if (ret) {
+ IWL_WARN(priv,
+ "Could not complete ALIVE transition: %d\n", ret);
+ goto restart;
+ }
+
+ iwlagn_send_calib_cfg(priv);
+ return;
+
+restart:
+ /* real restart (first load init_ucode) */
+ queue_work(priv->workqueue, &priv->restart);
+}
+
+static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
+{
+ struct iwl_wimax_coex_cmd coex_cmd;
+
+ if (priv->cfg->support_wimax_coexist) {
+		/* Unmask wake-up source at associated sleep */
+ coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
+
+		/* Unmask wake-up source at unassociated sleep */
+ coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
+ memcpy(coex_cmd.sta_prio, cu_priorities,
+ sizeof(struct iwl_wimax_coex_event_entry) *
+ COEX_NUM_OF_EVENTS);
+
+ /* enabling the coexistence feature */
+ coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
+
+ /* enabling the priorities tables */
+ coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
+ } else {
+ /* coexistence is disabled */
+ memset(&coex_cmd, 0, sizeof(coex_cmd));
+ }
+ return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
+ sizeof(coex_cmd), &coex_cmd);
+}
+
+int iwlagn_alive_notify(struct iwl_priv *priv)
+{
+ u32 a;
+ unsigned long flags;
+ int i, chan;
+ u32 reg_val;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
+ a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET;
+ for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET;
+ a += 4)
+ iwl_write_targ_mem(priv, a, 0);
+ for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET;
+ a += 4)
+ iwl_write_targ_mem(priv, a, 0);
+ for (; a < priv->scd_base_addr +
+ IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
+ iwl_write_targ_mem(priv, a, 0);
+
+ iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
+ priv->scd_bc_tbls.dma >> 10);
+
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
+ iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
+ iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
+ reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
+ IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
+ iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);
+
+	/* initialize the queues */
+ for (i = 0; i < priv->hw_params.max_txq_num; i++) {
+ iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
+ iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
+ iwl_write_targ_mem(priv, priv->scd_base_addr +
+ IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
+ iwl_write_targ_mem(priv, priv->scd_base_addr +
+ IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
+ sizeof(u32),
+ ((SCD_WIN_SIZE <<
+ IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+ IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+ ((SCD_FRAME_LIMIT <<
+ IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+ }
+
+ iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
+ IWL_MASK(0, priv->hw_params.max_txq_num));
+
+ /* Activate all Tx DMA/FIFO channels */
+ priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
+
+ iwlagn_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
+
+	/* make sure all queues are not stopped */
+ memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&priv->queue_stop_count[i], 0);
+
+	/* reset to 0 to enable all the queues first */
+ priv->txq_ctx_active_msk = 0;
+ /* map qos queues to fifos one-to-one */
+ BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
+
+ for (i = 0; i < ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); i++) {
+ int ac = iwlagn_default_queue_to_tx_fifo[i];
+
+ iwl_txq_ctx_activate(priv, i);
+
+ if (ac == IWL_TX_FIFO_UNUSED)
+ continue;
+
+ iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ iwlagn_send_wimax_coex(priv);
+
+ iwlagn_set_Xtal_calib(priv);
+ iwl_send_calib_results(priv);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index bdff565..aef4f71 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -55,6 +55,7 @@
#include "iwl-helpers.h"
#include "iwl-sta.h"
#include "iwl-calib.h"
+#include "iwl-agn.h"
/******************************************************************************
@@ -83,13 +84,6 @@ MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("iwl4965");
-/*************** STATION TABLE MANAGEMENT ****
- * mac80211 should be examined to determine if sta_info is duplicating
- * the functionality provided here
- */
-
-/**************************************************************/
-
/**
* iwl_commit_rxon - commit staging_rxon to hardware
*
@@ -144,9 +138,6 @@ int iwl_commit_rxon(struct iwl_priv *priv)
return 0;
}
- /* station table will be cleared */
- priv->assoc_station_added = 0;
-
/* If we are currently associated and the new config requires
* an RXON_ASSOC and the new config wants the associated mask enabled,
* we must clear the associated from the active configuration
@@ -166,6 +157,13 @@ int iwl_commit_rxon(struct iwl_priv *priv)
IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
return ret;
}
+ iwl_clear_ucode_stations(priv);
+ iwl_restore_stations(priv);
+ ret = iwl_restore_default_wep_keys(priv);
+ if (ret) {
+ IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
}
IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -179,9 +177,8 @@ int iwl_commit_rxon(struct iwl_priv *priv)
iwl_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto);
/* Apply the new configuration
- * RXON unassoc clears the station table in uCode, send it before
- * we add the bcast station. If assoc bit is set, we will send RXON
- * after having added the bcast and bssid station.
+ * RXON unassoc clears the station table in uCode so restoration of
+ * stations is needed after it (the RXON command) completes
*/
if (!new_assoc) {
ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
@@ -190,35 +187,19 @@ int iwl_commit_rxon(struct iwl_priv *priv)
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
return ret;
}
+ IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
+ iwl_clear_ucode_stations(priv);
+ iwl_restore_stations(priv);
+ ret = iwl_restore_default_wep_keys(priv);
+ if (ret) {
+ IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
}
- iwl_clear_stations_table(priv);
-
priv->start_calib = 0;
-
- /* Add the broadcast address so we can send broadcast frames */
- priv->cfg->ops->lib->add_bcast_station(priv);
-
-
- /* If we have set the ASSOC_MSK and we are in BSS mode then
- * add the IWL_AP_ID to the station rate table */
if (new_assoc) {
- if (priv->iw_mode == NL80211_IFTYPE_STATION) {
- ret = iwl_rxon_add_station(priv,
- priv->active_rxon.bssid_addr, 1);
- if (ret == IWL_INVALID_STATION) {
- IWL_ERR(priv,
- "Error adding AP address for TX.\n");
- return -EIO;
- }
- priv->assoc_station_added = 1;
- if (priv->default_wep_key &&
- iwl_send_static_wepkey_cmd(priv, 0))
- IWL_ERR(priv,
- "Could not send WEP static key.\n");
- }
-
/*
* allow CTS-to-self if possible for new association.
* this is relevant only for 5000 series and up,
@@ -907,10 +888,10 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
iwl_rx_missed_beacon_notif;
/* Rx handlers */
- priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
- priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
+ priv->rx_handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy;
+ priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx;
/* block ack */
- priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl_rx_reply_compressed_ba;
+ priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
/* Set up hardware specific Rx handlers */
priv->cfg->ops->lib->rx_handler_setup(priv);
}
@@ -1038,7 +1019,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
count++;
if (count >= 8) {
rxq->read = i;
- iwl_rx_replenish_now(priv);
+ iwlagn_rx_replenish_now(priv);
count = 0;
}
}
@@ -1047,9 +1028,9 @@ void iwl_rx_handle(struct iwl_priv *priv)
/* Backtrack one entry */
rxq->read = i;
if (fill_rx)
- iwl_rx_replenish_now(priv);
+ iwlagn_rx_replenish_now(priv);
else
- iwl_rx_queue_restock(priv);
+ iwlagn_rx_queue_restock(priv);
}
/* call this function to flush any scheduled tasklet */
@@ -1267,9 +1248,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
* hardware bugs here by ACKing all the possible interrupts so that
* interrupt coalescing can still be achieved.
*/
- iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask);
+ iwl_write32(priv, CSR_INT, priv->_agn.inta | ~priv->inta_mask);
- inta = priv->inta;
+ inta = priv->_agn.inta;
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
@@ -1282,8 +1263,8 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
spin_unlock_irqrestore(&priv->lock, flags);
- /* saved interrupt in inta variable now we can reset priv->inta */
- priv->inta = 0;
+ /* saved interrupt in inta variable now we can reset priv->_agn.inta */
+ priv->_agn.inta = 0;
/* Now service all interrupt bits discovered above. */
if (inta & CSR_INT_BIT_HW_ERR) {
@@ -1448,6 +1429,60 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
iwl_enable_interrupts(priv);
}
+/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
+#define ACK_CNT_RATIO (50)
+#define BA_TIMEOUT_CNT (5)
+#define BA_TIMEOUT_MAX (16)
+
+/**
+ * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
+ *
+ * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding
+ * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
+ * operation state.
+ */
+bool iwl_good_ack_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
+{
+ bool rc = true;
+ int actual_ack_cnt_delta, expected_ack_cnt_delta;
+ int ba_timeout_delta;
+
+ actual_ack_cnt_delta =
+ le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
+ le32_to_cpu(priv->statistics.tx.actual_ack_cnt);
+ expected_ack_cnt_delta =
+ le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
+ le32_to_cpu(priv->statistics.tx.expected_ack_cnt);
+ ba_timeout_delta =
+ le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
+ le32_to_cpu(priv->statistics.tx.agg.ba_timeout);
+ if ((priv->_agn.agg_tids_count > 0) &&
+ (expected_ack_cnt_delta > 0) &&
+ (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
+ < ACK_CNT_RATIO) &&
+ (ba_timeout_delta > BA_TIMEOUT_CNT)) {
+ IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
+ " expected_ack_cnt = %d\n",
+ actual_ack_cnt_delta, expected_ack_cnt_delta);
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
+ priv->delta_statistics.tx.rx_detected_cnt);
+ IWL_DEBUG_RADIO(priv,
+ "ack_or_ba_timeout_collision delta = %d\n",
+ priv->delta_statistics.tx.
+ ack_or_ba_timeout_collision);
+#endif
+ IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
+ ba_timeout_delta);
+ if (!actual_ack_cnt_delta &&
+ (ba_timeout_delta >= BA_TIMEOUT_MAX))
+ rc = false;
+ }
+ return rc;
+}
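
A numeric example of the check above, with illustrative deltas: expected_ack_cnt_delta = 200 and actual_ack_cnt_delta = 60 give a 30% ratio, below ACK_CNT_RATIO, so with more than BA_TIMEOUT_CNT BA timeouts the diagnostics fire; the function only reports bad health (false) in the harsher case of zero ACKs and at least BA_TIMEOUT_MAX timeouts:

	/* Illustrative numbers only; the real check also requires
	 * priv->_agn.agg_tids_count > 0 */
	int expected = 200, actual = 60, ba_timeouts = 7;
	bool degraded = expected > 0 &&
			(actual * 100) / expected < ACK_CNT_RATIO &&	/* 30 < 50 */
			ba_timeouts > BA_TIMEOUT_CNT;			/* 7 > 5 */
	bool reload = degraded && actual == 0 && ba_timeouts >= BA_TIMEOUT_MAX;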
+
/******************************************************************************
*
@@ -1471,9 +1506,13 @@ static void iwl_nic_start(struct iwl_priv *priv)
iwl_write32(priv, CSR_RESET, 0);
}
+struct iwlagn_ucode_capabilities {
+ u32 max_probe_length;
+};
static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
-static int iwl_mac_setup_register(struct iwl_priv *priv);
+static int iwl_mac_setup_register(struct iwl_priv *priv,
+ struct iwlagn_ucode_capabilities *capa);
static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
{
@@ -1500,6 +1539,199 @@ static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
iwl_ucode_callback);
}
+struct iwlagn_firmware_pieces {
+ const void *inst, *data, *init, *init_data, *boot;
+ size_t inst_size, data_size, init_size, init_data_size, boot_size;
+
+ u32 build;
+};
+
+static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
+ const struct firmware *ucode_raw,
+ struct iwlagn_firmware_pieces *pieces)
+{
+ struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
+ u32 api_ver, hdr_size;
+ const u8 *src;
+
+ priv->ucode_ver = le32_to_cpu(ucode->ver);
+ api_ver = IWL_UCODE_API(priv->ucode_ver);
+
+ switch (api_ver) {
+ default:
+ /*
+		 * The 4965 doesn't revise the firmware file format
+		 * along with the API version; it always uses the v1
+		 * file format.
+ */
+ if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) !=
+ CSR_HW_REV_TYPE_4965) {
+ hdr_size = 28;
+ if (ucode_raw->size < hdr_size) {
+ IWL_ERR(priv, "File size too small!\n");
+ return -EINVAL;
+ }
+ pieces->build = le32_to_cpu(ucode->u.v2.build);
+ pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size);
+ pieces->data_size = le32_to_cpu(ucode->u.v2.data_size);
+ pieces->init_size = le32_to_cpu(ucode->u.v2.init_size);
+ pieces->init_data_size = le32_to_cpu(ucode->u.v2.init_data_size);
+ pieces->boot_size = le32_to_cpu(ucode->u.v2.boot_size);
+ src = ucode->u.v2.data;
+ break;
+ }
+ /* fall through for 4965 */
+ case 0:
+ case 1:
+ case 2:
+ hdr_size = 24;
+ if (ucode_raw->size < hdr_size) {
+ IWL_ERR(priv, "File size too small!\n");
+ return -EINVAL;
+ }
+ pieces->build = 0;
+ pieces->inst_size = le32_to_cpu(ucode->u.v1.inst_size);
+ pieces->data_size = le32_to_cpu(ucode->u.v1.data_size);
+ pieces->init_size = le32_to_cpu(ucode->u.v1.init_size);
+ pieces->init_data_size = le32_to_cpu(ucode->u.v1.init_data_size);
+ pieces->boot_size = le32_to_cpu(ucode->u.v1.boot_size);
+ src = ucode->u.v1.data;
+ break;
+ }
+
+ /* Verify size of file vs. image size info in file's header */
+ if (ucode_raw->size != hdr_size + pieces->inst_size +
+ pieces->data_size + pieces->init_size +
+ pieces->init_data_size + pieces->boot_size) {
+
+ IWL_ERR(priv,
+ "uCode file size %d does not match expected size\n",
+ (int)ucode_raw->size);
+ return -EINVAL;
+ }
+
+ pieces->inst = src;
+ src += pieces->inst_size;
+ pieces->data = src;
+ src += pieces->data_size;
+ pieces->init = src;
+ src += pieces->init_size;
+ pieces->init_data = src;
+ src += pieces->init_data_size;
+ pieces->boot = src;
+ src += pieces->boot_size;
+
+ return 0;
+}
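
For reference, the legacy (pre-TLV) file handled above is a fixed header followed by the five images back to back; only the header length differs between versions:

/*
 * v1 (24-byte header): ver | inst_size | data_size | init_size |
 *                      init_data_size | boot_size
 * v2 (28-byte header): ver | build | inst_size | data_size | init_size |
 *                      init_data_size | boot_size
 * followed by:         inst | data | init | init_data | boot images,
 *                      with the sizes given in the header
 */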
+
+static int iwlagn_wanted_ucode_alternative = 1;
+
+static int iwlagn_load_firmware(struct iwl_priv *priv,
+ const struct firmware *ucode_raw,
+ struct iwlagn_firmware_pieces *pieces,
+ struct iwlagn_ucode_capabilities *capa)
+{
+ struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
+ struct iwl_ucode_tlv *tlv;
+ size_t len = ucode_raw->size;
+ const u8 *data;
+ int wanted_alternative = iwlagn_wanted_ucode_alternative, tmp;
+ u64 alternatives;
+
+ if (len < sizeof(*ucode))
+ return -EINVAL;
+
+ if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC))
+ return -EINVAL;
+
+ /*
+ * Check which alternatives are present, and "downgrade"
+ * when the chosen alternative is not present, warning
+ * the user when that happens. Some files may not have
+ * any alternatives, so don't warn in that case.
+ */
+ alternatives = le64_to_cpu(ucode->alternatives);
+ tmp = wanted_alternative;
+ if (wanted_alternative > 63)
+ wanted_alternative = 63;
+ while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
+ wanted_alternative--;
+ if (wanted_alternative && wanted_alternative != tmp)
+ IWL_WARN(priv,
+ "uCode alternative %d not available, choosing %d\n",
+ tmp, wanted_alternative);
+
+ priv->ucode_ver = le32_to_cpu(ucode->ver);
+ pieces->build = le32_to_cpu(ucode->build);
+ data = ucode->data;
+
+ len -= sizeof(*ucode);
+
+ while (len >= sizeof(*tlv)) {
+ u32 tlv_len;
+ enum iwl_ucode_tlv_type tlv_type;
+ u16 tlv_alt;
+ const u8 *tlv_data;
+
+ len -= sizeof(*tlv);
+ tlv = (void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_alt = le16_to_cpu(tlv->alternative);
+ tlv_data = tlv->data;
+
+ if (len < tlv_len)
+ return -EINVAL;
+ len -= ALIGN(tlv_len, 4);
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+
+ /*
+ * Alternative 0 is always valid.
+ *
+ * Skip alternative TLVs that are not selected.
+ */
+ if (tlv_alt != 0 && tlv_alt != wanted_alternative)
+ continue;
+
+ switch (tlv_type) {
+ case IWL_UCODE_TLV_INST:
+ pieces->inst = tlv_data;
+ pieces->inst_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_DATA:
+ pieces->data = tlv_data;
+ pieces->data_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_INIT:
+ pieces->init = tlv_data;
+ pieces->init_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_INIT_DATA:
+ pieces->init_data = tlv_data;
+ pieces->init_data_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_BOOT:
+ pieces->boot = tlv_data;
+ pieces->boot_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_PROBE_MAX_LEN:
+ if (tlv_len != 4)
+ return -EINVAL;
+ capa->max_probe_length =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (len)
+ return -EINVAL;
+
+ return 0;
+}
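
The parser above walks a simple TLV stream. A sketch of the on-disk layout it assumes (field names follow the structures used above; consult the driver headers for the exact definitions):

/*
 * [ iwl_tlv_ucode_header: magic | ver | build | alternatives bitmap | data... ]
 * [ TLV: type (le16) | alternative (le16) | length (le32) | data,
 *        padded to a 4-byte boundary ]
 * [ TLV: ... ]  repeated until the end of the file
 */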
+
/**
* iwl_ucode_callback - callback when firmware was loaded
*
@@ -1510,14 +1742,18 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
{
struct iwl_priv *priv = context;
struct iwl_ucode_header *ucode;
+ int err;
+ struct iwlagn_firmware_pieces pieces;
const unsigned int api_max = priv->cfg->ucode_api_max;
const unsigned int api_min = priv->cfg->ucode_api_min;
- u8 *src;
- size_t len;
- u32 api_ver, build;
- u32 inst_size, data_size, init_size, init_data_size, boot_size;
- int err;
- u16 eeprom_ver;
+ u32 api_ver;
+ char buildstr[25];
+ u32 build;
+ struct iwlagn_ucode_capabilities ucode_capa = {
+ .max_probe_length = 200,
+ };
+
+ memset(&pieces, 0, sizeof(pieces));
if (!ucode_raw) {
IWL_ERR(priv, "request for firmware file '%s' failed.\n",
@@ -1528,8 +1764,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
priv->firmware_name, ucode_raw->size);
- /* Make sure that we got at least the v1 header! */
- if (ucode_raw->size < priv->cfg->ops->ucode->get_header_size(1)) {
+ /* Make sure that we got at least the API version number */
+ if (ucode_raw->size < 4) {
IWL_ERR(priv, "File size way too small!\n");
goto try_again;
}
@@ -1537,21 +1773,23 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* Data from ucode file: header followed by uCode images */
ucode = (struct iwl_ucode_header *)ucode_raw->data;
- priv->ucode_ver = le32_to_cpu(ucode->ver);
+ if (ucode->ver)
+ err = iwlagn_load_legacy_firmware(priv, ucode_raw, &pieces);
+ else
+ err = iwlagn_load_firmware(priv, ucode_raw, &pieces,
+ &ucode_capa);
+
+ if (err)
+ goto try_again;
+
api_ver = IWL_UCODE_API(priv->ucode_ver);
- build = priv->cfg->ops->ucode->get_build(ucode, api_ver);
- inst_size = priv->cfg->ops->ucode->get_inst_size(ucode, api_ver);
- data_size = priv->cfg->ops->ucode->get_data_size(ucode, api_ver);
- init_size = priv->cfg->ops->ucode->get_init_size(ucode, api_ver);
- init_data_size =
- priv->cfg->ops->ucode->get_init_data_size(ucode, api_ver);
- boot_size = priv->cfg->ops->ucode->get_boot_size(ucode, api_ver);
- src = priv->cfg->ops->ucode->get_data(ucode, api_ver);
-
- /* api_ver should match the api version forming part of the
- * firmware filename ... but we don't check for that and only rely
- * on the API version read from firmware header from here on forward */
+ build = pieces.build;
+ /*
+ * api_ver should match the api version forming part of the
+ * firmware filename ... but we don't check for that and only rely
+ * on the API version read from firmware header from here on forward
+ */
if (api_ver < api_min || api_ver > api_max) {
IWL_ERR(priv, "Driver unable to support your firmware API. "
"Driver supports v%u, firmware is v%u.\n",
@@ -1565,40 +1803,26 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
"from http://www.intellinuxwireless.org.\n",
api_max, api_ver);
- IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
+ if (build)
+ sprintf(buildstr, " build %u", build);
+ else
+ buildstr[0] = '\0';
+
+ IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u%s\n",
+ IWL_UCODE_MAJOR(priv->ucode_ver),
+ IWL_UCODE_MINOR(priv->ucode_ver),
+ IWL_UCODE_API(priv->ucode_ver),
+ IWL_UCODE_SERIAL(priv->ucode_ver),
+ buildstr);
snprintf(priv->hw->wiphy->fw_version,
sizeof(priv->hw->wiphy->fw_version),
- "%u.%u.%u.%u",
+ "%u.%u.%u.%u%s",
IWL_UCODE_MAJOR(priv->ucode_ver),
IWL_UCODE_MINOR(priv->ucode_ver),
IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- if (build)
- IWL_DEBUG_INFO(priv, "Build %u\n", build);
-
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
- IWL_DEBUG_INFO(priv, "NVM Type: %s, version: 0x%x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
- ? "OTP" : "EEPROM", eeprom_ver);
-
- IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
- priv->ucode_ver);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
- inst_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
- data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
- init_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
- init_data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
- boot_size);
+ IWL_UCODE_SERIAL(priv->ucode_ver),
+ buildstr);
/*
* For any of the failures below (before allocating pci memory)
@@ -1606,43 +1830,47 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
* user just got a corrupted version of the latest API.
*/
- /* Verify size of file vs. image size info in file's header */
- if (ucode_raw->size !=
- priv->cfg->ops->ucode->get_header_size(api_ver) +
- inst_size + data_size + init_size +
- init_data_size + boot_size) {
-
- IWL_DEBUG_INFO(priv,
- "uCode file size %d does not match expected size\n",
- (int)ucode_raw->size);
- goto try_again;
- }
+ IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
+ priv->ucode_ver);
+ IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
+ pieces.inst_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
+ pieces.data_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
+ pieces.init_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
+ pieces.init_data_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
+ pieces.boot_size);
/* Verify that uCode images will fit in card's SRAM */
- if (inst_size > priv->hw_params.max_inst_size) {
- IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n",
- inst_size);
+ if (pieces.inst_size > priv->hw_params.max_inst_size) {
+ IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
+ pieces.inst_size);
goto try_again;
}
- if (data_size > priv->hw_params.max_data_size) {
- IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n",
- data_size);
+ if (pieces.data_size > priv->hw_params.max_data_size) {
+ IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
+ pieces.data_size);
goto try_again;
}
- if (init_size > priv->hw_params.max_inst_size) {
- IWL_INFO(priv, "uCode init instr len %d too large to fit in\n",
- init_size);
+
+ if (pieces.init_size > priv->hw_params.max_inst_size) {
+ IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
+ pieces.init_size);
goto try_again;
}
- if (init_data_size > priv->hw_params.max_data_size) {
- IWL_INFO(priv, "uCode init data len %d too large to fit in\n",
- init_data_size);
+
+ if (pieces.init_data_size > priv->hw_params.max_data_size) {
+ IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
+ pieces.init_data_size);
goto try_again;
}
- if (boot_size > priv->hw_params.max_bsm_size) {
- IWL_INFO(priv, "uCode boot instr len %d too large to fit in\n",
- boot_size);
+
+ if (pieces.boot_size > priv->hw_params.max_bsm_size) {
+ IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
+ pieces.boot_size);
goto try_again;
}
@@ -1651,13 +1879,13 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* Runtime instructions and 2 copies of data:
* 1) unmodified from disk
* 2) backup cache for save/restore during power-downs */
- priv->ucode_code.len = inst_size;
+ priv->ucode_code.len = pieces.inst_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
- priv->ucode_data.len = data_size;
+ priv->ucode_data.len = pieces.data_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
- priv->ucode_data_backup.len = data_size;
+ priv->ucode_data_backup.len = pieces.data_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
@@ -1665,11 +1893,11 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
goto err_pci_alloc;
/* Initialization instructions and data */
- if (init_size && init_data_size) {
- priv->ucode_init.len = init_size;
+ if (pieces.init_size && pieces.init_data_size) {
+ priv->ucode_init.len = pieces.init_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
- priv->ucode_init_data.len = init_data_size;
+ priv->ucode_init_data.len = pieces.init_data_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
@@ -1677,8 +1905,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
}
/* Bootstrap (instructions only, no data) */
- if (boot_size) {
- priv->ucode_boot.len = boot_size;
+ if (pieces.boot_size) {
+ priv->ucode_boot.len = pieces.boot_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
if (!priv->ucode_boot.v_addr)
@@ -1688,51 +1916,48 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* Copy images into buffers for card's bus-master reads ... */
/* Runtime instructions (first block of data in file) */
- len = inst_size;
- IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n", len);
- memcpy(priv->ucode_code.v_addr, src, len);
- src += len;
+ IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
+ pieces.inst_size);
+ memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
- /* Runtime data (2nd block)
- * NOTE: Copy into backup buffer will be done in iwl_up() */
- len = data_size;
- IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n", len);
- memcpy(priv->ucode_data.v_addr, src, len);
- memcpy(priv->ucode_data_backup.v_addr, src, len);
- src += len;
-
- /* Initialization instructions (3rd block) */
- if (init_size) {
- len = init_size;
+ /*
+ * Runtime data
+ * NOTE: Copy into backup buffer will be done in iwl_up()
+ */
+ IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
+ pieces.data_size);
+ memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
+ memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
+
+ /* Initialization instructions */
+ if (pieces.init_size) {
IWL_DEBUG_INFO(priv, "Copying (but not loading) init instr len %Zd\n",
- len);
- memcpy(priv->ucode_init.v_addr, src, len);
- src += len;
+ pieces.init_size);
+ memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
}
- /* Initialization data (4th block) */
- if (init_data_size) {
- len = init_data_size;
+ /* Initialization data */
+ if (pieces.init_data_size) {
IWL_DEBUG_INFO(priv, "Copying (but not loading) init data len %Zd\n",
- len);
- memcpy(priv->ucode_init_data.v_addr, src, len);
- src += len;
+ pieces.init_data_size);
+ memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
+ pieces.init_data_size);
}
- /* Bootstrap instructions (5th block) */
- len = boot_size;
- IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n", len);
- memcpy(priv->ucode_boot.v_addr, src, len);
+ /* Bootstrap instructions */
+ IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
+ pieces.boot_size);
+ memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
/**************************************************
* This is still part of probe() in a sense...
*
* 9. Setup and register with mac80211 and debugfs
**************************************************/
- err = iwl_mac_setup_register(priv);
+ err = iwl_mac_setup_register(priv, &ucode_capa);
if (err)
goto out_unbind;
@@ -1742,6 +1967,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* We have our copies now, allow OS release its copies */
release_firmware(ucode_raw);
+ complete(&priv->_agn.firmware_loading_complete);
return;
try_again:
@@ -1755,6 +1981,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
IWL_ERR(priv, "failed to allocate pci memory\n");
iwl_dealloc_ucode_pci(priv);
out_unbind:
+ complete(&priv->_agn.firmware_loading_complete);
device_release_driver(&priv->pci_dev->dev);
release_firmware(ucode_raw);
}
@@ -1809,6 +2036,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
u32 data2, line;
u32 desc, time, count, base, data1;
u32 blink1, blink2, ilink1, ilink2;
+ u32 pc, hcmd;
if (priv->ucode_type == UCODE_INIT)
base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
@@ -1831,6 +2059,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
}
desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
+ pc = iwl_read_targ_mem(priv, base + 2 * sizeof(u32));
blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
@@ -1839,6 +2068,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
+ hcmd = iwl_read_targ_mem(priv, base + 22 * sizeof(u32));
trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line,
blink1, blink2, ilink1, ilink2);
@@ -1847,10 +2077,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
"data1 data2 line\n");
IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
desc_lookup(desc), desc, time, data1, data2, line);
- IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n");
- IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
- ilink1, ilink2);
-
+ IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
+ IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
+ pc, blink1, blink2, ilink1, ilink2, hcmd);
}
#define EVENT_START_OFFSET (4 * sizeof(u32))
@@ -1966,9 +2195,6 @@ static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
return pos;
}
-/* For sanity check only. Actual size is determined by uCode, typ. 512 */
-#define MAX_EVENT_LOG_SIZE (512)
-
#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
@@ -2001,16 +2227,16 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
- if (capacity > MAX_EVENT_LOG_SIZE) {
+ if (capacity > priv->cfg->max_event_log_size) {
IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
- capacity, MAX_EVENT_LOG_SIZE);
- capacity = MAX_EVENT_LOG_SIZE;
+ capacity, priv->cfg->max_event_log_size);
+ capacity = priv->cfg->max_event_log_size;
}
- if (next_entry > MAX_EVENT_LOG_SIZE) {
+ if (next_entry > priv->cfg->max_event_log_size) {
IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
- next_entry, MAX_EVENT_LOG_SIZE);
- next_entry = MAX_EVENT_LOG_SIZE;
+ next_entry, priv->cfg->max_event_log_size);
+ next_entry = priv->cfg->max_event_log_size;
}
size = num_wraps ? capacity : next_entry;
@@ -2095,7 +2321,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
goto restart;
}
- iwl_clear_stations_table(priv);
ret = priv->cfg->ops->lib->alive_notify(priv);
if (ret) {
IWL_WARN(priv,
@@ -2106,13 +2331,19 @@ static void iwl_alive_start(struct iwl_priv *priv)
/* After the ALIVE response, we can send host commands to the uCode */
set_bit(STATUS_ALIVE, &priv->status);
+ if (priv->cfg->ops->lib->recover_from_tx_stall) {
+ /* Enable timer to monitor the driver queues */
+ mod_timer(&priv->monitor_recover,
+ jiffies +
+ msecs_to_jiffies(priv->cfg->monitor_recover_period));
+ }
+
if (iwl_is_rfkill(priv))
return;
ieee80211_wake_queues(priv->hw);
- priv->active_rate = priv->rates_mask;
- priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
+ priv->active_rate = IWL_RATES_MASK;
/* Configure Tx antenna selection based on H/W config */
if (priv->cfg->ops->hcmd->set_tx_ant)
@@ -2126,7 +2357,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
} else {
/* Initialize our rx_config data */
- iwl_connection_init_rx_config(priv, priv->iw_mode);
+ iwl_connection_init_rx_config(priv, NULL);
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
@@ -2135,7 +2366,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
}
/* Configure Bluetooth device coexistence support */
- iwl_send_bt_config(priv);
+ priv->cfg->ops->hcmd->send_bt_config(priv);
iwl_reset_run_time_calib(priv);
@@ -2152,18 +2383,8 @@ static void iwl_alive_start(struct iwl_priv *priv)
wake_up_interruptible(&priv->wait_command_queue);
iwl_power_update_mode(priv, true);
+ IWL_DEBUG_INFO(priv, "Updated power mode\n");
- /* reassociate for ADHOC mode */
- if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
- struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
- priv->vif);
- if (beacon)
- iwl_mac_beacon_update(priv->hw, beacon);
- }
-
-
- if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
- iwl_set_mode(priv, priv->iw_mode);
return;
@@ -2183,7 +2404,9 @@ static void __iwl_down(struct iwl_priv *priv)
if (!exit_pending)
set_bit(STATUS_EXIT_PENDING, &priv->status);
- iwl_clear_stations_table(priv);
+ iwl_clear_ucode_stations(priv);
+ iwl_dealloc_bcast_station(priv);
+ iwl_clear_driver_stations(priv);
/* Unblock any waiting calls */
wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2231,8 +2454,8 @@ static void __iwl_down(struct iwl_priv *priv)
/* device going down, Stop using ICT table */
iwl_disable_ict(priv);
- iwl_txq_ctx_stop(priv);
- iwl_rxq_stop(priv);
+ iwlagn_txq_ctx_stop(priv);
+ iwlagn_rxq_stop(priv);
/* Power-down device's busmaster DMA clocks */
iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
@@ -2292,7 +2515,7 @@ static int iwl_prepare_card_hw(struct iwl_priv *priv)
{
int ret = 0;
- IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter \n");
+ IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter\n");
ret = iwl_set_hw_ready(priv);
if (priv->hw_ready)
@@ -2330,6 +2553,10 @@ static int __iwl_up(struct iwl_priv *priv)
return -EIO;
}
+ ret = iwl_alloc_bcast_station(priv, true);
+ if (ret)
+ return ret;
+
iwl_prepare_card_hw(priv);
if (!priv->hw_ready) {
@@ -2353,7 +2580,7 @@ static int __iwl_up(struct iwl_priv *priv)
iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- ret = iwl_hw_nic_init(priv);
+ ret = iwlagn_hw_nic_init(priv);
if (ret) {
IWL_ERR(priv, "Unable to init nic\n");
return ret;
@@ -2380,8 +2607,6 @@ static int __iwl_up(struct iwl_priv *priv)
for (i = 0; i < MAX_HW_RESTARTS; i++) {
- iwl_clear_stations_table(priv);
-
/* load bootstrap state machine,
* load bootstrap program into processor's memory,
* prepare to load the "initialize" uCode */
@@ -2467,7 +2692,6 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
}
mutex_unlock(&priv->mutex);
- return;
}
static void iwl_bg_restart(struct work_struct *data)
@@ -2505,34 +2729,28 @@ static void iwl_bg_rx_replenish(struct work_struct *data)
return;
mutex_lock(&priv->mutex);
- iwl_rx_replenish(priv);
+ iwlagn_rx_replenish(priv);
mutex_unlock(&priv->mutex);
}
#define IWL_DELAY_NEXT_SCAN (HZ*2)
-void iwl_post_associate(struct iwl_priv *priv)
+void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
struct ieee80211_conf *conf = NULL;
int ret = 0;
- unsigned long flags;
- if (priv->iw_mode == NL80211_IFTYPE_AP) {
+ if (!vif || !priv->is_open)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_AP) {
IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
return;
}
- IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
- priv->assoc_id, priv->active_rxon.bssid_addr);
-
-
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
-
- if (!priv->vif || !priv->is_open)
- return;
-
iwl_scan_cancel_timeout(priv, 200);
conf = ieee80211_get_hw_conf(priv->hw);
@@ -2540,7 +2758,7 @@ void iwl_post_associate(struct iwl_priv *priv)
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
- iwl_setup_rxon_timing(priv);
+ iwl_setup_rxon_timing(priv, vif);
ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
sizeof(priv->rxon_timing), &priv->rxon_timing);
if (ret)
@@ -2554,56 +2772,44 @@ void iwl_post_associate(struct iwl_priv *priv)
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
- priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
+ priv->staging_rxon.assoc_id = cpu_to_le16(vif->bss_conf.aid);
IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
- priv->assoc_id, priv->beacon_int);
+ vif->bss_conf.aid, vif->bss_conf.beacon_int);
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif->bss_conf.assoc_capability &
+ WLAN_CAPABILITY_SHORT_SLOT_TIME)
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+ if (vif->type == NL80211_IFTYPE_ADHOC)
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
}
iwlcore_commit_rxon(priv);
- switch (priv->iw_mode) {
+ IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
+ vif->bss_conf.aid, priv->active_rxon.bssid_addr);
+
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
break;
-
case NL80211_IFTYPE_ADHOC:
-
- /* assume default assoc id */
- priv->assoc_id = 1;
-
- iwl_rxon_add_station(priv, priv->bssid, 0);
iwl_send_beacon_cmd(priv);
-
break;
-
default:
IWL_ERR(priv, "%s Should not be called in %d mode\n",
- __func__, priv->iw_mode);
+ __func__, vif->type);
break;
}
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
- priv->assoc_station_added = 1;
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_activate_qos(priv, 0);
- spin_unlock_irqrestore(&priv->lock, flags);
-
/* the chain noise calibration will enabled PM upon completion
* If chain noise has already been run, then we need to enable
* power management here */
@@ -2628,7 +2834,8 @@ void iwl_post_associate(struct iwl_priv *priv)
* Not a mac80211 entry point function, but it fits in with all the
* other mac80211 functions grouped here.
*/
-static int iwl_mac_setup_register(struct iwl_priv *priv)
+static int iwl_mac_setup_register(struct iwl_priv *priv,
+ struct iwlagn_ucode_capabilities *capa)
{
int ret;
struct ieee80211_hw *hw = priv->hw;
@@ -2636,7 +2843,6 @@ static int iwl_mac_setup_register(struct iwl_priv *priv)
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_SPECTRUM_MGMT;
@@ -2649,6 +2855,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv)
IEEE80211_HW_SUPPORTS_STATIC_SMPS;
hw->sta_data_size = sizeof(struct iwl_station_priv);
+ hw->vif_data_size = sizeof(struct iwl_vif_priv);
+
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
@@ -2664,7 +2872,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv)
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
/* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
+ hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
/* Default value; 4 EDCA QOS priorities */
hw->queues = 4;
@@ -2770,17 +2978,16 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
- if (iwl_tx_skb(priv, skb))
+ if (iwlagn_tx_skb(priv, skb))
dev_kfree_skb_any(skb);
IWL_DEBUG_MACDUMP(priv, "leave\n");
return NETDEV_TX_OK;
}
-void iwl_config_ap(struct iwl_priv *priv)
+void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
int ret = 0;
- unsigned long flags;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@@ -2793,7 +3000,7 @@ void iwl_config_ap(struct iwl_priv *priv)
iwlcore_commit_rxon(priv);
/* RXON Timing */
- iwl_setup_rxon_timing(priv);
+ iwl_setup_rxon_timing(priv, vif);
ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
sizeof(priv->rxon_timing), &priv->rxon_timing);
if (ret)
@@ -2807,9 +3014,10 @@ void iwl_config_ap(struct iwl_priv *priv)
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
- /* FIXME: what should be the assoc_id for AP? */
- priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ priv->staging_rxon.assoc_id = 0;
+
+ if (vif->bss_conf.assoc_capability &
+ WLAN_CAPABILITY_SHORT_PREAMBLE)
priv->staging_rxon.flags |=
RXON_FLG_SHORT_PREAMBLE_MSK;
else
@@ -2817,26 +3025,21 @@ void iwl_config_ap(struct iwl_priv *priv)
~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
- if (priv->assoc_capability &
- WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif->bss_conf.assoc_capability &
+ WLAN_CAPABILITY_SHORT_SLOT_TIME)
priv->staging_rxon.flags |=
RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &=
~RXON_FLG_SHORT_SLOT_MSK;
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+ if (vif->type == NL80211_IFTYPE_ADHOC)
priv->staging_rxon.flags &=
~RXON_FLG_SHORT_SLOT_MSK;
}
/* restore RXON assoc */
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
- iwl_reset_qos(priv);
- spin_lock_irqsave(&priv->lock, flags);
- iwl_activate_qos(priv, 1);
- spin_unlock_irqrestore(&priv->lock, flags);
- iwl_add_bcast_station(priv);
}
iwl_send_beacon_cmd(priv);
@@ -2855,8 +3058,7 @@ static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
struct iwl_priv *priv = hw->priv;
IWL_DEBUG_MAC80211(priv, "enter\n");
- iwl_update_tkip_key(priv, keyconf,
- sta ? sta->addr : iwl_bcast_addr,
+ iwl_update_tkip_key(priv, keyconf, sta,
iv32, phase1key);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2868,7 +3070,6 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct iwl_priv *priv = hw->priv;
- const u8 *addr;
int ret;
u8 sta_id;
bool is_default_wep_key = false;
@@ -2879,25 +3080,29 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
return -EOPNOTSUPP;
}
- addr = sta ? sta->addr : iwl_bcast_addr;
- sta_id = iwl_find_station(priv, addr);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
- addr);
- return -EINVAL;
+ if (sta) {
+ sta_id = iwl_sta_id(sta);
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
+ sta->addr);
+ return -EINVAL;
+ }
+ } else {
+ sta_id = priv->hw_params.bcast_sta_id;
}
mutex_lock(&priv->mutex);
iwl_scan_cancel_timeout(priv, 100);
- mutex_unlock(&priv->mutex);
- /* If we are getting WEP group key and we didn't receive any key mapping
+ /*
+ * If we are getting WEP group key and we didn't receive any key mapping
* so far, we are in legacy wep mode (group key only), otherwise we are
* in 1X mode.
- * In legacy wep mode, we use another host command to the uCode */
- if (key->alg == ALG_WEP && sta_id == priv->hw_params.bcast_sta_id &&
- priv->iw_mode != NL80211_IFTYPE_AP) {
+ * In legacy wep mode, we use another host command to the uCode.
+ */
+ if (key->alg == ALG_WEP && !sta && vif->type != NL80211_IFTYPE_AP) {
if (cmd == SET_KEY)
is_default_wep_key = !priv->key_mapping_key;
else
@@ -2926,6 +3131,7 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = -EINVAL;
}
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
@@ -2933,8 +3139,8 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
struct iwl_priv *priv = hw->priv;
int ret;
@@ -2948,20 +3154,31 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
IWL_DEBUG_HT(priv, "start Rx\n");
- return iwl_sta_rx_agg_start(priv, sta->addr, tid, *ssn);
+ return iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
case IEEE80211_AMPDU_RX_STOP:
IWL_DEBUG_HT(priv, "stop Rx\n");
- ret = iwl_sta_rx_agg_stop(priv, sta->addr, tid);
+ ret = iwl_sta_rx_agg_stop(priv, sta, tid);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return 0;
else
return ret;
case IEEE80211_AMPDU_TX_START:
IWL_DEBUG_HT(priv, "start Tx\n");
- return iwl_tx_agg_start(priv, sta->addr, tid, ssn);
+ ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
+ if (ret == 0) {
+ priv->_agn.agg_tids_count++;
+ IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
+ priv->_agn.agg_tids_count);
+ }
+ return ret;
case IEEE80211_AMPDU_TX_STOP:
IWL_DEBUG_HT(priv, "stop Tx\n");
- ret = iwl_tx_agg_stop(priv, sta->addr, tid);
+ ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
+ if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) {
+ priv->_agn.agg_tids_count--;
+ IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
+ priv->_agn.agg_tids_count);
+ }
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return 0;
else
@@ -2977,18 +3194,6 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
return 0;
}
-static int iwl_mac_get_stats(struct ieee80211_hw *hw,
- struct ieee80211_low_level_stats *stats)
-{
- struct iwl_priv *priv = hw->priv;
-
- priv = hw->priv;
- IWL_DEBUG_MAC80211(priv, "enter\n");
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return 0;
-}
-
static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum sta_notify_cmd cmd,
@@ -2998,18 +3203,7 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
int sta_id;
- /*
- * TODO: We really should use this callback to
- * actually maintain the station table in
- * the device.
- */
-
switch (cmd) {
- case STA_NOTIFY_ADD:
- atomic_set(&sta_priv->pending_frames, 0);
- if (vif->type == NL80211_IFTYPE_AP)
- sta_priv->client = true;
- break;
case STA_NOTIFY_SLEEP:
WARN_ON(!sta_priv->client);
sta_priv->asleep = true;
@@ -3021,7 +3215,7 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
if (!sta_priv->asleep)
break;
sta_priv->asleep = false;
- sta_id = iwl_find_station(priv, sta->addr);
+ sta_id = iwl_sta_id(sta);
if (sta_id != IWL_INVALID_STATION)
iwl_sta_modify_ps_wake(priv, sta_id);
break;
@@ -3030,6 +3224,44 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
}
}
+static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ int ret;
+ u8 sta_id;
+
+ sta_priv->common.sta_id = IWL_INVALID_STATION;
+
+ IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
+ sta->addr);
+
+ atomic_set(&sta_priv->pending_frames, 0);
+ if (vif->type == NL80211_IFTYPE_AP)
+ sta_priv->client = true;
+
+ ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap,
+ &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+ sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ return ret;
+ }
+
+ sta_priv->common.sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
+ sta->addr);
+ iwl_rs_rate_init(priv, sta, sta_id);
+
+ return 0;
+}
+
/*****************************************************************************
*
* sysfs attributes
@@ -3130,125 +3362,6 @@ static ssize_t store_tx_power(struct device *d,
static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
-static ssize_t show_flags(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
-}
-
-static ssize_t store_flags(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- u32 flags;
- int ret = strict_strtoul(buf, 0, &val);
- if (ret)
- return ret;
- flags = (u32)val;
-
- mutex_lock(&priv->mutex);
- if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
- /* Cancel any currently running scans... */
- if (iwl_scan_cancel_timeout(priv, 100))
- IWL_WARN(priv, "Could not cancel scan.\n");
- else {
- IWL_DEBUG_INFO(priv, "Commit rxon.flags = 0x%04X\n", flags);
- priv->staging_rxon.flags = cpu_to_le32(flags);
- iwlcore_commit_rxon(priv);
- }
- }
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
-
-static ssize_t show_filter_flags(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- return sprintf(buf, "0x%04X\n",
- le32_to_cpu(priv->active_rxon.filter_flags));
-}
-
-static ssize_t store_filter_flags(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- u32 filter_flags;
- int ret = strict_strtoul(buf, 0, &val);
- if (ret)
- return ret;
- filter_flags = (u32)val;
-
- mutex_lock(&priv->mutex);
- if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
- /* Cancel any currently running scans... */
- if (iwl_scan_cancel_timeout(priv, 100))
- IWL_WARN(priv, "Could not cancel scan.\n");
- else {
- IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
- "0x%04X\n", filter_flags);
- priv->staging_rxon.filter_flags =
- cpu_to_le32(filter_flags);
- iwlcore_commit_rxon(priv);
- }
- }
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
- store_filter_flags);
-
-
-static ssize_t show_statistics(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- u32 size = sizeof(struct iwl_notif_statistics);
- u32 len = 0, ofs = 0;
- u8 *data = (u8 *)&priv->statistics;
- int rc = 0;
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- mutex_lock(&priv->mutex);
- rc = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (rc) {
- len = sprintf(buf,
- "Error sending statistics request: 0x%08X\n", rc);
- return len;
- }
-
- while (size && (PAGE_SIZE - len)) {
- hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
- PAGE_SIZE - len, 1);
- len = strlen(buf);
- if (PAGE_SIZE - len)
- buf[len++] = '\n';
-
- ofs += 16;
- size -= min(size, 16U);
- }
-
- return len;
-}
-
-static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
-
static ssize_t show_rts_ht_protection(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -3316,6 +3429,13 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
priv->ucode_trace.data = (unsigned long)priv;
priv->ucode_trace.function = iwl_bg_ucode_trace;
+ if (priv->cfg->ops->lib->recover_from_tx_stall) {
+ init_timer(&priv->monitor_recover);
+ priv->monitor_recover.data = (unsigned long)priv;
+ priv->monitor_recover.function =
+ priv->cfg->ops->lib->recover_from_tx_stall;
+ }
+
if (!priv->cfg->use_isr_legacy)
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
iwl_irq_tasklet, (unsigned long)priv);
@@ -3336,6 +3456,8 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
cancel_work_sync(&priv->beacon_update);
del_timer_sync(&priv->statistics_periodic);
del_timer_sync(&priv->ucode_trace);
+ if (priv->cfg->ops->lib->recover_from_tx_stall)
+ del_timer_sync(&priv->monitor_recover);
}
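As an aside, the tx-stall monitor timer added by this patch follows the usual kernel timer lifecycle; a minimal sketch collecting the three steps spread across the hunks above (the wrapper function is hypothetical, the fields and callback are those from the diff):

/* sketch only: init in iwl_setup_deferred_work(), arm in iwl_alive_start(),
 * tear down in iwl_cancel_deferred_work() */
static void example_monitor_timer_lifecycle(struct iwl_priv *priv)
{
	if (!priv->cfg->ops->lib->recover_from_tx_stall)
		return;

	init_timer(&priv->monitor_recover);
	priv->monitor_recover.data = (unsigned long)priv;
	priv->monitor_recover.function =
		priv->cfg->ops->lib->recover_from_tx_stall;

	mod_timer(&priv->monitor_recover,
		  jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period));

	del_timer_sync(&priv->monitor_recover);
}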
static void iwl_init_hw_rates(struct iwl_priv *priv,
@@ -3373,9 +3495,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
mutex_init(&priv->mutex);
mutex_init(&priv->sync_cmd_mutex);
- /* Clear the driver's (not device's) station table */
- iwl_clear_stations_table(priv);
-
priv->ieee_channels = NULL;
priv->ieee_rates = NULL;
priv->band = IEEE80211_BAND_2GHZ;
@@ -3383,6 +3502,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->iw_mode = NL80211_IFTYPE_STATION;
priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
+ priv->_agn.agg_tids_count = 0;
/* initialize force reset */
priv->force_reset[IWL_RF_RESET].reset_duration =
@@ -3396,16 +3516,10 @@ static int iwl_init_drv(struct iwl_priv *priv)
iwl_init_scan_params(priv);
- iwl_reset_qos(priv);
-
- priv->qos_data.qos_active = 0;
- priv->qos_data.qos_cap.val = 0;
-
- priv->rates_mask = IWL_RATES_MASK;
/* Set the tx_power_user_lmt to the lowest power level
* this value will get overwritten by channel max power avg
* from eeprom */
- priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MIN;
+ priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN;
ret = iwl_init_channel_map(priv);
if (ret) {
@@ -3433,13 +3547,10 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
iwl_calib_free_results(priv);
iwlcore_free_geos(priv);
iwl_free_channel_map(priv);
- kfree(priv->scan);
+ kfree(priv->scan_cmd);
}
static struct attribute *iwl_sysfs_entries[] = {
- &dev_attr_flags.attr,
- &dev_attr_filter_flags.attr,
- &dev_attr_statistics.attr,
&dev_attr_temperature.attr,
&dev_attr_tx_power.attr,
&dev_attr_rts_ht_protection.attr,
@@ -3464,13 +3575,14 @@ static struct ieee80211_ops iwl_hw_ops = {
.configure_filter = iwl_configure_filter,
.set_key = iwl_mac_set_key,
.update_tkip_key = iwl_mac_update_tkip_key,
- .get_stats = iwl_mac_get_stats,
.conf_tx = iwl_mac_conf_tx,
.reset_tsf = iwl_mac_reset_tsf,
.bss_info_changed = iwl_bss_info_changed,
.ampdu_action = iwl_mac_ampdu_action,
.hw_scan = iwl_mac_hw_scan,
.sta_notify = iwl_mac_sta_notify,
+ .sta_add = iwlagn_mac_sta_add,
+ .sta_remove = iwl_mac_sta_remove,
};
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -3574,7 +3686,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
iwl_hw_detect(priv);
- IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
+ IWL_INFO(priv, "Detected %s, REV=0x%X\n",
priv->cfg->name, priv->hw_rev);
/* We disable the RETRY_TIMEOUT register (0x41) to keep
@@ -3672,6 +3784,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_power_initialize(priv);
iwl_tt_initialize(priv);
+ init_completion(&priv->_agn.firmware_loading_complete);
+
err = iwl_request_firmware(priv, true);
if (err)
goto out_remove_sysfs;
@@ -3712,6 +3826,8 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
if (!priv)
return;
+ wait_for_completion(&priv->_agn.firmware_loading_complete);
+
IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
iwl_dbgfs_unregister(priv);
@@ -3752,10 +3868,9 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
iwl_dealloc_ucode_pci(priv);
if (priv->rxq.bd)
- iwl_rx_queue_free(priv, &priv->rxq);
- iwl_hw_txq_ctx_free(priv);
+ iwlagn_rx_queue_free(priv, &priv->rxq);
+ iwlagn_hw_txq_ctx_free(priv);
- iwl_clear_stations_table(priv);
iwl_eeprom_free(priv);
@@ -3870,6 +3985,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
+/* 6x00 Series Gen2a */
+ {IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000g2a_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1211, iwl6000g2a_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1221, iwl6000g2a_2agn_cfg)},
+
/* 6x50 WiFi/WiMax Series */
{IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
{IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
@@ -3951,3 +4071,38 @@ module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif
+module_param_named(swcrypto50, iwlagn_mod_params.sw_crypto, bool, S_IRUGO);
+MODULE_PARM_DESC(swcrypto50,
+ "using crypto in software (default 0 [hardware]) (deprecated)");
+module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
+module_param_named(queues_num50,
+ iwlagn_mod_params.num_of_queues, int, S_IRUGO);
+MODULE_PARM_DESC(queues_num50,
+ "number of hw queues in 50xx series (deprecated)");
+module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
+MODULE_PARM_DESC(queues_num, "number of hw queues.");
+module_param_named(11n_disable50, iwlagn_mod_params.disable_11n, int, S_IRUGO);
+MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality (deprecated)");
+module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO);
+MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(amsdu_size_8K50, iwlagn_mod_params.amsdu_size_8K,
+ int, S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K50,
+ "enable 8K amsdu size in 50XX series (deprecated)");
+module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
+ int, S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+module_param_named(fw_restart50, iwlagn_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart50,
+ "restart firmware in case of error (deprecated)");
+module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
+module_param_named(
+ disable_hw_scan, iwlagn_mod_params.disable_hw_scan, int, S_IRUGO);
+MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
+
+module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int,
+ S_IRUGO);
+MODULE_PARM_DESC(ucode_alternative,
+ "specify ucode alternative to use from ucode file");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
new file mode 100644
index 0000000..2d74805
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -0,0 +1,181 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_agn_h__
+#define __iwl_agn_h__
+
+#include "iwl-dev.h"
+
+extern struct iwl_mod_params iwlagn_mod_params;
+extern struct iwl_hcmd_ops iwlagn_hcmd;
+extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
+
+int iwl_reset_ict(struct iwl_priv *priv);
+void iwl_disable_ict(struct iwl_priv *priv);
+int iwl_alloc_isr_ict(struct iwl_priv *priv);
+void iwl_free_isr_ict(struct iwl_priv *priv);
+irqreturn_t iwl_isr_ict(int irq, void *data);
+bool iwl_good_ack_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+
+/* tx queue */
+void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
+ int txq_id, u32 index);
+void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ int tx_fifo_id, int scd_retry);
+void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt);
+void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq);
+int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
+ int tx_fifo, int sta_id, int tid, u16 ssn_idx);
+int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
+ u16 ssn_idx, u8 tx_fifo);
+void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask);
+
+/* uCode */
+int iwlagn_load_ucode(struct iwl_priv *priv);
+void iwlagn_rx_calib_result(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwlagn_rx_calib_complete(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwlagn_init_alive_start(struct iwl_priv *priv);
+int iwlagn_alive_notify(struct iwl_priv *priv);
+
+/* lib */
+void iwl_check_abort_status(struct iwl_priv *priv,
+ u8 frame_count, u32 status);
+void iwlagn_rx_handler_setup(struct iwl_priv *priv);
+void iwlagn_setup_deferred_work(struct iwl_priv *priv);
+int iwlagn_hw_valid_rtc_data_addr(u32 addr);
+int iwlagn_send_tx_power(struct iwl_priv *priv);
+void iwlagn_temperature(struct iwl_priv *priv);
+u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
+const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
+ size_t offset);
+void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwlagn_hw_nic_init(struct iwl_priv *priv);
+
+/* rx */
+void iwlagn_rx_queue_restock(struct iwl_priv *priv);
+void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority);
+void iwlagn_rx_replenish(struct iwl_priv *priv);
+void iwlagn_rx_replenish_now(struct iwl_priv *priv);
+void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwlagn_rxq_stop(struct iwl_priv *priv);
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+void iwlagn_rx_reply_rx(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+
+/* tx */
+void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+ struct ieee80211_tx_info *info);
+int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
+int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+int iwlagn_txq_check_empty(struct iwl_priv *priv,
+ int sta_id, u8 tid, int txq_id);
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
+void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv);
+int iwlagn_txq_ctx_alloc(struct iwl_priv *priv);
+void iwlagn_txq_ctx_reset(struct iwl_priv *priv);
+void iwlagn_txq_ctx_stop(struct iwl_priv *priv);
+
+static inline u32 iwl_tx_status_to_mac80211(u32 status)
+{
+ status &= TX_STATUS_MSK;
+
+ switch (status) {
+ case TX_STATUS_SUCCESS:
+ case TX_STATUS_DIRECT_DONE:
+ return IEEE80211_TX_STAT_ACK;
+ case TX_STATUS_FAIL_DEST_PS:
+ return IEEE80211_TX_STAT_TX_FILTERED;
+ default:
+ return 0;
+ }
+}
+
+static inline bool iwl_is_tx_success(u32 status)
+{
+ status &= TX_STATUS_MSK;
+ return (status == TX_STATUS_SUCCESS) ||
+ (status == TX_STATUS_DIRECT_DONE);
+}
+
+/* scan */
+void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
+
+/* station mgmt */
+int iwlagn_manage_ibss_station(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add);
+
+#endif /* __iwl_agn_h__ */
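As a usage sketch (not part of the patch), the two inline helpers above would typically be applied to the mac80211 tx_info of a completed frame; the caller below is hypothetical and assumes the driver's own headers:

static void example_fill_tx_info(struct ieee80211_tx_info *info, u32 status)
{
	/* SUCCESS/DIRECT_DONE map to IEEE80211_TX_STAT_ACK,
	 * FAIL_DEST_PS to IEEE80211_TX_STAT_TX_FILTERED, anything else to 0 */
	info->flags |= iwl_tx_status_to_mac80211(status);

	if (!iwl_is_tx_success(status))
		pr_debug("TX not successful, status 0x%x\n", status & TX_STATUS_MSK);
}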
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 8b516c5..7e8227773 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -593,7 +593,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
if (!rx_enable_time) {
- IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0! \n");
+ IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
return;
}
@@ -638,8 +638,6 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
iwl_sensitivity_write(priv);
-
- return;
}
EXPORT_SYMBOL(iwl_sensitivity_calibration);
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index f4e59ae..9aab020 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -106,7 +106,7 @@ enum {
REPLY_TX = 0x1c,
REPLY_RATE_SCALE = 0x47, /* 3945 only */
REPLY_LEDS_CMD = 0x48,
- REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */
+ REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
/* WiMAX coexistence */
COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */
@@ -512,8 +512,9 @@ struct iwl_init_alive_resp {
*
* Entries without timestamps contain only event_id and data.
*
+ *
* 2) error_event_table_ptr indicates base of the error log. This contains
- * information about any uCode error that occurs. For 4965, the format
+ * information about any uCode error that occurs. For agn, the format
* of the error log is:
*
* __le32 valid; (nonzero) valid, (0) log is empty
@@ -529,6 +530,30 @@ struct iwl_init_alive_resp {
* __le32 bcon_time; beacon timer
* __le32 tsf_low; network timestamp function timer
* __le32 tsf_hi; network timestamp function timer
+ * __le32 gp1; GP1 timer register
+ * __le32 gp2; GP2 timer register
+ * __le32 gp3; GP3 timer register
+ * __le32 ucode_ver; uCode version
+ * __le32 hw_ver; HW Silicon version
+ * __le32 brd_ver; HW board version
+ * __le32 log_pc; log program counter
+ * __le32 frame_ptr; frame pointer
+ * __le32 stack_ptr; stack pointer
+ * __le32 hcmd; last host command
+ * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag
+ * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag
+ * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag
+ * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag
+ * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt
+ * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT
+ * __le32 wait_event; wait event() caller address
+ * __le32 l2p_control; L2pControlField
+ * __le32 l2p_duration; L2pDurationField
+ * __le32 l2p_mhvalid; L2pMhValidBits
+ * __le32 l2p_addr_match; L2pAddrMatchStat
+ * __le32 lmpm_pmg_sel; indicates which clocks are turned on (LMPM_PMG_SEL)
+ * __le32 u_timestamp; indicates the date and time of the uCode compilation
+ * __le32 reserved;
*
* The Linux driver can print both logs to the system log when a uCode error
* occurs.
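For illustration, the fields documented above are read out of device memory by 32-bit word offset from error_event_table_ptr, exactly as the iwl_dump_nic_error_log() hunk earlier in this patch does; a minimal sketch with a hypothetical wrapper (offsets match that hunk: 1 = error_id/desc, 2 = pc, 22 = hcmd):

static void example_read_error_log_fields(struct iwl_priv *priv, u32 base)
{
	u32 desc, pc, hcmd;

	desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));  /* error_id */
	pc   = iwl_read_targ_mem(priv, base + 2 * sizeof(u32));  /* program counter */
	hcmd = iwl_read_targ_mem(priv, base + 22 * sizeof(u32)); /* last host command */

	IWL_ERR(priv, "desc 0x%08X pc 0x%05X hcmd 0x%05X\n", desc, pc, hcmd);
}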
@@ -1418,7 +1443,7 @@ struct iwl4965_rx_mpdu_res_start {
/* 1: Ignore Bluetooth priority for this frame.
* 0: Delay Tx until Bluetooth device is done (normal usage). */
-#define TX_CMD_FLG_BT_DIS_MSK cpu_to_le32(1 << 12)
+#define TX_CMD_FLG_IGNORE_BT cpu_to_le32(1 << 12)
/* 1: uCode overrides sequence control field in MAC header.
* 0: Driver provides sequence control field in MAC header.
@@ -1637,7 +1662,7 @@ struct iwl_tx_cmd {
struct ieee80211_hdr hdr[0];
} __attribute__ ((packed));
-/* TX command response is sent after *all* transmission attempts.
+/* TX command response is sent after *3945* transmission attempts.
*
* NOTES:
*
@@ -1665,24 +1690,65 @@ struct iwl_tx_cmd {
* control line. Receiving is still allowed in this case.
*/
enum {
+ TX_3945_STATUS_SUCCESS = 0x01,
+ TX_3945_STATUS_DIRECT_DONE = 0x02,
+ TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
+ TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
+ TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
+ TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
+ TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
+ TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+ TX_3945_STATUS_FAIL_DEST_PS = 0x88,
+ TX_3945_STATUS_FAIL_ABORTED = 0x89,
+ TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
+ TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
+ TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+ TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
+ TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
+ TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
+ TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
+ TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+};
+
+/*
+ * TX command response is sent after *agn* transmission attempts.
+ *
+ * Both postpone and abort status codes are expected behavior from the uCode.
+ * No special handling is required from the driver, except for RFKILL_FLUSH,
+ * which requires a TX flush host command to flush all the TX frames in the queues.
+ */
+enum {
TX_STATUS_SUCCESS = 0x01,
TX_STATUS_DIRECT_DONE = 0x02,
+ /* postpone TX */
+ TX_STATUS_POSTPONE_DELAY = 0x40,
+ TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
+ TX_STATUS_POSTPONE_BT_PRIO = 0x42,
+ TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
+ TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
+ /* abort TX */
+ TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
TX_STATUS_FAIL_LONG_LIMIT = 0x83,
TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
- TX_STATUS_FAIL_MGMNT_ABORT = 0x85,
- TX_STATUS_FAIL_NEXT_FRAG = 0x86,
+ TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
+ TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
TX_STATUS_FAIL_DEST_PS = 0x88,
- TX_STATUS_FAIL_ABORTED = 0x89,
+ TX_STATUS_FAIL_HOST_ABORTED = 0x89,
TX_STATUS_FAIL_BT_RETRY = 0x8a,
TX_STATUS_FAIL_STA_INVALID = 0x8b,
TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
TX_STATUS_FAIL_TID_DISABLE = 0x8d,
- TX_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
+ TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
- TX_STATUS_FAIL_TX_LOCKED = 0x90,
- TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+ /* uCode drop due to FW drop request */
+ TX_STATUS_FAIL_FW_DROP = 0x90,
+ /*
+ * uCode drop due to station color mismatch
+ * between tx command and station table
+ */
+ TX_STATUS_FAIL_STA_COLOR_MISMATCH_DROP = 0x91,
};
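A hedged illustration of the postpone/abort split in the enum above (the helper is not part of the patch; it relies only on the postpone codes being contiguous, 0x40..0x44):

static bool example_is_postpone_status(u32 status)
{
	status &= TX_STATUS_MSK;
	return status >= TX_STATUS_POSTPONE_DELAY &&
	       status <= TX_STATUS_POSTPONE_CALC_TTAK;
}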
#define TX_PACKET_MODE_REGULAR 0x0000
@@ -1704,30 +1770,6 @@ enum {
TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
};
-static inline u32 iwl_tx_status_to_mac80211(u32 status)
-{
- status &= TX_STATUS_MSK;
-
- switch (status) {
- case TX_STATUS_SUCCESS:
- case TX_STATUS_DIRECT_DONE:
- return IEEE80211_TX_STAT_ACK;
- case TX_STATUS_FAIL_DEST_PS:
- return IEEE80211_TX_STAT_TX_FILTERED;
- default:
- return 0;
- }
-}
-
-static inline bool iwl_is_tx_success(u32 status)
-{
- status &= TX_STATUS_MSK;
- return (status == TX_STATUS_SUCCESS) ||
- (status == TX_STATUS_DIRECT_DONE);
-}
-
-
-
/* *******************************
* TX aggregation status
******************************* */
@@ -2626,7 +2668,6 @@ struct iwl_ssid_ie {
#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
#define IWL_MAX_SCAN_SIZE 1024
#define IWL_MAX_CMD_SIZE 4096
-#define IWL_MAX_PROBE_REQUEST 200
/*
* REPLY_SCAN_CMD = 0x80 (command)
@@ -3086,6 +3127,11 @@ struct statistics_tx {
__le32 cts_timeout_collision;
__le32 ack_or_ba_timeout_collision;
struct statistics_tx_non_phy_agg agg;
+ /*
+ * "tx_power" are optional parameters provided by uCode,
+ * 6000 series is the only device provide the information,
+ * Those are reserved fields for all the other devices
+ */
struct statistics_tx_power tx_power;
__le32 reserved1;
} __attribute__ ((packed));
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 049b652..5a7eca8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -66,38 +66,7 @@ MODULE_LICENSE("GPL");
*/
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, S_IRUGO);
-MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist\n");
-
-static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
- {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
- 0, COEX_UNASSOC_IDLE_FLAGS},
- {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
- 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
- {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
- 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
- {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
- 0, COEX_CALIBRATION_FLAGS},
- {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
- 0, COEX_PERIODIC_CALIBRATION_FLAGS},
- {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
- 0, COEX_CONNECTION_ESTAB_FLAGS},
- {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
- 0, COEX_ASSOCIATED_IDLE_FLAGS},
- {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
- 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
- {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
- 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
- {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
- 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
- {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
- {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
- {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
- 0, COEX_STAND_ALONE_DEBUG_FLAGS},
- {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
- 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
- {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
- {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
-};
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -115,8 +84,6 @@ static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
u32 iwl_debug_level;
EXPORT_SYMBOL(iwl_debug_level);
-static irqreturn_t iwl_isr(int irq, void *data);
-
/*
* Parameter order:
* rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
@@ -143,30 +110,6 @@ const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
};
EXPORT_SYMBOL(iwl_rates);
-/**
- * translate ucode response to mac80211 tx status control values
- */
-void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info)
-{
- struct ieee80211_tx_rate *r = &info->control.rates[0];
-
- info->antenna_sel_tx =
- ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
- if (rate_n_flags & RATE_MCS_HT_MSK)
- r->flags |= IEEE80211_TX_RC_MCS;
- if (rate_n_flags & RATE_MCS_GF_MSK)
- r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (rate_n_flags & RATE_MCS_DUP_MSK)
- r->flags |= IEEE80211_TX_RC_DUP_DATA;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- r->flags |= IEEE80211_TX_RC_SHORT_GI;
- r->idx = iwl_hwrate_to_mac80211_idx(rate_n_flags, info->band);
-}
-EXPORT_SYMBOL(iwl_hwrate_to_tx_control);
-
int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
{
int idx = 0;
@@ -198,27 +141,6 @@ int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
}
EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
-int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
-{
- int idx = 0;
- int band_offset = 0;
-
- /* HT rate format: mac80211 wants an MCS number, which is just LSB */
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- idx = (rate_n_flags & 0xff);
- return idx;
- /* Legacy rate format, search for match in table */
- } else {
- if (band == IEEE80211_BAND_5GHZ)
- band_offset = IWL_FIRST_OFDM_RATE;
- for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
- if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
- return idx - band_offset;
- }
-
- return -1;
-}
-
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant)
{
int i;
@@ -268,74 +190,16 @@ void iwl_hw_detect(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_hw_detect);
-int iwl_hw_nic_init(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rx_queue *rxq = &priv->rxq;
- int ret;
-
- /* nic_init */
- spin_lock_irqsave(&priv->lock, flags);
- priv->cfg->ops->lib->apm_ops.init(priv);
-
- /* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
-
- priv->cfg->ops->lib->apm_ops.config(priv);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- if (!rxq->bd) {
- ret = iwl_rx_queue_alloc(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to initialize Rx queue\n");
- return -ENOMEM;
- }
- } else
- iwl_rx_queue_reset(priv, rxq);
-
- iwl_rx_replenish(priv);
-
- iwl_rx_init(priv, rxq);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- rxq->need_update = 1;
- iwl_rx_queue_update_write_ptr(priv, rxq);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Allocate or reset and init all Tx and Command queues */
- if (!priv->txq) {
- ret = iwl_txq_ctx_alloc(priv);
- if (ret)
- return ret;
- } else
- iwl_txq_ctx_reset(priv);
-
- set_bit(STATUS_INIT, &priv->status);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_hw_nic_init);
-
/*
* QoS support
*/
-void iwl_activate_qos(struct iwl_priv *priv, u8 force)
+static void iwl_update_qos(struct iwl_priv *priv)
{
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
priv->qos_data.def_qos_parm.qos_flags = 0;
- if (priv->qos_data.qos_cap.q_AP.queue_request &&
- !priv->qos_data.qos_cap.q_AP.txop_request)
- priv->qos_data.def_qos_parm.qos_flags |=
- QOS_PARAM_FLG_TXOP_TYPE_MSK;
if (priv->qos_data.qos_active)
priv->qos_data.def_qos_parm.qos_flags |=
QOS_PARAM_FLG_UPDATE_EDCA_MSK;
@@ -343,118 +207,14 @@ void iwl_activate_qos(struct iwl_priv *priv, u8 force)
if (priv->current_ht_config.is_ht)
priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
- if (force || iwl_is_associated(priv)) {
- IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
- priv->qos_data.qos_active,
- priv->qos_data.def_qos_parm.qos_flags);
+ IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+ priv->qos_data.qos_active,
+ priv->qos_data.def_qos_parm.qos_flags);
- iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
- sizeof(struct iwl_qosparam_cmd),
- &priv->qos_data.def_qos_parm, NULL);
- }
+ iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
+ sizeof(struct iwl_qosparam_cmd),
+ &priv->qos_data.def_qos_parm, NULL);
}
-EXPORT_SYMBOL(iwl_activate_qos);
-
-/*
- * AC CWmin CW max AIFSN TXOP Limit TXOP Limit
- * (802.11b) (802.11a/g)
- * AC_BK 15 1023 7 0 0
- * AC_BE 15 1023 3 0 0
- * AC_VI 7 15 2 6.016ms 3.008ms
- * AC_VO 3 7 2 3.264ms 1.504ms
- */
-void iwl_reset_qos(struct iwl_priv *priv)
-{
- u16 cw_min = 15;
- u16 cw_max = 1023;
- u8 aifs = 2;
- bool is_legacy = false;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&priv->lock, flags);
- /* QoS always active in AP and ADHOC mode
- * In STA mode wait for association
- */
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
- priv->iw_mode == NL80211_IFTYPE_AP)
- priv->qos_data.qos_active = 1;
- else
- priv->qos_data.qos_active = 0;
-
- /* check for legacy mode */
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
- (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
- (priv->iw_mode == NL80211_IFTYPE_STATION &&
- (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
- cw_min = 31;
- is_legacy = 1;
- }
-
- if (priv->qos_data.qos_active)
- aifs = 3;
-
- /* AC_BE */
- priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
- priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
- priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
- priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
- priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
-
- if (priv->qos_data.qos_active) {
- /* AC_BK */
- i = 1;
- priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
- priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
- priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
- priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
-
- /* AC_VI */
- i = 2;
- priv->qos_data.def_qos_parm.ac[i].cw_min =
- cpu_to_le16((cw_min + 1) / 2 - 1);
- priv->qos_data.def_qos_parm.ac[i].cw_max =
- cpu_to_le16(cw_min);
- priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
- if (is_legacy)
- priv->qos_data.def_qos_parm.ac[i].edca_txop =
- cpu_to_le16(6016);
- else
- priv->qos_data.def_qos_parm.ac[i].edca_txop =
- cpu_to_le16(3008);
- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
-
- /* AC_VO */
- i = 3;
- priv->qos_data.def_qos_parm.ac[i].cw_min =
- cpu_to_le16((cw_min + 1) / 4 - 1);
- priv->qos_data.def_qos_parm.ac[i].cw_max =
- cpu_to_le16((cw_min + 1) / 2 - 1);
- priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
- if (is_legacy)
- priv->qos_data.def_qos_parm.ac[i].edca_txop =
- cpu_to_le16(3264);
- else
- priv->qos_data.def_qos_parm.ac[i].edca_txop =
- cpu_to_le16(1504);
- } else {
- for (i = 1; i < 4; i++) {
- priv->qos_data.def_qos_parm.ac[i].cw_min =
- cpu_to_le16(cw_min);
- priv->qos_data.def_qos_parm.ac[i].cw_max =
- cpu_to_le16(cw_max);
- priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
- priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
- }
- }
- IWL_DEBUG_QOS(priv, "set QoS to default \n");
-
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-EXPORT_SYMBOL(iwl_reset_qos);
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
@@ -721,7 +481,7 @@ static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
return new_val;
}
-void iwl_setup_rxon_timing(struct iwl_priv *priv)
+void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
u64 tsf;
s32 interval_tm, rem;
@@ -735,15 +495,14 @@ void iwl_setup_rxon_timing(struct iwl_priv *priv)
priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);
- if (priv->iw_mode == NL80211_IFTYPE_STATION) {
- beacon_int = priv->beacon_int;
- priv->rxon_timing.atim_window = 0;
- } else {
- beacon_int = priv->vif->bss_conf.beacon_int;
+ beacon_int = vif->bss_conf.beacon_int;
+ if (vif->type == NL80211_IFTYPE_ADHOC) {
/* TODO: we need to get atim_window from upper stack
* for now we set to 0 */
priv->rxon_timing.atim_window = 0;
+ } else {
+ priv->rxon_timing.atim_window = 0;
}
beacon_int = iwl_adjust_beacon_interval(beacon_int,
@@ -903,23 +662,10 @@ EXPORT_SYMBOL(iwl_full_rxon_required);
u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
{
- int i;
- int rate_mask;
-
- /* Set rate mask*/
- if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
- rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
- else
- rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;
-
- /* Find lowest valid rate */
- for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
- i = iwl_rates[i].next_ieee) {
- if (rate_mask & (1 << i))
- return iwl_rates[i].plcp;
- }
-
- /* No valid rate was found. Assign the lowest one */
+ /*
+ * Assign the lowest rate -- should really get this from
+ * the beacon skb from mac80211.
+ */
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
return IWL_RATE_1M_PLCP;
else
@@ -991,7 +737,6 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
"extension channel offset 0x%x\n",
le32_to_cpu(rxon->flags), ht_conf->ht_protection,
ht_conf->extension_chan_offset);
- return;
}
EXPORT_SYMBOL(iwl_set_rxon_ht);
@@ -1051,19 +796,6 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
}
/**
- * iwl_is_monitor_mode - Determine if interface in monitor mode
- *
- * priv->iw_mode is set in add_interface, but add_interface is
- * never called for monitor mode. The only way mac80211 informs us about
- * monitor mode is through configuring filters (call to configure_filter).
- */
-bool iwl_is_monitor_mode(struct iwl_priv *priv)
-{
- return !!(priv->staging_rxon.filter_flags & RXON_FILTER_PROMISC_MSK);
-}
-EXPORT_SYMBOL(iwl_is_monitor_mode);
-
-/**
* iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
*
* Selects how many and which Rx receivers/antennas/chains to use.
@@ -1106,19 +838,6 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
- /* copied from 'iwl_bg_request_scan()' */
- /* Force use of chains B and C (0x6) for Rx for 4965
- * Avoid A (0x1) because of its off-channel reception on A-band.
- * MIMO is not used here, but value is required */
- if (iwl_is_monitor_mode(priv) &&
- !(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) &&
- ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)) {
- rx_chain = ANT_ABC << RXON_RX_CHAIN_VALID_POS;
- rx_chain |= ANT_BC << RXON_RX_CHAIN_FORCE_SEL_POS;
- rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
- rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
- }
-
priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
@@ -1174,8 +893,9 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
}
EXPORT_SYMBOL(iwl_set_rxon_channel);
-void iwl_set_flags_for_band(struct iwl_priv *priv,
- enum ieee80211_band band)
+static void iwl_set_flags_for_band(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif)
{
if (band == IEEE80211_BAND_5GHZ) {
priv->staging_rxon.flags &=
@@ -1184,12 +904,12 @@ void iwl_set_flags_for_band(struct iwl_priv *priv,
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
} else {
/* Copied from iwl_post_associate() */
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif && vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+ if (vif && vif->type == NL80211_IFTYPE_ADHOC)
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
@@ -1201,13 +921,18 @@ void iwl_set_flags_for_band(struct iwl_priv *priv,
/*
* initialize rxon structure with default values from eeprom
*/
-void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
+void iwl_connection_init_rx_config(struct iwl_priv *priv,
+ struct ieee80211_vif *vif)
{
const struct iwl_channel_info *ch_info;
+ enum nl80211_iftype type = NL80211_IFTYPE_STATION;
+
+ if (vif)
+ type = vif->type;
memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
- switch (mode) {
+ switch (type) {
case NL80211_IFTYPE_AP:
priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
break;
@@ -1225,7 +950,7 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
break;
default:
- IWL_ERR(priv, "Unsupported interface type %d\n", mode);
+ IWL_ERR(priv, "Unsupported interface type %d\n", type);
break;
}
@@ -1244,18 +969,10 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
if (!ch_info)
ch_info = &priv->channel_info[0];
- /*
- * in some case A channels are all non IBSS
- * in this case force B/G channel
- */
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
- !(is_channel_ibss(ch_info)))
- ch_info = &priv->channel_info[0];
-
priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
priv->band = ch_info->band;
- iwl_set_flags_for_band(priv, priv->band);
+ iwl_set_flags_for_band(priv, priv->band, vif);
priv->staging_rxon.ofdm_basic_rates =
(IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
@@ -1286,7 +1003,6 @@ static void iwl_set_rate(struct iwl_priv *priv)
}
priv->active_rate = 0;
- priv->active_rate_basic = 0;
for (i = 0; i < hw->n_bitrates; i++) {
rate = &(hw->bitrates[i]);
@@ -1294,30 +1010,13 @@ static void iwl_set_rate(struct iwl_priv *priv)
priv->active_rate |= (1 << rate->hw_value);
}
- IWL_DEBUG_RATE(priv, "Set active_rate = %0x, active_rate_basic = %0x\n",
- priv->active_rate, priv->active_rate_basic);
+ IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
- /*
- * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
- * otherwise set it to the default of all CCK rates and 6, 12, 24 for
- * OFDM
- */
- if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
- priv->staging_rxon.cck_basic_rates =
- ((priv->active_rate_basic &
- IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
- else
- priv->staging_rxon.cck_basic_rates =
- (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
- if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
- priv->staging_rxon.ofdm_basic_rates =
- ((priv->active_rate_basic &
- (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
- IWL_FIRST_OFDM_RATE) & 0xFF;
- else
- priv->staging_rxon.ofdm_basic_rates =
- (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+ priv->staging_rxon.cck_basic_rates =
+ (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
+
+ priv->staging_rxon.ofdm_basic_rates =
+ (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
}
void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
@@ -1374,6 +1073,9 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
/* Cancel currently queued command. */
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_ERR(priv, "Loaded firmware version: %s\n",
+ priv->hw->wiphy->fw_version);
+
priv->cfg->ops->lib->dump_nic_error_log(priv);
if (priv->cfg->ops->lib->dump_csr)
priv->cfg->ops->lib->dump_csr(priv);
@@ -1401,7 +1103,7 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_irq_handle_error);
-int iwl_apm_stop_master(struct iwl_priv *priv)
+static int iwl_apm_stop_master(struct iwl_priv *priv)
{
int ret = 0;
@@ -1417,7 +1119,6 @@ int iwl_apm_stop_master(struct iwl_priv *priv)
return ret;
}
-EXPORT_SYMBOL(iwl_apm_stop_master);
void iwl_apm_stop(struct iwl_priv *priv)
{
@@ -1561,41 +1262,33 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct iwl_priv *priv = hw->priv;
- __le32 *filter_flags = &priv->staging_rxon.filter_flags;
+ __le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
changed_flags, *total_flags);
- if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
- if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
- *filter_flags |= RXON_FILTER_PROMISC_MSK;
- else
- *filter_flags &= ~RXON_FILTER_PROMISC_MSK;
- }
- if (changed_flags & FIF_ALLMULTI) {
- if (*total_flags & FIF_ALLMULTI)
- *filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
- else
- *filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
- }
- if (changed_flags & FIF_CONTROL) {
- if (*total_flags & FIF_CONTROL)
- *filter_flags |= RXON_FILTER_CTL2HOST_MSK;
- else
- *filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
- }
- if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
- if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
- *filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
- else
- *filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
- }
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_ALLMULTI, RXON_FILTER_ACCEPT_GRP_MSK);
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
- /* We avoid iwl_commit_rxon here to commit the new filter flags
- * since mac80211 will call ieee80211_hw_config immediately.
- * (mc_list is not supported at this time). Otherwise, we need to
- * queue a background iwl_commit_rxon work.
- */
+#undef CHK
+
+ mutex_lock(&priv->mutex);
+
+ priv->staging_rxon.filter_flags &= ~filter_nand;
+ priv->staging_rxon.filter_flags |= filter_or;
+
+ iwlcore_commit_rxon(priv);
+
+ mutex_unlock(&priv->mutex);
*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
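
For readers unfamiliar with the accumulator style used above, here is a minimal sketch (using only names that appear in this hunk) of what a single CHK() invocation expands to and how the collected masks are applied afterwards:

	/* CHK(FIF_ALLMULTI, RXON_FILTER_ACCEPT_GRP_MSK) expands to roughly: */
	if (*total_flags & FIF_ALLMULTI)
		filter_or |= RXON_FILTER_ACCEPT_GRP_MSK;
	else
		filter_nand |= RXON_FILTER_ACCEPT_GRP_MSK;

	/* after all CHK() lines, both masks are applied in one place: */
	priv->staging_rxon.filter_flags &= ~filter_nand;
	priv->staging_rxon.filter_flags |= filter_or;
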
@@ -1626,10 +1319,11 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
int ret = 0;
s8 prev_tx_power = priv->tx_power_user_lmt;
- if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) {
- IWL_WARN(priv, "Requested user TXPOWER %d below lower limit %d.\n",
+ if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
+ IWL_WARN(priv,
+ "Requested user TXPOWER %d below lower limit %d.\n",
tx_power,
- IWL_TX_POWER_TARGET_POWER_MIN);
+ IWLAGN_TX_POWER_TARGET_POWER_MIN);
return -EINVAL;
}
@@ -1668,286 +1362,16 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
}
EXPORT_SYMBOL(iwl_set_tx_power);
-#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
-
-/* Free dram table */
-void iwl_free_isr_ict(struct iwl_priv *priv)
-{
- if (priv->ict_tbl_vir) {
- dma_free_coherent(&priv->pci_dev->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- priv->ict_tbl_vir, priv->ict_tbl_dma);
- priv->ict_tbl_vir = NULL;
- }
-}
-EXPORT_SYMBOL(iwl_free_isr_ict);
-
-
-/* allocate dram shared table it is a PAGE_SIZE aligned
- * also reset all data related to ICT table interrupt.
- */
-int iwl_alloc_isr_ict(struct iwl_priv *priv)
-{
-
- if (priv->cfg->use_isr_legacy)
- return 0;
- /* allocate shrared data table */
- priv->ict_tbl_vir = dma_alloc_coherent(&priv->pci_dev->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- &priv->ict_tbl_dma, GFP_KERNEL);
- if (!priv->ict_tbl_vir)
- return -ENOMEM;
-
- /* align table to PAGE_SIZE boundry */
- priv->aligned_ict_tbl_dma = ALIGN(priv->ict_tbl_dma, PAGE_SIZE);
-
- IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
- (unsigned long long)priv->ict_tbl_dma,
- (unsigned long long)priv->aligned_ict_tbl_dma,
- (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));
-
- priv->ict_tbl = priv->ict_tbl_vir +
- (priv->aligned_ict_tbl_dma - priv->ict_tbl_dma);
-
- IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
- priv->ict_tbl, priv->ict_tbl_vir,
- (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));
-
- /* reset table and index to all 0 */
- memset(priv->ict_tbl_vir,0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
- priv->ict_index = 0;
-
- /* add periodic RX interrupt */
- priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
- return 0;
-}
-EXPORT_SYMBOL(iwl_alloc_isr_ict);
-
-/* Device is going up inform it about using ICT interrupt table,
- * also we need to tell the driver to start using ICT interrupt.
- */
-int iwl_reset_ict(struct iwl_priv *priv)
-{
- u32 val;
- unsigned long flags;
-
- if (!priv->ict_tbl_vir)
- return 0;
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
-
- memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
-
- val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT;
-
- val |= CSR_DRAM_INT_TBL_ENABLE;
- val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
-
- IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
- "aligned dma address %Lx\n",
- val, (unsigned long long)priv->aligned_ict_tbl_dma);
-
- iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
- priv->use_ict = true;
- priv->ict_index = 0;
- iwl_write32(priv, CSR_INT, priv->inta_mask);
- iwl_enable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_reset_ict);
-
-/* Device is going down disable ict interrupt usage */
-void iwl_disable_ict(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->use_ict = false;
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-EXPORT_SYMBOL(iwl_disable_ict);
-
-/* interrupt handler using ict table, with this interrupt driver will
- * stop using INTA register to get device's interrupt, reading this register
- * is expensive, device will write interrupts in ICT dram table, increment
- * index then will fire interrupt to driver, driver will OR all ICT table
- * entries from current index up to table entry with 0 value. the result is
- * the interrupt we need to service, driver will set the entries back to 0 and
- * set index.
- */
-irqreturn_t iwl_isr_ict(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
- u32 val = 0;
-
- if (!priv)
- return IRQ_NONE;
-
- /* dram interrupt table not set yet,
- * use legacy interrupt.
- */
- if (!priv->use_ict)
- return iwl_isr(irq, data);
-
- spin_lock(&priv->lock);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here.
- */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!priv->ict_tbl[priv->ict_index]) {
- IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- /* read all entries that not 0 start with ict_index */
- while (priv->ict_tbl[priv->ict_index]) {
-
- val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]);
- IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
- priv->ict_index,
- le32_to_cpu(priv->ict_tbl[priv->ict_index]));
- priv->ict_tbl[priv->ict_index] = 0;
- priv->ict_index = iwl_queue_inc_wrap(priv->ict_index,
- ICT_COUNT);
-
- }
-
- /* We should not get this value, just ignore it. */
- if (val == 0xffffffff)
- val = 0;
-
- /*
- * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
- * (bit 15 before shifting it to 31) to clear when using interrupt
- * coalescing. fortunately, bits 18 and 19 stay set when this happens
- * so we use them to decide on the real state of the Rx bit.
- * In order words, bit 15 is set if bit 18 or bit 19 are set.
- */
- if (val & 0xC0000)
- val |= 0x8000;
-
- inta = (0xff & val) | ((0xff00 & val) << 16);
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
- inta, inta_mask, val);
-
- inta &= priv->inta_mask;
- priv->inta |= inta;
-
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&priv->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta) {
- /* Allow interrupt if was disabled by this handler and
- * no tasklet was schedules, We should not enable interrupt,
- * tasklet will enable it.
- */
- iwl_enable_interrupts(priv);
- }
-
- spin_unlock(&priv->lock);
- return IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service.
- * only Re-enable if disabled by irq.
- */
- if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
- iwl_enable_interrupts(priv);
-
- spin_unlock(&priv->lock);
- return IRQ_NONE;
-}
-EXPORT_SYMBOL(iwl_isr_ict);
-
-
-static irqreturn_t iwl_isr(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
-#ifdef CONFIG_IWLWIFI_DEBUG
- u32 inta_fh;
-#endif
- if (!priv)
- return IRQ_NONE;
-
- spin_lock(&priv->lock);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(priv, CSR_INT);
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta) {
- IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- goto unplugged;
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
- "fh 0x%08x\n", inta, inta_mask, inta_fh);
- }
-#endif
-
- priv->inta |= inta;
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&priv->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
- iwl_enable_interrupts(priv);
-
- unplugged:
- spin_unlock(&priv->lock);
- return IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service. */
- /* only Re-enable if diabled by irq and no schedules tasklet. */
- if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
- iwl_enable_interrupts(priv);
-
- spin_unlock(&priv->lock);
- return IRQ_NONE;
-}
-
irqreturn_t iwl_isr_legacy(int irq, void *data)
{
struct iwl_priv *priv = data;
u32 inta, inta_mask;
u32 inta_fh;
+ unsigned long flags;
if (!priv)
return IRQ_NONE;
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
@@ -1985,7 +1409,7 @@ irqreturn_t iwl_isr_legacy(int irq, void *data)
tasklet_schedule(&priv->irq_tasklet);
unplugged:
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
return IRQ_HANDLED;
none:
@@ -1993,12 +1417,12 @@ irqreturn_t iwl_isr_legacy(int irq, void *data)
/* only Re-enable if diabled by irq */
if (test_bit(STATUS_INT_ENABLED, &priv->status))
iwl_enable_interrupts(priv);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
return IRQ_NONE;
}
EXPORT_SYMBOL(iwl_isr_legacy);
-int iwl_send_bt_config(struct iwl_priv *priv)
+void iwl_send_bt_config(struct iwl_priv *priv)
{
struct iwl_bt_cmd bt_cmd = {
.lead_time = BT_LEAD_TIME_DEF,
@@ -2015,8 +1439,9 @@ int iwl_send_bt_config(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "BT coex %s\n",
(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
- return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- sizeof(struct iwl_bt_cmd), &bt_cmd);
+ if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+ sizeof(struct iwl_bt_cmd), &bt_cmd))
+ IWL_ERR(priv, "failed to send BT Coex Config\n");
}
EXPORT_SYMBOL(iwl_send_bt_config);
@@ -2306,12 +1731,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
cpu_to_le16((params->txop * 32));
priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
- priv->qos_data.qos_active = 1;
-
- if (priv->iw_mode == NL80211_IFTYPE_AP)
- iwl_activate_qos(priv, 1);
- else if (priv->assoc_id && iwl_is_associated(priv))
- iwl_activate_qos(priv, 0);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -2321,12 +1740,13 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
EXPORT_SYMBOL(iwl_mac_conf_tx);
static void iwl_ht_conf(struct iwl_priv *priv,
- struct ieee80211_bss_conf *bss_conf)
+ struct ieee80211_vif *vif)
{
struct iwl_ht_config *ht_conf = &priv->current_ht_config;
struct ieee80211_sta *sta;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- IWL_DEBUG_MAC80211(priv, "enter: \n");
+ IWL_DEBUG_MAC80211(priv, "enter:\n");
if (!ht_conf->is_ht)
return;
@@ -2338,10 +1758,10 @@ static void iwl_ht_conf(struct iwl_priv *priv,
ht_conf->single_chain_sufficient = false;
- switch (priv->iw_mode) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
rcu_read_lock();
- sta = ieee80211_find_sta(priv->vif, priv->bssid);
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (sta) {
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
int maxstreams;
@@ -2379,7 +1799,6 @@ static void iwl_ht_conf(struct iwl_priv *priv,
static inline void iwl_set_no_assoc(struct iwl_priv *priv)
{
- priv->assoc_id = 0;
iwl_led_disassociate(priv);
/*
* inform the ucode that there is no longer an
@@ -2392,7 +1811,6 @@ static inline void iwl_set_no_assoc(struct iwl_priv *priv)
iwlcore_commit_rxon(priv);
}
-#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
void iwl_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -2408,14 +1826,12 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
- if (changes & BSS_CHANGED_BEACON &&
- priv->iw_mode == NL80211_IFTYPE_AP) {
+ if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
dev_kfree_skb(priv->ibss_beacon);
priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
}
if (changes & BSS_CHANGED_BEACON_INT) {
- priv->beacon_int = bss_conf->beacon_int;
/* TODO: in AP mode, do something to make this take effect */
}
@@ -2435,8 +1851,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
}
/* mac80211 only sets assoc when in STATION mode */
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
- bss_conf->assoc) {
+ if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
memcpy(priv->staging_rxon.bssid_addr,
bss_conf->bssid, ETH_ALEN);
@@ -2454,7 +1869,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
* mac80211 decides to do both changes at once because
* it will invoke post_associate.
*/
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
changes & BSS_CHANGED_BEACON) {
struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
@@ -2497,7 +1912,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
}
if (changes & BSS_CHANGED_HT) {
- iwl_ht_conf(priv, bss_conf);
+ iwl_ht_conf(priv, vif);
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
@@ -2506,28 +1921,17 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
if (changes & BSS_CHANGED_ASSOC) {
IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
if (bss_conf->assoc) {
- priv->assoc_id = bss_conf->aid;
- priv->beacon_int = bss_conf->beacon_int;
priv->timestamp = bss_conf->timestamp;
- priv->assoc_capability = bss_conf->assoc_capability;
iwl_led_associate(priv);
- /*
- * We have just associated, don't start scan too early
- * leave time for EAPOL exchange to complete.
- *
- * XXX: do this in mac80211
- */
- priv->next_scan_jiffies = jiffies +
- IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
if (!iwl_is_rfkill(priv))
- priv->cfg->ops->lib->post_associate(priv);
+ priv->cfg->ops->lib->post_associate(priv, vif);
} else
iwl_set_no_assoc(priv);
}
- if (changes && iwl_is_associated(priv) && priv->assoc_id) {
+ if (changes && iwl_is_associated(priv) && bss_conf->aid) {
IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
changes);
ret = iwl_send_rxon_assoc(priv);
@@ -2544,11 +1948,20 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
memcpy(priv->staging_rxon.bssid_addr,
bss_conf->bssid, ETH_ALEN);
memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- iwlcore_config_ap(priv);
+ iwlcore_config_ap(priv, vif);
} else
iwl_set_no_assoc(priv);
}
+ if (changes & BSS_CHANGED_IBSS) {
+ ret = priv->cfg->ops->lib->manage_ibss_station(priv, vif,
+ bss_conf->ibss_joined);
+ if (ret)
+ IWL_ERR(priv, "failed to %s IBSS station %pM\n",
+ bss_conf->ibss_joined ? "add" : "remove",
+ bss_conf->bssid);
+ }
+
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2568,11 +1981,6 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
return -EIO;
}
- if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
- IWL_DEBUG_MAC80211(priv, "leave - not IBSS\n");
- return -EIO;
- }
-
spin_lock_irqsave(&priv->lock, flags);
if (priv->ibss_beacon)
@@ -2580,59 +1988,31 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
priv->ibss_beacon = skb;
- priv->assoc_id = 0;
timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
priv->timestamp = le64_to_cpu(timestamp);
IWL_DEBUG_MAC80211(priv, "leave\n");
spin_unlock_irqrestore(&priv->lock, flags);
- iwl_reset_qos(priv);
-
- priv->cfg->ops->lib->post_associate(priv);
-
+ priv->cfg->ops->lib->post_associate(priv, priv->vif);
return 0;
}
EXPORT_SYMBOL(iwl_mac_beacon_update);
-int iwl_set_mode(struct iwl_priv *priv, int mode)
+static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
- if (mode == NL80211_IFTYPE_ADHOC) {
- const struct iwl_channel_info *ch_info;
-
- ch_info = iwl_get_channel_info(priv,
- priv->band,
- le16_to_cpu(priv->staging_rxon.channel));
-
- if (!ch_info || !is_channel_ibss(ch_info)) {
- IWL_ERR(priv, "channel %d not IBSS channel\n",
- le16_to_cpu(priv->staging_rxon.channel));
- return -EINVAL;
- }
- }
-
- iwl_connection_init_rx_config(priv, mode);
+ iwl_connection_init_rx_config(priv, vif);
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
- iwl_clear_stations_table(priv);
-
- /* dont commit rxon if rf-kill is on*/
- if (!iwl_is_ready_rf(priv))
- return -EAGAIN;
-
- iwlcore_commit_rxon(priv);
-
- return 0;
+ return iwlcore_commit_rxon(priv);
}
-EXPORT_SYMBOL(iwl_set_mode);
-int iwl_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
int err = 0;
@@ -2641,6 +2021,11 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
+ if (WARN_ON(!iwl_is_ready_rf(priv))) {
+ err = -EINVAL;
+ goto out;
+ }
+
if (priv->vif) {
IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
err = -EOPNOTSUPP;
@@ -2650,15 +2035,18 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
priv->vif = vif;
priv->iw_mode = vif->type;
- if (vif->addr) {
- IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
- memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
- }
+ IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
+ memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
+
+ err = iwl_set_mode(priv, vif);
+ if (err)
+ goto out_err;
- if (iwl_set_mode(priv, vif->type) == -EAGAIN)
- /* we are not ready, will run again when ready */
- set_bit(STATUS_MODE_PENDING, &priv->status);
+ goto out;
+ out_err:
+ priv->vif = NULL;
+ priv->iw_mode = NL80211_IFTYPE_STATION;
out:
mutex_unlock(&priv->mutex);
@@ -2668,7 +2056,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
EXPORT_SYMBOL(iwl_mac_add_interface);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
@@ -2694,10 +2082,6 @@ EXPORT_SYMBOL(iwl_mac_remove_interface);
/**
* iwl_mac_config - mac80211 config callback
- *
- * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
- * be set inappropriately and the driver currently sets the hardware up to
- * use it whenever needed.
*/
int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
{
@@ -2752,15 +2136,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
goto set_ch_out;
}
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
- !is_channel_ibss(ch_info)) {
- IWL_ERR(priv, "channel %d in band %d not "
- "IBSS channel\n",
- conf->channel->hw_value, conf->channel->band);
- ret = -EINVAL;
- goto set_ch_out;
- }
-
spin_lock_irqsave(&priv->lock, flags);
/* Configure HT40 channels */
@@ -2794,7 +2169,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
iwl_set_rxon_channel(priv, conf->channel);
iwl_set_rxon_ht(priv, ht_conf);
- iwl_set_flags_for_band(priv, conf->channel->band);
+ iwl_set_flags_for_band(priv, conf->channel->band, priv->vif);
spin_unlock_irqrestore(&priv->lock, flags);
if (iwl_is_associated(priv) &&
(le16_to_cpu(priv->active_rxon.channel) != ch) &&
@@ -2833,6 +2208,15 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
iwl_set_tx_power(priv, conf->power_level, false);
}
+ if (changed & IEEE80211_CONF_CHANGE_QOS) {
+ bool qos_active = !!(conf->flags & IEEE80211_CONF_QOS);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->qos_data.qos_active = qos_active;
+ iwl_update_qos(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
if (!iwl_is_ready(priv)) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
goto out;
@@ -2867,12 +2251,7 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
spin_unlock_irqrestore(&priv->lock, flags);
- iwl_reset_qos(priv);
-
spin_lock_irqsave(&priv->lock, flags);
- priv->assoc_id = 0;
- priv->assoc_capability = 0;
- priv->assoc_station_added = 0;
/* new association get rid of ibss beacon skb */
if (priv->ibss_beacon)
@@ -2880,10 +2259,7 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
priv->ibss_beacon = NULL;
- priv->beacon_int = priv->vif->bss_conf.beacon_int;
priv->timestamp = 0;
- if ((priv->iw_mode == NL80211_IFTYPE_STATION))
- priv->beacon_int = 0;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -2896,17 +2272,9 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
/* we are restarting association process
* clear RXON_FILTER_ASSOC_MSK bit
*/
- if (priv->iw_mode != NL80211_IFTYPE_AP) {
- iwl_scan_cancel_timeout(priv, 100);
- priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwlcore_commit_rxon(priv);
- }
-
- if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
- IWL_DEBUG_MAC80211(priv, "leave - not in IBSS\n");
- mutex_unlock(&priv->mutex);
- return;
- }
+ iwl_scan_cancel_timeout(priv, 100);
+ priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwlcore_commit_rxon(priv);
iwl_set_rate(priv);
@@ -2923,7 +2291,7 @@ int iwl_alloc_txq_mem(struct iwl_priv *priv)
sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues,
GFP_KERNEL);
if (!priv->txq) {
- IWL_ERR(priv, "Not enough memory for txq \n");
+ IWL_ERR(priv, "Not enough memory for txq\n");
return -ENOMEM;
}
return 0;
@@ -2937,34 +2305,6 @@ void iwl_free_txq_mem(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_free_txq_mem);
-int iwl_send_wimax_coex(struct iwl_priv *priv)
-{
- struct iwl_wimax_coex_cmd uninitialized_var(coex_cmd);
-
- if (priv->cfg->support_wimax_coexist) {
- /* UnMask wake up src at associated sleep */
- coex_cmd.flags |= COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
-
- /* UnMask wake up src at unassociated sleep */
- coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
- memcpy(coex_cmd.sta_prio, cu_priorities,
- sizeof(struct iwl_wimax_coex_event_entry) *
- COEX_NUM_OF_EVENTS);
-
- /* enabling the coexistence feature */
- coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
-
- /* enabling the priorities tables */
- coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
- } else {
- /* coexistence is disabled */
- memset(&coex_cmd, 0, sizeof(coex_cmd));
- }
- return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
- sizeof(coex_cmd), &coex_cmd);
-}
-EXPORT_SYMBOL(iwl_send_wimax_coex);
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
@@ -3403,6 +2743,99 @@ int iwl_force_reset(struct iwl_priv *priv, int mode)
}
return 0;
}
+EXPORT_SYMBOL(iwl_force_reset);
+
+/**
+ * iwl_bg_monitor_recover - Timer callback to check for stuck queue and recover
+ *
+ * During normal conditions (no queue is stuck), the timer is continually
+ * rearmed to fire monitor_recover_period milliseconds after the previous
+ * expiry. When a queue's read_ptr stays at the same place, the timer is
+ * shortened to 100 ms. This is
+ * 1) to reduce the chance that the read_ptr may wrap around (not stuck)
+ * 2) to detect stuck queues more quickly, before the station and the AP
+ * disassociate from each other.
+ *
+ * This function monitors all the tx queues and recovers if any of them
+ * is stuck.
+ * 1. It first checks the cmd queue for a stuck condition. If it is stuck,
+ * it recovers by reloading the firmware and returns.
+ * 2. Then, if the station is associated, it checks the other queues. If
+ * any of them is stuck, it recovers by reloading the firmware.
+ * Note: the queue read_ptr must be found at the same place MAX_REPEAT+1
+ * times before the queue is considered stuck.
+ */
+/*
+ * The maximum number of times the tx queue read pointer may be found at
+ * the same place before the queue is considered stuck.
+ */
+#define MAX_REPEAT (2)
+static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
+{
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+
+ txq = &priv->txq[cnt];
+ q = &txq->q;
+ /* nothing to check if the queue is empty */
+ if (q->read_ptr != q->write_ptr) {
+ if (q->read_ptr == q->last_read_ptr) {
+ /* a queue has not been read from last time */
+ if (q->repeat_same_read_ptr > MAX_REPEAT) {
+ IWL_ERR(priv,
+ "queue %d stuck %d time. Fw reload.\n",
+ q->id, q->repeat_same_read_ptr);
+ q->repeat_same_read_ptr = 0;
+ iwl_force_reset(priv, IWL_FW_RESET);
+ } else {
+ q->repeat_same_read_ptr++;
+ IWL_DEBUG_RADIO(priv,
+ "queue %d, not read %d time\n",
+ q->id,
+ q->repeat_same_read_ptr);
+ mod_timer(&priv->monitor_recover, jiffies +
+ msecs_to_jiffies(IWL_ONE_HUNDRED_MSECS));
+ }
+ return 1;
+ } else {
+ q->last_read_ptr = q->read_ptr;
+ q->repeat_same_read_ptr = 0;
+ }
+ }
+ return 0;
+}
+
+void iwl_bg_monitor_recover(unsigned long data)
+{
+ struct iwl_priv *priv = (struct iwl_priv *)data;
+ int cnt;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ /* monitor and check for stuck cmd queue */
+ if (iwl_check_stuck_queue(priv, IWL_CMD_QUEUE_NUM))
+ return;
+
+ /* monitor and check for other stuck queues */
+ if (iwl_is_associated(priv)) {
+ for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+ /* skip as we already checked the command queue */
+ if (cnt == IWL_CMD_QUEUE_NUM)
+ continue;
+ if (iwl_check_stuck_queue(priv, cnt))
+ return;
+ }
+ }
+ /*
+ * Reschedule the timer to fire again in
+ * priv->cfg->monitor_recover_period milliseconds
+ */
+ mod_timer(&priv->monitor_recover,
+ jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period));
+}
+EXPORT_SYMBOL(iwl_bg_monitor_recover);
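
The recovery callback above assumes that priv->monitor_recover has been initialized as an ordinary kernel timer elsewhere in the driver. A minimal, hypothetical setup sketch (not part of this patch; where the timer is armed is an assumption) could look like:

	/* hypothetical init-time sketch: arm the tx-stall watchdog */
	setup_timer(&priv->monitor_recover, iwl_bg_monitor_recover,
		    (unsigned long)priv);
	/* first check fires after the per-device default period */
	mod_timer(&priv->monitor_recover,
		  jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period));
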
#ifdef CONFIG_PM
@@ -3432,6 +2865,12 @@ int iwl_pci_resume(struct pci_dev *pdev)
struct iwl_priv *priv = pci_get_drvdata(pdev);
int ret;
+ /*
+ * We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
pci_set_power_state(pdev, PCI_D0);
ret = pci_enable_device(pdev);
if (ret)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 36940a9..7e5a5ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -90,6 +90,7 @@ struct iwl_hcmd_ops {
int (*commit_rxon)(struct iwl_priv *priv);
void (*set_rxon_chain)(struct iwl_priv *priv);
int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant);
+ void (*send_bt_config)(struct iwl_priv *priv);
};
struct iwl_hcmd_utils_ops {
@@ -105,6 +106,7 @@ struct iwl_hcmd_utils_ops {
__le32 *tx_flags);
int (*calc_rssi)(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp);
+ void (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
};
struct iwl_apm_ops {
@@ -114,23 +116,21 @@ struct iwl_apm_ops {
int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
};
+struct iwl_debugfs_ops {
+ ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+};
+
struct iwl_temp_ops {
void (*temperature)(struct iwl_priv *priv);
void (*set_ct_kill)(struct iwl_priv *priv);
void (*set_calib_version)(struct iwl_priv *priv);
};
-struct iwl_ucode_ops {
- u32 (*get_header_size)(u32);
- u32 (*get_build)(const struct iwl_ucode_header *, u32);
- u32 (*get_inst_size)(const struct iwl_ucode_header *, u32);
- u32 (*get_data_size)(const struct iwl_ucode_header *, u32);
- u32 (*get_init_size)(const struct iwl_ucode_header *, u32);
- u32 (*get_init_data_size)(const struct iwl_ucode_header *, u32);
- u32 (*get_boot_size)(const struct iwl_ucode_header *, u32);
- u8 * (*get_data)(const struct iwl_ucode_header *, u32);
-};
-
struct iwl_lib_ops {
/* set hw dependent parameters */
int (*set_hw_params)(struct iwl_priv *priv);
@@ -180,8 +180,9 @@ struct iwl_lib_ops {
/* power */
int (*send_tx_power) (struct iwl_priv *priv);
void (*update_chain_flags)(struct iwl_priv *priv);
- void (*post_associate) (struct iwl_priv *priv);
- void (*config_ap) (struct iwl_priv *priv);
+ void (*post_associate)(struct iwl_priv *priv,
+ struct ieee80211_vif *vif);
+ void (*config_ap)(struct iwl_priv *priv, struct ieee80211_vif *vif);
irqreturn_t (*isr) (int irq, void *data);
/* eeprom operations (as defined in iwl-eeprom.h) */
@@ -190,7 +191,17 @@ struct iwl_lib_ops {
/* temperature */
struct iwl_temp_ops temp_ops;
/* station management */
- void (*add_bcast_station)(struct iwl_priv *priv);
+ int (*manage_ibss_station)(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add);
+ /* recover from tx queue stall */
+ void (*recover_from_tx_stall)(unsigned long data);
+ /* check for plcp health */
+ bool (*check_plcp_health)(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+ /* check for ack health */
+ bool (*check_ack_health)(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+ struct iwl_debugfs_ops debugfs_ops;
};
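
As a rough illustration of how the new hooks are meant to be consumed, a device family could wire them into its iwl_lib_ops roughly as below; the instance and the manage_ibss_station handler name are hypothetical, while the field names and the exported helpers come from this patch:

static const struct iwl_lib_ops iwl_example_lib_ops = {
	/* the mandatory callbacks of a real driver are omitted here */
	.manage_ibss_station	= iwl_example_manage_ibss_station, /* hypothetical */
	.recover_from_tx_stall	= iwl_bg_monitor_recover,
	.check_plcp_health	= iwl_good_plcp_health,
	.check_ack_health	= iwl_good_ack_health,
};
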
struct iwl_led_ops {
@@ -200,7 +211,6 @@ struct iwl_led_ops {
};
struct iwl_ops {
- const struct iwl_ucode_ops *ucode;
const struct iwl_lib_ops *lib;
const struct iwl_hcmd_ops *hcmd;
const struct iwl_hcmd_utils_ops *utils;
@@ -237,6 +247,18 @@ struct iwl_mod_params {
* @support_wimax_coexist: support wimax/wifi co-exist
* @plcp_delta_threshold: plcp error rate threshold used to trigger
* radio tuning when there is a high receiving plcp error rate
+ * @chain_noise_scale: default chain noise scale used for gain computation
+ * @monitor_recover_period: default timer period used to check for stuck queues
+ * @temperature_kelvin: temperature reported by uCode is in kelvin
+ * @max_event_log_size: size of the event log buffer for uCode event logging
+ * @tx_power_by_driver: tx power calibration performed by driver
+ * instead of uCode
+ * @ucode_tracing: support ucode continuous tracing
+ * @sensitivity_calib_by_driver: driver has the capability to perform
+ * sensitivity calibration operation
+ * @chain_noise_calib_by_driver: driver has the capability to perform
+ * chain noise calibration operation
+ * @scan_antennas: available antennas for the scan operation
*
* We enable the driver to be backward compatible wrt API version. The
* driver specifies which APIs it supports (with @ucode_api_max being the
@@ -295,6 +317,15 @@ struct iwl_cfg {
const bool support_wimax_coexist;
u8 plcp_delta_threshold;
s32 chain_noise_scale;
+ /* timer period for monitoring the driver queues */
+ u32 monitor_recover_period;
+ bool temperature_kelvin;
+ u32 max_event_log_size;
+ const bool tx_power_by_driver;
+ const bool ucode_tracing;
+ const bool sensitivity_calib_by_driver;
+ const bool chain_noise_calib_by_driver;
+ u8 scan_antennas[IEEE80211_NUM_BANDS];
};
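
A hedged sketch of how a device configuration might fill in the new fields; the values below are purely illustrative, and ANT_AB is assumed to come from the driver's existing antenna definitions:

static struct iwl_cfg iwl_example_cfg = {
	/* only the fields added above are shown; real configs set many more */
	.monitor_recover_period		= 1000,	/* check the tx queues once a second */
	.temperature_kelvin		= true,
	.max_event_log_size		= 512,
	.sensitivity_calib_by_driver	= true,
	.chain_noise_calib_by_driver	= true,
	.scan_antennas[IEEE80211_BAND_2GHZ] = ANT_AB,
};
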
/***************************
@@ -304,8 +335,7 @@ struct iwl_cfg {
struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
struct ieee80211_ops *hw_ops);
void iwl_hw_detect(struct iwl_priv *priv);
-void iwl_reset_qos(struct iwl_priv *priv);
-void iwl_activate_qos(struct iwl_priv *priv, u8 force);
+void iwl_activate_qos(struct iwl_priv *priv);
int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params);
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt);
@@ -316,8 +346,8 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
struct ieee80211_sta_ht_cap *sta_ht_inf);
-void iwl_set_flags_for_band(struct iwl_priv *priv, enum ieee80211_band band);
-void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode);
+void iwl_connection_init_rx_config(struct iwl_priv *priv,
+ struct ieee80211_vif *vif);
int iwl_set_decrypted_flag(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u32 decrypt_res,
@@ -326,29 +356,25 @@ void iwl_irq_handle_error(struct iwl_priv *priv);
void iwl_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags, u64 multicast);
-int iwl_hw_nic_init(struct iwl_priv *priv);
int iwl_set_hw_params(struct iwl_priv *priv);
-bool iwl_is_monitor_mode(struct iwl_priv *priv);
-void iwl_post_associate(struct iwl_priv *priv);
+void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
void iwl_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changes);
int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
int iwl_commit_rxon(struct iwl_priv *priv);
-int iwl_set_mode(struct iwl_priv *priv, int mode);
int iwl_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int iwl_mac_config(struct ieee80211_hw *hw, u32 changed);
-void iwl_config_ap(struct iwl_priv *priv);
+void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif);
void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
int iwl_alloc_txq_mem(struct iwl_priv *priv);
void iwl_free_txq_mem(struct iwl_priv *priv);
void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
__le32 *tx_flags);
-int iwl_send_wimax_coex(struct iwl_priv *priv);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_alloc_traffic_mem(struct iwl_priv *priv);
void iwl_free_traffic_mem(struct iwl_priv *priv);
@@ -411,26 +437,24 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
/*****************************************************
* RX
******************************************************/
-void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
void iwl_cmd_queue_free(struct iwl_priv *priv);
int iwl_rx_queue_alloc(struct iwl_priv *priv);
void iwl_rx_handle(struct iwl_priv *priv);
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
struct iwl_rx_queue *q);
-void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-void iwl_rx_replenish(struct iwl_priv *priv);
-void iwl_rx_replenish_now(struct iwl_priv *priv);
-int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-void iwl_rx_queue_restock(struct iwl_priv *priv);
int iwl_rx_queue_space(const struct iwl_rx_queue *q);
-void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority);
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
/* Handlers */
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
+bool iwl_good_plcp_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+bool iwl_good_ack_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+void iwl_recover_from_statistics(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_reply_statistics(struct iwl_priv *priv,
@@ -442,14 +466,10 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/*****************************************************
* TX
******************************************************/
-int iwl_txq_ctx_alloc(struct iwl_priv *priv);
-void iwl_txq_ctx_reset(struct iwl_priv *priv);
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
dma_addr_t addr, u16 len, u8 reset, u8 pad);
-int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
-void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
void iwl_free_tfds_in_queue(struct iwl_priv *priv,
@@ -460,9 +480,6 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id);
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
-int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
-int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
-int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id);
/*****************************************************
* TX power
****************************************************/
@@ -472,10 +489,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
* Rate
******************************************************************************/
-void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info);
int iwl_hwrate_to_plcp_idx(u32 rate_n_flags);
-int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv);
@@ -505,7 +519,10 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);
int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
-int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req);
+int iwl_mac_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req);
+void iwl_bg_start_internal_scan(struct work_struct *work);
void iwl_internal_short_hw_scan(struct iwl_priv *priv);
int iwl_force_reset(struct iwl_priv *priv, int mode);
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
@@ -515,7 +532,8 @@ u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
enum ieee80211_band band,
u8 n_probes);
u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band);
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif);
void iwl_bg_scan_check(struct work_struct *data);
void iwl_bg_abort_scan(struct work_struct *work);
void iwl_bg_scan_completed(struct work_struct *work);
@@ -530,6 +548,7 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
+#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
/*******************************************************************************
* Calibrations - implemented in iwl-calib.c
@@ -563,11 +582,6 @@ int iwl_send_card_state(struct iwl_priv *priv, u32 flags,
* PCI *
*****************************************************/
irqreturn_t iwl_isr_legacy(int irq, void *data);
-int iwl_reset_ict(struct iwl_priv *priv);
-void iwl_disable_ict(struct iwl_priv *priv);
-int iwl_alloc_isr_ict(struct iwl_priv *priv);
-void iwl_free_isr_ict(struct iwl_priv *priv);
-irqreturn_t iwl_isr_ict(int irq, void *data);
static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
{
@@ -577,6 +591,9 @@ static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
return pci_lnk_ctl;
}
+
+void iwl_bg_monitor_recover(unsigned long data);
+
#ifdef CONFIG_PM
int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state);
int iwl_pci_resume(struct pci_dev *pdev);
@@ -625,7 +642,6 @@ void iwlcore_free_geos(struct iwl_priv *priv);
#define STATUS_SCAN_HW 15
#define STATUS_POWER_PMI 16
#define STATUS_FW_ERROR 17
-#define STATUS_MODE_PENDING 18
static inline int iwl_is_ready(struct iwl_priv *priv)
@@ -672,23 +688,16 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv)
}
extern void iwl_rf_kill_ct_config(struct iwl_priv *priv);
-extern int iwl_send_bt_config(struct iwl_priv *priv);
+extern void iwl_send_bt_config(struct iwl_priv *priv);
extern int iwl_send_statistics_request(struct iwl_priv *priv,
u8 flags, bool clear);
extern int iwl_verify_ucode(struct iwl_priv *priv);
extern int iwl_send_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq, u8 flags);
-extern void iwl_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-extern void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
+ struct iwl_link_quality_cmd *lq, u8 flags, bool init);
void iwl_apm_stop(struct iwl_priv *priv);
-int iwl_apm_stop_master(struct iwl_priv *priv);
int iwl_apm_init(struct iwl_priv *priv);
-void iwl_setup_rxon_timing(struct iwl_priv *priv);
+void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif);
static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
{
return priv->cfg->ops->hcmd->rxon_assoc(priv);
@@ -697,9 +706,10 @@ static inline int iwlcore_commit_rxon(struct iwl_priv *priv)
{
return priv->cfg->ops->hcmd->commit_rxon(priv);
}
-static inline void iwlcore_config_ap(struct iwl_priv *priv)
+static inline void iwlcore_config_ap(struct iwl_priv *priv,
+ struct ieee80211_vif *vif)
{
- priv->cfg->ops->lib->config_ap(priv);
+ priv->cfg->ops->lib->config_ap(priv, vif);
}
static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
struct iwl_priv *priv, enum ieee80211_band band)
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 808b714..254c35a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -298,6 +298,7 @@
#define CSR_HW_REV_TYPE_1000 (0x0000060)
#define CSR_HW_REV_TYPE_6x00 (0x0000070)
#define CSR_HW_REV_TYPE_6x50 (0x0000080)
+#define CSR_HW_REV_TYPE_6x00g2 (0x00000B0)
#define CSR_HW_REV_TYPE_NONE (0x00000F0)
/* EEPROM REG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 1c7b53d..5c2bcef 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -78,6 +78,8 @@ static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
void iwl_dbgfs_unregister(struct iwl_priv *priv);
+extern int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
+ int bufsz);
#else
static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
{
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index b6e1b0e..9659c5d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -106,6 +106,26 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
.open = iwl_dbgfs_open_file_generic, \
};
+int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
+{
+ int p = 0;
+
+ p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
+ le32_to_cpu(priv->statistics.flag));
+ if (le32_to_cpu(priv->statistics.flag) & UCODE_STATISTICS_CLEAR_MSK)
+ p += scnprintf(buf + p, bufsz - p,
+ "\tStatistics have been cleared\n");
+ p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+ (le32_to_cpu(priv->statistics.flag) &
+ UCODE_STATISTICS_FREQUENCY_MSK)
+ ? "2.4 GHz" : "5.2 GHz");
+ p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+ (le32_to_cpu(priv->statistics.flag) &
+ UCODE_STATISTICS_NARROW_BAND_MSK)
+ ? "enabled" : "disabled");
+ return p;
+}
+EXPORT_SYMBOL(iwl_dbgfs_statistics_flag);
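
With iwl_dbgfs_statistics_flag() now exported, per-device statistics readers can share the flag header. A minimal usage sketch (handler name and buffer size are hypothetical) follows the same pattern as the readers in this file:

static ssize_t iwl_example_stats_read(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	char buf[512];
	int pos = 0;

	/* common statistics-flag header first ... */
	pos += iwl_dbgfs_statistics_flag(priv, buf, sizeof(buf));
	/* ... device-specific counters would be appended here with scnprintf() */
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
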
static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
char __user *user_buf,
@@ -561,8 +581,6 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
test_bit(STATUS_POWER_PMI, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
test_bit(STATUS_FW_ERROR, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_MODE_PENDING:\t %d\n",
- test_bit(STATUS_MODE_PENDING, &priv->status));
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -661,7 +679,6 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
int pos = 0, i;
char buf[256];
const size_t bufsz = sizeof(buf);
- ssize_t ret;
for (i = 0; i < AC_NUM; i++) {
pos += scnprintf(buf + pos, bufsz - pos,
@@ -673,8 +690,7 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
priv->qos_data.def_qos_parm.ac[i].aifsn,
priv->qos_data.def_qos_parm.ac[i].edca_txop);
}
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
@@ -684,7 +700,6 @@ static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
int pos = 0;
char buf[256];
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos,
"allow blinking: %s\n",
@@ -698,8 +713,7 @@ static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
priv->last_blink_time);
}
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
@@ -712,7 +726,6 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
char buf[100];
int pos = 0;
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos,
"Thermal Throttling Mode: %s\n",
@@ -732,8 +745,7 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
"HT mode: %d\n",
restriction->is_ht);
}
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
@@ -770,13 +782,11 @@ static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
char buf[100];
int pos = 0;
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos,
"11n 40MHz Mode: %s\n",
priv->disable_ht40 ? "Disabled" : "Enabled");
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
@@ -1044,474 +1054,13 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
- int bufsz)
-{
- int p = 0;
-
- p += scnprintf(buf + p, bufsz - p,
- "Statistics Flag(0x%X):\n",
- le32_to_cpu(priv->statistics.flag));
- if (le32_to_cpu(priv->statistics.flag) & UCODE_STATISTICS_CLEAR_MSK)
- p += scnprintf(buf + p, bufsz - p,
- "\tStatistics have been cleared\n");
- p += scnprintf(buf + p, bufsz - p,
- "\tOperational Frequency: %s\n",
- (le32_to_cpu(priv->statistics.flag) &
- UCODE_STATISTICS_FREQUENCY_MSK)
- ? "2.4 GHz" : "5.2 GHz");
- p += scnprintf(buf + p, bufsz - p,
- "\tTGj Narrow Band: %s\n",
- (le32_to_cpu(priv->statistics.flag) &
- UCODE_STATISTICS_NARROW_BAND_MSK)
- ? "enabled" : "disabled");
- return p;
-}
-
-static const char ucode_stats_header[] =
- "%-32s current acumulative delta max\n";
-static const char ucode_stats_short_format[] =
- " %-30s %10u\n";
-static const char ucode_stats_format[] =
- " %-30s %10u %10u %10u %10u\n";
-
static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct statistics_rx_phy) * 40 +
- sizeof(struct statistics_rx_non_phy) * 40 +
- sizeof(struct statistics_rx_ht_phy) * 40 + 400;
- ssize_t ret;
- struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
- struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
- struct statistics_rx_non_phy *general, *accum_general;
- struct statistics_rx_non_phy *delta_general, *max_general;
- struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /* the statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- ofdm = &priv->statistics.rx.ofdm;
- cck = &priv->statistics.rx.cck;
- general = &priv->statistics.rx.general;
- ht = &priv->statistics.rx.ofdm_ht;
- accum_ofdm = &priv->accum_statistics.rx.ofdm;
- accum_cck = &priv->accum_statistics.rx.cck;
- accum_general = &priv->accum_statistics.rx.general;
- accum_ht = &priv->accum_statistics.rx.ofdm_ht;
- delta_ofdm = &priv->delta_statistics.rx.ofdm;
- delta_cck = &priv->delta_statistics.rx.cck;
- delta_general = &priv->delta_statistics.rx.general;
- delta_ht = &priv->delta_statistics.rx.ofdm_ht;
- max_ofdm = &priv->max_delta.rx.ofdm;
- max_cck = &priv->max_delta.rx.cck;
- max_general = &priv->max_delta.rx.general;
- max_ht = &priv->max_delta.rx.ofdm_ht;
-
- pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_Rx - OFDM:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
- accum_ofdm->ina_cnt,
- delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_cnt:",
- le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
- delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "plcp_err:",
- le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
- delta_ofdm->plcp_err, max_ofdm->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_err:",
- le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
- delta_ofdm->crc32_err, max_ofdm->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "overrun_err:",
- le32_to_cpu(ofdm->overrun_err),
- accum_ofdm->overrun_err,
- delta_ofdm->overrun_err, max_ofdm->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "early_overrun_err:",
- le32_to_cpu(ofdm->early_overrun_err),
- accum_ofdm->early_overrun_err,
- delta_ofdm->early_overrun_err,
- max_ofdm->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_good:",
- le32_to_cpu(ofdm->crc32_good),
- accum_ofdm->crc32_good,
- delta_ofdm->crc32_good, max_ofdm->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "false_alarm_cnt:",
- le32_to_cpu(ofdm->false_alarm_cnt),
- accum_ofdm->false_alarm_cnt,
- delta_ofdm->false_alarm_cnt,
- max_ofdm->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_sync_err_cnt:",
- le32_to_cpu(ofdm->fina_sync_err_cnt),
- accum_ofdm->fina_sync_err_cnt,
- delta_ofdm->fina_sync_err_cnt,
- max_ofdm->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sfd_timeout:",
- le32_to_cpu(ofdm->sfd_timeout),
- accum_ofdm->sfd_timeout,
- delta_ofdm->sfd_timeout,
- max_ofdm->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_timeout:",
- le32_to_cpu(ofdm->fina_timeout),
- accum_ofdm->fina_timeout,
- delta_ofdm->fina_timeout,
- max_ofdm->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "unresponded_rts:",
- le32_to_cpu(ofdm->unresponded_rts),
- accum_ofdm->unresponded_rts,
- delta_ofdm->unresponded_rts,
- max_ofdm->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "rxe_frame_lmt_ovrun:",
- le32_to_cpu(ofdm->rxe_frame_limit_overrun),
- accum_ofdm->rxe_frame_limit_overrun,
- delta_ofdm->rxe_frame_limit_overrun,
- max_ofdm->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_ack_cnt:",
- le32_to_cpu(ofdm->sent_ack_cnt),
- accum_ofdm->sent_ack_cnt,
- delta_ofdm->sent_ack_cnt,
- max_ofdm->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_cts_cnt:",
- le32_to_cpu(ofdm->sent_cts_cnt),
- accum_ofdm->sent_cts_cnt,
- delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_ba_rsp_cnt:",
- le32_to_cpu(ofdm->sent_ba_rsp_cnt),
- accum_ofdm->sent_ba_rsp_cnt,
- delta_ofdm->sent_ba_rsp_cnt,
- max_ofdm->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "dsp_self_kill:",
- le32_to_cpu(ofdm->dsp_self_kill),
- accum_ofdm->dsp_self_kill,
- delta_ofdm->dsp_self_kill,
- max_ofdm->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "mh_format_err:",
- le32_to_cpu(ofdm->mh_format_err),
- accum_ofdm->mh_format_err,
- delta_ofdm->mh_format_err,
- max_ofdm->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "re_acq_main_rssi_sum:",
- le32_to_cpu(ofdm->re_acq_main_rssi_sum),
- accum_ofdm->re_acq_main_rssi_sum,
- delta_ofdm->re_acq_main_rssi_sum,
- max_ofdm->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_Rx - CCK:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "ina_cnt:",
- le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
- delta_cck->ina_cnt, max_cck->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_cnt:",
- le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
- delta_cck->fina_cnt, max_cck->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "plcp_err:",
- le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
- delta_cck->plcp_err, max_cck->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_err:",
- le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
- delta_cck->crc32_err, max_cck->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "overrun_err:",
- le32_to_cpu(cck->overrun_err),
- accum_cck->overrun_err,
- delta_cck->overrun_err, max_cck->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "early_overrun_err:",
- le32_to_cpu(cck->early_overrun_err),
- accum_cck->early_overrun_err,
- delta_cck->early_overrun_err,
- max_cck->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_good:",
- le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
- delta_cck->crc32_good,
- max_cck->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "false_alarm_cnt:",
- le32_to_cpu(cck->false_alarm_cnt),
- accum_cck->false_alarm_cnt,
- delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_sync_err_cnt:",
- le32_to_cpu(cck->fina_sync_err_cnt),
- accum_cck->fina_sync_err_cnt,
- delta_cck->fina_sync_err_cnt,
- max_cck->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sfd_timeout:",
- le32_to_cpu(cck->sfd_timeout),
- accum_cck->sfd_timeout,
- delta_cck->sfd_timeout, max_cck->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_timeout:",
- le32_to_cpu(cck->fina_timeout),
- accum_cck->fina_timeout,
- delta_cck->fina_timeout, max_cck->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "unresponded_rts:",
- le32_to_cpu(cck->unresponded_rts),
- accum_cck->unresponded_rts,
- delta_cck->unresponded_rts,
- max_cck->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "rxe_frame_lmt_ovrun:",
- le32_to_cpu(cck->rxe_frame_limit_overrun),
- accum_cck->rxe_frame_limit_overrun,
- delta_cck->rxe_frame_limit_overrun,
- max_cck->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_ack_cnt:",
- le32_to_cpu(cck->sent_ack_cnt),
- accum_cck->sent_ack_cnt,
- delta_cck->sent_ack_cnt,
- max_cck->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_cts_cnt:",
- le32_to_cpu(cck->sent_cts_cnt),
- accum_cck->sent_cts_cnt,
- delta_cck->sent_cts_cnt,
- max_cck->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_ba_rsp_cnt:",
- le32_to_cpu(cck->sent_ba_rsp_cnt),
- accum_cck->sent_ba_rsp_cnt,
- delta_cck->sent_ba_rsp_cnt,
- max_cck->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "dsp_self_kill:",
- le32_to_cpu(cck->dsp_self_kill),
- accum_cck->dsp_self_kill,
- delta_cck->dsp_self_kill,
- max_cck->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "mh_format_err:",
- le32_to_cpu(cck->mh_format_err),
- accum_cck->mh_format_err,
- delta_cck->mh_format_err, max_cck->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "re_acq_main_rssi_sum:",
- le32_to_cpu(cck->re_acq_main_rssi_sum),
- accum_cck->re_acq_main_rssi_sum,
- delta_cck->re_acq_main_rssi_sum,
- max_cck->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_Rx - GENERAL:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "bogus_cts:",
- le32_to_cpu(general->bogus_cts),
- accum_general->bogus_cts,
- delta_general->bogus_cts, max_general->bogus_cts);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "bogus_ack:",
- le32_to_cpu(general->bogus_ack),
- accum_general->bogus_ack,
- delta_general->bogus_ack, max_general->bogus_ack);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "non_bssid_frames:",
- le32_to_cpu(general->non_bssid_frames),
- accum_general->non_bssid_frames,
- delta_general->non_bssid_frames,
- max_general->non_bssid_frames);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "filtered_frames:",
- le32_to_cpu(general->filtered_frames),
- accum_general->filtered_frames,
- delta_general->filtered_frames,
- max_general->filtered_frames);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "non_channel_beacons:",
- le32_to_cpu(general->non_channel_beacons),
- accum_general->non_channel_beacons,
- delta_general->non_channel_beacons,
- max_general->non_channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "channel_beacons:",
- le32_to_cpu(general->channel_beacons),
- accum_general->channel_beacons,
- delta_general->channel_beacons,
- max_general->channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "num_missed_bcon:",
- le32_to_cpu(general->num_missed_bcon),
- accum_general->num_missed_bcon,
- delta_general->num_missed_bcon,
- max_general->num_missed_bcon);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "adc_rx_saturation_time:",
- le32_to_cpu(general->adc_rx_saturation_time),
- accum_general->adc_rx_saturation_time,
- delta_general->adc_rx_saturation_time,
- max_general->adc_rx_saturation_time);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "ina_detect_search_tm:",
- le32_to_cpu(general->ina_detection_search_time),
- accum_general->ina_detection_search_time,
- delta_general->ina_detection_search_time,
- max_general->ina_detection_search_time);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_silence_rssi_a:",
- le32_to_cpu(general->beacon_silence_rssi_a),
- accum_general->beacon_silence_rssi_a,
- delta_general->beacon_silence_rssi_a,
- max_general->beacon_silence_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_silence_rssi_b:",
- le32_to_cpu(general->beacon_silence_rssi_b),
- accum_general->beacon_silence_rssi_b,
- delta_general->beacon_silence_rssi_b,
- max_general->beacon_silence_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_silence_rssi_c:",
- le32_to_cpu(general->beacon_silence_rssi_c),
- accum_general->beacon_silence_rssi_c,
- delta_general->beacon_silence_rssi_c,
- max_general->beacon_silence_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "interference_data_flag:",
- le32_to_cpu(general->interference_data_flag),
- accum_general->interference_data_flag,
- delta_general->interference_data_flag,
- max_general->interference_data_flag);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "channel_load:",
- le32_to_cpu(general->channel_load),
- accum_general->channel_load,
- delta_general->channel_load,
- max_general->channel_load);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "dsp_false_alarms:",
- le32_to_cpu(general->dsp_false_alarms),
- accum_general->dsp_false_alarms,
- delta_general->dsp_false_alarms,
- max_general->dsp_false_alarms);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_rssi_a:",
- le32_to_cpu(general->beacon_rssi_a),
- accum_general->beacon_rssi_a,
- delta_general->beacon_rssi_a,
- max_general->beacon_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_rssi_b:",
- le32_to_cpu(general->beacon_rssi_b),
- accum_general->beacon_rssi_b,
- delta_general->beacon_rssi_b,
- max_general->beacon_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_rssi_c:",
- le32_to_cpu(general->beacon_rssi_c),
- accum_general->beacon_rssi_c,
- delta_general->beacon_rssi_c,
- max_general->beacon_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_energy_a:",
- le32_to_cpu(general->beacon_energy_a),
- accum_general->beacon_energy_a,
- delta_general->beacon_energy_a,
- max_general->beacon_energy_a);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_energy_b:",
- le32_to_cpu(general->beacon_energy_b),
- accum_general->beacon_energy_b,
- delta_general->beacon_energy_b,
- max_general->beacon_energy_b);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_energy_c:",
- le32_to_cpu(general->beacon_energy_c),
- accum_general->beacon_energy_c,
- delta_general->beacon_energy_c,
- max_general->beacon_energy_c);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_Rx - OFDM_HT:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "plcp_err:",
- le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
- delta_ht->plcp_err, max_ht->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "overrun_err:",
- le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
- delta_ht->overrun_err, max_ht->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "early_overrun_err:",
- le32_to_cpu(ht->early_overrun_err),
- accum_ht->early_overrun_err,
- delta_ht->early_overrun_err,
- max_ht->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_good:",
- le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
- delta_ht->crc32_good, max_ht->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_err:",
- le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
- delta_ht->crc32_err, max_ht->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "mh_format_err:",
- le32_to_cpu(ht->mh_format_err),
- accum_ht->mh_format_err,
- delta_ht->mh_format_err, max_ht->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg_crc32_good:",
- le32_to_cpu(ht->agg_crc32_good),
- accum_ht->agg_crc32_good,
- delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg_mpdu_cnt:",
- le32_to_cpu(ht->agg_mpdu_cnt),
- accum_ht->agg_mpdu_cnt,
- delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg_cnt:",
- le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
- delta_ht->agg_cnt, max_ht->agg_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "unsupport_mcs:",
- le32_to_cpu(ht->unsupport_mcs),
- accum_ht->unsupport_mcs,
- delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
+ return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
+ user_buf, count, ppos);
}
static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
@@ -1519,173 +1068,8 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
- ssize_t ret;
- struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /* the statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- tx = &priv->statistics.tx;
- accum_tx = &priv->accum_statistics.tx;
- delta_tx = &priv->delta_statistics.tx;
- max_tx = &priv->max_delta.tx;
- pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_Tx:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "preamble:",
- le32_to_cpu(tx->preamble_cnt),
- accum_tx->preamble_cnt,
- delta_tx->preamble_cnt, max_tx->preamble_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "rx_detected_cnt:",
- le32_to_cpu(tx->rx_detected_cnt),
- accum_tx->rx_detected_cnt,
- delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "bt_prio_defer_cnt:",
- le32_to_cpu(tx->bt_prio_defer_cnt),
- accum_tx->bt_prio_defer_cnt,
- delta_tx->bt_prio_defer_cnt,
- max_tx->bt_prio_defer_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "bt_prio_kill_cnt:",
- le32_to_cpu(tx->bt_prio_kill_cnt),
- accum_tx->bt_prio_kill_cnt,
- delta_tx->bt_prio_kill_cnt,
- max_tx->bt_prio_kill_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "few_bytes_cnt:",
- le32_to_cpu(tx->few_bytes_cnt),
- accum_tx->few_bytes_cnt,
- delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "cts_timeout:",
- le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
- delta_tx->cts_timeout, max_tx->cts_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "ack_timeout:",
- le32_to_cpu(tx->ack_timeout),
- accum_tx->ack_timeout,
- delta_tx->ack_timeout, max_tx->ack_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "expected_ack_cnt:",
- le32_to_cpu(tx->expected_ack_cnt),
- accum_tx->expected_ack_cnt,
- delta_tx->expected_ack_cnt,
- max_tx->expected_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "actual_ack_cnt:",
- le32_to_cpu(tx->actual_ack_cnt),
- accum_tx->actual_ack_cnt,
- delta_tx->actual_ack_cnt,
- max_tx->actual_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "dump_msdu_cnt:",
- le32_to_cpu(tx->dump_msdu_cnt),
- accum_tx->dump_msdu_cnt,
- delta_tx->dump_msdu_cnt,
- max_tx->dump_msdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "abort_nxt_frame_mismatch:",
- le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
- accum_tx->burst_abort_next_frame_mismatch_cnt,
- delta_tx->burst_abort_next_frame_mismatch_cnt,
- max_tx->burst_abort_next_frame_mismatch_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "abort_missing_nxt_frame:",
- le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
- accum_tx->burst_abort_missing_next_frame_cnt,
- delta_tx->burst_abort_missing_next_frame_cnt,
- max_tx->burst_abort_missing_next_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "cts_timeout_collision:",
- le32_to_cpu(tx->cts_timeout_collision),
- accum_tx->cts_timeout_collision,
- delta_tx->cts_timeout_collision,
- max_tx->cts_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "ack_ba_timeout_collision:",
- le32_to_cpu(tx->ack_or_ba_timeout_collision),
- accum_tx->ack_or_ba_timeout_collision,
- delta_tx->ack_or_ba_timeout_collision,
- max_tx->ack_or_ba_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg ba_timeout:",
- le32_to_cpu(tx->agg.ba_timeout),
- accum_tx->agg.ba_timeout,
- delta_tx->agg.ba_timeout,
- max_tx->agg.ba_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg ba_resched_frames:",
- le32_to_cpu(tx->agg.ba_reschedule_frames),
- accum_tx->agg.ba_reschedule_frames,
- delta_tx->agg.ba_reschedule_frames,
- max_tx->agg.ba_reschedule_frames);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg scd_query_agg_frame:",
- le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
- accum_tx->agg.scd_query_agg_frame_cnt,
- delta_tx->agg.scd_query_agg_frame_cnt,
- max_tx->agg.scd_query_agg_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg scd_query_no_agg:",
- le32_to_cpu(tx->agg.scd_query_no_agg),
- accum_tx->agg.scd_query_no_agg,
- delta_tx->agg.scd_query_no_agg,
- max_tx->agg.scd_query_no_agg);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg scd_query_agg:",
- le32_to_cpu(tx->agg.scd_query_agg),
- accum_tx->agg.scd_query_agg,
- delta_tx->agg.scd_query_agg,
- max_tx->agg.scd_query_agg);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg scd_query_mismatch:",
- le32_to_cpu(tx->agg.scd_query_mismatch),
- accum_tx->agg.scd_query_mismatch,
- delta_tx->agg.scd_query_mismatch,
- max_tx->agg.scd_query_mismatch);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg frame_not_ready:",
- le32_to_cpu(tx->agg.frame_not_ready),
- accum_tx->agg.frame_not_ready,
- delta_tx->agg.frame_not_ready,
- max_tx->agg.frame_not_ready);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg underrun:",
- le32_to_cpu(tx->agg.underrun),
- accum_tx->agg.underrun,
- delta_tx->agg.underrun, max_tx->agg.underrun);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg bt_prio_kill:",
- le32_to_cpu(tx->agg.bt_prio_kill),
- accum_tx->agg.bt_prio_kill,
- delta_tx->agg.bt_prio_kill,
- max_tx->agg.bt_prio_kill);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg rx_ba_rsp_cnt:",
- le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
- accum_tx->agg.rx_ba_rsp_cnt,
- delta_tx->agg.rx_ba_rsp_cnt,
- max_tx->agg.rx_ba_rsp_cnt);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
+ return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
+ user_buf, count, ppos);
}
static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
@@ -1693,107 +1077,8 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct statistics_general) * 10 + 300;
- ssize_t ret;
- struct statistics_general *general, *accum_general;
- struct statistics_general *delta_general, *max_general;
- struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
- struct statistics_div *div, *accum_div, *delta_div, *max_div;
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /* the statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- general = &priv->statistics.general;
- dbg = &priv->statistics.general.dbg;
- div = &priv->statistics.general.div;
- accum_general = &priv->accum_statistics.general;
- delta_general = &priv->delta_statistics.general;
- max_general = &priv->max_delta.general;
- accum_dbg = &priv->accum_statistics.general.dbg;
- delta_dbg = &priv->delta_statistics.general.dbg;
- max_dbg = &priv->max_delta.general.dbg;
- accum_div = &priv->accum_statistics.general.div;
- delta_div = &priv->delta_statistics.general.div;
- max_div = &priv->max_delta.general.div;
- pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_General:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
- "temperature:",
- le32_to_cpu(general->temperature));
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
- "temperature_m:",
- le32_to_cpu(general->temperature_m));
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "burst_check:",
- le32_to_cpu(dbg->burst_check),
- accum_dbg->burst_check,
- delta_dbg->burst_check, max_dbg->burst_check);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "burst_count:",
- le32_to_cpu(dbg->burst_count),
- accum_dbg->burst_count,
- delta_dbg->burst_count, max_dbg->burst_count);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sleep_time:",
- le32_to_cpu(general->sleep_time),
- accum_general->sleep_time,
- delta_general->sleep_time, max_general->sleep_time);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "slots_out:",
- le32_to_cpu(general->slots_out),
- accum_general->slots_out,
- delta_general->slots_out, max_general->slots_out);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "slots_idle:",
- le32_to_cpu(general->slots_idle),
- accum_general->slots_idle,
- delta_general->slots_idle, max_general->slots_idle);
- pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
- le32_to_cpu(general->ttl_timestamp));
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "tx_on_a:",
- le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
- delta_div->tx_on_a, max_div->tx_on_a);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "tx_on_b:",
- le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
- delta_div->tx_on_b, max_div->tx_on_b);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "exec_time:",
- le32_to_cpu(div->exec_time), accum_div->exec_time,
- delta_div->exec_time, max_div->exec_time);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "probe_time:",
- le32_to_cpu(div->probe_time), accum_div->probe_time,
- delta_div->probe_time, max_div->probe_time);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "rx_enable_counter:",
- le32_to_cpu(general->rx_enable_counter),
- accum_general->rx_enable_counter,
- delta_general->rx_enable_counter,
- max_general->rx_enable_counter);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "num_of_sos_states:",
- le32_to_cpu(general->num_of_sos_states),
- accum_general->num_of_sos_states,
- delta_general->num_of_sos_states,
- max_general->num_of_sos_states);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
+ return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
+ user_buf, count, ppos);
}
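
The three statistics files above now just forward to per-device handlers instead of formatting the output here. As an illustration only (not part of the patch itself), the indirection implied by the call sites could look roughly like this; the struct name is an assumption, while the three function-pointer names come from the calls above:

struct iwl_debugfs_ops {
        ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos);
        ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos);
        ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
                                      size_t count, loff_t *ppos);
};
/* reachable as priv->cfg->ops->lib->debugfs_ops, so the agn and 3945
 * code paths can each plug in their own formatting routines */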
static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
@@ -1935,46 +1220,6 @@ static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
return ret;
}
-static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char buf[128];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- struct statistics_tx *tx;
-
- if (!iwl_is_alive(priv))
- pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
- else {
- tx = &priv->statistics.tx;
- if (tx->tx_power.ant_a ||
- tx->tx_power.ant_b ||
- tx->tx_power.ant_c) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "tx power: (1/2 dB step)\n");
- if ((priv->cfg->valid_tx_ant & ANT_A) &&
- tx->tx_power.ant_a)
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tantenna A: 0x%X\n",
- tx->tx_power.ant_a);
- if ((priv->cfg->valid_tx_ant & ANT_B) &&
- tx->tx_power.ant_b)
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tantenna B: 0x%X\n",
- tx->tx_power.ant_b);
- if ((priv->cfg->valid_tx_ant & ANT_C) &&
- tx->tx_power.ant_c)
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tantenna C: 0x%X\n",
- tx->tx_power.ant_c);
- } else
- pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
- }
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -2052,7 +1297,6 @@ static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
int pos = 0;
char buf[128];
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
priv->event_log.ucode_trace ? "On" : "Off");
@@ -2063,8 +1307,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
priv->event_log.wraps_more_count);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
@@ -2096,6 +1339,31 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
return count;
}
+static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len = sprintf(buf, "0x%04X\n", le32_to_cpu(priv->active_rxon.flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len = sprintf(buf, "0x%04X\n",
+ le32_to_cpu(priv->active_rxon.filter_flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -2125,13 +1393,11 @@ static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
int pos = 0;
char buf[12];
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
priv->missed_beacon_threshold);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
@@ -2160,27 +1426,6 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
return count;
}
-static ssize_t iwl_dbgfs_internal_scan_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int scan;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &scan) != 1)
- return -EINVAL;
-
- iwl_internal_short_hw_scan(priv);
-
- return count;
-}
-
static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
@@ -2189,13 +1434,11 @@ static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
int pos = 0;
char buf[12];
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
priv->cfg->plcp_delta_threshold);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
@@ -2288,7 +1531,6 @@ DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
DEBUGFS_READ_FILE_OPS(ucode_general_stats);
DEBUGFS_READ_FILE_OPS(sensitivity);
DEBUGFS_READ_FILE_OPS(chain_noise);
-DEBUGFS_READ_FILE_OPS(tx_power);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
@@ -2296,9 +1538,10 @@ DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
-DEBUGFS_WRITE_FILE_OPS(internal_scan);
DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
+DEBUGFS_READ_FILE_OPS(rxon_flags);
+DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
/*
* Create the debugfs files and directories
@@ -2334,8 +1577,11 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
+ if (!priv->cfg->broken_powersave) {
+ DEBUGFS_ADD_FILE(sleep_level_override, dir_data,
+ S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
+ }
DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
@@ -2343,29 +1589,33 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_power, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(internal_scan, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
- if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
- DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+
+ if (priv->cfg->sensitivity_calib_by_driver)
DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+ if (priv->cfg->chain_noise_calib_by_driver)
DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+ if (priv->cfg->ucode_tracing)
DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
- }
- DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, &priv->disable_sens_cal);
- DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
- &priv->disable_chain_noise_cal);
- if (((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) ||
- ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_3945))
+ DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
+ if (priv->cfg->sensitivity_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
+ &priv->disable_sens_cal);
+ if (priv->cfg->chain_noise_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
+ &priv->disable_chain_noise_cal);
+ if (priv->cfg->tx_power_by_driver)
DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
&priv->disable_tx_power_cal);
return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index ef1720a..f3f3473 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -43,6 +43,7 @@
#include "iwl-debug.h"
#include "iwl-4965-hw.h"
#include "iwl-3945-hw.h"
+#include "iwl-agn-hw.h"
#include "iwl-led.h"
#include "iwl-power.h"
#include "iwl-agn-rs.h"
@@ -56,6 +57,7 @@ extern struct iwl_cfg iwl5100_bgn_cfg;
extern struct iwl_cfg iwl5100_abg_cfg;
extern struct iwl_cfg iwl5150_agn_cfg;
extern struct iwl_cfg iwl5150_abg_cfg;
+extern struct iwl_cfg iwl6000g2a_2agn_cfg;
extern struct iwl_cfg iwl6000i_2agn_cfg;
extern struct iwl_cfg iwl6000i_2abg_cfg;
extern struct iwl_cfg iwl6000i_2bg_cfg;
@@ -67,45 +69,6 @@ extern struct iwl_cfg iwl1000_bg_cfg;
struct iwl_tx_queue;
-/* shared structures from iwl-5000.c */
-extern struct iwl_mod_params iwl50_mod_params;
-extern struct iwl_ucode_ops iwl5000_ucode;
-extern struct iwl_lib_ops iwl5000_lib;
-extern struct iwl_hcmd_ops iwl5000_hcmd;
-extern struct iwl_hcmd_utils_ops iwl5000_hcmd_utils;
-
-/* shared functions from iwl-5000.c */
-extern u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len);
-extern u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd,
- u8 *data);
-extern void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
- __le32 *tx_flags);
-extern int iwl5000_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp);
-extern void iwl5000_nic_config(struct iwl_priv *priv);
-extern u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv);
-extern const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
- size_t offset);
-extern void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt);
-extern void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-extern int iwl5000_load_ucode(struct iwl_priv *priv);
-extern void iwl5000_init_alive_start(struct iwl_priv *priv);
-extern int iwl5000_alive_notify(struct iwl_priv *priv);
-extern int iwl5000_hw_set_hw_params(struct iwl_priv *priv);
-extern int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
- int tx_fifo, int sta_id, int tid, u16 ssn_idx);
-extern int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo);
-extern void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask);
-extern void iwl5000_setup_deferred_work(struct iwl_priv *priv);
-extern void iwl5000_rx_handler_setup(struct iwl_priv *priv);
-extern int iwl5000_hw_valid_rtc_data_addr(u32 addr);
-extern int iwl5000_send_tx_power(struct iwl_priv *priv);
-extern void iwl5000_temperature(struct iwl_priv *priv);
-
/* CT-KILL constants */
#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
#define CT_KILL_THRESHOLD 114 /* in Celsius */
@@ -183,6 +146,10 @@ struct iwl_queue {
int n_bd; /* number of BDs in this queue */
int write_ptr; /* 1-st empty entry (index) host_w*/
int read_ptr; /* last used entry (index) host_r*/
+ /* used for monitoring and recovering a stuck queue */
+ int last_read_ptr; /* storing the last read_ptr */
+ /* number of times read_ptr and last_read_ptr have been the same */
+ u8 repeat_same_read_ptr;
dma_addr_t dma_addr; /* physical addr for BD's */
int n_window; /* safe queue window */
u32 id;
@@ -304,13 +271,11 @@ struct iwl_channel_info {
struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
};
-#define IWL_TX_FIFO_AC0 0
-#define IWL_TX_FIFO_AC1 1
-#define IWL_TX_FIFO_AC2 2
-#define IWL_TX_FIFO_AC3 3
-#define IWL_TX_FIFO_HCCA_1 5
-#define IWL_TX_FIFO_HCCA_2 6
-#define IWL_TX_FIFO_NONE 7
+#define IWL_TX_FIFO_BK 0
+#define IWL_TX_FIFO_BE 1
+#define IWL_TX_FIFO_VI 2
+#define IWL_TX_FIFO_VO 3
+#define IWL_TX_FIFO_UNUSED -1
/* Minimum number of queues. MAX_NUM is defined in hw specific files.
* Set the minimum to accommodate the 4 standard TX queues, 1 command
@@ -361,13 +326,6 @@ enum {
#define DEF_CMD_PAYLOAD_SIZE 320
-/*
- * IWL_LINK_HDR_MAX should include ieee80211_hdr, radiotap header,
- * SNAP header and alignment. It should also be big enough for 802.11
- * control frames.
- */
-#define IWL_LINK_HDR_MAX 64
-
/**
* struct iwl_device_cmd
*
@@ -519,38 +477,28 @@ struct iwl_ht_config {
u8 non_GF_STA_present;
};
-union iwl_qos_capabity {
- struct {
- u8 edca_count:4; /* bit 0-3 */
- u8 q_ack:1; /* bit 4 */
- u8 queue_request:1; /* bit 5 */
- u8 txop_request:1; /* bit 6 */
- u8 reserved:1; /* bit 7 */
- } q_AP;
- struct {
- u8 acvo_APSD:1; /* bit 0 */
- u8 acvi_APSD:1; /* bit 1 */
- u8 ac_bk_APSD:1; /* bit 2 */
- u8 ac_be_APSD:1; /* bit 3 */
- u8 q_ack:1; /* bit 4 */
- u8 max_len:2; /* bit 5-6 */
- u8 more_data_ack:1; /* bit 7 */
- } q_STA;
- u8 val;
-};
-
/* QoS structures */
struct iwl_qos_info {
int qos_active;
- union iwl_qos_capabity qos_cap;
struct iwl_qosparam_cmd def_qos_parm;
};
+/*
+ * This structure should be accessed with sta_lock held. While station
+ * addition is in progress (IWL_STA_UCODE_INPROGRESS), only the commands
+ * (iwl_addsta_cmd and iwl_link_quality_cmd) may be accessed without
+ * sta_lock held.
+ */
struct iwl_station_entry {
struct iwl_addsta_cmd sta;
struct iwl_tid_data tid[MAX_TID_COUNT];
u8 used;
struct iwl_hw_key keyinfo;
+ struct iwl_link_quality_cmd *lq;
+};
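
As an illustration of the locking rule above (not part of the patch itself; the helper name is hypothetical, while sta_lock, stations[] and the used field appear elsewhere in this patch):

static void example_mark_station_used(struct iwl_priv *priv, int sta_id)
{
        unsigned long flags;

        /* anything beyond the command blobs needs sta_lock */
        spin_lock_irqsave(&priv->sta_lock, flags);
        priv->stations[sta_id].used = 1;
        spin_unlock_irqrestore(&priv->sta_lock, flags);

        /* only .sta and .lq may be touched lock-free, and only while
         * IWL_STA_UCODE_INPROGRESS is set for this station */
}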
+
+struct iwl_station_priv_common {
+ u8 sta_id;
};
/*
@@ -559,14 +507,28 @@ struct iwl_station_entry {
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
 * in the structure for use by the driver. This structure is placed in that
* space.
+ *
+ * The common struct MUST be first because it is shared between
+ * 3945 and agn!
*/
struct iwl_station_priv {
+ struct iwl_station_priv_common common;
struct iwl_lq_sta lq_sta;
atomic_t pending_frames;
bool client;
bool asleep;
};
+/**
+ * struct iwl_vif_priv - driver's private per-interface information
+ *
+ * When mac80211 allocates a virtual interface, it can allocate
+ * space for us to put data into.
+ */
+struct iwl_vif_priv {
+ u8 ibss_bssid_sta_id;
+};
+
/* one for each uCode image (inst/data, boot/init/runtime) */
struct fw_desc {
void *v_addr; /* access by driver */
@@ -574,7 +536,7 @@ struct fw_desc {
u32 len; /* bytes */
};
-/* uCode file layout */
+/* v1/v2 uCode file layout */
struct iwl_ucode_header {
__le32 ver; /* major/minor/API/serial */
union {
@@ -597,7 +559,62 @@ struct iwl_ucode_header {
} v2;
} u;
};
-#define UCODE_HEADER_SIZE(ver) ((ver) == 1 ? 24 : 28)
+
+/*
+ * new TLV uCode file layout
+ *
+ * The new TLV file format contains TLVs, each of which specifies
+ * some piece of data. To facilitate "groups" (for example, different
+ * instruction images with different capabilities bundled with the
+ * same init image), an alternative mechanism is provided:
+ * when the alternative field is 0, the item is always valid;
+ * when it is non-zero, it is only valid in conjunction with items
+ * of the same alternative, in which case the driver (user) selects
+ * one alternative to use.
+ */
+
+enum iwl_ucode_tlv_type {
+ IWL_UCODE_TLV_INVALID = 0, /* unused */
+ IWL_UCODE_TLV_INST = 1,
+ IWL_UCODE_TLV_DATA = 2,
+ IWL_UCODE_TLV_INIT = 3,
+ IWL_UCODE_TLV_INIT_DATA = 4,
+ IWL_UCODE_TLV_BOOT = 5,
+ IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
+};
+
+struct iwl_ucode_tlv {
+ __le16 type; /* see above */
+ __le16 alternative; /* see comment */
+ __le32 length; /* not including type/length fields */
+ u8 data[0];
+} __attribute__ ((packed));
+
+#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
+
+struct iwl_tlv_ucode_header {
+ /*
+ * The TLV style ucode header is distinguished from
+ * the v1/v2 style header by its first four bytes being
+ * zero, since that is an invalid combination of
+ * major/minor/API/serial versions.
+ */
+ __le32 zero;
+ __le32 magic;
+ u8 human_readable[64];
+ __le32 ver; /* major/minor/API/serial */
+ __le32 build;
+ __le64 alternatives; /* bitmask of valid alternatives */
+ /*
+ * The data contained herein has a TLV layout,
+ * see above for the TLV header and types.
+ * Note that each TLV is padded to a length
+ * that is a multiple of 4 for alignment.
+ */
+ u8 data[0];
+};
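
As an illustration only (not part of the patch itself), a minimal sketch of walking a TLV-style image as described above; the function name and error handling are assumptions, while the header and TLV layouts are the ones just defined:

static int example_walk_ucode_tlvs(const struct iwl_tlv_ucode_header *hdr,
                                   size_t total_len)
{
        const u8 *data = hdr->data;
        size_t len;

        if (total_len < sizeof(*hdr))
                return -EINVAL;
        len = total_len - sizeof(*hdr);

        /* a TLV image has the first four bytes zero and the magic set */
        if (hdr->zero != 0 || le32_to_cpu(hdr->magic) != IWL_TLV_UCODE_MAGIC)
                return -EINVAL;

        while (len >= sizeof(struct iwl_ucode_tlv)) {
                const struct iwl_ucode_tlv *tlv = (const void *)data;
                u32 tlv_len = le32_to_cpu(tlv->length);
                u32 padded = ALIGN(tlv_len, 4);  /* TLVs are padded to 4 bytes */

                if (sizeof(*tlv) + padded > len)
                        return -EINVAL;          /* truncated image */

                /* dispatch on le16_to_cpu(tlv->type) and tlv->alternative,
                 * keeping only items whose alternative is 0 or the one the
                 * driver selected */

                data += sizeof(*tlv) + padded;
                len -= sizeof(*tlv) + padded;
        }
        return 0;
}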
struct iwl4965_ibss_seq {
u8 mac[ETH_ALEN];
@@ -1039,6 +1056,11 @@ struct iwl_event_log {
#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
+/* timer constants used to monitor and recover stuck tx queues, in msecs */
+#define IWL_MONITORING_PERIOD (1000)
+#define IWL_ONE_HUNDRED_MSECS (100)
+#define IWL_SIXTY_SECS (60000)
+
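The new last_read_ptr/repeat_same_read_ptr fields in struct iwl_queue, the constants above and the monitor_recover timer added below in struct iwl_priv work together. As an illustration only (not part of the patch itself; the function name is hypothetical), the per-queue check driven by that timer could look like:

static void example_check_stuck_queue(struct iwl_queue *q)
{
        if (q->read_ptr == q->last_read_ptr) {
                /* no progress since the previous IWL_MONITORING_PERIOD tick */
                q->repeat_same_read_ptr++;
                /* after enough identical samples the driver would request
                 * a firmware reload (IWL_FW_RESET) to recover the queue */
        } else {
                q->last_read_ptr = q->read_ptr;
                q->repeat_same_read_ptr = 0;
        }
}
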
enum iwl_reset {
IWL_RF_RESET = 0,
IWL_FW_RESET,
@@ -1092,10 +1114,6 @@ struct iwl_priv {
struct iwl_channel_info *channel_info; /* channel info array */
u8 channel_count; /* # of channels */
- /* each calibration channel group in the EEPROM has a derived
- * clip setting for each rate. 3945 only.*/
- const struct iwl3945_clip_group clip39_groups[5];
-
/* thermal calibration */
s32 temperature; /* degrees Kelvin */
s32 last_temperature;
@@ -1104,12 +1122,10 @@ struct iwl_priv {
struct iwl_calib_result calib_results[IWL_CALIB_MAX];
/* Scan related variables */
- unsigned long next_scan_jiffies;
unsigned long scan_start;
- unsigned long scan_pass_start;
unsigned long scan_start_tsf;
- void *scan;
- int scan_bands;
+ void *scan_cmd;
+ enum ieee80211_band scan_band;
struct cfg80211_scan_request *scan_request;
bool is_internal_short_scan;
u8 scan_tx_ant[IEEE80211_NUM_BANDS];
@@ -1168,16 +1184,13 @@ struct iwl_priv {
u64 led_tpt;
u16 active_rate;
- u16 active_rate_basic;
- u8 assoc_station_added;
u8 start_calib;
struct iwl_sensitivity_data sensitivity_data;
struct iwl_chain_noise_data chain_noise_data;
__le16 sensitivity_tbl[HD_TABLE_SIZE];
struct iwl_ht_config current_ht_config;
- u8 last_phy_res[100];
/* Rate scaling data */
u8 retry_rate;
@@ -1197,9 +1210,6 @@ struct iwl_priv {
unsigned long status;
- int last_rx_rssi; /* From Rx packet statistics */
- int last_rx_noise; /* From beacon statistics */
-
/* counts mgmt, ctl, and data packets */
struct traffic_stats tx_stats;
struct traffic_stats rx_stats;
@@ -1218,18 +1228,14 @@ struct iwl_priv {
#endif
/* context information */
- u16 rates_mask;
-
- u8 bssid[ETH_ALEN];
- u16 rts_threshold;
+ u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
u8 mac_addr[ETH_ALEN];
/*station table variables */
spinlock_t sta_lock;
int num_stations;
struct iwl_station_entry stations[IWL_STATION_COUNT];
- struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
- u8 default_wep_key;
+ struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; /* protected by mutex */
u8 key_mapping_key;
unsigned long ucode_key_table;
@@ -1244,10 +1250,6 @@ struct iwl_priv {
u8 mac80211_registered;
- /* Rx'd packet timing information */
- u32 last_beacon_time;
- u64 last_tsf;
-
/* eeprom -- this is in the card's little endian byte order */
u8 *eeprom;
int nvm_device_type;
@@ -1259,29 +1261,67 @@ struct iwl_priv {
/* Last Rx'd beacon timestamp */
u64 timestamp;
- u16 beacon_int;
struct ieee80211_vif *vif;
- /*Added for 3945 */
- void *shared_virt;
- dma_addr_t shared_phys;
- /*End*/
- struct iwl_hw_params hw_params;
+ union {
+#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+ struct {
+ void *shared_virt;
+ dma_addr_t shared_phys;
- /* INT ICT Table */
- __le32 *ict_tbl;
- dma_addr_t ict_tbl_dma;
- dma_addr_t aligned_ict_tbl_dma;
- int ict_index;
- void *ict_tbl_vir;
- u32 inta;
- bool use_ict;
+ struct delayed_work thermal_periodic;
+ struct delayed_work rfkill_poll;
+
+ struct iwl3945_notif_statistics statistics;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ struct iwl3945_notif_statistics accum_statistics;
+ struct iwl3945_notif_statistics delta_statistics;
+ struct iwl3945_notif_statistics max_delta;
+#endif
+
+ u32 sta_supp_rates;
+ int last_rx_rssi; /* From Rx packet statistics */
+
+ /* Rx'd packet timing information */
+ u32 last_beacon_time;
+ u64 last_tsf;
+
+ /*
+ * each calibration channel group in the
+ * EEPROM has a derived clip setting for
+ * each rate.
+ */
+ const struct iwl3945_clip_group clip_groups[5];
+
+ } _3945;
+#endif
+#if defined(CONFIG_IWLAGN) || defined(CONFIG_IWLAGN_MODULE)
+ struct {
+ /* INT ICT Table */
+ __le32 *ict_tbl;
+ void *ict_tbl_vir;
+ dma_addr_t ict_tbl_dma;
+ dma_addr_t aligned_ict_tbl_dma;
+ int ict_index;
+ u32 inta;
+ bool use_ict;
+ /*
+ * number of TIDs that have aggregation (AGG) enabled;
+ * 0 means no aggregation
+ */
+ u8 agg_tids_count;
+
+ struct iwl_rx_phy_res last_phy_res;
+ bool last_phy_res_valid;
+
+ struct completion firmware_loading_complete;
+ } _agn;
+#endif
+ };
+
+ struct iwl_hw_params hw_params;
u32 inta_mask;
- /* Current association information needed to configure the
- * hardware */
- u16 assoc_id;
- u16 assoc_capability;
struct iwl_qos_info qos_data;
@@ -1291,7 +1331,6 @@ struct iwl_priv {
struct work_struct scan_completed;
struct work_struct rx_replenish;
struct work_struct abort_scan;
- struct work_struct request_scan;
struct work_struct beacon_update;
struct work_struct tt_work;
struct work_struct ct_enter;
@@ -1304,10 +1343,6 @@ struct iwl_priv {
struct delayed_work alive_start;
struct delayed_work scan_check;
- /*For 3945 only*/
- struct delayed_work thermal_periodic;
- struct delayed_work rfkill_poll;
-
/* TX Power */
s8 tx_power_user_lmt;
s8 tx_power_device_lmt;
@@ -1339,13 +1374,8 @@ struct iwl_priv {
struct work_struct run_time_calib_work;
struct timer_list statistics_periodic;
struct timer_list ucode_trace;
+ struct timer_list monitor_recover;
bool hw_ready;
- /*For 3945*/
-#define IWL_DEFAULT_TX_POWER 0x0F
-
- struct iwl3945_notif_statistics statistics_39;
-
- u32 sta_supp_rates;
struct iwl_event_log event_log;
}; /*iwl_priv */
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 2ffc2ed..4a48763 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -37,6 +37,7 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_tx);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index fb5bb48..ee11452 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -590,9 +590,16 @@ int iwl_eeprom_init(struct iwl_priv *priv)
e[addr / 2] = cpu_to_le16(r >> 16);
}
}
+
+ IWL_DEBUG_INFO(priv, "NVM Type: %s, version: 0x%x\n",
+ (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+ ? "OTP" : "EEPROM",
+ iwl_eeprom_query16(priv, EEPROM_VERSION));
+
ret = 0;
done:
priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
+
err:
if (ret)
iwl_eeprom_free(priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 8171c70..95aa202 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -172,35 +172,35 @@ struct iwl_eeprom_enhanced_txpwr {
#define EEPROM_5000_TX_POWER_VERSION (4)
#define EEPROM_5000_EEPROM_VERSION (0x11A)
-/*5000 calibrations */
-#define EEPROM_5000_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
-#define EEPROM_5000_XTAL ((2*0x128) | EEPROM_5000_CALIB_ALL)
-#define EEPROM_5000_TEMPERATURE ((2*0x12A) | EEPROM_5000_CALIB_ALL)
-
-/* 5000 links */
-#define EEPROM_5000_LINK_HOST (2*0x64)
-#define EEPROM_5000_LINK_GENERAL (2*0x65)
-#define EEPROM_5000_LINK_REGULATORY (2*0x66)
-#define EEPROM_5000_LINK_CALIBRATION (2*0x67)
-#define EEPROM_5000_LINK_PROCESS_ADJST (2*0x68)
-#define EEPROM_5000_LINK_OTHERS (2*0x69)
-
-/* 5000 regulatory - indirect access */
-#define EEPROM_5000_REG_SKU_ID ((0x02)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 4 bytes */
-#define EEPROM_5000_REG_BAND_1_CHANNELS ((0x08)\
+/* 5000 and up calibration */
+#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
+#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
+
+/* 5000 temperature */
+#define EEPROM_5000_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
+
+/* agn links */
+#define EEPROM_LINK_HOST (2*0x64)
+#define EEPROM_LINK_GENERAL (2*0x65)
+#define EEPROM_LINK_REGULATORY (2*0x66)
+#define EEPROM_LINK_CALIBRATION (2*0x67)
+#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
+#define EEPROM_LINK_OTHERS (2*0x69)
+
+/* agn regulatory - indirect access */
+#define EEPROM_REG_BAND_1_CHANNELS ((0x08)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */
-#define EEPROM_5000_REG_BAND_2_CHANNELS ((0x26)\
+#define EEPROM_REG_BAND_2_CHANNELS ((0x26)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */
-#define EEPROM_5000_REG_BAND_3_CHANNELS ((0x42)\
+#define EEPROM_REG_BAND_3_CHANNELS ((0x42)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
-#define EEPROM_5000_REG_BAND_4_CHANNELS ((0x5C)\
+#define EEPROM_REG_BAND_4_CHANNELS ((0x5C)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
-#define EEPROM_5000_REG_BAND_5_CHANNELS ((0x74)\
+#define EEPROM_REG_BAND_5_CHANNELS ((0x74)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */
-#define EEPROM_5000_REG_BAND_24_HT40_CHANNELS ((0x82)\
+#define EEPROM_REG_BAND_24_HT40_CHANNELS ((0x82)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
-#define EEPROM_5000_REG_BAND_52_HT40_CHANNELS ((0x92)\
+#define EEPROM_REG_BAND_52_HT40_CHANNELS ((0x92)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
/* 6000 regulatory - indirect access */
@@ -265,14 +265,21 @@ struct iwl_eeprom_enhanced_txpwr {
#define EEPROM_5050_EEPROM_VERSION (0x21E)
/* 1000 Specific */
+#define EEPROM_1000_TX_POWER_VERSION (4)
#define EEPROM_1000_EEPROM_VERSION (0x15C)
/* 6x00 Specific */
+#define EEPROM_6000_TX_POWER_VERSION (4)
#define EEPROM_6000_EEPROM_VERSION (0x434)
/* 6x50 Specific */
+#define EEPROM_6050_TX_POWER_VERSION (4)
#define EEPROM_6050_EEPROM_VERSION (0x532)
+/* 6x00g2 Specific */
+#define EEPROM_6000G2_TX_POWER_VERSION (6)
+#define EEPROM_6000G2_EEPROM_VERSION (0x709)
+
/* OTP */
/* lower blocks contain EEPROM image and calibration data */
#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 73681c4..51f89e7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -169,7 +169,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
mutex_lock(&priv->sync_cmd_mutex);
set_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s \n",
+ IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));
cmd_idx = iwl_enqueue_hcmd(priv, cmd);
@@ -191,7 +191,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n",
+ IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));
ret = -ETIMEDOUT;
goto cancel;
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 51a67fb..3ff6b9d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -31,6 +31,9 @@
#define __iwl_helpers_h__
#include <linux/ctype.h>
+#include <net/mac80211.h>
+
+#include "iwl-io.h"
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 16eb3ce..0203a3b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -298,7 +298,7 @@ static inline u32 __iwl_read_direct32(const char *f, u32 l,
struct iwl_priv *priv, u32 reg)
{
u32 value = _iwl_read_direct32(priv, reg);
- IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d \n", reg, value,
+ IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
f, l);
return value;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index a6f9c91..db5bfcb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -46,7 +46,7 @@
static int led_mode;
module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode, "led mode: 0=blinking, 1=On(RF On)/Off(RF Off), "
- "(default 0)\n");
+ "(default 0)");
static const struct {
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 548dac2..cda6a94 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -318,10 +318,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
- if (priv->vif)
- dtimper = priv->hw->conf.ps_dtim_period;
- else
- dtimper = 1;
+ dtimper = priv->hw->conf.ps_dtim_period ?: 1;
if (priv->cfg->broken_powersave)
iwl_power_sleep_cam_cmd(priv, &cmd);
@@ -384,10 +381,10 @@ EXPORT_SYMBOL(iwl_ht_enabled);
bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
{
- s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */
+ s32 temp = priv->temperature; /* degrees CELSIUS except specified */
bool within_margin = false;
- if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
+ if (priv->cfg->temperature_kelvin)
temp = KELVIN_TO_CELSIUS(priv->temperature);
if (!priv->thermal_throttle.advanced_tt)
@@ -840,12 +837,12 @@ EXPORT_SYMBOL(iwl_tt_exit_ct_kill);
static void iwl_bg_tt_work(struct work_struct *work)
{
struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
- s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */
+ s32 temp = priv->temperature; /* degrees CELSIUS except specified */
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
+ if (priv->cfg->temperature_kelvin)
temp = KELVIN_TO_CELSIUS(priv->temperature);
if (!priv->thermal_throttle.advanced_tt)
@@ -875,7 +872,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
struct iwl_tt_trans *transaction;
- IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling \n");
+ IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling\n");
memset(tt, 0, sizeof(struct iwl_tt_mgmt));
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index d2d2a91..b1f101c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -254,7 +254,7 @@
* device. A queue maps to only one (selectable by driver) Tx DMA channel,
* but one DMA channel may take input from several queues.
*
- * Tx DMA channels have dedicated purposes. For 4965, they are used as follows
+ * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
* (cf. default_queue_to_tx_fifo in iwl-4965.c):
*
* 0 -- EDCA BK (background) frames, lowest priority
@@ -262,20 +262,20 @@
* 2 -- EDCA VI (video) frames, higher priority
* 3 -- EDCA VO (voice) and management frames, highest priority
* 4 -- Commands (e.g. RXON, etc.)
- * 5 -- HCCA short frames
- * 6 -- HCCA long frames
+ * 5 -- unused (HCCA)
+ * 6 -- unused (HCCA)
* 7 -- not used by driver (device-internal only)
*
- * For 5000 series and up, they are used slightly differently
+ * For 5000 series and up, they are used differently
* (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c):
*
* 0 -- EDCA BK (background) frames, lowest priority
* 1 -- EDCA BE (best effort) frames, normal priority
* 2 -- EDCA VI (video) frames, higher priority
* 3 -- EDCA VO (voice) and management frames, highest priority
- * 4 -- (TBD)
- * 5 -- HCCA short frames
- * 6 -- HCCA long frames
+ * 4 -- unused
+ * 5 -- unused
+ * 6 -- unused
* 7 -- Commands
*
* Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
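
As an illustration only (not part of the patch itself), the per-queue assignment described above for the 5000 series and newer could be expressed with the IWL_TX_FIFO_* names introduced earlier in this patch; the array name is an assumption and FIFO 7 has no named constant here:

static const s8 example_queue_to_tx_fifo[] = {
        IWL_TX_FIFO_BK,         /* queue 0 -> FIFO 0, background */
        IWL_TX_FIFO_BE,         /* queue 1 -> FIFO 1, best effort */
        IWL_TX_FIFO_VI,         /* queue 2 -> FIFO 2, video */
        IWL_TX_FIFO_VO,         /* queue 3 -> FIFO 3, voice and management */
        IWL_TX_FIFO_UNUSED,     /* FIFO 4 unused */
        IWL_TX_FIFO_UNUSED,     /* FIFO 5 unused */
        IWL_TX_FIFO_UNUSED,     /* FIFO 6 unused */
        7,                      /* FIFO 7 carries commands */
};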
@@ -529,48 +529,48 @@
#define IWL_SCD_TXFIFO_POS_RA (4)
#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
-/* 5000 SCD */
-#define IWL50_SCD_QUEUE_STTS_REG_POS_TXF (0)
-#define IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
-#define IWL50_SCD_QUEUE_STTS_REG_POS_WSL (4)
-#define IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
-#define IWL50_SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
-
-#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
-#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
-#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
-#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
-#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
-#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
-#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
-#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
-
-#define IWL50_SCD_CONTEXT_DATA_OFFSET (0x600)
-#define IWL50_SCD_TX_STTS_BITMAP_OFFSET (0x7B1)
-#define IWL50_SCD_TRANSLATE_TBL_OFFSET (0x7E0)
-
-#define IWL50_SCD_CONTEXT_QUEUE_OFFSET(x)\
- (IWL50_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
-
-#define IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
- ((IWL50_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc)
-
-#define IWL50_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\
+/* agn SCD */
+#define IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF (0)
+#define IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
+#define IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL (4)
+#define IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
+#define IWLAGN_SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
+
+#define IWLAGN_SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
+#define IWLAGN_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
+#define IWLAGN_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
+#define IWLAGN_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
+#define IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
+#define IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
+#define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
+#define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
+
+#define IWLAGN_SCD_CONTEXT_DATA_OFFSET (0x600)
+#define IWLAGN_SCD_TX_STTS_BITMAP_OFFSET (0x7B1)
+#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET (0x7E0)
+
+#define IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(x)\
+ (IWLAGN_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
+
+#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
+ ((IWLAGN_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc)
+
+#define IWLAGN_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\
(~(1<<IWL_CMD_QUEUE_NUM)))
-#define IWL50_SCD_BASE (PRPH_BASE + 0xa02c00)
-
-#define IWL50_SCD_SRAM_BASE_ADDR (IWL50_SCD_BASE + 0x0)
-#define IWL50_SCD_DRAM_BASE_ADDR (IWL50_SCD_BASE + 0x8)
-#define IWL50_SCD_AIT (IWL50_SCD_BASE + 0x0c)
-#define IWL50_SCD_TXFACT (IWL50_SCD_BASE + 0x10)
-#define IWL50_SCD_ACTIVE (IWL50_SCD_BASE + 0x14)
-#define IWL50_SCD_QUEUE_WRPTR(x) (IWL50_SCD_BASE + 0x18 + (x) * 4)
-#define IWL50_SCD_QUEUE_RDPTR(x) (IWL50_SCD_BASE + 0x68 + (x) * 4)
-#define IWL50_SCD_QUEUECHAIN_SEL (IWL50_SCD_BASE + 0xe8)
-#define IWL50_SCD_AGGR_SEL (IWL50_SCD_BASE + 0x248)
-#define IWL50_SCD_INTERRUPT_MASK (IWL50_SCD_BASE + 0x108)
-#define IWL50_SCD_QUEUE_STATUS_BITS(x) (IWL50_SCD_BASE + 0x10c + (x) * 4)
+#define IWLAGN_SCD_BASE (PRPH_BASE + 0xa02c00)
+
+#define IWLAGN_SCD_SRAM_BASE_ADDR (IWLAGN_SCD_BASE + 0x0)
+#define IWLAGN_SCD_DRAM_BASE_ADDR (IWLAGN_SCD_BASE + 0x8)
+#define IWLAGN_SCD_AIT (IWLAGN_SCD_BASE + 0x0c)
+#define IWLAGN_SCD_TXFACT (IWLAGN_SCD_BASE + 0x10)
+#define IWLAGN_SCD_ACTIVE (IWLAGN_SCD_BASE + 0x14)
+#define IWLAGN_SCD_QUEUE_WRPTR(x) (IWLAGN_SCD_BASE + 0x18 + (x) * 4)
+#define IWLAGN_SCD_QUEUE_RDPTR(x) (IWLAGN_SCD_BASE + 0x68 + (x) * 4)
+#define IWLAGN_SCD_QUEUECHAIN_SEL (IWLAGN_SCD_BASE + 0xe8)
+#define IWLAGN_SCD_AGGR_SEL (IWLAGN_SCD_BASE + 0x248)
+#define IWLAGN_SCD_INTERRUPT_MASK (IWLAGN_SCD_BASE + 0x108)
+#define IWLAGN_SCD_QUEUE_STATUS_BITS(x) (IWLAGN_SCD_BASE + 0x10c + (x) * 4)
/*********************** END TX SCHEDULER *************************************/
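The IWL50_* to IWLAGN_* rename above is mechanical; the layout the macros encode is unchanged: each queue's scheduler context occupies 8 bytes starting at IWLAGN_SCD_CONTEXT_DATA_OFFSET, and the translate table packs two 2-byte queue entries per 32-bit word (hence the "* 2" and the "& 0xfffc" alignment). A small sketch of deriving per-queue addresses from these macros; scd_base is assumed to have been read from IWLAGN_SCD_SRAM_BASE_ADDR as elsewhere in the driver:

/* Illustration only: per-queue addresses derived from the macros above. */
static inline u32 example_scd_queue_context_addr(u32 scd_base, int txq_id)
{
	return scd_base + IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id); /* 8 bytes/queue */
}

static inline u32 example_scd_queue_status_reg(int txq_id)
{
	return IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id);	/* PRPH-space register */
}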
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index e5eb339..0a5d7cf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -163,197 +163,6 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
-/**
- * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-/**
- * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-void iwl_rx_queue_restock(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
- int write;
-
- spin_lock_irqsave(&rxq->lock, flags);
- write = rxq->write & ~0x7;
- while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
- /* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- /* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
-
-
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_rx_queue_update_write_ptr(priv, rxq);
- }
-}
-EXPORT_SYMBOL(iwl_rx_queue_restock);
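The restock path removed from the shared rx code here leans on two hardware constraints worth keeping in mind: an RBD stores the receive buffer's DMA address shifted right by 8, so buffers must be 256-byte aligned and fit within 36 bits, and the device's write pointer is only ever advanced in multiples of 8 RBDs. A minimal sketch of the address conversion, mirroring iwl_dma_addr2rbd_ptr() above:

/* RBD entries carry bits [35:8] of the buffer's DMA address, which is
 * why the allocation path checks that addresses are 256-byte aligned
 * and do not exceed 36 bits. */
static inline __le32 example_rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}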
-
-
-/**
- * iwl_rx_replenish - Move all used packet from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl_rx_queue_restock.
- * This is called as a scheduled work item (except for during initialization)
- */
-void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
- unsigned long flags;
- gfp_t gfp_mask = priority;
-
- while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (priv->hw_params.rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(priv, "alloc_pages failed, "
- "order: %d\n",
- priv->hw_params.rx_page_order);
-
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
- return;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, priv->hw_params.rx_page_order);
- return;
- }
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- rxb->page = page;
- /* Get physical address of the RB */
- rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- priv->alloc_rxb_page++;
-
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
-}
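iwl_rx_allocate() above chooses its allocation flags from context: the caller passes GFP_KERNEL from process context (iwl_rx_replenish) or GFP_ATOMIC from the interrupt path (iwl_rx_replenish_now), __GFP_NOWARN is added while the free pool is still above the low watermark so transient failures stay quiet, and __GFP_COMP is required once multi-page (8K) receive buffers are in use. A condensed sketch of that flag selection; free_count, page_order and priority stand in for the rxq/hw_params fields and the caller's argument:

/* Sketch of the gfp flag selection used in iwl_rx_allocate(). */
struct page *page;
gfp_t gfp_mask = priority;			/* GFP_KERNEL or GFP_ATOMIC */

if (free_count > RX_LOW_WATERMARK)
	gfp_mask |= __GFP_NOWARN;		/* pool healthy: failures are not critical */
if (page_order > 0)
	gfp_mask |= __GFP_COMP;			/* compound page for 8K buffers */

page = alloc_pages(gfp_mask, page_order);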
-
-void iwl_rx_replenish(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- iwl_rx_allocate(priv, GFP_KERNEL);
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-EXPORT_SYMBOL(iwl_rx_replenish);
-
-void iwl_rx_replenish_now(struct iwl_priv *priv)
-{
- iwl_rx_allocate(priv, GFP_ATOMIC);
-
- iwl_rx_queue_restock(priv);
-}
-EXPORT_SYMBOL(iwl_rx_replenish_now);
-
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL
- * This free routine walks the list of POOL entries and if SKB is set to
- * non NULL it is unmapped and freed
- */
-void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- int i;
- for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- }
-
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
- dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- rxq->bd = NULL;
- rxq->rb_stts = NULL;
-}
-EXPORT_SYMBOL(iwl_rx_queue_free);
int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
@@ -396,98 +205,6 @@ err_bd:
}
EXPORT_SYMBOL(iwl_rx_queue_alloc);
-void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- unsigned long flags;
- int i;
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- u32 rb_size;
- const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
-
- if (!priv->cfg->use_isr_legacy)
- rb_timeout = RX_RB_TIMEOUT;
-
- if (priv->cfg->mod_params->amsdu_size_8K)
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
- else
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
- /* Stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
- /* Reset driver's Rx queue write index */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
- /* Tell device where to find RBD circular buffer in DRAM */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->dma_addr >> 8));
-
- /* Tell device where in DRAM to update its Rx status */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
-
- /* Enable Rx DMA
- * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
- * the credit mechanism in 5000 HW RX FIFO
- * Direct rx interrupts to hosts
- * Rx buffer size 4 or 8k
- * RB timeout 0x10
- * 256 RBDs
- */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
- rb_size|
- (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
- /* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
- return 0;
-}
-
-int iwl_rxq_stop(struct iwl_priv *priv)
-{
-
- /* stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_rxq_stop);
-
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
@@ -543,6 +260,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
int bcn_silence_c =
le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+ int last_rx_noise;
if (bcn_silence_a) {
total_silence += bcn_silence_a;
@@ -559,13 +277,13 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
/* Average among active antennas */
if (num_active_rx)
- priv->last_rx_noise = (total_silence / num_active_rx) - 107;
+ last_rx_noise = (total_silence / num_active_rx) - 107;
else
- priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
+ last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
bcn_silence_a, bcn_silence_b, bcn_silence_c,
- priv->last_rx_noise);
+ last_rx_noise);
}
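The calculation above averages the beacon silence reported by whichever RX chains returned a non-zero measurement and converts it to dBm with a fixed -107 offset; with this patch the result only feeds the debug print instead of priv->last_rx_noise. A worked sketch with made-up per-chain values:

/* Hypothetical silence readings: chains a/b report 30 and 28, chain c
 * reports 0 and is ignored, so the average is (30 + 28) / 2 - 107 = -78 dBm. */
int silence[3] = { 30, 28, 0 };
int total = 0, active = 0, noise, i;

for (i = 0; i < 3; i++) {
	if (silence[i]) {
		total += silence[i];
		active++;
	}
}
noise = active ? (total / active) - 107 : IWL_NOISE_MEAS_NOT_AVAILABLE;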
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -617,29 +335,20 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
#define REG_RECALIB_PERIOD (60)
-#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n"
-void iwl_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+/**
+ * iwl_good_plcp_health - checks for excessive plcp errors
+ *
+ * When the plcp error rate exceeds the threshold, reset the radio
+ * to improve throughput.
+ */
+bool iwl_good_plcp_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
{
- int change;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ bool rc = true;
int combined_plcp_delta;
unsigned int plcp_msec;
unsigned long plcp_received_jiffies;
- IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
- (int)sizeof(priv->statistics),
- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-
- change = ((priv->statistics.general.temperature !=
- pkt->u.stats.general.temperature) ||
- ((priv->statistics.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
- (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
-#endif
/*
* check for plcp_err and trigger radio reset if it exceeds
* the plcp error threshold plcp_delta.
@@ -660,11 +369,11 @@ void iwl_rx_statistics(struct iwl_priv *priv,
le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));
if ((combined_plcp_delta > 0) &&
- ((combined_plcp_delta * 100) / plcp_msec) >
+ ((combined_plcp_delta * 100) / plcp_msec) >
priv->cfg->plcp_delta_threshold) {
/*
- * if plcp_err exceed the threshold, the following
- * data is printed in csv format:
+ * if plcp_err exceeds the threshold,
+ * the following data is printed in csv format:
* Text: plcp_err exceeded %d,
* Received ofdm.plcp_err,
* Current ofdm.plcp_err,
@@ -673,22 +382,76 @@ void iwl_rx_statistics(struct iwl_priv *priv,
* combined_plcp_delta,
* plcp_msec
*/
- IWL_DEBUG_RADIO(priv, PLCP_MSG,
+ IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
+ "%u, %u, %u, %u, %d, %u mSecs\n",
priv->cfg->plcp_delta_threshold,
le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
le32_to_cpu(
- priv->statistics.rx.ofdm_ht.plcp_err),
+ priv->statistics.rx.ofdm_ht.plcp_err),
combined_plcp_delta, plcp_msec);
+ rc = false;
+ }
+ }
+ return rc;
+}
+EXPORT_SYMBOL(iwl_good_plcp_health);
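The check factored out into iwl_good_plcp_health() expresses the PLCP error budget as errors per 100 ms: the combined delta of the OFDM and OFDM-HT plcp_err counters since the previous statistics notification is scaled by 100, divided by the elapsed milliseconds, and compared against the per-device plcp_delta_threshold. A worked sketch with invented numbers:

/* Made-up numbers: 300 new plcp errors over 500 ms against a threshold
 * of 50 -> (300 * 100) / 500 = 60 > 50, so the check fails and the
 * caller resets the RF; 60 errors over the same window would give 12
 * and pass. */
int combined_plcp_delta = 300;
unsigned int plcp_msec = 500;
u8 threshold = 50;			/* priv->cfg->plcp_delta_threshold */
bool healthy = !((combined_plcp_delta > 0) &&
		 ((combined_plcp_delta * 100) / plcp_msec) > threshold);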
- /*
- * Reset the RF radio due to the high plcp
- * error rate
- */
- iwl_force_reset(priv, IWL_RF_RESET);
+void iwl_recover_from_statistics(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
+{
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+ if (iwl_is_associated(priv)) {
+ if (priv->cfg->ops->lib->check_ack_health) {
+ if (!priv->cfg->ops->lib->check_ack_health(
+ priv, pkt)) {
+ /*
+ * low ack count detected
+ * restart Firmware
+ */
+ IWL_ERR(priv, "low ack count detected, "
+ "restart firmware\n");
+ if (!iwl_force_reset(priv, IWL_FW_RESET))
+ return;
+ }
+ }
+ if (priv->cfg->ops->lib->check_plcp_health) {
+ if (!priv->cfg->ops->lib->check_plcp_health(
+ priv, pkt)) {
+ /*
+ * high plcp error detected
+ * reset Radio
+ */
+ iwl_force_reset(priv, IWL_RF_RESET);
+ }
}
}
+}
+EXPORT_SYMBOL(iwl_recover_from_statistics);
+
+void iwl_rx_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ int change;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
+ (int)sizeof(priv->statistics),
+ le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+
+ change = ((priv->statistics.general.temperature !=
+ pkt->u.stats.general.temperature) ||
+ ((priv->statistics.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+ (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
+#endif
+ iwl_recover_from_statistics(priv, pkt);
memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
@@ -731,139 +494,6 @@ void iwl_reply_statistics(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_reply_statistics);
-/* Calc max signal level (dBm) among 3 possible receivers */
-static inline int iwl_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp)
-{
- return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-/**
- * iwl_dbg_report_frame - dump frame to syslog during debug sessions
- *
- * You may hack this function to show different aspects of received frames,
- * including selective frame dumps.
- * group100 parameter selects whether to show 1 out of 100 good data frames.
- * All beacon and probe response frames are printed.
- */
-static void iwl_dbg_report_frame(struct iwl_priv *priv,
- struct iwl_rx_phy_res *phy_res, u16 length,
- struct ieee80211_hdr *header, int group100)
-{
- u32 to_us;
- u32 print_summary = 0;
- u32 print_dump = 0; /* set to 1 to dump all frames' contents */
- u32 hundred = 0;
- u32 dataframe = 0;
- __le16 fc;
- u16 seq_ctl;
- u16 channel;
- u16 phy_flags;
- u32 rate_n_flags;
- u32 tsf_low;
- int rssi;
-
- if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX)))
- return;
-
- /* MAC header */
- fc = header->frame_control;
- seq_ctl = le16_to_cpu(header->seq_ctrl);
-
- /* metadata */
- channel = le16_to_cpu(phy_res->channel);
- phy_flags = le16_to_cpu(phy_res->phy_flags);
- rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
- /* signal statistics */
- rssi = iwl_calc_rssi(priv, phy_res);
- tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;
-
- to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
-
- /* if data frame is to us and all is good,
- * (optionally) print summary for only 1 out of every 100 */
- if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
- cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
- dataframe = 1;
- if (!group100)
- print_summary = 1; /* print each frame */
- else if (priv->framecnt_to_us < 100) {
- priv->framecnt_to_us++;
- print_summary = 0;
- } else {
- priv->framecnt_to_us = 0;
- print_summary = 1;
- hundred = 1;
- }
- } else {
- /* print summary for all other frames */
- print_summary = 1;
- }
-
- if (print_summary) {
- char *title;
- int rate_idx;
- u32 bitrate;
-
- if (hundred)
- title = "100Frames";
- else if (ieee80211_has_retry(fc))
- title = "Retry";
- else if (ieee80211_is_assoc_resp(fc))
- title = "AscRsp";
- else if (ieee80211_is_reassoc_resp(fc))
- title = "RasRsp";
- else if (ieee80211_is_probe_resp(fc)) {
- title = "PrbRsp";
- print_dump = 1; /* dump frame contents */
- } else if (ieee80211_is_beacon(fc)) {
- title = "Beacon";
- print_dump = 1; /* dump frame contents */
- } else if (ieee80211_is_atim(fc))
- title = "ATIM";
- else if (ieee80211_is_auth(fc))
- title = "Auth";
- else if (ieee80211_is_deauth(fc))
- title = "DeAuth";
- else if (ieee80211_is_disassoc(fc))
- title = "DisAssoc";
- else
- title = "Frame";
-
- rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
- if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
- bitrate = 0;
- WARN_ON_ONCE(1);
- } else {
- bitrate = iwl_rates[rate_idx].ieee / 2;
- }
-
- /* print frame summary.
- * MAC addresses show just the last byte (for brevity),
- * but you can hack it to show more, if you'd like to. */
- if (dataframe)
- IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
- "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
- title, le16_to_cpu(fc), header->addr1[5],
- length, rssi, channel, bitrate);
- else {
- /* src/dst addresses assume managed mode */
- IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
- "len=%u, rssi=%d, tim=%lu usec, "
- "phy=0x%02x, chnl=%d\n",
- title, le16_to_cpu(fc), header->addr1[5],
- header->addr3[5], length, rssi,
- tsf_low - priv->scan_start_tsf,
- phy_flags, channel);
- }
- }
- if (print_dump)
- iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
-}
-#endif
-
/*
* returns non-zero if packet should be dropped
*/
@@ -911,305 +541,3 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
return 0;
}
EXPORT_SYMBOL(iwl_set_decrypted_flag);
-
-static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
-{
- u32 decrypt_out = 0;
-
- if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
- RX_RES_STATUS_STATION_FOUND)
- decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
- RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
-
- decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
-
- /* packet was not encrypted */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_NONE)
- return decrypt_out;
-
- /* packet was encrypted with unknown alg */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_ERR)
- return decrypt_out;
-
- /* decryption was not done in HW */
- if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
- RX_MPDU_RES_STATUS_DEC_DONE_MSK)
- return decrypt_out;
-
- switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
-
- case RX_RES_STATUS_SEC_TYPE_CCMP:
- /* alg is CCM: check MIC only */
- if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
- /* Bad MIC */
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
-
- break;
-
- case RX_RES_STATUS_SEC_TYPE_TKIP:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
- /* Bad TTAK */
- decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
- break;
- }
- /* fall through if TTAK OK */
- default:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
- break;
- };
-
- IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
- decrypt_in, decrypt_out);
-
- return decrypt_out;
-}
-
-static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u16 len,
- u32 ampdu_status,
- struct iwl_rx_mem_buffer *rxb,
- struct ieee80211_rx_status *stats)
-{
- struct sk_buff *skb;
- int ret = 0;
- __le16 fc = hdr->frame_control;
-
- /* We only process data packets if the interface is open */
- if (unlikely(!priv->is_open)) {
- IWL_DEBUG_DROP_LIMIT(priv,
- "Dropping packet while interface is not open.\n");
- return;
- }
-
- /* In case of HW accelerated crypto and bad decryption, drop */
- if (!priv->cfg->mod_params->sw_crypto &&
- iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
- return;
-
- skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
- if (!skb) {
- IWL_ERR(priv, "alloc_skb failed\n");
- return;
- }
-
- skb_reserve(skb, IWL_LINK_HDR_MAX);
- skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
-
- /* mac80211 currently doesn't support paged SKB. Convert it to
- * linear SKB for management frame and data frame requires
- * software decryption or software defragementation. */
- if (ieee80211_is_mgmt(fc) ||
- ieee80211_has_protected(fc) ||
- ieee80211_has_morefrags(fc) ||
- le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
- (ieee80211_is_data_qos(fc) &&
- *ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
- ret = skb_linearize(skb);
- else
- ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
- 0 : -ENOMEM;
-
- if (ret) {
- kfree_skb(skb);
- goto out;
- }
-
- /*
- * XXX: We cannot touch the page and its virtual memory (hdr) after
- * here. It might have already been freed by the above skb change.
- */
-
- iwl_update_stats(priv, false, fc, len);
- memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
- ieee80211_rx(priv->hw, skb);
- out:
- priv->alloc_rxb_page--;
- rxb->page = NULL;
-}
-
-/* This is necessary only for a number of statistics, see the caller. */
-static int iwl_is_network_packet(struct iwl_priv *priv,
- struct ieee80211_hdr *header)
-{
- /* Filter incoming packets to determine if they are targeted toward
- * this network, discarding packets coming from ourselves */
- switch (priv->iw_mode) {
- case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
- /* packets to our IBSS update information */
- return !compare_ether_addr(header->addr3, priv->bssid);
- case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
- /* packets to our IBSS update information */
- return !compare_ether_addr(header->addr2, priv->bssid);
- default:
- return 1;
- }
-}
-
-/* Called for REPLY_RX (legacy ABG frames), or
- * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
-void iwl_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_rx_phy_res *phy_res;
- __le32 rx_pkt_status;
- struct iwl4965_rx_mpdu_res_start *amsdu;
- u32 len;
- u32 ampdu_status;
- u32 rate_n_flags;
-
- /**
- * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
- * REPLY_RX: physical layer info is in this buffer
- * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
- * command and cached in priv->last_phy_res
- *
- * Here we set up local variables depending on which command is
- * received.
- */
- if (pkt->hdr.cmd == REPLY_RX) {
- phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
- + phy_res->cfg_phy_cnt);
-
- len = le16_to_cpu(phy_res->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
- phy_res->cfg_phy_cnt + len);
- ampdu_status = le32_to_cpu(rx_pkt_status);
- } else {
- if (!priv->last_phy_res[0]) {
- IWL_ERR(priv, "MPDU frame without cached PHY data\n");
- return;
- }
- phy_res = (struct iwl_rx_phy_res *)&priv->last_phy_res[1];
- amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
- len = le16_to_cpu(amsdu->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
- ampdu_status = iwl_translate_rx_status(priv,
- le32_to_cpu(rx_pkt_status));
- }
-
- if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
- IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
- phy_res->cfg_phy_cnt);
- return;
- }
-
- if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
- !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
- IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
- le32_to_cpu(rx_pkt_status));
- return;
- }
-
- /* This will be used in several places later */
- rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
- /* rx_status carries information about the packet to mac80211 */
- rx_status.mactime = le64_to_cpu(phy_res->timestamp);
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
- rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- rx_status.rate_idx =
- iwl_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
- rx_status.flag = 0;
-
- /* TSF isn't reliable. In order to allow smooth user experience,
- * this W/A doesn't propagate it to the mac80211 */
- /*rx_status.flag |= RX_FLAG_TSFT;*/
-
- priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
-
- /* Find max signal strength (dBm) among 3 antenna/receiver chains */
- rx_status.signal = iwl_calc_rssi(priv, phy_res);
-
- /* Meaningful noise values are available only from beacon statistics,
- * which are gathered only when associated, and indicate noise
- * only for the associated network channel ...
- * Ignore these noise values while scanning (other channels) */
- if (iwl_is_associated(priv) &&
- !test_bit(STATUS_SCANNING, &priv->status)) {
- rx_status.noise = priv->last_rx_noise;
- } else {
- rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
- }
-
- /* Reset beacon noise level if not associated. */
- if (!iwl_is_associated(priv))
- priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- /* Set "1" to report good data frames in groups of 100 */
- if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX))
- iwl_dbg_report_frame(priv, phy_res, len, header, 1);
-#endif
- iwl_dbg_log_rx_data_frame(priv, len, header);
- IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
- rx_status.signal, rx_status.noise,
- (unsigned long long)rx_status.mactime);
-
- /*
- * "antenna number"
- *
- * It seems that the antenna field in the phy flags value
- * is actually a bit field. This is undefined by radiotap,
- * it wants an actual antenna number but I always get "7"
- * for most legacy frames I receive indicating that the
- * same frame was received on all three RX chains.
- *
- * I think this field should be removed in favor of a
- * new 802.11n radiotap field "RX chains" that is defined
- * as a bitmask.
- */
- rx_status.antenna =
- (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
- >> RX_RES_PHY_FLAGS_ANTENNA_POS;
-
- /* set the preamble flag if appropriate */
- if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
-
- /* Set up the HT phy flags */
- if (rate_n_flags & RATE_MCS_HT_MSK)
- rx_status.flag |= RX_FLAG_HT;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- rx_status.flag |= RX_FLAG_40MHZ;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status.flag |= RX_FLAG_SHORT_GI;
-
- if (iwl_is_network_packet(priv, header)) {
- priv->last_rx_rssi = rx_status.signal;
- priv->last_beacon_time = priv->ucode_beacon_time;
- priv->last_tsf = le64_to_cpu(phy_res->timestamp);
- }
-
- iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
- rxb, &rx_status);
-}
-EXPORT_SYMBOL(iwl_rx_reply_rx);
-
-/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
- * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- priv->last_phy_res[0] = 1;
- memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
- sizeof(struct iwl_rx_phy_res));
-}
-EXPORT_SYMBOL(iwl_rx_reply_rx_phy);
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 741e65e..107e173 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -69,9 +69,8 @@ int iwl_scan_cancel(struct iwl_priv *priv)
}
if (test_bit(STATUS_SCANNING, &priv->status)) {
- if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ if (!test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Queuing scan abort.\n");
- set_bit(STATUS_SCAN_ABORTING, &priv->status);
queue_work(priv->workqueue, &priv->abort_scan);
} else
@@ -201,9 +200,6 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
le32_to_cpu(notif->statistics[0]),
le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
-
- if (!priv->is_internal_short_scan)
- priv->next_scan_jiffies = 0;
}
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
@@ -223,49 +219,24 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
/* The HW is no longer scanning */
clear_bit(STATUS_SCAN_HW, &priv->status);
- IWL_DEBUG_INFO(priv, "Scan pass on %sGHz took %dms\n",
- (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
- "2.4" : "5.2",
+ IWL_DEBUG_INFO(priv, "Scan on %sGHz took %dms\n",
+ (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
jiffies_to_msecs(elapsed_jiffies
- (priv->scan_pass_start, jiffies)));
+ (priv->scan_start, jiffies)));
- /* Remove this scanned band from the list of pending
- * bands to scan, band G precedes A in order of scanning
- * as seen in iwl_bg_request_scan */
- if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
- priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
- else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ))
- priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);
-
- /* If a request to abort was given, or the scan did not succeed
+ /*
+ * If a request to abort was given, or the scan did not succeed
* then we reset the scan state machine and terminate,
- * re-queuing another scan if one has been requested */
- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ * re-queuing another scan if one has been requested
+ */
+ if (test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status))
IWL_DEBUG_INFO(priv, "Aborted scan completed.\n");
- clear_bit(STATUS_SCAN_ABORTING, &priv->status);
- } else {
- /* If there are more bands on this scan pass reschedule */
- if (priv->scan_bands)
- goto reschedule;
- }
-
- if (!priv->is_internal_short_scan)
- priv->next_scan_jiffies = 0;
IWL_DEBUG_INFO(priv, "Setting scan to off\n");
clear_bit(STATUS_SCANNING, &priv->status);
- IWL_DEBUG_INFO(priv, "Scan took %dms\n",
- jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
-
queue_work(priv->workqueue, &priv->scan_completed);
-
- return;
-
-reschedule:
- priv->scan_pass_start = jiffies;
- queue_work(priv->workqueue, &priv->request_scan);
}
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
@@ -294,7 +265,8 @@ inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
EXPORT_SYMBOL(iwl_get_active_dwell_time);
u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band)
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif)
{
u16 passive = (band == IEEE80211_BAND_2GHZ) ?
IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
@@ -304,7 +276,7 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
/* If we're associated, we clamp the maximum passive
* dwell time to be 98% of the beacon interval (minus
* 2 * channel tune time) */
- passive = priv->beacon_int;
+ passive = vif ? vif->bss_conf.beacon_int : 0;
if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
passive = IWL_PASSIVE_DWELL_BASE;
passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
@@ -314,150 +286,6 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_get_passive_dwell_time);
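With this change the clamp uses the interface's own beacon interval (vif->bss_conf.beacon_int) rather than a driver-global copy: when associated, the passive dwell is capped at IWL_PASSIVE_DWELL_BASE and then reduced to 98% of that minus two channel-tune times, so a scan pass never swallows a whole beacon interval. A worked example, assuming IWL_PASSIVE_DWELL_BASE == 100 and IWL_CHANNEL_TUNE_TIME == 5 (both defined elsewhere in the driver):

/* Sketch of the associated-case clamp above, constants inlined. */
u16 beacon_int = 100;				/* vif->bss_conf.beacon_int */
u16 passive = beacon_int;
if ((passive > 100 /* IWL_PASSIVE_DWELL_BASE */) || !passive)
	passive = 100;
passive = (passive * 98) / 100 - 2 * 5;		/* IWL_CHANNEL_TUNE_TIME == 5 */
/* -> 88 TU of passive dwell per channel */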
-static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct iwl_scan_channel *scan_ch)
-{
- const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int i, added = 0;
- u16 channel = 0;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband) {
- IWL_ERR(priv, "invalid band\n");
- return added;
- }
-
- active_dwell = iwl_get_active_dwell_time(priv, band, 0);
- passive_dwell = iwl_get_passive_dwell_time(priv, band);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- /* only scan single channel, good enough to reset the RF */
- /* pick the first valid not in-use channel */
- if (band == IEEE80211_BAND_5GHZ) {
- for (i = 14; i < priv->channel_count; i++) {
- if (priv->channel_info[i].channel !=
- le16_to_cpu(priv->staging_rxon.channel)) {
- channel = priv->channel_info[i].channel;
- ch_info = iwl_get_channel_info(priv,
- band, channel);
- if (is_channel_valid(ch_info))
- break;
- }
- }
- } else {
- for (i = 0; i < 14; i++) {
- if (priv->channel_info[i].channel !=
- le16_to_cpu(priv->staging_rxon.channel)) {
- channel =
- priv->channel_info[i].channel;
- ch_info = iwl_get_channel_info(priv,
- band, channel);
- if (is_channel_valid(ch_info))
- break;
- }
- }
- }
- if (channel) {
- scan_ch->channel = cpu_to_le16(channel);
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
- added++;
- } else
- IWL_ERR(priv, "no valid channel found\n");
- return added;
-}
-
-static int iwl_get_channels_for_scan(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 is_active, u8 n_probes,
- struct iwl_scan_channel *scan_ch)
-{
- struct ieee80211_channel *chan;
- const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added, i;
- u16 channel;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband)
- return 0;
-
- active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_get_passive_dwell_time(priv, band);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
-
- if (chan->band != band)
- continue;
-
- channel = ieee80211_frequency_to_channel(chan->center_freq);
- scan_ch->channel = cpu_to_le16(channel);
-
- ch_info = iwl_get_channel_info(priv, band, channel);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
- channel);
- continue;
- }
-
- if (!is_active || is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- else
- scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
-
- if (n_probes)
- scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
-
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
-
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
-
- IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
- channel, le32_to_cpu(scan_ch->type),
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- "ACTIVE" : "PASSIVE",
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- active_dwell : passive_dwell);
-
- scan_ch++;
- added++;
- }
-
- IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added);
- return added;
-}
-
void iwl_init_scan_params(struct iwl_priv *priv)
{
u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
@@ -468,7 +296,7 @@ void iwl_init_scan_params(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_init_scan_params);
-static int iwl_scan_initiate(struct iwl_priv *priv)
+static int iwl_scan_initiate(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
WARN_ON(!mutex_is_locked(&priv->mutex));
@@ -476,26 +304,28 @@ static int iwl_scan_initiate(struct iwl_priv *priv)
set_bit(STATUS_SCANNING, &priv->status);
priv->is_internal_short_scan = false;
priv->scan_start = jiffies;
- priv->scan_pass_start = priv->scan_start;
- queue_work(priv->workqueue, &priv->request_scan);
+ if (WARN_ON(!priv->cfg->ops->utils->request_scan))
+ return -EOPNOTSUPP;
+
+ priv->cfg->ops->utils->request_scan(priv, vif);
return 0;
}
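Scan command construction is no longer a shared work item: iwl_scan_initiate() now calls into a per-device request_scan hook under priv->cfg->ops->utils, so the 3945 and agn families can build their own scan commands. A minimal sketch of how such a hook might be wired up; the ops structure and handler names here are assumptions for illustration, only the request_scan member is taken from the code above:

/* Sketch only: device-family ops wiring for the request_scan hook. */
static void example_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
	/* build and send the device-specific REPLY_SCAN_CMD here */
}

static struct iwl_hcmd_utils_ops example_hcmd_utils = {
	.request_scan = example_request_scan,
	/* ... other utility callbacks ... */
};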
-#define IWL_DELAY_NEXT_SCAN (HZ*2)
-
int iwl_mac_hw_scan(struct ieee80211_hw *hw,
- struct cfg80211_scan_request *req)
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
{
- unsigned long flags;
struct iwl_priv *priv = hw->priv;
- int ret, i;
+ int ret;
IWL_DEBUG_MAC80211(priv, "enter\n");
+ if (req->n_channels == 0)
+ return -EINVAL;
+
mutex_lock(&priv->mutex);
- spin_lock_irqsave(&priv->lock, flags);
if (!iwl_is_ready_rf(priv)) {
ret = -EIO;
@@ -515,30 +345,15 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
goto out_unlock;
}
- /* We don't schedule scan within next_scan_jiffies period.
- * Avoid scanning during possible EAPOL exchange, return
- * success immediately.
- */
- if (priv->next_scan_jiffies &&
- time_after(priv->next_scan_jiffies, jiffies)) {
- IWL_DEBUG_SCAN(priv, "scan rejected: within next scan period\n");
- queue_work(priv->workqueue, &priv->scan_completed);
- ret = 0;
- goto out_unlock;
- }
-
- priv->scan_bands = 0;
- for (i = 0; i < req->n_channels; i++)
- priv->scan_bands |= BIT(req->channels[i]->band);
-
+ /* mac80211 will only ask for one band at a time */
+ priv->scan_band = req->channels[0]->band;
priv->scan_request = req;
- ret = iwl_scan_initiate(priv);
+ ret = iwl_scan_initiate(priv, vif);
IWL_DEBUG_MAC80211(priv, "leave\n");
out_unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
mutex_unlock(&priv->mutex);
return ret;
@@ -554,7 +369,7 @@ void iwl_internal_short_hw_scan(struct iwl_priv *priv)
queue_work(priv->workqueue, &priv->start_internal_scan);
}
-static void iwl_bg_start_internal_scan(struct work_struct *work)
+void iwl_bg_start_internal_scan(struct work_struct *work)
{
struct iwl_priv *priv =
container_of(work, struct iwl_priv, start_internal_scan);
@@ -576,22 +391,20 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
goto unlock;
}
- priv->scan_bands = 0;
- if (priv->band == IEEE80211_BAND_5GHZ)
- priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
- else
- priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
+ priv->scan_band = priv->band;
IWL_DEBUG_SCAN(priv, "Start internal short scan...\n");
set_bit(STATUS_SCANNING, &priv->status);
priv->is_internal_short_scan = true;
- queue_work(priv->workqueue, &priv->request_scan);
+
+ if (WARN_ON(!priv->cfg->ops->utils->request_scan))
+ goto unlock;
+
+ priv->cfg->ops->utils->request_scan(priv, NULL);
unlock:
mutex_unlock(&priv->mutex);
}
-EXPORT_SYMBOL(iwl_internal_short_hw_scan);
-
-#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
+EXPORT_SYMBOL(iwl_bg_start_internal_scan);
void iwl_bg_scan_check(struct work_struct *data)
{
@@ -654,289 +467,15 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
if (WARN_ON(left < ie_len))
return len;
- if (ies)
+ if (ies && ie_len) {
memcpy(pos, ies, ie_len);
- len += ie_len;
- left -= ie_len;
+ len += ie_len;
+ }
return (u16)len;
}
EXPORT_SYMBOL(iwl_fill_probe_req);
-static void iwl_bg_request_scan(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, request_scan);
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_CMD,
- .len = sizeof(struct iwl_scan_cmd),
- .flags = CMD_SIZE_HUGE,
- };
- struct iwl_scan_cmd *scan;
- struct ieee80211_conf *conf = NULL;
- int ret = 0;
- u32 rate_flags = 0;
- u16 cmd_len;
- u16 rx_chain = 0;
- enum ieee80211_band band;
- u8 n_probes = 0;
- u8 rx_ant = priv->hw_params.valid_rx_ant;
- u8 rate;
- bool is_active = false;
- int chan_mod;
- u8 active_chains;
-
- conf = ieee80211_get_hw_conf(priv->hw);
-
- mutex_lock(&priv->mutex);
-
- cancel_delayed_work(&priv->scan_check);
-
- if (!iwl_is_ready(priv)) {
- IWL_WARN(priv, "request scan called when driver not ready.\n");
- goto done;
- }
-
- /* Make sure the scan wasn't canceled before this queued work
- * was given the chance to run... */
- if (!test_bit(STATUS_SCANNING, &priv->status))
- goto done;
-
- /* This should never be called or scheduled if there is currently
- * a scan active in the hardware. */
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
- "Ignoring second request.\n");
- ret = -EIO;
- goto done;
- }
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
- goto done;
- }
-
- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
- goto done;
- }
-
- if (iwl_is_rfkill(priv)) {
- IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
- goto done;
- }
-
- if (!test_bit(STATUS_READY, &priv->status)) {
- IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n");
- goto done;
- }
-
- if (!priv->scan_bands) {
- IWL_DEBUG_HC(priv, "Aborting scan due to no requested bands\n");
- goto done;
- }
-
- if (!priv->scan) {
- priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
- if (!priv->scan) {
- ret = -ENOMEM;
- goto done;
- }
- }
- scan = priv->scan;
- memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
- scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
- scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
- if (iwl_is_associated(priv)) {
- u16 interval = 0;
- u32 extra;
- u32 suspend_time = 100;
- u32 scan_suspend_time = 100;
- unsigned long flags;
-
- IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- spin_lock_irqsave(&priv->lock, flags);
- interval = priv->beacon_int;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- scan->suspend_time = 0;
- scan->max_out_time = cpu_to_le32(200 * 1024);
- if (!interval)
- interval = suspend_time;
-
- extra = (suspend_time / interval) << 22;
- scan_suspend_time = (extra |
- ((suspend_time % interval) * 1024));
- scan->suspend_time = cpu_to_le32(scan_suspend_time);
- IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
- scan_suspend_time, interval);
- }
-
- if (priv->is_internal_short_scan) {
- IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
- } else if (priv->scan_request->n_ssids) {
- int i, p = 0;
- IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
- }
- is_active = true;
- } else
- IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
-
- scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
- scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
- scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
-
- if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
- band = IEEE80211_BAND_2GHZ;
- scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
- chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK)
- >> RXON_FLG_CHANNEL_MODE_POS;
- if (chan_mod == CHANNEL_MODE_PURE_40) {
- rate = IWL_RATE_6M_PLCP;
- } else {
- rate = IWL_RATE_1M_PLCP;
- rate_flags = RATE_MCS_CCK_MSK;
- }
- scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED;
- } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
- band = IEEE80211_BAND_5GHZ;
- rate = IWL_RATE_6M_PLCP;
- /*
- * If active scanning is requested but a certain channel is
- * marked passive, we can do active scanning if we detect
- * transmissions.
- *
- * There is an issue with some firmware versions that triggers
- * a sysassert on a "good CRC threshold" of zero (== disabled),
- * on a radar channel even though this means that we should NOT
- * send probes.
- *
- * The "good CRC threshold" is the number of frames that we
- * need to receive during our dwell time on a channel before
- * sending out probes -- setting this to a huge value will
- * mean we never reach it, but at the same time work around
- * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
- * here instead of IWL_GOOD_CRC_TH_DISABLED.
- */
- scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
- IWL_GOOD_CRC_TH_NEVER;
-
- /* Force use of chains B and C (0x6) for scan Rx for 4965
- * Avoid A (0x1) because of its off-channel reception on A-band.
- */
- if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
- rx_ant = ANT_BC;
- } else {
- IWL_WARN(priv, "Invalid scan band count\n");
- goto done;
- }
-
- priv->scan_tx_ant[band] =
- iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band]);
- rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
- scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
-
- /* In power save mode use one chain, otherwise use all chains */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- /* rx_ant has been set to all valid chains previously */
- active_chains = rx_ant &
- ((u8)(priv->chain_noise_data.active_chains));
- if (!active_chains)
- active_chains = rx_ant;
-
- IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
- priv->chain_noise_data.active_chains);
-
- rx_ant = first_antenna(active_chains);
- }
- /* MIMO is not used here, but value is required */
- rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
- rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
- scan->rx_chain = cpu_to_le16(rx_chain);
- if (!priv->is_internal_short_scan) {
- cmd_len = iwl_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
- } else {
- cmd_len = iwl_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- NULL, 0,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
-
- }
- scan->tx_cmd.len = cpu_to_le16(cmd_len);
- if (iwl_is_monitor_mode(priv))
- scan->filter_flags = RXON_FILTER_PROMISC_MSK;
-
- scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
- RXON_FILTER_BCON_AWARE_MSK);
-
- if (priv->is_internal_short_scan) {
- scan->channel_count =
- iwl_get_single_channel_for_scan(priv, band,
- (void *)&scan->data[le16_to_cpu(
- scan->tx_cmd.len)]);
- } else {
- scan->channel_count =
- iwl_get_channels_for_scan(priv, band,
- is_active, n_probes,
- (void *)&scan->data[le16_to_cpu(
- scan->tx_cmd.len)]);
- }
- if (scan->channel_count == 0) {
- IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
- goto done;
- }
-
- cmd.len += le16_to_cpu(scan->tx_cmd.len) +
- scan->channel_count * sizeof(struct iwl_scan_channel);
- cmd.data = scan;
- scan->len = cpu_to_le16(cmd.len);
-
- set_bit(STATUS_SCAN_HW, &priv->status);
- ret = iwl_send_cmd_sync(priv, &cmd);
- if (ret)
- goto done;
-
- queue_delayed_work(priv->workqueue, &priv->scan_check,
- IWL_SCAN_CHECK_WATCHDOG);
-
- mutex_unlock(&priv->mutex);
- return;
-
- done:
- /* Cannot perform scan. Make sure we clear scanning
- * bits from status so next scan request can be performed.
- * If we don't clear scanning status bit here all next scan
- * will fail
- */
- clear_bit(STATUS_SCAN_HW, &priv->status);
- clear_bit(STATUS_SCANNING, &priv->status);
- /* inform mac80211 scan aborted */
- queue_work(priv->workqueue, &priv->scan_completed);
- mutex_unlock(&priv->mutex);
-}
-
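One detail of the removed iwl_bg_request_scan() worth noting: when associated, the scan suspend time handed to the firmware is packed as a beacon-relative quantity, with the number of whole beacon intervals in the upper bits (<< 22) and the remainder scaled by 1024 in the lower bits. A worked sketch using the defaults from the removed code (suspend_time and beacon interval both 100):

/* suspend_time packing as done in the removed code above. */
u32 suspend_time = 100, interval = 100;
u32 extra = (suspend_time / interval) << 22;	/* whole beacon intervals */
u32 scan_suspend_time = extra | ((suspend_time % interval) * 1024);
/* -> (1 << 22) | 0 == 0x00400000 */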
void iwl_bg_abort_scan(struct work_struct *work)
{
struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
@@ -984,7 +523,6 @@ EXPORT_SYMBOL(iwl_bg_scan_completed);
void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
{
INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
- INIT_WORK(&priv->request_scan, iwl_bg_request_scan);
INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 4a6686f..85ed235 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -29,57 +29,12 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
+#include <linux/sched.h>
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
-#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
-#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
-
-u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
-{
- int i;
- int start = 0;
- int ret = IWL_INVALID_STATION;
- unsigned long flags;
-
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
- (priv->iw_mode == NL80211_IFTYPE_AP))
- start = IWL_STA_ID;
-
- if (is_broadcast_ether_addr(addr))
- return priv->hw_params.bcast_sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- for (i = start; i < priv->hw_params.max_stations; i++)
- if (priv->stations[i].used &&
- (!compare_ether_addr(priv->stations[i].sta.sta.addr,
- addr))) {
- ret = i;
- goto out;
- }
-
- IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
- addr, priv->num_stations);
-
- out:
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(iwl_find_station);
-
-int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
-{
- if (priv->iw_mode == NL80211_IFTYPE_STATION) {
- return IWL_AP_ID;
- } else {
- u8 *da = ieee80211_get_DA(hdr);
- return iwl_find_station(priv, da);
- }
-}
-EXPORT_SYMBOL(iwl_get_ra_sta_id);
-
/* priv->sta_lock must be held */
static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
{
@@ -132,7 +87,7 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv,
sta_id);
break;
case ADD_STA_MODIFY_NON_EXIST_STA:
- IWL_ERR(priv, "Attempting to modify non-existing station %d \n",
+ IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
sta_id);
break;
default:
@@ -158,13 +113,6 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv,
priv->stations[sta_id].sta.mode ==
STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
addsta->sta.addr);
-
- /*
- * Determine if we wanted to modify or add a station,
- * if adding a station succeeded we have some more initialization
- * to do when using station notification. TODO
- */
-
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
@@ -190,6 +138,10 @@ int iwl_send_add_sta(struct iwl_priv *priv,
.flags = flags,
.data = data,
};
+ u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+ IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
+ sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
if (flags & CMD_ASYNC)
cmd.callback = iwl_add_sta_callback;
@@ -263,18 +215,19 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
}
/**
- * iwl_add_station - Add station to tables in driver and device
+ * iwl_prep_station - Prepare station information for addition
+ *
+ * should be called with sta_lock held
*/
-u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
- struct ieee80211_sta_ht_cap *ht_info)
+static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr,
+ bool is_ap,
+ struct ieee80211_sta_ht_cap *ht_info)
{
struct iwl_station_entry *station;
- unsigned long flags_spin;
int i;
- int sta_id = IWL_INVALID_STATION;
+ u8 sta_id = IWL_INVALID_STATION;
u16 rate;
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
if (is_ap)
sta_id = IWL_AP_ID;
else if (is_broadcast_ether_addr(addr))
@@ -292,20 +245,32 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
sta_id = i;
}
- /* These two conditions have the same outcome, but keep them separate
- since they have different meanings */
- if (unlikely(sta_id == IWL_INVALID_STATION)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ /*
+ * These two conditions have the same outcome, but keep them
+ * separate
+ */
+ if (unlikely(sta_id == IWL_INVALID_STATION))
+ return sta_id;
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+ IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n",
+ sta_id);
return sta_id;
}
- if (priv->stations[sta_id].used &&
+ if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+ (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
!compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
return sta_id;
}
-
station = &priv->stations[sta_id];
station->used = IWL_STA_DRIVER_ACTIVE;
IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
@@ -319,10 +284,12 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
station->sta.sta.sta_id = sta_id;
station->sta.station_flags = 0;
- /* BCAST station and IBSS stations do not work in HT mode */
- if (sta_id != priv->hw_params.bcast_sta_id &&
- priv->iw_mode != NL80211_IFTYPE_ADHOC)
- iwl_set_ht_add_station(priv, sta_id, ht_info);
+ /*
+ * OK to call unconditionally, since local stations (IBSS BSSID
+ * STA and broadcast STA) pass in a NULL ht_info, and mac80211
+ * doesn't allow HT IBSS.
+ */
+ iwl_set_ht_add_station(priv, sta_id, ht_info);
/* 3945 only */
rate = (priv->band == IEEE80211_BAND_5GHZ) ?
@@ -330,86 +297,221 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
/* Turn on both antennas for the station... */
station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
+ return sta_id;
+
+}
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * iwl_add_station_common -
+ */
+int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
+ bool is_ap,
+ struct ieee80211_sta_ht_cap *ht_info,
+ u8 *sta_id_r)
+{
+ struct iwl_station_entry *station;
+ unsigned long flags_spin;
+ int ret = 0;
+ u8 sta_id;
+
+ *sta_id_r = 0;
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ sta_id = iwl_prep_station(priv, addr, is_ap, ht_info);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
+ addr);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+ IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n",
+ sta_id);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+ (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
+ station = &priv->stations[sta_id];
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
/* Add station to device's station table */
- iwl_send_add_sta(priv, &station->sta, flags);
- return sta_id;
+ ret = iwl_send_add_sta(priv, &station->sta, CMD_SYNC);
+ if (ret) {
+ IWL_ERR(priv, "Adding station %pM failed.\n", station->sta.sta.addr);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ }
+ *sta_id_r = sta_id;
+ return ret;
+}
+EXPORT_SYMBOL(iwl_add_station_common);
+
+static struct iwl_link_quality_cmd *iwl_sta_alloc_lq(struct iwl_priv *priv,
+ u8 sta_id)
+{
+ int i, r;
+ struct iwl_link_quality_cmd *link_cmd;
+ u32 rate_flags;
+
+ link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
+ if (!link_cmd) {
+ IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
+ return NULL;
+ }
+ /* Set up the rate scaling to start at selected rate, fall back
+ * all the way down to 1M in IEEE order, and then spin on 1M */
+ if (priv->band == IEEE80211_BAND_5GHZ)
+ r = IWL_RATE_6M_INDEX;
+ else
+ r = IWL_RATE_1M_INDEX;
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ rate_flags = 0;
+ if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
+ RATE_MCS_ANT_POS;
+
+ link_cmd->rs_table[i].rate_n_flags =
+ iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
+ r = iwl_get_prev_ieee_rate(r);
+ }
+
+ link_cmd->general_params.single_stream_ant_msk =
+ first_antenna(priv->hw_params.valid_tx_ant);
+
+ link_cmd->general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant &
+ ~first_antenna(priv->hw_params.valid_tx_ant);
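+ /*
+ * With a single valid TX antenna the mask above is empty, so fall back
+ * to ANT_AB; with exactly two valid antennas, use them both.
+ */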
+ if (!link_cmd->general_params.dual_stream_ant_msk) {
+ link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ link_cmd->general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant;
+ }
+
+ link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+ link_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+ link_cmd->sta_id = sta_id;
+
+ return link_cmd;
}
-EXPORT_SYMBOL(iwl_add_station);
-static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const u8 *addr)
+/*
+ * iwl_add_bssid_station - Add the special IBSS BSSID station
+ *
+ * Function sleeps.
+ */
+int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
+ u8 *sta_id_r)
{
+ int ret;
+ u8 sta_id;
+ struct iwl_link_quality_cmd *link_cmd;
unsigned long flags;
- u8 sta_id = iwl_find_station(priv, addr);
- BUG_ON(sta_id == IWL_INVALID_STATION);
+ if (sta_id_r)
+ *sta_id_r = IWL_INVALID_STATION;
- IWL_DEBUG_ASSOC(priv, "Removed STA from Ucode: %pM\n", addr);
+ ret = iwl_add_station_common(priv, addr, 0, NULL, &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM\n", addr);
+ return ret;
+ }
+
+ if (sta_id_r)
+ *sta_id_r = sta_id;
spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].used |= IWL_STA_LOCAL;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
- /* Ucode must be active and driver must be non active */
- if (priv->stations[sta_id].used != IWL_STA_UCODE_ACTIVE)
- IWL_ERR(priv, "removed non active STA %d\n", sta_id);
+ if (init_rs) {
+ /* Set up default rate scaling table in device's station table */
+ link_cmd = iwl_sta_alloc_lq(priv, sta_id);
+ if (!link_cmd) {
+ IWL_ERR(priv, "Unable to initialize rate scaling for station %pM.\n",
+ addr);
+ return -ENOMEM;
+ }
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
+ ret = iwl_send_lq_cmd(priv, link_cmd, CMD_SYNC, true);
+ if (ret)
+ IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
- memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ }
+
+ return 0;
}
+EXPORT_SYMBOL(iwl_add_bssid_station);
-static void iwl_remove_sta_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
+/**
+ * iwl_sta_ucode_deactivate - deactivate ucode status for a station
+ *
+ * priv->sta_lock must be held
+ */
+static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
{
- struct iwl_rem_sta_cmd *rm_sta =
- (struct iwl_rem_sta_cmd *)cmd->cmd.payload;
- const u8 *addr = rm_sta->addr;
+ /* Ucode must be active and driver must be non active */
+ if ((priv->stations[sta_id].used &
+ (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) != IWL_STA_UCODE_ACTIVE)
+ IWL_ERR(priv, "removed non active STA %u\n", sta_id);
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
- pkt->hdr.flags);
- return;
- }
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
- switch (pkt->u.rem_sta.status) {
- case REM_STA_SUCCESS_MSK:
- iwl_sta_ucode_deactivate(priv, addr);
- break;
- default:
- IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
- break;
- }
+ memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
+ IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
}
-static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
- u8 flags)
+static int iwl_send_remove_station(struct iwl_priv *priv,
+ struct iwl_station_entry *station)
{
struct iwl_rx_packet *pkt;
int ret;
+ unsigned long flags_spin;
struct iwl_rem_sta_cmd rm_sta_cmd;
struct iwl_host_cmd cmd = {
.id = REPLY_REMOVE_STA,
.len = sizeof(struct iwl_rem_sta_cmd),
- .flags = flags,
+ .flags = CMD_SYNC,
.data = &rm_sta_cmd,
};
memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
rm_sta_cmd.num_sta = 1;
- memcpy(&rm_sta_cmd.addr, addr , ETH_ALEN);
+ memcpy(&rm_sta_cmd.addr, &station->sta.sta.addr , ETH_ALEN);
+
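+ /* Always request the response so the removal status can be checked below */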
+ cmd.flags |= CMD_WANT_SKB;
- if (flags & CMD_ASYNC)
- cmd.callback = iwl_remove_sta_callback;
- else
- cmd.flags |= CMD_WANT_SKB;
ret = iwl_send_cmd(priv, &cmd);
- if (ret || (flags & CMD_ASYNC))
+ if (ret)
return ret;
pkt = (struct iwl_rx_packet *)cmd.reply_page;
@@ -422,7 +524,9 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
if (!ret) {
switch (pkt->u.rem_sta.status) {
case REM_STA_SUCCESS_MSK:
- iwl_sta_ucode_deactivate(priv, addr);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ iwl_sta_ucode_deactivate(priv, station->sta.sta.sta_id);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
break;
default:
@@ -439,45 +543,48 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
/**
* iwl_remove_station - Remove driver's knowledge of station.
*/
-int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
+int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr)
{
- int sta_id = IWL_INVALID_STATION;
- int i, ret = -EINVAL;
+ struct iwl_station_entry *station;
unsigned long flags;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ if (!iwl_is_ready(priv)) {
+ IWL_DEBUG_INFO(priv,
+ "Unable to remove station %pM, device not ready.\n",
+ addr);
+ /*
+ * It is typical for stations to be removed when we are
+ * going down. Return success since device will be down
+ * soon anyway
+ */
+ return 0;
+ }
- if (is_ap)
- sta_id = IWL_AP_ID;
- else if (is_broadcast_ether_addr(addr))
- sta_id = priv->hw_params.bcast_sta_id;
- else
- for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
- if (priv->stations[i].used &&
- !compare_ether_addr(priv->stations[i].sta.sta.addr,
- addr)) {
- sta_id = i;
- break;
- }
+ IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
+ sta_id, addr);
- if (unlikely(sta_id == IWL_INVALID_STATION))
- goto out;
+ if (WARN_ON(sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
- IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
- sta_id, addr);
+ spin_lock_irqsave(&priv->sta_lock, flags);
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- IWL_ERR(priv, "Removing %pM but non DRIVER active\n",
+ IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
addr);
- goto out;
+ goto out_err;
}
if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_ERR(priv, "Removing %pM but non UCODE active\n",
+ IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
addr);
- goto out;
+ goto out_err;
}
+ if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
+ kfree(priv->stations[sta_id].lq);
+ priv->stations[sta_id].lq = NULL;
+ }
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
@@ -485,47 +592,112 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
BUG_ON(priv->num_stations < 0);
+ station = &priv->stations[sta_id];
spin_unlock_irqrestore(&priv->sta_lock, flags);
- ret = iwl_send_remove_station(priv, addr, CMD_ASYNC);
- return ret;
-out:
+ return iwl_send_remove_station(priv, station);
+out_err:
spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
+ return -EINVAL;
}
+EXPORT_SYMBOL_GPL(iwl_remove_station);
/**
- * iwl_clear_stations_table - Clear the driver's station table
+ * iwl_clear_ucode_stations - clear ucode station table bits
*
- * NOTE: This does not clear or otherwise alter the device's station table.
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
*/
-void iwl_clear_stations_table(struct iwl_priv *priv)
+void iwl_clear_ucode_stations(struct iwl_priv *priv)
{
- unsigned long flags;
int i;
+ unsigned long flags_spin;
+ bool cleared = false;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
- if (iwl_is_alive(priv) &&
- !test_bit(STATUS_EXIT_PENDING, &priv->status) &&
- iwl_send_cmd_pdu_async(priv, REPLY_REMOVE_ALL_STA, 0, NULL, NULL))
- IWL_ERR(priv, "Couldn't clear the station table\n");
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_INFO(priv, "Clearing ucode active for station %d\n", i);
+ priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ cleared = true;
+ }
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
+ if (!cleared)
+ IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
+}
+EXPORT_SYMBOL(iwl_clear_ucode_stations);
+
+/**
+ * iwl_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in the
+ * ucode, are restored.
+ *
+ * Function sleeps.
+ */
+void iwl_restore_stations(struct iwl_priv *priv)
+{
+ struct iwl_station_entry *station;
+ unsigned long flags_spin;
+ int i;
+ bool found = false;
+ int ret;
- priv->num_stations = 0;
- memset(priv->stations, 0, sizeof(priv->stations));
+ if (!iwl_is_ready(priv)) {
+ IWL_DEBUG_INFO(priv, "Not ready yet, not restoring any stations.\n");
+ return;
+ }
- /* clean ucode key table bit map */
- priv->ucode_key_table = 0;
+ IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
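+ /* First pass: flag driver-active stations that the ucode no longer knows about */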
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
+ !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
+ priv->stations[i].sta.sta.addr);
+ priv->stations[i].sta.mode = 0;
+ priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
+ found = true;
+ }
+ }
- /* keep track of static keys */
- for (i = 0; i < WEP_KEYS_MAX ; i++) {
- if (priv->wep_keys[i].key_size)
- set_bit(i, &priv->ucode_key_table);
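+ /*
+ * Second pass: send ADD_STA for each station flagged above. The
+ * spinlock is dropped around the synchronous command since it sleeps.
+ */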
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ station = &priv->stations[i];
+ ret = iwl_send_add_sta(priv, &priv->stations[i].sta, CMD_SYNC);
+ if (ret) {
+ IWL_ERR(priv, "Adding station %pM failed.\n",
+ station->sta.sta.addr);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ priv->stations[i].used &= ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ }
+ /*
+ * Rate scaling has already been initialized, send
+ * current LQ command
+ */
+ if (station->lq)
+ iwl_send_lq_cmd(priv, station->lq, CMD_SYNC, true);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
+ }
}
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ if (!found)
+ IWL_DEBUG_INFO(priv, "Restoring all known stations .... no stations to be restored.\n");
+ else
+ IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n");
}
-EXPORT_SYMBOL(iwl_clear_stations_table);
+EXPORT_SYMBOL(iwl_restore_stations);
int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
{
@@ -539,7 +711,7 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
-int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
+static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
{
int i, not_empty = 0;
u8 buff[sizeof(struct iwl_wep_cmd) +
@@ -549,9 +721,11 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
struct iwl_host_cmd cmd = {
.id = REPLY_WEPKEY,
.data = wep_cmd,
- .flags = CMD_ASYNC,
+ .flags = CMD_SYNC,
};
+ might_sleep();
+
memset(wep_cmd, 0, cmd_size +
(sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
@@ -581,33 +755,34 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
else
return 0;
}
-EXPORT_SYMBOL(iwl_send_static_wepkey_cmd);
+
+int iwl_restore_default_wep_keys(struct iwl_priv *priv)
+{
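+ /* Sending the WEP key command sleeps, so callers must hold priv->mutex */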
+ WARN_ON(!mutex_is_locked(&priv->mutex));
+
+ return iwl_send_static_wepkey_cmd(priv, 0);
+}
+EXPORT_SYMBOL(iwl_restore_default_wep_keys);
int iwl_remove_default_wep_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf)
{
int ret;
- unsigned long flags;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ WARN_ON(!mutex_is_locked(&priv->mutex));
+
IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
keyconf->keyidx);
- if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table))
- IWL_ERR(priv, "index %d not used in uCode key table.\n",
- keyconf->keyidx);
-
- priv->default_wep_key--;
memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
if (iwl_is_rfkill(priv)) {
IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ /* but keys in device are clear anyway so return success */
return 0;
}
ret = iwl_send_static_wepkey_cmd(priv, 1);
IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
keyconf->keyidx, ret);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
return ret;
}
@@ -617,7 +792,8 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf)
{
int ret;
- unsigned long flags;
+
+ WARN_ON(!mutex_is_locked(&priv->mutex));
if (keyconf->keylen != WEP_KEY_LEN_128 &&
keyconf->keylen != WEP_KEY_LEN_64) {
@@ -629,13 +805,6 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
keyconf->hw_key_idx = HW_KEY_DEFAULT;
priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP;
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->default_wep_key++;
-
- if (test_and_set_bit(keyconf->keyidx, &priv->ucode_key_table))
- IWL_ERR(priv, "index %d already used in uCode key table.\n",
- keyconf->keyidx);
-
priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
memcpy(&priv->wep_keys[keyconf->keyidx].key, &keyconf->key,
keyconf->keylen);
@@ -643,7 +812,6 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
ret = iwl_send_static_wepkey_cmd(priv, 0);
IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
keyconf->keylen, keyconf->keyidx, ret);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
return ret;
}
@@ -798,18 +966,23 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
void iwl_update_tkip_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf,
- const u8 *addr, u32 iv32, u16 *phase1key)
+ struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
- u8 sta_id = IWL_INVALID_STATION;
+ u8 sta_id;
unsigned long flags;
int i;
- sta_id = iwl_find_station(priv, addr);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
- addr);
- return;
- }
+ if (sta) {
+ sta_id = iwl_sta_id(sta);
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_MAC80211(priv, "leave - %pM not initialised.\n",
+ sta->addr);
+ return;
+ }
+ } else
+ sta_id = priv->hw_params.bcast_sta_id;
+
if (iwl_scan_cancel(priv)) {
/* cancel scan failed, just live w/ bad key and rely
@@ -885,7 +1058,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
if (iwl_is_rfkill(priv)) {
- IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n");
+ IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
@@ -948,253 +1121,149 @@ static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
}
#endif
-int iwl_send_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq, u8 flags)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_TX_LINK_QUALITY_CMD,
- .len = sizeof(struct iwl_link_quality_cmd),
- .flags = flags,
- .data = lq,
- };
-
- if ((lq->sta_id == 0xFF) &&
- (priv->iw_mode == NL80211_IFTYPE_ADHOC))
- return -EINVAL;
-
- if (lq->sta_id == 0xFF)
- lq->sta_id = IWL_AP_ID;
-
- iwl_dump_lq_cmd(priv, lq);
-
- if (iwl_is_associated(priv) && priv->assoc_station_added)
- return iwl_send_cmd(priv, &cmd);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_send_lq_cmd);
-
/**
- * iwl_sta_init_lq - Initialize a station's hardware rate table
- *
- * The uCode's station table contains a table of fallback rates
- * for automatic fallback during transmission.
- *
- * NOTE: This sets up a default set of values. These will be replaced later
- * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
- * rc80211_simple.
+ * is_lq_table_valid() - Test one aspect of LQ cmd for validity
*
- * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
- * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
- * which requires station table entry to exist).
+ * It sometimes happens that, when an HT rate has been in use and we
+ * lose connectivity with the AP, mac80211 will first tell us that the
+ * current channel is not HT anymore before removing the station. In such a
+ * scenario the RXON flags will be updated to indicate we are not
+ * communicating HT anymore, but the LQ command may still contain HT rates.
+ * Test for this to prevent driver from sending LQ command between the time
+ * RXON flags are updated and when LQ command is updated.
*/
-static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, bool is_ap)
+static bool is_lq_table_valid(struct iwl_priv *priv,
+ struct iwl_link_quality_cmd *lq)
{
- int i, r;
- struct iwl_link_quality_cmd link_cmd = {
- .reserved1 = 0,
- };
- u32 rate_flags;
+ int i;
+ struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- /* Set up the rate scaling to start at selected rate, fall back
- * all the way down to 1M in IEEE order, and then spin on 1M */
- if (is_ap)
- r = IWL_RATE_54M_INDEX;
- else if (priv->band == IEEE80211_BAND_5GHZ)
- r = IWL_RATE_6M_INDEX;
- else
- r = IWL_RATE_1M_INDEX;
+ if (ht_conf->is_ht)
+ return true;
+ IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
+ priv->active_rxon.channel);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- rate_flags = 0;
- if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
- rate_flags |= RATE_MCS_CCK_MSK;
-
- rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
- RATE_MCS_ANT_POS;
-
- link_cmd.rs_table[i].rate_n_flags =
- iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
- r = iwl_get_prev_ieee_rate(r);
+ if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
+ IWL_DEBUG_INFO(priv,
+ "index %d of LQ expects HT channel\n",
+ i);
+ return false;
+ }
}
-
- link_cmd.general_params.single_stream_ant_msk =
- first_antenna(priv->hw_params.valid_tx_ant);
- link_cmd.general_params.dual_stream_ant_msk = 3;
- link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
- link_cmd.agg_params.agg_time_limit =
- cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-
- /* Update the rate scaling for control frame Tx to AP */
- link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
-
- iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
- sizeof(link_cmd), &link_cmd, NULL);
+ return true;
}
/**
- * iwl_rxon_add_station - add station into station table.
+ * iwl_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ * after station has been added.
*
- * there is only one AP station with id= IWL_AP_ID
- * NOTE: mutex must be held before calling this function
+ * The link quality command is sent as the last step of station creation.
+ * When init is set, we are in that final step, so once the command
+ * completes we clear the flag indicating that station creation is
+ * still in progress.
*/
-int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
+int iwl_send_lq_cmd(struct iwl_priv *priv,
+ struct iwl_link_quality_cmd *lq, u8 flags, bool init)
{
- struct ieee80211_sta *sta;
- struct ieee80211_sta_ht_cap ht_config;
- struct ieee80211_sta_ht_cap *cur_ht_config = NULL;
- u8 sta_id;
+ int ret = 0;
+ unsigned long flags_spin;
- /*
- * Set HT capabilities. It is ok to set this struct even if not using
- * HT config: the priv->current_ht_config.is_ht flag will just be false
- */
- rcu_read_lock();
- sta = ieee80211_find_sta(priv->vif, addr);
- if (sta) {
- memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
- cur_ht_config = &ht_config;
- }
- rcu_read_unlock();
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_TX_LINK_QUALITY_CMD,
+ .len = sizeof(struct iwl_link_quality_cmd),
+ .flags = flags,
+ .data = lq,
+ };
- /* Add station to device's station table */
- sta_id = iwl_add_station(priv, addr, is_ap, CMD_SYNC, cur_ht_config);
+ if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
- /* Set up default rate scaling table in device's station table */
- iwl_sta_init_lq(priv, addr, is_ap);
+ iwl_dump_lq_cmd(priv, lq);
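+ /* An init LQ command must be synchronous so the in-progress flag can be cleared below */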
+ BUG_ON(init && (cmd.flags & CMD_ASYNC));
- return sta_id;
+ if (is_lq_table_valid(priv, lq))
+ ret = iwl_send_cmd(priv, &cmd);
+ else
+ ret = -EINVAL;
+
+ if (cmd.flags & CMD_ASYNC)
+ return ret;
+
+ if (init) {
+ IWL_DEBUG_INFO(priv, "init LQ command complete, clearing sta addition status for sta %d\n",
+ lq->sta_id);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ }
+ return ret;
}
-EXPORT_SYMBOL(iwl_rxon_add_station);
+EXPORT_SYMBOL(iwl_send_lq_cmd);
/**
- * iwl_sta_init_bcast_lq - Initialize a bcast station's hardware rate table
+ * iwl_alloc_bcast_station - add broadcast station into driver's station table.
*
- * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
- * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
- * which requires station table entry to exist).
+ * This adds the broadcast station into the driver's station table
+ * and marks it driver active, so that it will be restored to the
+ * device at the next best time.
*/
-static void iwl_sta_init_bcast_lq(struct iwl_priv *priv)
+int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq)
{
- int i, r;
- struct iwl_link_quality_cmd link_cmd = {
- .reserved1 = 0,
- };
- u32 rate_flags;
-
- /* Set up the rate scaling to start at selected rate, fall back
- * all the way down to 1M in IEEE order, and then spin on 1M */
- if (priv->band == IEEE80211_BAND_5GHZ)
- r = IWL_RATE_6M_INDEX;
- else
- r = IWL_RATE_1M_INDEX;
-
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- rate_flags = 0;
- if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
- rate_flags |= RATE_MCS_CCK_MSK;
+ struct iwl_link_quality_cmd *link_cmd;
+ unsigned long flags;
+ u8 sta_id;
- rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
- RATE_MCS_ANT_POS;
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ sta_id = iwl_prep_station(priv, iwl_bcast_addr, false, NULL);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Unable to prepare broadcast station\n");
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
- link_cmd.rs_table[i].rate_n_flags =
- iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
- r = iwl_get_prev_ieee_rate(r);
+ return -EINVAL;
}
- link_cmd.general_params.single_stream_ant_msk =
- first_antenna(priv->hw_params.valid_tx_ant);
- link_cmd.general_params.dual_stream_ant_msk = 3;
- link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
- link_cmd.agg_params.agg_time_limit =
- cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-
- /* Update the rate scaling for control frame Tx to AP */
- link_cmd.sta_id = priv->hw_params.bcast_sta_id;
-
- iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
- sizeof(link_cmd), &link_cmd, NULL);
-}
-
+ priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used |= IWL_STA_BCAST;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
-/**
- * iwl_add_bcast_station - add broadcast station into station table.
- */
-void iwl_add_bcast_station(struct iwl_priv *priv)
-{
- IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
- iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
+ if (init_lq) {
+ link_cmd = iwl_sta_alloc_lq(priv, sta_id);
+ if (!link_cmd) {
+ IWL_ERR(priv,
+ "Unable to initialize rate scaling for bcast station.\n");
+ return -ENOMEM;
+ }
- /* Set up default rate scaling table in device's station table */
- iwl_sta_init_bcast_lq(priv);
-}
-EXPORT_SYMBOL(iwl_add_bcast_station);
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ }
-/**
- * iwl3945_add_bcast_station - add broadcast station into station table.
- */
-void iwl3945_add_bcast_station(struct iwl_priv *priv)
-{
- IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
- iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
+ return 0;
}
-EXPORT_SYMBOL(iwl3945_add_bcast_station);
+EXPORT_SYMBOL_GPL(iwl_alloc_bcast_station);
-/**
- * iwl_get_sta_id - Find station's index within station table
- *
- * If new IBSS station, create new entry in station table
- */
-int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
+void iwl_dealloc_bcast_station(struct iwl_priv *priv)
{
- int sta_id;
- __le16 fc = hdr->frame_control;
-
- /* If this frame is broadcast or management, use broadcast station id */
- if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1))
- return priv->hw_params.bcast_sta_id;
-
- switch (priv->iw_mode) {
-
- /* If we are a client station in a BSS network, use the special
- * AP station entry (that's the only station we communicate with) */
- case NL80211_IFTYPE_STATION:
- return IWL_AP_ID;
-
- /* If we are an AP, then find the station, or use BCAST */
- case NL80211_IFTYPE_AP:
- sta_id = iwl_find_station(priv, hdr->addr1);
- if (sta_id != IWL_INVALID_STATION)
- return sta_id;
- return priv->hw_params.bcast_sta_id;
-
- /* If this frame is going out to an IBSS network, find the station,
- * or create a new station table entry */
- case NL80211_IFTYPE_ADHOC:
- sta_id = iwl_find_station(priv, hdr->addr1);
- if (sta_id != IWL_INVALID_STATION)
- return sta_id;
-
- /* Create new station table entry */
- sta_id = iwl_add_station(priv, hdr->addr1, false,
- CMD_ASYNC, NULL);
-
- if (sta_id != IWL_INVALID_STATION)
- return sta_id;
-
- IWL_DEBUG_DROP(priv, "Station %pM not in station map. "
- "Defaulting to broadcast...\n",
- hdr->addr1);
- iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
- return priv->hw_params.bcast_sta_id;
+ unsigned long flags;
+ int i;
- default:
- IWL_WARN(priv, "Unknown mode of operation: %d\n",
- priv->iw_mode);
- return priv->hw_params.bcast_sta_id;
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if (!(priv->stations[i].used & IWL_STA_BCAST))
+ continue;
+
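+ /* Clear ucode-active state for the bcast entry and free its LQ command */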
+ priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ priv->num_stations--;
+ BUG_ON(priv->num_stations < 0);
+ kfree(priv->stations[i].lq);
+ priv->stations[i].lq = NULL;
}
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
}
-EXPORT_SYMBOL(iwl_get_sta_id);
+EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_station);
/**
* iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
@@ -1214,13 +1283,13 @@ void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
}
EXPORT_SYMBOL(iwl_sta_tx_modify_enable_tid);
-int iwl_sta_rx_agg_start(struct iwl_priv *priv,
- const u8 *addr, int tid, u16 ssn)
+int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid, u16 ssn)
{
unsigned long flags;
int sta_id;
- sta_id = iwl_find_station(priv, addr);
+ sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION)
return -ENXIO;
@@ -1233,16 +1302,17 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv,
spin_unlock_irqrestore(&priv->sta_lock, flags);
return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
- CMD_ASYNC);
+ CMD_ASYNC);
}
EXPORT_SYMBOL(iwl_sta_rx_agg_start);
-int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid)
+int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid)
{
unsigned long flags;
int sta_id;
- sta_id = iwl_find_station(priv, addr);
+ sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
return -ENXIO;
@@ -1291,3 +1361,22 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
}
+EXPORT_SYMBOL(iwl_sta_modify_sleep_tx_count);
+
+int iwl_mac_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
+ int ret;
+
+ IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
+ sta->addr);
+ ret = iwl_remove_station(priv, sta_common->sta_id, sta->addr);
+ if (ret)
+ IWL_ERR(priv, "Error removing station %pM\n",
+ sta->addr);
+ return ret;
+}
+EXPORT_SYMBOL(iwl_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 2dc35fe..c2a453a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -29,44 +29,82 @@
#ifndef __iwl_sta_h__
#define __iwl_sta_h__
+#include "iwl-dev.h"
+
#define HW_KEY_DYNAMIC 0
#define HW_KEY_DEFAULT 1
-/**
- * iwl_find_station - Find station id for a given BSSID
- * @bssid: MAC address of station ID to find
- */
-u8 iwl_find_station(struct iwl_priv *priv, const u8 *bssid);
+#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
+#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
+#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
+ being activated */
+#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
+ (this is for the IBSS BSSID stations) */
+#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
+
-int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty);
int iwl_remove_default_wep_key(struct iwl_priv *priv,
struct ieee80211_key_conf *key);
int iwl_set_default_wep_key(struct iwl_priv *priv,
struct ieee80211_key_conf *key);
+int iwl_restore_default_wep_keys(struct iwl_priv *priv);
int iwl_set_dynamic_key(struct iwl_priv *priv,
struct ieee80211_key_conf *key, u8 sta_id);
int iwl_remove_dynamic_key(struct iwl_priv *priv,
struct ieee80211_key_conf *key, u8 sta_id);
void iwl_update_tkip_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf,
- const u8 *addr, u32 iv32, u16 *phase1key);
+ struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
-int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
-void iwl_add_bcast_station(struct iwl_priv *priv);
-void iwl3945_add_bcast_station(struct iwl_priv *priv);
-int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
-void iwl_clear_stations_table(struct iwl_priv *priv);
+void iwl_restore_stations(struct iwl_priv *priv);
+void iwl_clear_ucode_stations(struct iwl_priv *priv);
+int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq);
+void iwl_dealloc_bcast_station(struct iwl_priv *priv);
int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
-int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
-int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
int iwl_send_add_sta(struct iwl_priv *priv,
struct iwl_addsta_cmd *sta, u8 flags);
-u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
- struct ieee80211_sta_ht_cap *ht_info);
+int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
+ u8 *sta_id_r);
+int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
+ bool is_ap,
+ struct ieee80211_sta_ht_cap *ht_info,
+ u8 *sta_id_r);
+int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr);
+int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
-int iwl_sta_rx_agg_start(struct iwl_priv *priv,
- const u8 *addr, int tid, u16 ssn);
-int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid);
+int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid, u16 ssn);
+int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid);
void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id);
void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
+
+/**
+ * iwl_clear_driver_stations - clear knowledge of all stations from driver
+ * @priv: iwl priv struct
+ *
+ * This is called during iwl_down() to make sure that, in case
+ * we're coming here from a hardware restart, mac80211 will be
+ * able to reconfigure stations -- if we're getting here in the
+ * normal down flow then the stations will already be cleared.
+ */
+static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ memset(priv->stations, 0, sizeof(priv->stations));
+ priv->num_stations = 0;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+static inline int iwl_sta_id(struct ieee80211_sta *sta)
+{
+ if (WARN_ON(!sta))
+ return IWL_INVALID_STATION;
+
+ return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
+}
#endif /* __iwl_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 8dd0c03..1ece2ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -38,47 +38,6 @@
#include "iwl-io.h"
#include "iwl-helpers.h"
-static const u16 default_tid_to_tx_fifo[] = {
- IWL_TX_FIFO_AC1,
- IWL_TX_FIFO_AC0,
- IWL_TX_FIFO_AC0,
- IWL_TX_FIFO_AC1,
- IWL_TX_FIFO_AC2,
- IWL_TX_FIFO_AC2,
- IWL_TX_FIFO_AC3,
- IWL_TX_FIFO_AC3,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_AC3
-};
-
-static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr, size_t size)
-{
- ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
- GFP_KERNEL);
- if (!ptr->addr)
- return -ENOMEM;
- ptr->size = size;
- return 0;
-}
-
-static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr)
-{
- if (unlikely(!ptr->addr))
- return;
-
- dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
- memset(ptr, 0, sizeof(*ptr));
-}
-
/**
* iwl_txq_update_write_ptr - Send new write index to hardware
*/
@@ -310,6 +269,8 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
q->high_mark = 2;
q->write_ptr = q->read_ptr = 0;
+ q->last_read_ptr = 0;
+ q->repeat_same_read_ptr = 0;
return 0;
}
@@ -454,611 +415,6 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
}
EXPORT_SYMBOL(iwl_tx_queue_reset);
-/**
- * iwl_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* Tx queues */
- if (priv->txq) {
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (txq_id == IWL_CMD_QUEUE_NUM)
- iwl_cmd_queue_free(priv);
- else
- iwl_tx_queue_free(priv, txq_id);
- }
- iwl_free_dma_ptr(priv, &priv->kw);
-
- iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
-
- /* free tx queue structure */
- iwl_free_txq_mem(priv);
-}
-EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
-
-/**
- * iwl_txq_ctx_alloc - allocate TX queue context
- * Allocate all Tx DMA structures and initialize them
- *
- * @param priv
- * @return error code
- */
-int iwl_txq_ctx_alloc(struct iwl_priv *priv)
-{
- int ret;
- int txq_id, slots_num;
- unsigned long flags;
-
- /* Free all tx/cmd queues and keep-warm buffer */
- iwl_hw_txq_ctx_free(priv);
-
- ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
- priv->hw_params.scd_bc_tbls_size);
- if (ret) {
- IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
- goto error_bc_tbls;
- }
- /* Alloc keep-warm buffer */
- ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
- if (ret) {
- IWL_ERR(priv, "Keep Warm allocation failed\n");
- goto error_kw;
- }
-
- /* allocate tx queue structure */
- ret = iwl_alloc_txq_mem(priv);
- if (ret)
- goto error;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- priv->cfg->ops->lib->txq_set_sched(priv, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
- txq_id);
- if (ret) {
- IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- }
-
- return ret;
-
- error:
- iwl_hw_txq_ctx_free(priv);
- iwl_free_dma_ptr(priv, &priv->kw);
- error_kw:
- iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
- error_bc_tbls:
- return ret;
-}
-
-void iwl_txq_ctx_reset(struct iwl_priv *priv)
-{
- int txq_id, slots_num;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- priv->cfg->ops->lib->txq_set_sched(priv, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
- }
-}
-
-/**
- * iwl_txq_ctx_stop - Stop all Tx DMA channels
- */
-void iwl_txq_ctx_stop(struct iwl_priv *priv)
-{
- int ch;
- unsigned long flags;
-
- /* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&priv->lock, flags);
-
- priv->cfg->ops->lib->txq_set_sched(priv, 0);
-
- /* Stop each Tx DMA channel, and wait for it to be idle */
- for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
- iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
- 1000);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-EXPORT_SYMBOL(iwl_txq_ctx_stop);
-
-/*
- * handle build REPLY_TX command notification.
- */
-static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- u8 std_id)
-{
- __le16 fc = hdr->frame_control;
- __le32 tx_flags = tx_cmd->tx_flags;
-
- tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- tx_flags |= TX_CMD_FLG_ACK_MSK;
- if (ieee80211_is_mgmt(fc))
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- if (ieee80211_is_probe_resp(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
- tx_flags |= TX_CMD_FLG_TSF_MSK;
- } else {
- tx_flags &= (~TX_CMD_FLG_ACK_MSK);
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- if (ieee80211_is_back_req(fc))
- tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
-
-
- tx_cmd->sta_id = std_id;
- if (ieee80211_has_morefrags(fc))
- tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tx_cmd->tid_tspec = qc[0] & 0xf;
- tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
- } else {
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
-
- if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
- tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-
- tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
- if (ieee80211_is_mgmt(fc)) {
- if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
- else
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
- } else {
- tx_cmd->timeout.pm_frame_timeout = 0;
- }
-
- tx_cmd->driver_txop = 0;
- tx_cmd->tx_flags = tx_flags;
- tx_cmd->next_frame_len = 0;
-}
-
-#define RTS_HCCA_RETRY_LIMIT 3
-#define RTS_DFAULT_RETRY_LIMIT 60
-
-static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- __le16 fc, int is_hcca)
-{
- u32 rate_flags;
- int rate_idx;
- u8 rts_retry_limit;
- u8 data_retry_limit;
- u8 rate_plcp;
-
- /* Set retry limit on DATA packets and Probe Responses*/
- if (ieee80211_is_probe_resp(fc))
- data_retry_limit = 3;
- else
- data_retry_limit = IWL_DEFAULT_TX_RETRY;
- tx_cmd->data_retry_limit = data_retry_limit;
-
- /* Set retry limit on RTS packets */
- rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT :
- RTS_DFAULT_RETRY_LIMIT;
- if (data_retry_limit < rts_retry_limit)
- rts_retry_limit = data_retry_limit;
- tx_cmd->rts_retry_limit = rts_retry_limit;
-
- /* DATA packets will use the uCode station table for rate/antenna
- * selection */
- if (ieee80211_is_data(fc)) {
- tx_cmd->initial_rate_index = 0;
- tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
- return;
- }
-
- /**
- * If the current TX rate stored in mac80211 has the MCS bit set, it's
- * not really a TX rate. Thus, we use the lowest supported rate for
- * this band. Also use the lowest supported rate if the stored rate
- * index is invalid.
- */
- rate_idx = info->control.rates[0].idx;
- if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
- (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
- rate_idx = rate_lowest_index(&priv->bands[info->band],
- info->control.sta);
- /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == IEEE80211_BAND_5GHZ)
- rate_idx += IWL_FIRST_OFDM_RATE;
- /* Get PLCP rate for tx_cmd->rate_n_flags */
- rate_plcp = iwl_rates[rate_idx].plcp;
- /* Zero out flags for this packet */
- rate_flags = 0;
-
- /* Set CCK flag as needed */
- if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
- rate_flags |= RATE_MCS_CCK_MSK;
-
- /* Set up RTS and CTS flags for certain packets */
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
- tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
- }
- break;
- default:
- break;
- }
-
- /* Set up antennas */
- priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
- rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
-
- /* Set the rate in the TX cmd */
- tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
-}
-
-static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
- struct sk_buff *skb_frag,
- int sta_id)
-{
- struct ieee80211_key_conf *keyconf = info->control.hw_key;
-
- switch (keyconf->alg) {
- case ALG_CCMP:
- tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
- memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
- IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
- break;
-
- case ALG_TKIP:
- tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
- ieee80211_get_tkip_key(keyconf, skb_frag,
- IEEE80211_TKIP_P2_KEY, tx_cmd->key);
- IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
- break;
-
- case ALG_WEP:
- tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
- (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
-
- if (keyconf->keylen == WEP_KEY_LEN_128)
- tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
-
- memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
-
- IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
- "with key %d\n", keyconf->keyidx);
- break;
-
- default:
- IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
- break;
- }
-}
-
-/*
- * start REPLY_TX command process
- */
-int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = info->control.sta;
- struct iwl_station_priv *sta_priv = NULL;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- struct iwl_tx_cmd *tx_cmd;
- int swq_id, txq_id;
- dma_addr_t phys_addr;
- dma_addr_t txcmd_phys;
- dma_addr_t scratch_phys;
- u16 len, len_org, firstlen, secondlen;
- u16 seq_number = 0;
- __le16 fc;
- u8 hdr_len;
- u8 sta_id;
- u8 wait_write_ptr = 0;
- u8 tid = 0;
- u8 *qc = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (iwl_is_rfkill(priv)) {
- IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
- goto drop_unlock;
- }
-
- fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (ieee80211_is_auth(fc))
- IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
- else if (ieee80211_is_assoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
- else if (ieee80211_is_reassoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
- /* drop all non-injected data frame if we are not associated */
- if (ieee80211_is_data(fc) &&
- !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
- (!iwl_is_associated(priv) ||
- ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
- !priv->assoc_station_added)) {
- IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
- goto drop_unlock;
- }
-
- hdr_len = ieee80211_hdrlen(fc);
-
- /* Find (or create) index into station table for destination station */
- if (info->flags & IEEE80211_TX_CTL_INJECTED)
- sta_id = priv->hw_params.bcast_sta_id;
- else
- sta_id = iwl_get_sta_id(priv, hdr);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
- hdr->addr1);
- goto drop_unlock;
- }
-
- IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
-
- if (sta)
- sta_priv = (void *)sta->drv_priv;
-
- if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
- sta_priv->asleep) {
- WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
- /*
- * This sends an asynchronous command to the device,
- * but we can rely on it being processed before the
- * next frame is processed -- and the next frame to
- * this station is the one that will consume this
- * counter.
- * For now set the counter to just 1 since we do not
- * support uAPSD yet.
- */
- iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
- }
-
- txq_id = skb_get_queue_mapping(skb);
- if (ieee80211_is_data_qos(fc)) {
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- if (unlikely(tid >= MAX_TID_COUNT))
- goto drop_unlock;
- seq_number = priv->stations[sta_id].tid[tid].seq_number;
- seq_number &= IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl = hdr->seq_ctrl &
- cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(seq_number);
- seq_number += 0x10;
- /* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU &&
- priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
- txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
- }
- }
-
- txq = &priv->txq[txq_id];
- swq_id = txq->swq_id;
- q = &txq->q;
-
- if (unlikely(iwl_queue_space(q) < q->high_mark))
- goto drop_unlock;
-
- if (ieee80211_is_data_qos(fc))
- priv->stations[sta_id].tid[tid].tfds_in_queue++;
-
- /* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb[0] = skb;
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_cmd = txq->cmd[q->write_ptr];
- out_meta = &txq->meta[q->write_ptr];
- tx_cmd = &out_cmd->cmd.tx;
- memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
- memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
-
- /*
- * Set up the Tx-command (not MAC!) header.
- * Store the chosen Tx queue and TFD index within the sequence field;
- * after Tx, uCode's Tx response will return this value so driver can
- * locate the frame within the tx queue and do post-tx processing.
- */
- out_cmd->hdr.cmd = REPLY_TX;
- out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
-
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
- /* Total # bytes to be transmitted */
- len = (u16)skb->len;
- tx_cmd->len = cpu_to_le16(len);
-
- if (info->control.hw_key)
- iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
-
- /* TODO need this for burst mode later on */
- iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
- iwl_dbg_log_tx_data_frame(priv, len, hdr);
-
- /* set is_hcca to 0; it probably will never be implemented */
- iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);
-
- iwl_update_stats(priv, true, fc, len);
- /*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
- */
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
-
- len_org = len;
- firstlen = len = (len + 3) & ~3;
-
- if (len_org != len)
- len_org = 1;
- else
- len_org = 0;
-
- /* Tell NIC about any 2-byte padding after MAC header */
- if (len_org)
- tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = pci_map_single(priv->pci_dev,
- &out_cmd->hdr, len,
- PCI_DMA_BIDIRECTIONAL);
- pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
- pci_unmap_len_set(out_meta, len, len);
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- txcmd_phys, len, 1, 0);
-
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- if (qc)
- priv->stations[sta_id].tid[tid].seq_number = seq_number;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- secondlen = len = skb->len - hdr_len;
- if (len) {
- phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
- len, PCI_DMA_TODEVICE);
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, len,
- 0, 0);
- }
-
- scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch);
-
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- /* take back ownership of DMA buffer to enable update */
- pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
- len, PCI_DMA_BIDIRECTIONAL);
- tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
- tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
-
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
- le16_to_cpu(out_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
-
- /* Set up entry for this TFD in Tx byte-count array */
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
- le16_to_cpu(tx_cmd->len));
-
- pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
- len, PCI_DMA_BIDIRECTIONAL);
-
- trace_iwlwifi_dev_tx(priv,
- &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
- &out_cmd->hdr, firstlen,
- skb->data + hdr_len, secondlen);
-
- /* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually,
- * regardless of the value of ret. "ret" only indicates
- * whether or not we should update the write pointer.
- */
-
- /* avoid atomic ops if it isn't an associated client */
- if (sta_priv && sta_priv->client)
- atomic_inc(&sta_priv->pending_frames);
-
- if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
- if (wait_write_ptr) {
- spin_lock_irqsave(&priv->lock, flags);
- txq->need_update = 1;
- iwl_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
- } else {
- iwl_stop_queue(priv, txq->swq_id);
- }
- }
-
- return 0;
-
-drop_unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
- return -1;
-}
-EXPORT_SYMBOL(iwl_tx_skb);
-
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
/**
@@ -1192,61 +548,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
return idx;
}
-static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct ieee80211_sta *sta;
- struct iwl_station_priv *sta_priv;
-
- sta = ieee80211_find_sta(priv->vif, hdr->addr1);
- if (sta) {
- sta_priv = (void *)sta->drv_priv;
- /* avoid atomic ops if this isn't a client */
- if (sta_priv->client &&
- atomic_dec_return(&sta_priv->pending_frames) == 0)
- ieee80211_sta_block_awake(priv->hw, sta, false);
- }
-
- ieee80211_tx_status_irqsafe(priv->hw, skb);
-}
-
-int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_tx_info *tx_info;
- int nfreed = 0;
- struct ieee80211_hdr *hdr;
-
- if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
- "is out of range [0-%d] %d %d.\n", txq_id,
- index, q->n_bd, q->write_ptr, q->read_ptr);
- return 0;
- }
-
- for (index = iwl_queue_inc_wrap(index, q->n_bd);
- q->read_ptr != index;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- tx_info = &txq->txb[txq->q.read_ptr];
- iwl_tx_status(priv, tx_info->skb[0]);
-
- hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
- if (hdr && ieee80211_is_data_qos(hdr->frame_control))
- nfreed++;
- tx_info->skb[0] = NULL;
-
- if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
- priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
-
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- }
- return nfreed;
-}
-EXPORT_SYMBOL(iwl_tx_queue_reclaim);
-
-
/**
* iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
*
@@ -1340,7 +641,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
if (!(meta->flags & CMD_ASYNC)) {
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n",
+ IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->hdr.cmd));
wake_up_interruptible(&priv->wait_command_queue);
}
@@ -1348,358 +649,37 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);
-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because they should already
- * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
- */
-static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
- return txq_id;
- return -1;
-}
-
-int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
-{
- int sta_id;
- int tx_fifo;
- int txq_id;
- int ret;
- unsigned long flags;
- struct iwl_tid_data *tid_data;
-
- if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
- tx_fifo = default_tid_to_tx_fifo[tid];
- else
- return -EINVAL;
-
- IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
- __func__, ra, tid);
-
- sta_id = iwl_find_station(priv, ra);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Start AGG on invalid station\n");
- return -ENXIO;
- }
- if (unlikely(tid >= MAX_TID_COUNT))
- return -EINVAL;
-
- if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
- IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
- return -ENXIO;
- }
-
- txq_id = iwl_txq_ctx_activate_free(priv);
- if (txq_id == -1) {
- IWL_ERR(priv, "No free aggregation queue available\n");
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- *ssn = SEQ_TO_SN(tid_data->seq_number);
- tid_data->agg.txq_id = txq_id;
- priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
- sta_id, tid, *ssn);
- if (ret)
- return ret;
-
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
- } else {
- IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
- tid_data->tfds_in_queue);
- tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
- }
- return ret;
-}
-EXPORT_SYMBOL(iwl_tx_agg_start);
-
-int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
-{
- int tx_fifo_id, txq_id, sta_id, ssn = -1;
- struct iwl_tid_data *tid_data;
- int write_ptr, read_ptr;
- unsigned long flags;
-
- if (!ra) {
- IWL_ERR(priv, "ra = NULL\n");
- return -EINVAL;
- }
-
- if (unlikely(tid >= MAX_TID_COUNT))
- return -EINVAL;
-
- if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
- tx_fifo_id = default_tid_to_tx_fifo[tid];
- else
- return -EINVAL;
-
- sta_id = iwl_find_station(priv, ra);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
- return -ENXIO;
- }
-
- if (priv->stations[sta_id].tid[tid].agg.state ==
- IWL_EMPTYING_HW_QUEUE_ADDBA) {
- IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
- ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
- priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
- return 0;
- }
-
- if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
- IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
-
- tid_data = &priv->stations[sta_id].tid[tid];
- ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
- txq_id = tid_data->agg.txq_id;
- write_ptr = priv->txq[txq_id].q.write_ptr;
- read_ptr = priv->txq[txq_id].q.read_ptr;
-
- /* The queue is not empty */
- if (write_ptr != read_ptr) {
- IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
- priv->stations[sta_id].tid[tid].agg.state =
- IWL_EMPTYING_HW_QUEUE_DELBA;
- return 0;
- }
-
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
-
- spin_lock_irqsave(&priv->lock, flags);
- /*
- * the only reason this call can fail is queue number out of range,
- * which can happen if uCode is reloaded and all the station
- * information is lost. If it is outside the range, there is no need
- * to deactivate the uCode queue, just return "success" to allow
- * mac80211 to clean up its own data.
- */
- priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
- tx_fifo_id);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_tx_agg_stop);
-
-int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
-{
- struct iwl_queue *q = &priv->txq[txq_id].q;
- u8 *addr = priv->stations[sta_id].sta.sta.addr;
- struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
-
- switch (priv->stations[sta_id].tid[tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_DELBA:
- /* We are reclaiming the last packet of the */
- /* aggregated HW queue */
- if ((txq_id == tid_data->agg.txq_id) &&
- (q->read_ptr == q->write_ptr)) {
- u16 ssn = SEQ_TO_SN(tid_data->seq_number);
- int tx_fifo = default_tid_to_tx_fifo[tid];
- IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
- priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
- ssn, tx_fifo);
- tid_data->agg.state = IWL_AGG_OFF;
- ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
- }
- break;
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /* We are reclaiming the last packet of the queue */
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
- }
- break;
- }
- return 0;
-}
-EXPORT_SYMBOL(iwl_txq_check_empty);
-
-/**
- * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
- *
- * Go through block-ack's bitmap of ACK'd frames, update driver's record of
- * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
- */
-static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwl_compressed_ba_resp *ba_resp)
-
-{
- int i, sh, ack;
- u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
- u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
- u64 bitmap;
- int successes = 0;
- struct ieee80211_tx_info *info;
-
- if (unlikely(!agg->wait_for_ba)) {
- IWL_ERR(priv, "Received BA when not expected\n");
- return -EINVAL;
- }
-
- /* Mark that the expected block-ack response arrived */
- agg->wait_for_ba = 0;
- IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
-
- /* Calculate shift to align block-ack bits with our Tx window bits */
- sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
- if (sh < 0) /* tbw something is wrong with indices */
- sh += 0x100;
-
- /* don't use 64-bit values for now */
- bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
-
- if (agg->frame_count > (64 - sh)) {
- IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
- return -1;
- }
-
- /* check for success or failure according to the
- * transmitted bitmap and block-ack bitmap */
- bitmap &= agg->bitmap;
-
- /* For each frame attempted in aggregation,
- * update driver's record of tx frame's status. */
- for (i = 0; i < agg->frame_count ; i++) {
- ack = bitmap & (1ULL << i);
- successes += !!ack;
- IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
- ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
- agg->start_idx + i);
- }
-
- info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
- memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
- info->flags |= IEEE80211_TX_STAT_AMPDU;
- info->status.ampdu_ack_map = successes;
- info->status.ampdu_ack_len = agg->frame_count;
- iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
-
- IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
-
- return 0;
-}
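The block-ack bookkeeping above shifts the firmware's ACK bitmap so it lines up with the driver's Tx window, masks it against the frames actually sent, and counts the set bits to report per-frame status. A self-contained sketch of that idea with hypothetical window indices and bitmaps (not the driver's exact layout):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Pretend the aggregate started at Tx-window index 8 while the
	 * BA response reports its window starting at index 5: shift the
	 * reported bitmap so bit 0 lines up with our first frame. */
	int start_idx = 8, ba_start_idx = 5;
	int frame_count = 6;
	uint64_t ba_bitmap = 0x7D0ULL;	/* firmware ACK bits 4..10, one missing */
	uint64_t sent_bitmap = 0x3FULL;	/* we transmitted frames 0..5 */

	int sh = start_idx - ba_start_idx;
	uint64_t acked = (ba_bitmap >> sh) & sent_bitmap;

	int successes = 0;
	for (int i = 0; i < frame_count; i++)
		if (acked & (1ULL << i))
			successes++;

	printf("%d of %d frames acked (aligned bitmap 0x%llx)\n",
	       successes, frame_count, (unsigned long long)acked);
	return 0;
}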
-
-/**
- * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
- *
- * Handles block-acknowledge notification from device, which reports success
- * of frames sent via aggregation.
- */
-void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
- struct iwl_tx_queue *txq = NULL;
- struct iwl_ht_agg *agg;
- int index;
- int sta_id;
- int tid;
-
- /* "flow" corresponds to Tx queue */
- u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
-
- /* "ssn" is start of block-ack Tx window, corresponds to index
- * (in Tx queue's circular buffer) of first TFD/frame in window */
- u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
-
- if (scd_flow >= priv->hw_params.max_txq_num) {
- IWL_ERR(priv,
- "BUG_ON scd_flow is bigger than number of queues\n");
- return;
- }
-
- txq = &priv->txq[scd_flow];
- sta_id = ba_resp->sta_id;
- tid = ba_resp->tid;
- agg = &priv->stations[sta_id].tid[tid].agg;
-
- /* Find index just before block-ack window */
- index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
-
- /* TODO: Need to get this copy more safely - now good for debug */
-
- IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
- "sta_id = %d\n",
- agg->wait_for_ba,
- (u8 *) &ba_resp->sta_addr_lo32,
- ba_resp->sta_id);
- IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
- "%d, scd_ssn = %d\n",
- ba_resp->tid,
- ba_resp->seq_ctl,
- (unsigned long long)le64_to_cpu(ba_resp->bitmap),
- ba_resp->scd_flow,
- ba_resp->scd_ssn);
- IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx \n",
- agg->start_idx,
- (unsigned long long)agg->bitmap);
-
- /* Update driver's record of ACK vs. not for each frame in window */
- iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);
-
- /* Release all TFDs before the SSN, i.e. all TFDs in front of
- * block-ack window (we assume that they've been successfully
- * transmitted ... if not, it's too late anyway). */
- if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
- /* calculate mac80211 ampdu sw queue to wake */
- int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
- priv->mac80211_registered &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_wake_queue(priv, txq->swq_id);
-
- iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
- }
-}
-EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
-
#ifdef CONFIG_IWLWIFI_DEBUG
-#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
const char *iwl_get_tx_fail_reason(u32 status)
{
switch (status & TX_STATUS_MSK) {
case TX_STATUS_SUCCESS:
return "SUCCESS";
- TX_STATUS_ENTRY(SHORT_LIMIT);
- TX_STATUS_ENTRY(LONG_LIMIT);
- TX_STATUS_ENTRY(FIFO_UNDERRUN);
- TX_STATUS_ENTRY(MGMNT_ABORT);
- TX_STATUS_ENTRY(NEXT_FRAG);
- TX_STATUS_ENTRY(LIFE_EXPIRE);
- TX_STATUS_ENTRY(DEST_PS);
- TX_STATUS_ENTRY(ABORTED);
- TX_STATUS_ENTRY(BT_RETRY);
- TX_STATUS_ENTRY(STA_INVALID);
- TX_STATUS_ENTRY(FRAG_DROPPED);
- TX_STATUS_ENTRY(TID_DISABLE);
- TX_STATUS_ENTRY(FRAME_FLUSHED);
- TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
- TX_STATUS_ENTRY(TX_LOCKED);
- TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
+ TX_STATUS_POSTPONE(DELAY);
+ TX_STATUS_POSTPONE(FEW_BYTES);
+ TX_STATUS_POSTPONE(BT_PRIO);
+ TX_STATUS_POSTPONE(QUIET_PERIOD);
+ TX_STATUS_POSTPONE(CALC_TTAK);
+ TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+ TX_STATUS_FAIL(SHORT_LIMIT);
+ TX_STATUS_FAIL(LONG_LIMIT);
+ TX_STATUS_FAIL(FIFO_UNDERRUN);
+ TX_STATUS_FAIL(DRAIN_FLOW);
+ TX_STATUS_FAIL(RFKILL_FLUSH);
+ TX_STATUS_FAIL(LIFE_EXPIRE);
+ TX_STATUS_FAIL(DEST_PS);
+ TX_STATUS_FAIL(HOST_ABORTED);
+ TX_STATUS_FAIL(BT_RETRY);
+ TX_STATUS_FAIL(STA_INVALID);
+ TX_STATUS_FAIL(FRAG_DROPPED);
+ TX_STATUS_FAIL(TID_DISABLE);
+ TX_STATUS_FAIL(FIFO_FLUSHED);
+ TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+ TX_STATUS_FAIL(FW_DROP);
+ TX_STATUS_FAIL(STA_COLOR_MISMATCH_DROP);
}
return "UNKNOWN";
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index b74a56c..3e5bffb 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -352,11 +352,11 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
static void iwl3945_unset_hw_params(struct iwl_priv *priv)
{
- if (priv->shared_virt)
+ if (priv->_3945.shared_virt)
dma_free_coherent(&priv->pci_dev->dev,
sizeof(struct iwl3945_shared),
- priv->shared_virt,
- priv->shared_phys);
+ priv->_3945.shared_virt,
+ priv->_3945.shared_phys);
}
static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
@@ -505,24 +505,15 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif
- /* drop all non-injected data frame if we are not associated */
- if (ieee80211_is_data(fc) &&
- !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
- (!iwl_is_associated(priv) ||
- ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
- IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
- goto drop_unlock;
- }
-
spin_unlock_irqrestore(&priv->lock, flags);
hdr_len = ieee80211_hdrlen(fc);
- /* Find (or create) index into station table for destination station */
- if (info->flags & IEEE80211_TX_CTL_INJECTED)
+ /* Find index into station table for destination station */
+ if (!info->control.sta)
sta_id = priv->hw_params.bcast_sta_id;
else
- sta_id = iwl_get_sta_id(priv, hdr);
+ sta_id = iwl_sta_id(info->control.sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
hdr->addr1);
@@ -607,9 +598,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txq->need_update = 0;
}
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
+ IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
le16_to_cpu(out_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
+ IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
ieee80211_hdrlen(fc));
@@ -754,7 +745,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
if (iwl_is_associated(priv))
add_time =
iwl3945_usecs_to_beacons(
- le64_to_cpu(params->start_time) - priv->last_tsf,
+ le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
le16_to_cpu(priv->rxon_timing.beacon_interval));
memset(&spectrum, 0, sizeof(spectrum));
@@ -768,7 +759,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
if (iwl_is_associated(priv))
spectrum.start_time =
- iwl3945_add_beacon_time(priv->last_beacon_time,
+ iwl3945_add_beacon_time(priv->_3945.last_beacon_time,
add_time,
le16_to_cpu(priv->rxon_timing.beacon_interval));
else
@@ -857,7 +848,6 @@ static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
#endif
IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
- return;
}
static void iwl3945_bg_beacon_update(struct work_struct *work)
@@ -966,7 +956,7 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
* statistics request from the host as well as for the periodic
* statistics notifications (after received beacons) from the uCode.
*/
- priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics;
+ priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
iwl_setup_rx_scan_handlers(priv);
@@ -1613,9 +1603,6 @@ static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
return pos;
}
-/* For sanity check only. Actual size is determined by uCode, typ. 512 */
-#define IWL3945_MAX_EVENT_LOG_SIZE (512)
-
#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
@@ -1642,16 +1629,16 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
- if (capacity > IWL3945_MAX_EVENT_LOG_SIZE) {
+ if (capacity > priv->cfg->max_event_log_size) {
IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
- capacity, IWL3945_MAX_EVENT_LOG_SIZE);
- capacity = IWL3945_MAX_EVENT_LOG_SIZE;
+ capacity, priv->cfg->max_event_log_size);
+ capacity = priv->cfg->max_event_log_size;
}
- if (next_entry > IWL3945_MAX_EVENT_LOG_SIZE) {
+ if (next_entry > priv->cfg->max_event_log_size) {
IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
- next_entry, IWL3945_MAX_EVENT_LOG_SIZE);
- next_entry = IWL3945_MAX_EVENT_LOG_SIZE;
+ next_entry, priv->cfg->max_event_log_size);
+ next_entry = priv->cfg->max_event_log_size;
}
size = num_wraps ? capacity : next_entry;
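The event-log dump above now clamps the capacity and write index reported by the device against priv->cfg->max_event_log_size instead of the removed hard-coded constant, so each device type carries its own limit. A small sketch of that defensive clamp on device-reported values (helper name and numbers are hypothetical):

#include <stdio.h>

static unsigned int clamp_reported(unsigned int reported,
				   unsigned int max, const char *what)
{
	if (reported > max) {
		fprintf(stderr, "%s %u is bogus, limiting to %u\n",
			what, reported, max);
		return max;
	}
	return reported;
}

int main(void)
{
	unsigned int max_event_log_size = 512;	/* per-device config value */
	unsigned int capacity = 70000;		/* garbage read from target memory */
	unsigned int next_entry = 300;

	capacity = clamp_reported(capacity, max_event_log_size, "log capacity");
	next_entry = clamp_reported(next_entry, max_event_log_size, "write index");
	printf("capacity=%u next_entry=%u\n", capacity, next_entry);
	return 0;
}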
@@ -1860,7 +1847,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
enum ieee80211_band band,
u8 is_active, u8 n_probes,
- struct iwl3945_scan_channel *scan_ch)
+ struct iwl3945_scan_channel *scan_ch,
+ struct ieee80211_vif *vif)
{
struct ieee80211_channel *chan;
const struct ieee80211_supported_band *sband;
@@ -1874,7 +1862,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
return 0;
active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_get_passive_dwell_time(priv, band);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
if (passive_dwell <= active_dwell)
passive_dwell = active_dwell + 1;
@@ -1947,7 +1935,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
added++;
}
- IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added);
+ IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
return added;
}
@@ -2122,6 +2110,28 @@ static void iwl3945_nic_start(struct iwl_priv *priv)
iwl_write32(priv, CSR_RESET, 0);
}
+#define IWL3945_UCODE_GET(item) \
+static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
+{ \
+ return le32_to_cpu(ucode->u.v1.item); \
+}
+
+static u32 iwl3945_ucode_get_header_size(u32 api_ver)
+{
+ return 24;
+}
+
+static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
+{
+ return (u8 *) ucode->u.v1.data;
+}
+
+IWL3945_UCODE_GET(inst_size);
+IWL3945_UCODE_GET(data_size);
+IWL3945_UCODE_GET(init_size);
+IWL3945_UCODE_GET(init_data_size);
+IWL3945_UCODE_GET(boot_size);
+
/**
* iwl3945_read_ucode - Read uCode images from disk file.
*
@@ -2170,7 +2180,7 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
goto error;
/* Make sure that we got at least our header! */
- if (ucode_raw->size < priv->cfg->ops->ucode->get_header_size(1)) {
+ if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) {
IWL_ERR(priv, "File size way too small!\n");
ret = -EINVAL;
goto err_release;
@@ -2181,13 +2191,12 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
priv->ucode_ver = le32_to_cpu(ucode->ver);
api_ver = IWL_UCODE_API(priv->ucode_ver);
- inst_size = priv->cfg->ops->ucode->get_inst_size(ucode, api_ver);
- data_size = priv->cfg->ops->ucode->get_data_size(ucode, api_ver);
- init_size = priv->cfg->ops->ucode->get_init_size(ucode, api_ver);
- init_data_size =
- priv->cfg->ops->ucode->get_init_data_size(ucode, api_ver);
- boot_size = priv->cfg->ops->ucode->get_boot_size(ucode, api_ver);
- src = priv->cfg->ops->ucode->get_data(ucode, api_ver);
+ inst_size = iwl3945_ucode_get_inst_size(ucode);
+ data_size = iwl3945_ucode_get_data_size(ucode);
+ init_size = iwl3945_ucode_get_init_size(ucode);
+ init_data_size = iwl3945_ucode_get_init_data_size(ucode);
+ boot_size = iwl3945_ucode_get_boot_size(ucode);
+ src = iwl3945_ucode_get_data(ucode);
/* api_ver should match the api version forming part of the
* firmware filename ... but we don't check for that and only rely
@@ -2236,7 +2245,7 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
/* Verify size of file vs. image size info in file's header */
- if (ucode_raw->size != priv->cfg->ops->ucode->get_header_size(api_ver) +
+ if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
inst_size + data_size + init_size +
init_data_size + boot_size) {
@@ -2490,8 +2499,6 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
goto restart;
}
- iwl_clear_stations_table(priv);
-
rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
@@ -2513,13 +2520,19 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
/* After the ALIVE response, we can send commands to 3945 uCode */
set_bit(STATUS_ALIVE, &priv->status);
+ if (priv->cfg->ops->lib->recover_from_tx_stall) {
+ /* Enable timer to monitor the driver queues */
+ mod_timer(&priv->monitor_recover,
+ jiffies +
+ msecs_to_jiffies(priv->cfg->monitor_recover_period));
+ }
+
if (iwl_is_rfkill(priv))
return;
ieee80211_wake_queues(priv->hw);
- priv->active_rate = priv->rates_mask;
- priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
+ priv->active_rate = IWL_RATES_MASK;
iwl_power_update_mode(priv, true);
@@ -2531,11 +2544,11 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
} else {
/* Initialize our rx_config data */
- iwl_connection_init_rx_config(priv, priv->iw_mode);
+ iwl_connection_init_rx_config(priv, NULL);
}
/* Configure Bluetooth device coexistence support */
- iwl_send_bt_config(priv);
+ priv->cfg->ops->hcmd->send_bt_config(priv);
/* Configure the adapter for unassociated operation */
iwlcore_commit_rxon(priv);
@@ -2548,17 +2561,6 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
set_bit(STATUS_READY, &priv->status);
wake_up_interruptible(&priv->wait_command_queue);
- /* reassociate for ADHOC mode */
- if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
- struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
- priv->vif);
- if (beacon)
- iwl_mac_beacon_update(priv->hw, beacon);
- }
-
- if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
- iwl_set_mode(priv, priv->iw_mode);
-
return;
restart:
@@ -2580,7 +2582,10 @@ static void __iwl3945_down(struct iwl_priv *priv)
if (!exit_pending)
set_bit(STATUS_EXIT_PENDING, &priv->status);
- iwl_clear_stations_table(priv);
+ /* Station information will now be cleared in device */
+ iwl_clear_ucode_stations(priv);
+ iwl_dealloc_bcast_station(priv);
+ iwl_clear_driver_stations(priv);
/* Unblock any waiting calls */
wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2661,6 +2666,10 @@ static int __iwl3945_up(struct iwl_priv *priv)
{
int rc, i;
+ rc = iwl_alloc_bcast_station(priv, false);
+ if (rc)
+ return rc;
+
if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
return -EIO;
@@ -2714,12 +2723,10 @@ static int __iwl3945_up(struct iwl_priv *priv)
for (i = 0; i < MAX_HW_RESTARTS; i++) {
- iwl_clear_stations_table(priv);
-
/* load bootstrap state machine,
* load bootstrap program into processor's memory,
* prepare to load the "initialize" uCode */
- priv->cfg->ops->lib->load_ucode(priv);
+ rc = priv->cfg->ops->lib->load_ucode(priv);
if (rc) {
IWL_ERR(priv,
@@ -2787,7 +2794,7 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
static void iwl3945_rfkill_poll(struct work_struct *data)
{
struct iwl_priv *priv =
- container_of(data, struct iwl_priv, rfkill_poll.work);
+ container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
& CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
@@ -2806,22 +2813,18 @@ static void iwl3945_rfkill_poll(struct work_struct *data)
/* Keep this running, even if radio now enabled. This will be
* cancelled in mac_start() if system decides to start again */
- queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
+ queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
round_jiffies_relative(2 * HZ));
}
-#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
-static void iwl3945_bg_request_scan(struct work_struct *data)
+void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, request_scan);
struct iwl_host_cmd cmd = {
.id = REPLY_SCAN_CMD,
.len = sizeof(struct iwl3945_scan_cmd),
.flags = CMD_SIZE_HUGE,
};
- int rc = 0;
struct iwl3945_scan_cmd *scan;
struct ieee80211_conf *conf = NULL;
u8 n_probes = 0;
@@ -2830,8 +2833,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
conf = ieee80211_get_hw_conf(priv->hw);
- mutex_lock(&priv->mutex);
-
cancel_delayed_work(&priv->scan_check);
if (!iwl_is_ready(priv)) {
@@ -2849,7 +2850,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests "
"Ignoring second request.\n");
- rc = -EIO;
goto done;
}
@@ -2875,20 +2875,15 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
goto done;
}
- if (!priv->scan_bands) {
- IWL_DEBUG_HC(priv, "Aborting scan due to no requested bands\n");
- goto done;
- }
-
- if (!priv->scan) {
- priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
- if (!priv->scan) {
- rc = -ENOMEM;
+ if (!priv->scan_cmd) {
+ priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
+ IWL_MAX_SCAN_SIZE, GFP_KERNEL);
+ if (!priv->scan_cmd) {
+ IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n");
goto done;
}
}
- scan = priv->scan;
+ scan = priv->scan_cmd;
memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
@@ -2904,7 +2899,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
spin_lock_irqsave(&priv->lock, flags);
- interval = priv->beacon_int;
+ interval = vif ? vif->bss_conf.beacon_int : 0;
spin_unlock_irqrestore(&priv->lock, flags);
scan->suspend_time = 0;
@@ -2927,7 +2922,9 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
scan_suspend_time, interval);
}
- if (priv->scan_request->n_ssids) {
+ if (priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+ } else if (priv->scan_request->n_ssids) {
int i, p = 0;
IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
for (i = 0; i < priv->scan_request->n_ssids; i++) {
@@ -2955,12 +2952,14 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
/* flags + rate selection */
- if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
+ switch (priv->scan_band) {
+ case IEEE80211_BAND_2GHZ:
scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
scan->good_CRC_th = 0;
band = IEEE80211_BAND_2GHZ;
- } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
+ break;
+ case IEEE80211_BAND_5GHZ:
scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
/*
 * If active scanning is requested but a certain channel
@@ -2970,27 +2969,32 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
IWL_GOOD_CRC_TH_DISABLED;
band = IEEE80211_BAND_5GHZ;
- } else {
- IWL_WARN(priv, "Invalid scan band count\n");
+ break;
+ default:
+ IWL_WARN(priv, "Invalid scan band\n");
goto done;
}
- scan->tx_cmd.len = cpu_to_le16(
+ if (!priv->is_internal_short_scan) {
+ scan->tx_cmd.len = cpu_to_le16(
iwl_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
priv->scan_request->ie,
priv->scan_request->ie_len,
IWL_MAX_SCAN_SIZE - sizeof(*scan)));
-
+ } else {
+ scan->tx_cmd.len = cpu_to_le16(
+ iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ NULL, 0,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan)));
+ }
/* select Rx antennas */
scan->flags |= iwl3945_get_antenna_flags(priv);
- if (iwl_is_monitor_mode(priv))
- scan->filter_flags = RXON_FILTER_PROMISC_MSK;
-
scan->channel_count =
iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
- (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
+ (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)], vif);
if (scan->channel_count == 0) {
IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
@@ -3003,14 +3007,12 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
scan->len = cpu_to_le16(cmd.len);
set_bit(STATUS_SCAN_HW, &priv->status);
- rc = iwl_send_cmd_sync(priv, &cmd);
- if (rc)
+ if (iwl_send_cmd_sync(priv, &cmd))
goto done;
queue_delayed_work(priv->workqueue, &priv->scan_check,
IWL_SCAN_CHECK_WATCHDOG);
- mutex_unlock(&priv->mutex);
return;
done:
@@ -3024,7 +3026,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
/* inform mac80211 scan aborted */
queue_work(priv->workqueue, &priv->scan_completed);
- mutex_unlock(&priv->mutex);
}
static void iwl3945_bg_restart(struct work_struct *data)
@@ -3066,28 +3067,25 @@ static void iwl3945_bg_rx_replenish(struct work_struct *data)
mutex_unlock(&priv->mutex);
}
-#define IWL_DELAY_NEXT_SCAN (HZ*2)
-
-void iwl3945_post_associate(struct iwl_priv *priv)
+void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
int rc = 0;
struct ieee80211_conf *conf = NULL;
- if (priv->iw_mode == NL80211_IFTYPE_AP) {
+ if (!vif || !priv->is_open)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_AP) {
IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
return;
}
-
IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
- priv->assoc_id, priv->active_rxon.bssid_addr);
+ vif->bss_conf.aid, priv->active_rxon.bssid_addr);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (!priv->vif || !priv->is_open)
- return;
-
iwl_scan_cancel_timeout(priv, 200);
conf = ieee80211_get_hw_conf(priv->hw);
@@ -3096,7 +3094,7 @@ void iwl3945_post_associate(struct iwl_priv *priv)
iwlcore_commit_rxon(priv);
memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
- iwl_setup_rxon_timing(priv);
+ iwl_setup_rxon_timing(priv, vif);
rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
sizeof(priv->rxon_timing), &priv->rxon_timing);
if (rc)
@@ -3105,57 +3103,40 @@ void iwl3945_post_associate(struct iwl_priv *priv)
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
- priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
+ priv->staging_rxon.assoc_id = cpu_to_le16(vif->bss_conf.aid);
IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
- priv->assoc_id, priv->beacon_int);
+ vif->bss_conf.aid, vif->bss_conf.beacon_int);
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+ if (vif->type == NL80211_IFTYPE_ADHOC)
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
}
iwlcore_commit_rxon(priv);
- switch (priv->iw_mode) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
break;
-
case NL80211_IFTYPE_ADHOC:
-
- priv->assoc_id = 1;
- iwl_add_station(priv, priv->bssid, 0, CMD_SYNC, NULL);
- iwl3945_sync_sta(priv, IWL_STA_ID,
- (priv->band == IEEE80211_BAND_5GHZ) ?
- IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
- CMD_ASYNC);
- iwl3945_rate_scale_init(priv->hw, IWL_STA_ID);
iwl3945_send_beacon_cmd(priv);
-
break;
-
default:
- IWL_ERR(priv, "%s Should not be called in %d mode\n",
- __func__, priv->iw_mode);
+ IWL_ERR(priv, "%s Should not be called in %d mode\n",
+ __func__, vif->type);
break;
}
-
- iwl_activate_qos(priv, 0);
-
- /* we have just associated, don't start scan too early */
- priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
}
/*****************************************************************************
@@ -3214,7 +3195,7 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
/* ucode is running and will send rfkill notifications,
* no need to poll the killswitch state anymore */
- cancel_delayed_work(&priv->rfkill_poll);
+ cancel_delayed_work(&priv->_3945.rfkill_poll);
iwl_led_start(priv);
@@ -3255,7 +3236,7 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
flush_workqueue(priv->workqueue);
/* start polling the killswitch state again */
- queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
+ queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
round_jiffies_relative(2 * HZ));
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -3277,7 +3258,7 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
return NETDEV_TX_OK;
}
-void iwl3945_config_ap(struct iwl_priv *priv)
+void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
int rc = 0;
@@ -3293,7 +3274,7 @@ void iwl3945_config_ap(struct iwl_priv *priv)
/* RXON Timing */
memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
- iwl_setup_rxon_timing(priv);
+ iwl_setup_rxon_timing(priv, vif);
rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
sizeof(priv->rxon_timing),
&priv->rxon_timing);
@@ -3301,9 +3282,10 @@ void iwl3945_config_ap(struct iwl_priv *priv)
IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
"Attempting to continue.\n");
- /* FIXME: what should be the assoc_id for AP? */
- priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ priv->staging_rxon.assoc_id = 0;
+
+ if (vif->bss_conf.assoc_capability &
+ WLAN_CAPABILITY_SHORT_PREAMBLE)
priv->staging_rxon.flags |=
RXON_FLG_SHORT_PREAMBLE_MSK;
else
@@ -3311,22 +3293,21 @@ void iwl3945_config_ap(struct iwl_priv *priv)
~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
- if (priv->assoc_capability &
- WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif->bss_conf.assoc_capability &
+ WLAN_CAPABILITY_SHORT_SLOT_TIME)
priv->staging_rxon.flags |=
RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &=
~RXON_FLG_SHORT_SLOT_MSK;
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+ if (vif->type == NL80211_IFTYPE_ADHOC)
priv->staging_rxon.flags &=
~RXON_FLG_SHORT_SLOT_MSK;
}
/* restore RXON assoc */
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
- iwl_add_station(priv, iwl_bcast_addr, 0, CMD_SYNC, NULL);
}
iwl3945_send_beacon_cmd(priv);
@@ -3341,7 +3322,6 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct iwl_priv *priv = hw->priv;
- const u8 *addr;
int ret = 0;
u8 sta_id = IWL_INVALID_STATION;
u8 static_key;
@@ -3353,21 +3333,24 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- addr = sta ? sta->addr : iwl_bcast_addr;
static_key = !iwl_is_associated(priv);
if (!static_key) {
- sta_id = iwl_find_station(priv, addr);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
- addr);
- return -EINVAL;
+ if (!sta) {
+ sta_id = priv->hw_params.bcast_sta_id;
+ } else {
+ sta_id = iwl_sta_id(sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_MAC80211(priv,
+ "leave - %pM not in station map.\n",
+ sta->addr);
+ return -EINVAL;
+ }
}
}
mutex_lock(&priv->mutex);
iwl_scan_cancel_timeout(priv, 100);
- mutex_unlock(&priv->mutex);
switch (cmd) {
case SET_KEY:
@@ -3388,11 +3371,45 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = -EINVAL;
}
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
}
+static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
+ int ret;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ u8 sta_id;
+
+ sta_priv->common.sta_id = IWL_INVALID_STATION;
+
+ IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
+ sta->addr);
+
+ ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap,
+ &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+ sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ return ret;
+ }
+
+ sta_priv->common.sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
+ sta->addr);
+ iwl3945_rs_rate_init(priv, sta, sta_id);
+
+ return 0;
+}
/*****************************************************************************
*
* sysfs attributes
@@ -3592,7 +3609,7 @@ static ssize_t store_measurement(struct device *d,
struct iwl_priv *priv = dev_get_drvdata(d);
struct ieee80211_measurement_params params = {
.channel = le16_to_cpu(priv->active_rxon.channel),
- .start_time = cpu_to_le64(priv->last_tsf),
+ .start_time = cpu_to_le64(priv->_3945.last_tsf),
.duration = cpu_to_le16(1),
};
u8 type = IWL_MEASURE_BASIC;
@@ -3656,44 +3673,6 @@ static ssize_t show_channels(struct device *d,
static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
-static ssize_t show_statistics(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- u32 size = sizeof(struct iwl3945_notif_statistics);
- u32 len = 0, ofs = 0;
- u8 *data = (u8 *)&priv->statistics_39;
- int rc = 0;
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- mutex_lock(&priv->mutex);
- rc = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (rc) {
- len = sprintf(buf,
- "Error sending statistics request: 0x%08X\n", rc);
- return len;
- }
-
- while (size && (PAGE_SIZE - len)) {
- hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
- PAGE_SIZE - len, 1);
- len = strlen(buf);
- if (PAGE_SIZE - len)
- buf[len++] = '\n';
-
- ofs += 16;
- size -= min(size, 16U);
- }
-
- return len;
-}
-
-static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
-
static ssize_t show_antenna(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -3775,14 +3754,21 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
- INIT_DELAYED_WORK(&priv->rfkill_poll, iwl3945_rfkill_poll);
+ INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
- INIT_WORK(&priv->request_scan, iwl3945_bg_request_scan);
INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
+ INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
iwl3945_hw_setup_deferred_work(priv);
+ if (priv->cfg->ops->lib->recover_from_tx_stall) {
+ init_timer(&priv->monitor_recover);
+ priv->monitor_recover.data = (unsigned long)priv;
+ priv->monitor_recover.function =
+ priv->cfg->ops->lib->recover_from_tx_stall;
+ }
+
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
iwl3945_irq_tasklet, (unsigned long)priv);
}
@@ -3794,7 +3780,10 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
cancel_delayed_work_sync(&priv->init_alive_start);
cancel_delayed_work(&priv->scan_check);
cancel_delayed_work(&priv->alive_start);
+ cancel_work_sync(&priv->start_internal_scan);
cancel_work_sync(&priv->beacon_update);
+ if (priv->cfg->ops->lib->recover_from_tx_stall)
+ del_timer_sync(&priv->monitor_recover);
}
static struct attribute *iwl3945_sysfs_entries[] = {
@@ -3805,7 +3794,6 @@ static struct attribute *iwl3945_sysfs_entries[] = {
&dev_attr_filter_flags.attr,
&dev_attr_measurement.attr,
&dev_attr_retry_rate.attr,
- &dev_attr_statistics.attr,
&dev_attr_status.attr,
&dev_attr_temperature.attr,
&dev_attr_tx_power.attr,
@@ -3832,7 +3820,9 @@ static struct ieee80211_ops iwl3945_hw_ops = {
.conf_tx = iwl_mac_conf_tx,
.reset_tsf = iwl_mac_reset_tsf,
.bss_info_changed = iwl_bss_info_changed,
- .hw_scan = iwl_mac_hw_scan
+ .hw_scan = iwl_mac_hw_scan,
+ .sta_add = iwl3945_mac_sta_add,
+ .sta_remove = iwl_mac_sta_remove,
};
static int iwl3945_init_drv(struct iwl_priv *priv)
@@ -3851,9 +3841,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
mutex_init(&priv->mutex);
mutex_init(&priv->sync_cmd_mutex);
- /* Clear the driver's (not device's) station table */
- iwl_clear_stations_table(priv);
-
priv->ieee_channels = NULL;
priv->ieee_rates = NULL;
priv->band = IEEE80211_BAND_2GHZ;
@@ -3861,12 +3848,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->iw_mode = NL80211_IFTYPE_STATION;
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
- iwl_reset_qos(priv);
-
- priv->qos_data.qos_active = 0;
- priv->qos_data.qos_cap.val = 0;
-
- priv->rates_mask = IWL_RATES_MASK;
priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
@@ -3902,6 +3883,8 @@ err:
return ret;
}
+#define IWL3945_MAX_PROBE_REQUEST 200
+
static int iwl3945_setup_mac(struct iwl_priv *priv)
{
int ret;
@@ -3909,10 +3892,10 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
hw->rate_control_algorithm = "iwl-3945-rs";
hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
+ hw->vif_data_size = sizeof(struct iwl_vif_priv);
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_SPECTRUM_MGMT;
if (!priv->cfg->broken_powersave)
@@ -3928,7 +3911,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
/* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
+ hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;
/* Default value; 4 EDCA QOS priorities */
hw->queues = 4;
@@ -4131,7 +4114,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
/* Start monitoring the killswitch */
- queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
+ queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
2 * HZ);
return 0;
@@ -4205,7 +4188,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
- cancel_delayed_work_sync(&priv->rfkill_poll);
+ cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
iwl3945_dealloc_ucode_pci(priv);
@@ -4214,7 +4197,6 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
iwl3945_hw_txq_ctx_free(priv);
iwl3945_unset_hw_params(priv);
- iwl_clear_stations_table(priv);
/*netif_stop_queue(dev); */
flush_workqueue(priv->workqueue);
@@ -4236,7 +4218,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
iwl_free_channel_map(priv);
iwlcore_free_geos(priv);
- kfree(priv->scan);
+ kfree(priv->scan_cmd);
if (priv->ibss_beacon)
dev_kfree_skb(priv->ibss_beacon);
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index b9d34a7..03f998d 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -17,7 +17,7 @@ config IWM
config IWM_DEBUG
bool "Enable full debugging output in iwmc3200wifi"
depends on IWM && DEBUG_FS
- ---help---
+ help
This option will enable debug tracing and setting for iwm
You can set the debug level and module through debugfs. By
@@ -30,3 +30,10 @@ config IWM_DEBUG
Or, if you want the full debug, for all modules:
echo 0xff > /sys/kernel/debug/iwm/phyN/debug/level
echo 0xff > /sys/kernel/debug/iwm/phyN/debug/modules
+
+config IWM_TRACING
+ bool "Enable event tracing for iwmc3200wifi"
+ depends on IWM && EVENT_TRACING
+ help
+ Say Y here to trace all the commands and responses between
+ the driver and firmware (including TX/RX frames) with ftrace.
diff --git a/drivers/net/wireless/iwmc3200wifi/Makefile b/drivers/net/wireless/iwmc3200wifi/Makefile
index d34291b..cdc7e07 100644
--- a/drivers/net/wireless/iwmc3200wifi/Makefile
+++ b/drivers/net/wireless/iwmc3200wifi/Makefile
@@ -3,3 +3,8 @@ iwmc3200wifi-objs += main.o netdev.o rx.o tx.o sdio.o hal.o fw.o
iwmc3200wifi-objs += commands.o cfg80211.o eeprom.o
iwmc3200wifi-$(CONFIG_IWM_DEBUG) += debugfs.o
+iwmc3200wifi-$(CONFIG_IWM_TRACING) += trace.o
+
+CFLAGS_trace.o := -I$(src)
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwmc3200wifi/bus.h b/drivers/net/wireless/iwmc3200wifi/bus.h
index 836663e..62edd58 100644
--- a/drivers/net/wireless/iwmc3200wifi/bus.h
+++ b/drivers/net/wireless/iwmc3200wifi/bus.h
@@ -31,7 +31,7 @@ struct iwm_if_ops {
int (*disable)(struct iwm_priv *iwm);
int (*send_chunk)(struct iwm_priv *iwm, u8* buf, int count);
- int (*debugfs_init)(struct iwm_priv *iwm, struct dentry *parent_dir);
+ void (*debugfs_init)(struct iwm_priv *iwm, struct dentry *parent_dir);
void (*debugfs_exit)(struct iwm_priv *iwm);
const char *umac_name;
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index a1d45cc..902e95f 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -264,7 +264,7 @@ static int iwm_cfg80211_get_station(struct wiphy *wiphy,
int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
{
struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct iwm_bss_info *bss, *next;
+ struct iwm_bss_info *bss;
struct iwm_umac_notif_bss_info *umac_bss;
struct ieee80211_mgmt *mgmt;
struct ieee80211_channel *channel;
@@ -272,7 +272,7 @@ int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
s32 signal;
int freq;
- list_for_each_entry_safe(bss, next, &iwm->bss_list, node) {
+ list_for_each_entry(bss, &iwm->bss_list, node) {
umac_bss = bss->bss;
mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);
@@ -726,23 +726,26 @@ static int iwm_cfg80211_set_power_mgmt(struct wiphy *wiphy,
CFG_POWER_INDEX, iwm->conf.power_index);
}
-int iwm_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_pmksa *pmksa)
+static int iwm_cfg80211_set_pmksa(struct wiphy *wiphy,
+ struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_ADD);
}
-int iwm_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_pmksa *pmksa)
+static int iwm_cfg80211_del_pmksa(struct wiphy *wiphy,
+ struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_DEL);
}
-int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
+static int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy,
+ struct net_device *netdev)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
struct cfg80211_pmksa pmksa;
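Several hunks in this patch (iwm_cfg80211_inform_bss above, and the debugfs, hal.c and main.c changes below) drop list_for_each_entry_safe() in favour of plain list_for_each_entry() where the loop either never unlinks the entry it is standing on or returns immediately after unlinking it. A small userspace sketch of why the "safe" form only matters when you unlink-and-continue, using a hand-rolled singly linked list rather than the kernel's list.h (node/push are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

static struct node *push(struct node *head, int val)
{
	struct node *n = malloc(sizeof(*n));
	n->val = val;
	n->next = head;
	return n;
}

int main(void)
{
	struct node *head = NULL, *cur, *next;

	for (int i = 0; i < 5; i++)
		head = push(head, i);

	/* Read-only walk: no need to remember the next pointer. */
	for (cur = head; cur; cur = cur->next)
		printf("%d ", cur->val);
	printf("\n");

	/* Destructive walk: "cur" is freed inside the loop, so the next
	 * pointer must be saved first -- the analogue of the _safe iterator. */
	for (cur = head; cur; cur = next) {
		next = cur->next;
		free(cur);
	}
	return 0;
}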
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 42df726..330c7d9 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -507,7 +507,7 @@ static int iwm_target_read(struct iwm_priv *iwm, __le32 address,
return ret;
}
- /* When succeding, the send_target routine returns the seq number */
+ /* When succeeding, the send_target routine returns the seq number */
seq_num = ret;
ret = wait_event_interruptible_timeout(iwm->nonwifi_queue,
@@ -782,10 +782,9 @@ int iwm_send_mlme_profile(struct iwm_priv *iwm)
return 0;
}
-int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
+int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
{
struct iwm_umac_invalidate_profile invalid;
- int ret;
invalid.hdr.oid = UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE;
invalid.hdr.buf_size =
@@ -794,7 +793,14 @@ int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
invalid.reason = WLAN_REASON_UNSPECIFIED;
- ret = iwm_send_wifi_if_cmd(iwm, &invalid, sizeof(invalid), 1);
+ return iwm_send_wifi_if_cmd(iwm, &invalid, sizeof(invalid), 1);
+}
+
+int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
+{
+ int ret;
+
+ ret = __iwm_invalidate_mlme_profile(iwm);
if (ret)
return ret;
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index 3dfd9f0..7e16bcf 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -488,6 +488,7 @@ int iwm_umac_set_config_var(struct iwm_priv *iwm, u16 key,
void *payload, u16 payload_size);
int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags);
int iwm_send_mlme_profile(struct iwm_priv *iwm);
+int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
int iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id);
int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx);
diff --git a/drivers/net/wireless/iwmc3200wifi/debug.h b/drivers/net/wireless/iwmc3200wifi/debug.h
index e35c9b6..a0c13a4 100644
--- a/drivers/net/wireless/iwmc3200wifi/debug.h
+++ b/drivers/net/wireless/iwmc3200wifi/debug.h
@@ -113,13 +113,10 @@ struct iwm_debugfs {
};
#ifdef CONFIG_IWM_DEBUG
-int iwm_debugfs_init(struct iwm_priv *iwm);
+void iwm_debugfs_init(struct iwm_priv *iwm);
void iwm_debugfs_exit(struct iwm_priv *iwm);
#else
-static inline int iwm_debugfs_init(struct iwm_priv *iwm)
-{
- return 0;
-}
+static inline void iwm_debugfs_init(struct iwm_priv *iwm) {}
static inline void iwm_debugfs_exit(struct iwm_priv *iwm) {}
#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
index cbb81be..53b0b77 100644
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
@@ -48,12 +48,11 @@ static struct {
#define add_dbg_module(dbg, name, id, initlevel) \
do { \
- struct dentry *d; \
dbg.dbg_module[id] = (initlevel); \
- d = debugfs_create_x8(name, 0600, dbg.dbgdir, \
- &(dbg.dbg_module[id])); \
- if (!IS_ERR(d)) \
- dbg.dbg_module_dentries[id] = d; \
+ dbg.dbg_module_dentries[id] = \
+ debugfs_create_x8(name, 0600, \
+ dbg.dbgdir, \
+ &(dbg.dbg_module[id])); \
} while (0)
static int iwm_debugfs_u32_read(void *data, u64 *val)
@@ -266,7 +265,7 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
size_t count, loff_t *ppos)
{
struct iwm_priv *iwm = filp->private_data;
- struct iwm_rx_ticket_node *ticket, *next;
+ struct iwm_rx_ticket_node *ticket;
char *buf;
int buf_len = 4096, i;
size_t len = 0;
@@ -281,7 +280,8 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
if (!buf)
return -ENOMEM;
- list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
+ spin_lock(&iwm->ticket_lock);
+ list_for_each_entry(ticket, &iwm->rx_tickets, node) {
len += snprintf(buf + len, buf_len - len, "Ticket #%d\n",
ticket->ticket->id);
len += snprintf(buf + len, buf_len - len, "\taction: 0x%x\n",
@@ -289,14 +289,17 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
len += snprintf(buf + len, buf_len - len, "\tflags: 0x%x\n",
ticket->ticket->flags);
}
+ spin_unlock(&iwm->ticket_lock);
for (i = 0; i < IWM_RX_ID_HASH; i++) {
- struct iwm_rx_packet *packet, *nxt;
+ struct iwm_rx_packet *packet;
struct list_head *pkt_list = &iwm->rx_packets[i];
+
if (!list_empty(pkt_list)) {
len += snprintf(buf + len, buf_len - len,
"Packet hash #%d\n", i);
- list_for_each_entry_safe(packet, nxt, pkt_list, node) {
+ spin_lock(&iwm->packet_lock[i]);
+ list_for_each_entry(packet, pkt_list, node) {
len += snprintf(buf + len, buf_len - len,
"\tPacket id: %d\n",
packet->id);
@@ -304,6 +307,7 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
"\tPacket length: %lu\n",
packet->pkt_size);
}
+ spin_unlock(&iwm->packet_lock[i]);
}
}
@@ -418,89 +422,29 @@ static const struct file_operations iwm_debugfs_fw_err_fops = {
.read = iwm_debugfs_fw_err_read,
};
-int iwm_debugfs_init(struct iwm_priv *iwm)
+void iwm_debugfs_init(struct iwm_priv *iwm)
{
- int i, result;
- char devdir[16];
+ int i;
iwm->dbg.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- result = PTR_ERR(iwm->dbg.rootdir);
- if (!result || IS_ERR(iwm->dbg.rootdir)) {
- if (result == -ENODEV) {
- IWM_ERR(iwm, "DebugFS (CONFIG_DEBUG_FS) not "
- "enabled in kernel config\n");
- result = 0; /* No debugfs support */
- }
- IWM_ERR(iwm, "Couldn't create rootdir: %d\n", result);
- goto error;
- }
-
- snprintf(devdir, sizeof(devdir), "%s", wiphy_name(iwm_to_wiphy(iwm)));
-
- iwm->dbg.devdir = debugfs_create_dir(devdir, iwm->dbg.rootdir);
- result = PTR_ERR(iwm->dbg.devdir);
- if (IS_ERR(iwm->dbg.devdir) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create devdir: %d\n", result);
- goto error;
- }
-
+ iwm->dbg.devdir = debugfs_create_dir(wiphy_name(iwm_to_wiphy(iwm)),
+ iwm->dbg.rootdir);
iwm->dbg.dbgdir = debugfs_create_dir("debug", iwm->dbg.devdir);
- result = PTR_ERR(iwm->dbg.dbgdir);
- if (IS_ERR(iwm->dbg.dbgdir) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create dbgdir: %d\n", result);
- goto error;
- }
-
iwm->dbg.rxdir = debugfs_create_dir("rx", iwm->dbg.devdir);
- result = PTR_ERR(iwm->dbg.rxdir);
- if (IS_ERR(iwm->dbg.rxdir) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create rx dir: %d\n", result);
- goto error;
- }
-
iwm->dbg.txdir = debugfs_create_dir("tx", iwm->dbg.devdir);
- result = PTR_ERR(iwm->dbg.txdir);
- if (IS_ERR(iwm->dbg.txdir) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create tx dir: %d\n", result);
- goto error;
- }
-
iwm->dbg.busdir = debugfs_create_dir("bus", iwm->dbg.devdir);
- result = PTR_ERR(iwm->dbg.busdir);
- if (IS_ERR(iwm->dbg.busdir) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create bus dir: %d\n", result);
- goto error;
- }
-
- if (iwm->bus_ops->debugfs_init) {
- result = iwm->bus_ops->debugfs_init(iwm, iwm->dbg.busdir);
- if (result < 0) {
- IWM_ERR(iwm, "Couldn't create bus entry: %d\n", result);
- goto error;
- }
- }
-
+ if (iwm->bus_ops->debugfs_init)
+ iwm->bus_ops->debugfs_init(iwm, iwm->dbg.busdir);
iwm->dbg.dbg_level = IWM_DL_NONE;
iwm->dbg.dbg_level_dentry =
debugfs_create_file("level", 0200, iwm->dbg.dbgdir, iwm,
&fops_iwm_dbg_level);
- result = PTR_ERR(iwm->dbg.dbg_level_dentry);
- if (IS_ERR(iwm->dbg.dbg_level_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create dbg_level: %d\n", result);
- goto error;
- }
-
iwm->dbg.dbg_modules = IWM_DM_DEFAULT;
iwm->dbg.dbg_modules_dentry =
debugfs_create_file("modules", 0200, iwm->dbg.dbgdir, iwm,
&fops_iwm_dbg_modules);
- result = PTR_ERR(iwm->dbg.dbg_modules_dentry);
- if (IS_ERR(iwm->dbg.dbg_modules_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create dbg_modules: %d\n", result);
- goto error;
- }
for (i = 0; i < __IWM_DM_NR; i++)
add_dbg_module(iwm->dbg, iwm_debug_module[i].name,
@@ -509,44 +453,15 @@ int iwm_debugfs_init(struct iwm_priv *iwm)
iwm->dbg.txq_dentry = debugfs_create_file("queues", 0200,
iwm->dbg.txdir, iwm,
&iwm_debugfs_txq_fops);
- result = PTR_ERR(iwm->dbg.txq_dentry);
- if (IS_ERR(iwm->dbg.txq_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create tx queue: %d\n", result);
- goto error;
- }
-
iwm->dbg.tx_credit_dentry = debugfs_create_file("credits", 0200,
iwm->dbg.txdir, iwm,
&iwm_debugfs_tx_credit_fops);
- result = PTR_ERR(iwm->dbg.tx_credit_dentry);
- if (IS_ERR(iwm->dbg.tx_credit_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create tx credit: %d\n", result);
- goto error;
- }
-
iwm->dbg.rx_ticket_dentry = debugfs_create_file("tickets", 0200,
iwm->dbg.rxdir, iwm,
&iwm_debugfs_rx_ticket_fops);
- result = PTR_ERR(iwm->dbg.rx_ticket_dentry);
- if (IS_ERR(iwm->dbg.rx_ticket_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create rx ticket: %d\n", result);
- goto error;
- }
-
iwm->dbg.fw_err_dentry = debugfs_create_file("last_fw_err", 0200,
iwm->dbg.dbgdir, iwm,
&iwm_debugfs_fw_err_fops);
- result = PTR_ERR(iwm->dbg.fw_err_dentry);
- if (IS_ERR(iwm->dbg.fw_err_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create last FW err: %d\n", result);
- goto error;
- }
-
-
- return 0;
-
- error:
- return result;
}
void iwm_debugfs_exit(struct iwm_priv *iwm)
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c
index 229de99..9531b18 100644
--- a/drivers/net/wireless/iwmc3200wifi/hal.c
+++ b/drivers/net/wireless/iwmc3200wifi/hal.c
@@ -105,6 +105,7 @@
#include "hal.h"
#include "umac.h"
#include "debug.h"
+#include "trace.h"
static int iwm_nonwifi_cmd_init(struct iwm_priv *iwm,
struct iwm_nonwifi_cmd *cmd,
@@ -207,9 +208,9 @@ void iwm_cmd_flush(struct iwm_priv *iwm)
struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num)
{
- struct iwm_wifi_cmd *cmd, *next;
+ struct iwm_wifi_cmd *cmd;
- list_for_each_entry_safe(cmd, next, &iwm->wifi_pending_cmd, pending)
+ list_for_each_entry(cmd, &iwm->wifi_pending_cmd, pending)
if (cmd->seq_num == seq_num) {
list_del(&cmd->pending);
return cmd;
@@ -218,12 +219,12 @@ struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num)
return NULL;
}
-struct iwm_nonwifi_cmd *
-iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm, u8 seq_num, u8 cmd_opcode)
+struct iwm_nonwifi_cmd *iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm,
+ u8 seq_num, u8 cmd_opcode)
{
- struct iwm_nonwifi_cmd *cmd, *next;
+ struct iwm_nonwifi_cmd *cmd;
- list_for_each_entry_safe(cmd, next, &iwm->nonwifi_pending_cmd, pending)
+ list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
if ((cmd->seq_num == seq_num) &&
(cmd->udma_cmd.opcode == cmd_opcode) &&
(cmd->resp_received)) {
@@ -277,6 +278,7 @@ static int iwm_send_udma_nonwifi_cmd(struct iwm_priv *iwm,
udma_cmd->handle_by_hw, cmd->seq_num, udma_cmd->addr,
udma_cmd->op1_sz, udma_cmd->op2);
+ trace_iwm_tx_nonwifi_cmd(iwm, udma_hdr);
return iwm_bus_send_chunk(iwm, buf->start, buf->len);
}
@@ -363,6 +365,7 @@ static int iwm_send_udma_wifi_cmd(struct iwm_priv *iwm,
return ret;
}
+ trace_iwm_tx_wifi_cmd(iwm, umac_hdr);
return iwm_bus_send_chunk(iwm, buf->start, buf->len);
}
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.h b/drivers/net/wireless/iwmc3200wifi/hal.h
index 0adfdc8..c20936d 100644
--- a/drivers/net/wireless/iwmc3200wifi/hal.h
+++ b/drivers/net/wireless/iwmc3200wifi/hal.h
@@ -75,7 +75,8 @@ do { \
/* UDMA IN OP CODE -- cmd bits [3:0] */
-#define UDMA_IN_OPCODE_MASK 0xF
+#define UDMA_HDI_IN_NW_CMD_OPCODE_POS 0
+#define UDMA_HDI_IN_NW_CMD_OPCODE_SEED 0xF
#define UDMA_IN_OPCODE_GENERAL_RESP 0x0
#define UDMA_IN_OPCODE_READ_RESP 0x1
@@ -130,7 +131,7 @@ do { \
#define IWM_MAX_WIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \
IWM_MAX_WIFI_HEADERS_SIZE)
-#define IWM_HAL_CONCATENATE_BUF_SIZE 8192
+#define IWM_HAL_CONCATENATE_BUF_SIZE (32 * 1024)
struct iwm_wifi_cmd_buff {
u16 len;
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 79ffa3b..13266c3 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -48,6 +48,7 @@
#include "umac.h"
#include "lmac.h"
#include "eeprom.h"
+#include "trace.h"
#define IWM_COPYRIGHT "Copyright(c) 2009 Intel Corporation"
#define IWM_AUTHOR "<ilw@linux.intel.com>"
@@ -268,7 +269,9 @@ struct iwm_priv {
struct sk_buff_head rx_list;
struct list_head rx_tickets;
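+	/* Hypothetical clarifying comment: ticket_lock protects the rx_tickets list. */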
+ spinlock_t ticket_lock;
struct list_head rx_packets[IWM_RX_ID_HASH];
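+	/* Hypothetical clarifying comment: one lock per rx_packets hash bucket. */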
+ spinlock_t packet_lock[IWM_RX_ID_HASH];
struct workqueue_struct *rx_wq;
struct work_struct rx_worker;
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 23856d3..3620027 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -277,8 +277,11 @@ int iwm_priv_init(struct iwm_priv *iwm)
skb_queue_head_init(&iwm->rx_list);
INIT_LIST_HEAD(&iwm->rx_tickets);
- for (i = 0; i < IWM_RX_ID_HASH; i++)
+ spin_lock_init(&iwm->ticket_lock);
+ for (i = 0; i < IWM_RX_ID_HASH; i++) {
INIT_LIST_HEAD(&iwm->rx_packets[i]);
+ spin_lock_init(&iwm->packet_lock[i]);
+ }
INIT_WORK(&iwm->rx_worker, iwm_rx_worker);
@@ -424,9 +427,9 @@ int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd,
static struct iwm_notif *iwm_notif_find(struct iwm_priv *iwm, u32 cmd,
u8 source)
{
- struct iwm_notif *notif, *next;
+ struct iwm_notif *notif;
- list_for_each_entry_safe(notif, next, &iwm->pending_notif, pending) {
+ list_for_each_entry(notif, &iwm->pending_notif, pending) {
if ((notif->cmd_id == cmd) && (notif->src == source)) {
list_del(&notif->pending);
return notif;
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 3257d4f..e1184de 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -343,15 +343,17 @@ static void iwm_rx_ticket_node_free(struct iwm_rx_ticket_node *ticket_node)
static struct iwm_rx_packet *iwm_rx_packet_get(struct iwm_priv *iwm, u16 id)
{
u8 id_hash = IWM_RX_ID_GET_HASH(id);
- struct list_head *packet_list;
- struct iwm_rx_packet *packet, *next;
-
- packet_list = &iwm->rx_packets[id_hash];
+ struct iwm_rx_packet *packet;
- list_for_each_entry_safe(packet, next, packet_list, node)
- if (packet->id == id)
+ spin_lock(&iwm->packet_lock[id_hash]);
+ list_for_each_entry(packet, &iwm->rx_packets[id_hash], node)
+ if (packet->id == id) {
+ list_del(&packet->node);
+ spin_unlock(&iwm->packet_lock[id_hash]);
return packet;
+ }
+ spin_unlock(&iwm->packet_lock[id_hash]);
return NULL;
}
@@ -389,18 +391,22 @@ void iwm_rx_free(struct iwm_priv *iwm)
struct iwm_rx_packet *packet, *np;
int i;
+ spin_lock(&iwm->ticket_lock);
list_for_each_entry_safe(ticket, nt, &iwm->rx_tickets, node) {
list_del(&ticket->node);
iwm_rx_ticket_node_free(ticket);
}
+ spin_unlock(&iwm->ticket_lock);
for (i = 0; i < IWM_RX_ID_HASH; i++) {
+ spin_lock(&iwm->packet_lock[i]);
list_for_each_entry_safe(packet, np, &iwm->rx_packets[i],
node) {
list_del(&packet->node);
kfree_skb(packet->skb);
kfree(packet);
}
+ spin_unlock(&iwm->packet_lock[i]);
}
}
@@ -425,10 +431,13 @@ static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
return PTR_ERR(ticket_node);
IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n",
- ticket->action == IWM_RX_TICKET_RELEASE ?
+ __le16_to_cpu(ticket->action) ==
+ IWM_RX_TICKET_RELEASE ?
"RELEASE" : "DROP",
ticket->id);
+ spin_lock(&iwm->ticket_lock);
list_add_tail(&ticket_node->node, &iwm->rx_tickets);
+ spin_unlock(&iwm->ticket_lock);
/*
* We received an Rx ticket, most likely there's
@@ -461,6 +470,7 @@ static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
struct iwm_rx_packet *packet;
u16 id, buf_offset;
u32 packet_size;
+ u8 id_hash;
IWM_DBG_RX(iwm, DBG, "\n");
@@ -478,7 +488,10 @@ static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
if (IS_ERR(packet))
return PTR_ERR(packet);
- list_add_tail(&packet->node, &iwm->rx_packets[IWM_RX_ID_GET_HASH(id)]);
+ id_hash = IWM_RX_ID_GET_HASH(id);
+ spin_lock(&iwm->packet_lock[id_hash]);
+ list_add_tail(&packet->node, &iwm->rx_packets[id_hash]);
+ spin_unlock(&iwm->packet_lock[id_hash]);
/* We might (unlikely) have received the packet _after_ the ticket */
queue_work(iwm->rx_wq, &iwm->rx_worker);
@@ -519,6 +532,8 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
unsigned long buf_size,
struct iwm_wifi_cmd *cmd)
{
+ struct wiphy *wiphy = iwm_to_wiphy(iwm);
+ struct ieee80211_channel *chan;
struct iwm_umac_notif_assoc_complete *complete =
(struct iwm_umac_notif_assoc_complete *)buf;
@@ -527,6 +542,18 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
switch (le32_to_cpu(complete->status)) {
case UMAC_ASSOC_COMPLETE_SUCCESS:
+ chan = ieee80211_get_channel(wiphy,
+ ieee80211_channel_to_frequency(complete->channel));
+ if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
+			/* Associated on a disallowed channel, disassociate. */
+ __iwm_invalidate_mlme_profile(iwm);
+			IWM_WARN(iwm, "Couldn't associate with %pM because "
+				 "channel %d is disabled. Check your local "
+				 "regulatory settings.\n",
+ complete->bssid, complete->channel);
+ goto failure;
+ }
+
set_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
memcpy(iwm->bssid, complete->bssid, ETH_ALEN);
iwm->channel = complete->channel;
@@ -563,6 +590,7 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
GFP_KERNEL);
break;
case UMAC_ASSOC_COMPLETE_FAILURE:
+ failure:
clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
memset(iwm->bssid, 0, ETH_ALEN);
iwm->channel = 0;
@@ -757,7 +785,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
(struct iwm_umac_notif_bss_info *)buf;
struct ieee80211_channel *channel;
struct ieee80211_supported_band *band;
- struct iwm_bss_info *bss, *next;
+ struct iwm_bss_info *bss;
s32 signal;
int freq;
u16 frame_len = le16_to_cpu(umac_bss->frame_len);
@@ -776,7 +804,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
IWM_DBG_MLME(iwm, DBG, "\tRSSI: %d\n", umac_bss->rssi);
IWM_DBG_MLME(iwm, DBG, "\tFrame Length: %d\n", frame_len);
- list_for_each_entry_safe(bss, next, &iwm->bss_list, node)
+ list_for_each_entry(bss, &iwm->bss_list, node)
if (bss->bss->table_idx == umac_bss->table_idx)
break;
@@ -843,16 +871,15 @@ static int iwm_mlme_remove_bss(struct iwm_priv *iwm, u8 *buf,
int i;
for (i = 0; i < le32_to_cpu(bss_rm->count); i++) {
- table_idx = (le16_to_cpu(bss_rm->entries[i])
- & IWM_BSS_REMOVE_INDEX_MSK);
+ table_idx = le16_to_cpu(bss_rm->entries[i]) &
+ IWM_BSS_REMOVE_INDEX_MSK;
list_for_each_entry_safe(bss, next, &iwm->bss_list, node)
if (bss->bss->table_idx == cpu_to_le16(table_idx)) {
struct ieee80211_mgmt *mgmt;
mgmt = (struct ieee80211_mgmt *)
(bss->bss->frame_buf);
- IWM_DBG_MLME(iwm, ERR,
- "BSS removed: %pM\n",
+ IWM_DBG_MLME(iwm, ERR, "BSS removed: %pM\n",
mgmt->bssid);
list_del(&bss->node);
kfree(bss->bss);
@@ -1224,18 +1251,24 @@ static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf,
u8 source, cmd_id;
u16 seq_num;
u32 count;
- u8 resp;
wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
cmd_id = wifi_hdr->sw_hdr.cmd.cmd;
-
source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
if (source >= IWM_SRC_NUM) {
IWM_CRIT(iwm, "invalid source %d\n", source);
return -EINVAL;
}
- count = (GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT));
+ if (cmd_id == REPLY_RX_MPDU_CMD)
+ trace_iwm_rx_packet(iwm, buf, buf_size);
+ else if ((cmd_id == UMAC_NOTIFY_OPCODE_RX_TICKET) &&
+ (source == UMAC_HDI_IN_SOURCE_FW))
+ trace_iwm_rx_ticket(iwm, buf, buf_size);
+ else
+ trace_iwm_rx_wifi_cmd(iwm, wifi_hdr);
+
+ count = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
count += sizeof(struct iwm_umac_wifi_in_hdr) -
sizeof(struct iwm_dev_cmd_hdr);
if (count > buf_size) {
@@ -1243,8 +1276,6 @@ static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf,
return -EINVAL;
}
- resp = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_STATUS);
-
seq_num = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);
IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x, seqnum: %d\n",
@@ -1317,8 +1348,9 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
{
u8 seq_num;
struct iwm_udma_in_hdr *hdr = (struct iwm_udma_in_hdr *)buf;
- struct iwm_nonwifi_cmd *cmd, *next;
+ struct iwm_nonwifi_cmd *cmd;
+ trace_iwm_rx_nonwifi_cmd(iwm, buf, buf_size);
seq_num = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);
/*
@@ -1329,7 +1361,7 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
* That means we only support synchronised non wifi command response
* schemes.
*/
- list_for_each_entry_safe(cmd, next, &iwm->nonwifi_pending_cmd, pending)
+ list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
if (cmd->seq_num == seq_num) {
cmd->resp_received = 1;
cmd->buf.len = buf_size;
@@ -1648,6 +1680,7 @@ void iwm_rx_worker(struct work_struct *work)
* We stop whenever a ticket is missing its packet, as we're
* supposed to send the packets in order.
*/
+ spin_lock(&iwm->ticket_lock);
list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
struct iwm_rx_packet *packet =
iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id));
@@ -1656,12 +1689,12 @@ void iwm_rx_worker(struct work_struct *work)
IWM_DBG_RX(iwm, DBG, "Skip rx_work: Wait for ticket %d "
"to be handled first\n",
le16_to_cpu(ticket->ticket->id));
- return;
+ break;
}
list_del(&ticket->node);
- list_del(&packet->node);
iwm_rx_process_packet(iwm, packet, ticket);
}
+ spin_unlock(&iwm->ticket_lock);
}
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index 1eafd6d..edcb523 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -366,21 +366,13 @@ static const struct file_operations iwm_debugfs_sdio_fops = {
.read = iwm_debugfs_sdio_read,
};
-static int if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir)
+static void if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir)
{
- int result;
struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
hw->cccr_dentry = debugfs_create_file("cccr", 0200,
parent_dir, iwm,
&iwm_debugfs_sdio_fops);
- result = PTR_ERR(hw->cccr_dentry);
- if (IS_ERR(hw->cccr_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create CCCR entry: %d\n", result);
- return result;
- }
-
- return 0;
}
static void if_sdio_debugfs_exit(struct iwm_priv *iwm)
@@ -440,11 +432,7 @@ static int iwm_sdio_probe(struct sdio_func *func,
hw = iwm_private(iwm);
hw->iwm = iwm;
- ret = iwm_debugfs_init(iwm);
- if (ret < 0) {
- IWM_ERR(iwm, "Debugfs registration failed\n");
- goto if_free;
- }
+ iwm_debugfs_init(iwm);
sdio_set_drvdata(func, hw);
@@ -473,7 +461,6 @@ static int iwm_sdio_probe(struct sdio_func *func,
destroy_workqueue(hw->isr_wq);
debugfs_exit:
iwm_debugfs_exit(iwm);
- if_free:
iwm_if_free(iwm);
return ret;
}
@@ -492,8 +479,6 @@ static void iwm_sdio_remove(struct sdio_func *func)
sdio_set_drvdata(func, NULL);
dev_info(dev, "IWM SDIO remove\n");
-
- return;
}
static const struct sdio_device_id iwm_sdio_ids[] = {
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.c b/drivers/net/wireless/iwmc3200wifi/trace.c
new file mode 100644
index 0000000..904d36f
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/trace.c
@@ -0,0 +1,3 @@
+#include "iwm.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.h b/drivers/net/wireless/iwmc3200wifi/trace.h
new file mode 100644
index 0000000..abb4805
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/trace.h
@@ -0,0 +1,283 @@
+#if !defined(__IWM_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWM_TRACE_H__
+
+#include <linux/tracepoint.h>
+
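+/*
+ * Descriptive note (added for clarity): without CONFIG_IWM_TRACING, every
+ * TRACE_EVENT() below collapses into an empty inline stub, so callers can
+ * use the trace_iwm_*() helpers unconditionally.
+ */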
+#if !defined(CONFIG_IWM_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwm
+
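+/* Descriptive note (added for clarity): fields shared by all iwm events;
+ * recording the net device name lets traces from several interfaces be
+ * told apart. */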
+#define IWM_ENTRY __array(char, ndev_name, 16)
+#define IWM_ASSIGN strlcpy(__entry->ndev_name, iwm_to_ndev(iwm)->name, 16)
+#define IWM_PR_FMT "%s"
+#define IWM_PR_ARG __entry->ndev_name
+
+TRACE_EVENT(iwm_tx_nonwifi_cmd,
+ TP_PROTO(struct iwm_priv *iwm, struct iwm_udma_out_nonwifi_hdr *hdr),
+
+ TP_ARGS(iwm, hdr),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, opcode)
+ __field(u8, resp)
+ __field(u8, eot)
+ __field(u8, hw)
+ __field(u16, seq)
+ __field(u32, addr)
+ __field(u32, op1)
+ __field(u32, op2)
+ ),
+
+ TP_fast_assign(
+ IWM_ASSIGN;
+ __entry->opcode = GET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE);
+ __entry->resp = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_RESP);
+ __entry->eot = GET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT);
+ __entry->hw = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW);
+ __entry->seq = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM);
+ __entry->addr = le32_to_cpu(hdr->addr);
+ __entry->op1 = le32_to_cpu(hdr->op1_sz);
+ __entry->op2 = le32_to_cpu(hdr->op2);
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Tx TARGET CMD: opcode 0x%x, resp %d, eot %d, "
+ "hw %d, seq 0x%x, addr 0x%x, op1 0x%x, op2 0x%x",
+ IWM_PR_ARG, __entry->opcode, __entry->resp, __entry->eot,
+ __entry->hw, __entry->seq, __entry->addr, __entry->op1,
+ __entry->op2
+ )
+);
+
+TRACE_EVENT(iwm_tx_wifi_cmd,
+ TP_PROTO(struct iwm_priv *iwm, struct iwm_umac_wifi_out_hdr *hdr),
+
+ TP_ARGS(iwm, hdr),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, opcode)
+ __field(u8, lmac)
+ __field(u8, resp)
+ __field(u8, eot)
+ __field(u8, ra_tid)
+ __field(u8, credit_group)
+ __field(u8, color)
+ __field(u16, seq)
+ ),
+
+ TP_fast_assign(
+ IWM_ASSIGN;
+ __entry->opcode = hdr->sw_hdr.cmd.cmd;
+ __entry->lmac = 0;
+ __entry->seq = __le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
+ __entry->resp = GET_VAL8(hdr->sw_hdr.cmd.flags, UMAC_DEV_CMD_FLAGS_RESP_REQ);
+ __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
+ __entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
+ __entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
+ __entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
+ if (__entry->opcode == UMAC_CMD_OPCODE_WIFI_PASS_THROUGH ||
+ __entry->opcode == UMAC_CMD_OPCODE_WIFI_IF_WRAPPER) {
+ __entry->lmac = 1;
+ __entry->opcode = ((struct iwm_lmac_hdr *)(hdr + 1))->id;
+ }
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Tx %cMAC CMD: opcode 0x%x, resp %d, eot %d, "
+ "seq 0x%x, sta_color 0x%x, ra_tid 0x%x, credit_group 0x%x",
+ IWM_PR_ARG, __entry->lmac ? 'L' : 'U', __entry->opcode,
+ __entry->resp, __entry->eot, __entry->seq, __entry->color,
+ __entry->ra_tid, __entry->credit_group
+ )
+);
+
+TRACE_EVENT(iwm_tx_packets,
+ TP_PROTO(struct iwm_priv *iwm, u8 *buf, int len),
+
+ TP_ARGS(iwm, buf, len),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, eot)
+ __field(u8, ra_tid)
+ __field(u8, credit_group)
+ __field(u8, color)
+ __field(u16, seq)
+ __field(u8, npkt)
+ __field(u32, bytes)
+ ),
+
+ TP_fast_assign(
+ struct iwm_umac_wifi_out_hdr *hdr =
+ (struct iwm_umac_wifi_out_hdr *)buf;
+
+ IWM_ASSIGN;
+ __entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
+ __entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
+ __entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
+ __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
+ __entry->seq = __le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
+ __entry->npkt = 1;
+ __entry->bytes = len;
+
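+		/* Descriptive note (added for clarity): a buffer without EOT
+		 * is a concatenated one; walk the embedded headers to count
+		 * the packets it carries. */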
+ if (!__entry->eot) {
+ int count;
+ u8 *ptr = buf;
+
+ __entry->npkt = 0;
+ while (ptr < buf + len) {
+ count = GET_VAL32(hdr->sw_hdr.meta_data,
+ UMAC_FW_CMD_BYTE_COUNT);
+ ptr += ALIGN(sizeof(*hdr) + count, 16);
+ hdr = (struct iwm_umac_wifi_out_hdr *)ptr;
+ __entry->npkt++;
+ }
+ }
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Tx %spacket: eot %d, seq 0x%x, sta_color 0x%x, "
+		"ra_tid 0x%x, credit_group 0x%x, embedded_packets %d, %d bytes",
+ IWM_PR_ARG, !__entry->eot ? "concatenated " : "",
+ __entry->eot, __entry->seq, __entry->color, __entry->ra_tid,
+ __entry->credit_group, __entry->npkt, __entry->bytes
+ )
+);
+
+TRACE_EVENT(iwm_rx_nonwifi_cmd,
+ TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
+
+ TP_ARGS(iwm, buf, len),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, opcode)
+ __field(u16, seq)
+ __field(u32, len)
+ ),
+
+ TP_fast_assign(
+ struct iwm_udma_in_hdr *hdr = buf;
+
+ IWM_ASSIGN;
+ __entry->opcode = GET_VAL32(hdr->cmd, UDMA_HDI_IN_NW_CMD_OPCODE);
+ __entry->seq = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);
+ __entry->len = len;
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Rx TARGET RESP: opcode 0x%x, seq 0x%x, len 0x%x",
+ IWM_PR_ARG, __entry->opcode, __entry->seq, __entry->len
+ )
+);
+
+TRACE_EVENT(iwm_rx_wifi_cmd,
+ TP_PROTO(struct iwm_priv *iwm, struct iwm_umac_wifi_in_hdr *hdr),
+
+ TP_ARGS(iwm, hdr),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, cmd)
+ __field(u8, source)
+ __field(u16, seq)
+ __field(u32, count)
+ ),
+
+ TP_fast_assign(
+ IWM_ASSIGN;
+ __entry->cmd = hdr->sw_hdr.cmd.cmd;
+ __entry->source = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
+ __entry->count = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
+ __entry->seq = le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Rx %s RESP: cmd 0x%x, seq 0x%x, count 0x%x",
+ IWM_PR_ARG, __entry->source == UMAC_HDI_IN_SOURCE_FHRX ? "LMAC" :
+ __entry->source == UMAC_HDI_IN_SOURCE_FW ? "UMAC" : "UDMA",
+ __entry->cmd, __entry->seq, __entry->count
+ )
+);
+
+#define iwm_ticket_action_symbol \
+ { IWM_RX_TICKET_DROP, "DROP" }, \
+ { IWM_RX_TICKET_RELEASE, "RELEASE" }, \
+ { IWM_RX_TICKET_SNIFFER, "SNIFFER" }, \
+ { IWM_RX_TICKET_ENQUEUE, "ENQUEUE" }
+
+TRACE_EVENT(iwm_rx_ticket,
+ TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
+
+ TP_ARGS(iwm, buf, len),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, action)
+ __field(u8, reason)
+ __field(u16, id)
+ __field(u16, flags)
+ ),
+
+ TP_fast_assign(
+ struct iwm_rx_ticket *ticket =
+ ((struct iwm_umac_notif_rx_ticket *)buf)->tickets;
+
+ IWM_ASSIGN;
+ __entry->id = le16_to_cpu(ticket->id);
+ __entry->action = le16_to_cpu(ticket->action);
+ __entry->flags = le16_to_cpu(ticket->flags);
+		__entry->reason = (__entry->flags & IWM_RX_TICKET_DROP_REASON_MSK)
+				  >> IWM_RX_TICKET_DROP_REASON_POS;
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Rx ticket: id 0x%x, action %s, %s 0x%x%s",
+ IWM_PR_ARG, __entry->id,
+ __print_symbolic(__entry->action, iwm_ticket_action_symbol),
+ __entry->reason ? "reason" : "flags",
+ __entry->reason ? __entry->reason : __entry->flags,
+ __entry->flags & IWM_RX_TICKET_AMSDU_MSK ? ", AMSDU frame" : ""
+ )
+);
+
+TRACE_EVENT(iwm_rx_packet,
+ TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
+
+ TP_ARGS(iwm, buf, len),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, source)
+ __field(u16, id)
+ __field(u32, len)
+ ),
+
+ TP_fast_assign(
+ struct iwm_umac_wifi_in_hdr *hdr = buf;
+
+ IWM_ASSIGN;
+ __entry->source = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
+ __entry->id = le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
+ __entry->len = len - sizeof(*hdr);
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Rx %s packet: id 0x%x, %d bytes",
+ IWM_PR_ARG, __entry->source == UMAC_HDI_IN_SOURCE_FHRX ?
+ "LMAC" : "UMAC", __entry->id, __entry->len
+ )
+);
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c
index f6a02f1..3216621 100644
--- a/drivers/net/wireless/iwmc3200wifi/tx.c
+++ b/drivers/net/wireless/iwmc3200wifi/tx.c
@@ -302,8 +302,8 @@ void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
#define IWM_UDMA_HDR_LEN sizeof(struct iwm_umac_wifi_out_hdr)
-static int iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
- int pool_id, u8 *buf)
+static __le16 iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
+ int pool_id, u8 *buf)
{
struct iwm_umac_wifi_out_hdr *hdr = (struct iwm_umac_wifi_out_hdr *)buf;
struct iwm_udma_wifi_cmd udma_cmd;
@@ -347,6 +347,7 @@ static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
/* mark EOP for the last packet */
iwm_udma_wifi_hdr_set_eop(iwm, txq->concat_ptr, 1);
+ trace_iwm_tx_packets(iwm, txq->concat_buf, txq->concat_count);
ret = iwm_bus_send_chunk(iwm, txq->concat_buf, txq->concat_count);
txq->concat_count = 0;
@@ -451,7 +452,6 @@ void iwm_tx_worker(struct work_struct *work)
int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct iwm_priv *iwm = ndev_to_iwm(netdev);
- struct net_device *ndev = iwm_to_ndev(iwm);
struct wireless_dev *wdev = iwm_to_wdev(iwm);
struct iwm_tx_info *tx_info;
struct iwm_tx_queue *txq;
@@ -518,12 +518,12 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
return NETDEV_TX_OK;
drop:
- ndev->stats.tx_dropped++;
+ netdev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
index 7f54a14..0cbba3e 100644
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ b/drivers/net/wireless/iwmc3200wifi/umac.h
@@ -362,7 +362,7 @@ struct iwm_udma_out_wifi_hdr {
#define IWM_RX_TICKET_SPECIAL_SNAP_MSK 0x4
#define IWM_RX_TICKET_AMSDU_MSK 0x8
#define IWM_RX_TICKET_DROP_REASON_POS 4
-#define IWM_RX_TICKET_DROP_REASON_MSK (0x1F << RX_TICKET_FLAGS_DROP_REASON_POS)
+#define IWM_RX_TICKET_DROP_REASON_MSK (0x1F << IWM_RX_TICKET_DROP_REASON_POS)
#define IWM_RX_DROP_NO_DROP 0x0
#define IWM_RX_DROP_BAD_CRC 0x1
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index 12a2ef9..aa06070e 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -32,6 +32,9 @@ u8 lbs_bg_rates[MAX_RATES] =
0x00, 0x00 };
+static int assoc_helper_wep_keys(struct lbs_private *priv,
+ struct assoc_request *assoc_req);
+
/**
* @brief This function finds common rates between rates and card rates.
*
@@ -611,7 +614,7 @@ static int lbs_assoc_post(struct lbs_private *priv,
if (status_code) {
lbs_mac_event_disconnected(priv);
- ret = -1;
+ ret = status_code;
goto done;
}
@@ -814,7 +817,24 @@ static int lbs_try_associate(struct lbs_private *priv,
goto out;
ret = lbs_associate(priv, assoc_req, CMD_802_11_ASSOCIATE);
+	/* If the association fails with the current auth mode, retry
+	 * with the other auth mode.
+	 */
+ if ((priv->authtype_auto) &&
+ (ret == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) &&
+ (assoc_req->secinfo.wep_enabled) &&
+ (priv->connect_status != LBS_CONNECTED)) {
+ if (priv->secinfo.auth_mode == IW_AUTH_ALG_OPEN_SYSTEM)
+ priv->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
+ else
+ priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
+ if (!assoc_helper_wep_keys(priv, assoc_req))
+ ret = lbs_associate(priv, assoc_req,
+ CMD_802_11_ASSOCIATE);
+ }
+ if (ret)
+ ret = -1;
out:
lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
return ret;
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index ce7bec4..9d5d3cc 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -79,6 +79,7 @@ static const u32 cipher_suites[] = {
static int lbs_cfg_set_channel(struct wiphy *wiphy,
+ struct net_device *netdev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index a48ccaf..de2caac 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -75,7 +75,7 @@ static ssize_t lbs_getscantable(struct file *file, char __user *userbuf,
return -ENOMEM;
pos += snprintf(buf+pos, len-pos,
- "# | ch | rssi | bssid | cap | Qual | SSID \n");
+ "# | ch | rssi | bssid | cap | Qual | SSID\n");
mutex_lock(&priv->lock);
list_for_each_entry (iter_bss, &priv->network_list, list) {
@@ -757,15 +757,12 @@ void lbs_debugfs_init(void)
{
if (!lbs_dir)
lbs_dir = debugfs_create_dir("lbs_wireless", NULL);
-
- return;
}
void lbs_debugfs_remove(void)
{
if (lbs_dir)
debugfs_remove(lbs_dir);
- return;
}
void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 6875e14..a54880e 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -134,6 +134,7 @@ struct lbs_private {
u8 wpa_ie_len;
u16 wep_tx_keyidx;
struct enc_key wep_keys[4];
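+	/* Descriptive note (added for clarity): 1 (default) lets association
+	 * retry with the other WEP auth type; cleared once userspace
+	 * explicitly picks one. */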
+ u8 authtype_auto;
/* Wake On LAN */
uint32_t wol_criteria;
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 7d1a3c6..64dd345 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -35,6 +35,8 @@
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/host.h>
#include "host.h"
#include "decl.h"
@@ -313,12 +315,30 @@ out:
return ret;
}
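+/*
+ * Descriptive note (added for clarity): poll the IF_SDIO_STATUS register
+ * until every bit in @condition is set, giving up with -ETIMEDOUT after
+ * roughly one second of 1 ms polls.
+ */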
+static int if_sdio_wait_status(struct if_sdio_card *card, const u8 condition)
+{
+ u8 status;
+ unsigned long timeout;
+ int ret = 0;
+
+ timeout = jiffies + HZ;
+ while (1) {
+ status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
+ if (ret)
+ return ret;
+ if ((status & condition) == condition)
+ break;
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ mdelay(1);
+ }
+ return ret;
+}
+
static int if_sdio_card_to_host(struct if_sdio_card *card)
{
int ret;
- u8 status;
u16 size, type, chunk;
- unsigned long timeout;
lbs_deb_enter(LBS_DEB_SDIO);
@@ -333,19 +353,9 @@ static int if_sdio_card_to_host(struct if_sdio_card *card)
goto out;
}
- timeout = jiffies + HZ;
- while (1) {
- status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
- if (ret)
- goto out;
- if (status & IF_SDIO_IO_RDY)
- break;
- if (time_after(jiffies, timeout)) {
- ret = -ETIMEDOUT;
- goto out;
- }
- mdelay(1);
- }
+ ret = if_sdio_wait_status(card, IF_SDIO_IO_RDY);
+ if (ret)
+ goto out;
/*
* The transfer must be in one transaction or the firmware
@@ -412,8 +422,6 @@ static void if_sdio_host_to_card_worker(struct work_struct *work)
{
struct if_sdio_card *card;
struct if_sdio_packet *packet;
- unsigned long timeout;
- u8 status;
int ret;
unsigned long flags;
@@ -433,25 +441,15 @@ static void if_sdio_host_to_card_worker(struct work_struct *work)
sdio_claim_host(card->func);
- timeout = jiffies + HZ;
- while (1) {
- status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
- if (ret)
- goto release;
- if (status & IF_SDIO_IO_RDY)
- break;
- if (time_after(jiffies, timeout)) {
- ret = -ETIMEDOUT;
- goto release;
- }
- mdelay(1);
+ ret = if_sdio_wait_status(card, IF_SDIO_IO_RDY);
+ if (ret == 0) {
+ ret = sdio_writesb(card->func, card->ioport,
+ packet->buffer, packet->nb);
}
- ret = sdio_writesb(card->func, card->ioport,
- packet->buffer, packet->nb);
if (ret)
- goto release;
-release:
+ lbs_pr_err("error %d sending packet to firmware\n", ret);
+
sdio_release_host(card->func);
kfree(packet);
@@ -464,10 +462,11 @@ release:
/* Firmware */
/********************************************************************/
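+/* Descriptive note (added for clarity): status bits that must be set
+ * before each firmware chunk is sent. */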
+#define FW_DL_READY_STATUS (IF_SDIO_IO_RDY | IF_SDIO_DL_RDY)
+
static int if_sdio_prog_helper(struct if_sdio_card *card)
{
int ret;
- u8 status;
const struct firmware *fw;
unsigned long timeout;
u8 *chunk_buffer;
@@ -499,20 +498,14 @@ static int if_sdio_prog_helper(struct if_sdio_card *card)
size = fw->size;
while (size) {
- timeout = jiffies + HZ;
- while (1) {
- status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
- if (ret)
- goto release;
- if ((status & IF_SDIO_IO_RDY) &&
- (status & IF_SDIO_DL_RDY))
- break;
- if (time_after(jiffies, timeout)) {
- ret = -ETIMEDOUT;
- goto release;
- }
- mdelay(1);
- }
+ ret = if_sdio_wait_status(card, FW_DL_READY_STATUS);
+ if (ret)
+ goto release;
+
+ /* On some platforms (like Davinci) the chip needs more time
+ * between helper blocks.
+ */
+ mdelay(2);
chunk_size = min(size, (size_t)60);
@@ -582,7 +575,6 @@ out:
static int if_sdio_prog_real(struct if_sdio_card *card)
{
int ret;
- u8 status;
const struct firmware *fw;
unsigned long timeout;
u8 *chunk_buffer;
@@ -614,20 +606,9 @@ static int if_sdio_prog_real(struct if_sdio_card *card)
size = fw->size;
while (size) {
- timeout = jiffies + HZ;
- while (1) {
- status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
- if (ret)
- goto release;
- if ((status & IF_SDIO_IO_RDY) &&
- (status & IF_SDIO_DL_RDY))
- break;
- if (time_after(jiffies, timeout)) {
- ret = -ETIMEDOUT;
- goto release;
- }
- mdelay(1);
- }
+ ret = if_sdio_wait_status(card, FW_DL_READY_STATUS);
+ if (ret)
+ goto release;
req_size = sdio_readb(card->func, IF_SDIO_RD_BASE, &ret);
if (ret)
@@ -943,6 +924,7 @@ static int if_sdio_probe(struct sdio_func *func,
int ret, i;
unsigned int model;
struct if_sdio_packet *packet;
+ struct mmc_host *host = func->card->host;
lbs_deb_enter(LBS_DEB_SDIO);
@@ -1023,6 +1005,25 @@ static int if_sdio_probe(struct sdio_func *func,
if (ret)
goto disable;
+ /* For 1-bit transfers to the 8686 model, we need to enable the
+ * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
+ * bit to allow access to non-vendor registers. */
+ if ((card->model == IF_SDIO_MODEL_8686) &&
+ (host->caps & MMC_CAP_SDIO_IRQ) &&
+ (host->ios.bus_width == MMC_BUS_WIDTH_1)) {
+ u8 reg;
+
+ func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+ reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret);
+ if (ret)
+ goto release_int;
+
+ reg |= SDIO_BUS_ECSI;
+ sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret);
+ if (ret)
+ goto release_int;
+ }
+
card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret);
if (ret)
goto release_int;
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index fcea574..f41594c 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -133,8 +133,6 @@ static void if_usb_write_bulk_callback(struct urb *urb)
/* print the failure status number for debug */
lbs_pr_info("URB in failure status: %d\n", urb->status);
}
-
- return;
}
/**
@@ -651,8 +649,6 @@ static void if_usb_receive_fwload(struct urb *urb)
if_usb_submit_rx_urb_fwload(cardp);
kfree(syncfwheader);
-
- return;
}
#define MRVDRV_MIN_PKT_LEN 30
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 5980804..d9b8ee1 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -229,7 +229,7 @@ static void lbs_tx_timeout(struct net_device *dev)
lbs_pr_err("tx watch dog timeout\n");
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
if (priv->currenttxskb)
lbs_send_tx_feedback(priv, 0);
@@ -319,7 +319,7 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
struct net_device *dev, int nr_addrs)
{
int i = nr_addrs;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
int cnt;
if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST))
@@ -327,19 +327,19 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
netif_addr_lock_bh(dev);
cnt = netdev_mc_count(dev);
- netdev_for_each_mc_addr(mc_list, dev) {
- if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) {
+ netdev_for_each_mc_addr(ha, dev) {
+ if (mac_in_list(cmd->maclist, nr_addrs, ha->addr)) {
lbs_deb_net("mcast address %s:%pM skipped\n", dev->name,
- mc_list->dmi_addr);
+ ha->addr);
cnt--;
continue;
}
if (i == MRVDRV_MAX_MULTICAST_LIST_SIZE)
break;
- memcpy(&cmd->maclist[6*i], mc_list->dmi_addr, ETH_ALEN);
+ memcpy(&cmd->maclist[6*i], ha->addr, ETH_ALEN);
lbs_deb_net("mcast address %s:%pM added to filter\n", dev->name,
- mc_list->dmi_addr);
+ ha->addr);
i++;
cnt--;
}
@@ -836,6 +836,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
priv->is_auto_deep_sleep_enabled = 0;
priv->wakeup_dev_required = 0;
init_waitqueue_head(&priv->ds_awake_q);
+ priv->authtype_auto = 1;
mutex_init(&priv->lock);
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 784dae7..a115bfa 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -39,10 +39,10 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
struct sk_buff *skb);
/**
- * @brief This function computes the avgSNR .
+ * @brief This function computes the avgSNR.
*
- * @param priv A pointer to struct lbs_private structure
- * @return avgSNR
+ * @param priv A pointer to struct lbs_private structure
+ * @return avgSNR
*/
static u8 lbs_getavgsnr(struct lbs_private *priv)
{
@@ -57,10 +57,10 @@ static u8 lbs_getavgsnr(struct lbs_private *priv)
}
/**
- * @brief This function computes the AvgNF
+ * @brief This function computes the AvgNF
*
- * @param priv A pointer to struct lbs_private structure
- * @return AvgNF
+ * @param priv A pointer to struct lbs_private structure
+ * @return AvgNF
*/
static u8 lbs_getavgnf(struct lbs_private *priv)
{
@@ -75,11 +75,11 @@ static u8 lbs_getavgnf(struct lbs_private *priv)
}
/**
- * @brief This function save the raw SNR/NF to our internel buffer
+ * @brief This function saves the raw SNR/NF to our internal buffer
*
- * @param priv A pointer to struct lbs_private structure
- * @param prxpd A pointer to rxpd structure of received packet
- * @return n/a
+ * @param priv A pointer to struct lbs_private structure
+ * @param prxpd A pointer to rxpd structure of received packet
+ * @return n/a
*/
static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd)
{
@@ -90,15 +90,14 @@ static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd)
priv->nextSNRNF++;
if (priv->nextSNRNF >= DEFAULT_DATA_AVG_FACTOR)
priv->nextSNRNF = 0;
- return;
}
/**
- * @brief This function computes the RSSI in received packet.
+ * @brief This function computes the RSSI in received packet.
*
- * @param priv A pointer to struct lbs_private structure
- * @param prxpd A pointer to rxpd structure of received packet
- * @return n/a
+ * @param priv A pointer to struct lbs_private structure
+ * @param prxpd A pointer to rxpd structure of received packet
+ * @return n/a
*/
static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd)
{
@@ -135,9 +134,9 @@ static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd)
* @brief This function processes received packet and forwards it
* to kernel/upper layer
*
- * @param priv A pointer to struct lbs_private
- * @param skb A pointer to skb which includes the received packet
- * @return 0 or -1
+ * @param priv A pointer to struct lbs_private
+ * @param skb A pointer to skb which includes the received packet
+ * @return 0 or -1
*/
int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
{
@@ -197,7 +196,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
* before the snap_type.
*/
p_ethhdr = (struct ethhdr *)
- ((u8 *) & p_rx_pkt->eth803_hdr
+ ((u8 *) &p_rx_pkt->eth803_hdr
+ sizeof(p_rx_pkt->eth803_hdr) + sizeof(p_rx_pkt->rfc1042_hdr)
- sizeof(p_rx_pkt->eth803_hdr.dest_addr)
- sizeof(p_rx_pkt->eth803_hdr.src_addr)
@@ -214,7 +213,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
hdrchop = (u8 *)p_ethhdr - (u8 *)p_rx_pd;
} else {
lbs_deb_hex(LBS_DEB_RX, "RX Data: LLC/SNAP",
- (u8 *) & p_rx_pkt->rfc1042_hdr,
+ (u8 *) &p_rx_pkt->rfc1042_hdr,
sizeof(p_rx_pkt->rfc1042_hdr));
/* Chop off the rxpd */
@@ -255,8 +254,8 @@ EXPORT_SYMBOL_GPL(lbs_process_rxed_packet);
* @brief This function converts Tx/Rx rates from the Marvell WLAN format
* (see Table 2 in Section 3.1) to IEEE80211_RADIOTAP_RATE units (500 Kb/s)
*
- * @param rate Input rate
- * @return Output Rate (0 if invalid)
+ * @param rate Input rate
+ * @return Output Rate (0 if invalid)
*/
static u8 convert_mv_rate_to_radiotap(u8 rate)
{
@@ -295,9 +294,9 @@ static u8 convert_mv_rate_to_radiotap(u8 rate)
* @brief This function processes a received 802.11 packet and forwards it
* to kernel/upper layer
*
- * @param priv A pointer to struct lbs_private
- * @param skb A pointer to skb which includes the received packet
- * @return 0 or -1
+ * @param priv A pointer to struct lbs_private
+ * @param skb A pointer to skb which includes the received packet
+ * @return 0 or -1
*/
static int process_rxed_802_11_packet(struct lbs_private *priv,
struct sk_buff *skb)
@@ -314,7 +313,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
p_rx_pkt = (struct rx80211packethdr *) skb->data;
prxpd = &p_rx_pkt->rx_pd;
- // lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min(skb->len, 100));
+ /* lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min(skb->len, 100)); */
if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) {
lbs_deb_rx("rx err: frame received with bad length\n");
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 52d244e..a9bf658 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -147,8 +147,6 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
-
if (priv->monitormode) {
/* Keep the skb to echo it back once Tx feedback is
received from FW */
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 9b55588..f96a960 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -1441,8 +1441,10 @@ static int lbs_set_encode(struct net_device *dev,
set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags);
if (dwrq->flags & IW_ENCODE_RESTRICTED) {
+ priv->authtype_auto = 0;
assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
} else if (dwrq->flags & IW_ENCODE_OPEN) {
+ priv->authtype_auto = 0;
assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
}
@@ -1621,8 +1623,10 @@ static int lbs_set_encodeext(struct net_device *dev,
goto out;
if (dwrq->flags & IW_ENCODE_RESTRICTED) {
+ priv->authtype_auto = 0;
assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
} else if (dwrq->flags & IW_ENCODE_OPEN) {
+ priv->authtype_auto = 0;
assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
}
diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c
index b620daf..8945afd 100644
--- a/drivers/net/wireless/libertas_tf/cmd.c
+++ b/drivers/net/wireless/libertas_tf/cmd.c
@@ -7,6 +7,8 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
#include "libertas_tf.h"
@@ -82,6 +84,8 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
int ret = -1;
u32 i;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
memcpy(cmd.permanentaddr, priv->current_addr, ETH_ALEN);
@@ -104,6 +108,8 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
priv->fwrelease >> 8 & 0xff,
priv->fwrelease & 0xff,
priv->fwcapinfo);
+ lbtf_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n",
+ cmd.hwifversion, cmd.version);
/* Clamp region code to 8-bit since FW spec indicates that it should
* only ever be 8-bit, even though the field size is 16-bit. Some
@@ -118,8 +124,10 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
}
/* if it's unidentified region code, use the default (USA) */
- if (i >= MRVDRV_MAX_REGION_CODE)
+ if (i >= MRVDRV_MAX_REGION_CODE) {
priv->regioncode = 0x10;
+ pr_info("unidentified region code; using the default (USA)\n");
+ }
if (priv->current_addr[0] == 0xff)
memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN);
@@ -128,6 +136,7 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
lbtf_geo_init(priv);
out:
+ lbtf_deb_leave(LBTF_DEB_CMD);
return ret;
}
@@ -141,13 +150,18 @@ out:
*/
int lbtf_set_channel(struct lbtf_private *priv, u8 channel)
{
+ int ret = 0;
struct cmd_ds_802_11_rf_channel cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_SET);
cmd.channel = cpu_to_le16(channel);
- return lbtf_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd);
+ ret = lbtf_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd);
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", ret);
+ return ret;
}
int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon)
@@ -155,20 +169,28 @@ int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon)
struct cmd_ds_802_11_beacon_set cmd;
int size;
- if (beacon->len > MRVL_MAX_BCN_SIZE)
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
+ if (beacon->len > MRVL_MAX_BCN_SIZE) {
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", -1);
return -1;
+ }
size = sizeof(cmd) - sizeof(cmd.beacon) + beacon->len;
cmd.hdr.size = cpu_to_le16(size);
cmd.len = cpu_to_le16(beacon->len);
memcpy(cmd.beacon, (u8 *) beacon->data, beacon->len);
lbtf_cmd_async(priv, CMD_802_11_BEACON_SET, &cmd.hdr, size);
+
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", 0);
return 0;
}
int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable,
- int beacon_int) {
+ int beacon_int)
+{
struct cmd_ds_802_11_beacon_control cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
@@ -176,6 +198,8 @@ int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable,
cmd.beacon_period = cpu_to_le16(beacon_int);
lbtf_cmd_async(priv, CMD_802_11_BEACON_CTRL, &cmd.hdr, sizeof(cmd));
+
+ lbtf_deb_leave(LBTF_DEB_CMD);
return 0;
}
@@ -183,17 +207,28 @@ static void lbtf_queue_cmd(struct lbtf_private *priv,
struct cmd_ctrl_node *cmdnode)
{
unsigned long flags;
+ lbtf_deb_enter(LBTF_DEB_HOST);
- if (!cmdnode)
- return;
+ if (!cmdnode) {
+ lbtf_deb_host("QUEUE_CMD: cmdnode is NULL\n");
+ goto qcmd_done;
+ }
- if (!cmdnode->cmdbuf->size)
- return;
+ if (!cmdnode->cmdbuf->size) {
+ lbtf_deb_host("DNLD_CMD: cmd size is zero\n");
+ goto qcmd_done;
+ }
cmdnode->result = 0;
spin_lock_irqsave(&priv->driver_lock, flags);
list_add_tail(&cmdnode->list, &priv->cmdpendingq);
spin_unlock_irqrestore(&priv->driver_lock, flags);
+
+ lbtf_deb_host("QUEUE_CMD: inserted command 0x%04x into cmdpendingq\n",
+ le16_to_cpu(cmdnode->cmdbuf->command));
+
+qcmd_done:
+ lbtf_deb_leave(LBTF_DEB_HOST);
}
static void lbtf_submit_command(struct lbtf_private *priv,
@@ -206,22 +241,33 @@ static void lbtf_submit_command(struct lbtf_private *priv,
int timeo = 5 * HZ;
int ret;
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
cmd = cmdnode->cmdbuf;
spin_lock_irqsave(&priv->driver_lock, flags);
priv->cur_cmd = cmdnode;
cmdsize = le16_to_cpu(cmd->size);
command = le16_to_cpu(cmd->command);
+
+ lbtf_deb_cmd("DNLD_CMD: command 0x%04x, seq %d, size %d\n",
+ command, le16_to_cpu(cmd->seqnum), cmdsize);
+ lbtf_deb_hex(LBTF_DEB_CMD, "DNLD_CMD", (void *) cmdnode->cmdbuf, cmdsize);
+
ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize);
spin_unlock_irqrestore(&priv->driver_lock, flags);
- if (ret)
+ if (ret) {
+ pr_info("DNLD_CMD: hw_host_to_card failed: %d\n", ret);
/* Let the timer kick in and retry, and potentially reset
the whole thing if the condition persists */
timeo = HZ;
+ }
/* Setup the timer after transmit command */
mod_timer(&priv->command_timer, jiffies + timeo);
+
+ lbtf_deb_leave(LBTF_DEB_HOST);
}
/**
@@ -231,8 +277,10 @@ static void lbtf_submit_command(struct lbtf_private *priv,
static void __lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
struct cmd_ctrl_node *cmdnode)
{
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
if (!cmdnode)
- return;
+ goto cl_ins_out;
cmdnode->callback = NULL;
cmdnode->callback_arg = 0;
@@ -240,6 +288,9 @@ static void __lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
memset(cmdnode->cmdbuf, 0, LBS_CMD_BUFFER_SIZE);
list_add_tail(&cmdnode->list, &priv->cmdfreeq);
+
+cl_ins_out:
+ lbtf_deb_leave(LBTF_DEB_HOST);
}
static void lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
@@ -268,29 +319,41 @@ int lbtf_cmd_set_mac_multicast_addr(struct lbtf_private *priv)
{
struct cmd_ds_mac_multicast_addr cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
cmd.nr_of_adrs = cpu_to_le16((u16) priv->nr_of_multicastmacaddr);
+
+ lbtf_deb_cmd("MULTICAST_ADR: setting %d addresses\n", cmd.nr_of_adrs);
+
memcpy(cmd.maclist, priv->multicastlist,
priv->nr_of_multicastmacaddr * ETH_ALEN);
lbtf_cmd_async(priv, CMD_MAC_MULTICAST_ADR, &cmd.hdr, sizeof(cmd));
+
+ lbtf_deb_leave(LBTF_DEB_CMD);
return 0;
}
void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode)
{
struct cmd_ds_set_mode cmd;
+ lbtf_deb_enter(LBTF_DEB_WEXT);
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.mode = cpu_to_le16(mode);
+ lbtf_deb_wext("Switching to mode: 0x%x\n", mode);
lbtf_cmd_async(priv, CMD_802_11_SET_MODE, &cmd.hdr, sizeof(cmd));
+
+ lbtf_deb_leave(LBTF_DEB_WEXT);
}
void lbtf_set_bssid(struct lbtf_private *priv, bool activate, const u8 *bssid)
{
struct cmd_ds_set_bssid cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.activate = activate ? 1 : 0;
@@ -298,11 +361,13 @@ void lbtf_set_bssid(struct lbtf_private *priv, bool activate, const u8 *bssid)
memcpy(cmd.bssid, bssid, ETH_ALEN);
lbtf_cmd_async(priv, CMD_802_11_SET_BSSID, &cmd.hdr, sizeof(cmd));
+ lbtf_deb_leave(LBTF_DEB_CMD);
}
int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr)
{
struct cmd_ds_802_11_mac_address cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
@@ -310,6 +375,7 @@ int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr)
memcpy(cmd.macadd, mac_addr, ETH_ALEN);
lbtf_cmd_async(priv, CMD_802_11_MAC_ADDRESS, &cmd.hdr, sizeof(cmd));
+ lbtf_deb_leave(LBTF_DEB_CMD);
return 0;
}
@@ -318,6 +384,8 @@ int lbtf_set_radio_control(struct lbtf_private *priv)
int ret = 0;
struct cmd_ds_802_11_radio_control cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
@@ -341,19 +409,28 @@ int lbtf_set_radio_control(struct lbtf_private *priv)
else
cmd.control &= cpu_to_le16(~TURN_ON_RF);
+ lbtf_deb_cmd("RADIO_SET: radio %d, preamble %d\n", priv->radioon,
+ priv->preamble);
+
ret = lbtf_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd);
+
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", ret);
return ret;
}
void lbtf_set_mac_control(struct lbtf_private *priv)
{
struct cmd_ds_mac_control cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(priv->mac_control);
cmd.reserved = 0;
lbtf_cmd_async(priv, CMD_MAC_CONTROL,
&cmd.hdr, sizeof(cmd));
+
+ lbtf_deb_leave(LBTF_DEB_CMD);
}
/**
@@ -365,29 +442,43 @@ void lbtf_set_mac_control(struct lbtf_private *priv)
*/
int lbtf_allocate_cmd_buffer(struct lbtf_private *priv)
{
+ int ret = 0;
u32 bufsize;
u32 i;
struct cmd_ctrl_node *cmdarray;
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
/* Allocate and initialize the command array */
bufsize = sizeof(struct cmd_ctrl_node) * LBS_NUM_CMD_BUFFERS;
cmdarray = kzalloc(bufsize, GFP_KERNEL);
- if (!cmdarray)
- return -1;
+ if (!cmdarray) {
+ lbtf_deb_host("ALLOC_CMD_BUF: tempcmd_array is NULL\n");
+ ret = -1;
+ goto done;
+ }
priv->cmd_array = cmdarray;
/* Allocate and initialize each command buffer in the command array */
for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
cmdarray[i].cmdbuf = kzalloc(LBS_CMD_BUFFER_SIZE, GFP_KERNEL);
- if (!cmdarray[i].cmdbuf)
- return -1;
+ if (!cmdarray[i].cmdbuf) {
+ lbtf_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n");
+ ret = -1;
+ goto done;
+ }
}
for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
init_waitqueue_head(&cmdarray[i].cmdwait_q);
lbtf_cleanup_and_insert_cmd(priv, &cmdarray[i]);
}
- return 0;
+
+ ret = 0;
+
+done:
+ lbtf_deb_leave_args(LBTF_DEB_HOST, "ret %d", ret);
+ return ret;
}
/**
@@ -402,9 +493,13 @@ int lbtf_free_cmd_buffer(struct lbtf_private *priv)
struct cmd_ctrl_node *cmdarray;
unsigned int i;
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
/* need to check if cmd array is allocated or not */
- if (priv->cmd_array == NULL)
- return 0;
+ if (priv->cmd_array == NULL) {
+ lbtf_deb_host("FREE_CMD_BUF: cmd_array is NULL\n");
+ goto done;
+ }
cmdarray = priv->cmd_array;
@@ -418,6 +513,8 @@ int lbtf_free_cmd_buffer(struct lbtf_private *priv)
kfree(priv->cmd_array);
priv->cmd_array = NULL;
+done:
+ lbtf_deb_leave(LBTF_DEB_HOST);
return 0;
}
@@ -433,6 +530,8 @@ static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv)
struct cmd_ctrl_node *tempnode;
unsigned long flags;
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
if (!priv)
return NULL;
@@ -442,11 +541,14 @@ static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv)
tempnode = list_first_entry(&priv->cmdfreeq,
struct cmd_ctrl_node, list);
list_del(&tempnode->list);
- } else
+ } else {
+ lbtf_deb_host("GET_CMD_NODE: cmd_ctrl_node is not available\n");
tempnode = NULL;
+ }
spin_unlock_irqrestore(&priv->driver_lock, flags);
+ lbtf_deb_leave(LBTF_DEB_HOST);
return tempnode;
}
@@ -462,16 +564,20 @@ int lbtf_execute_next_command(struct lbtf_private *priv)
struct cmd_ctrl_node *cmdnode = NULL;
struct cmd_header *cmd;
unsigned long flags;
+ int ret = 0;
- /* Debug group is LBS_DEB_THREAD and not LBS_DEB_HOST, because the
+ /* Debug group is lbtf_deb_THREAD and not lbtf_deb_HOST, because the
* only caller to us is lbtf_thread() and we get even when a
* data packet is received */
+ lbtf_deb_enter(LBTF_DEB_THREAD);
spin_lock_irqsave(&priv->driver_lock, flags);
if (priv->cur_cmd) {
+ pr_alert("EXEC_NEXT_CMD: already processing command!\n");
spin_unlock_irqrestore(&priv->driver_lock, flags);
- return -1;
+ ret = -1;
+ goto done;
}
if (!list_empty(&priv->cmdpendingq)) {
@@ -483,11 +589,17 @@ int lbtf_execute_next_command(struct lbtf_private *priv)
cmd = cmdnode->cmdbuf;
list_del(&cmdnode->list);
+ lbtf_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n",
+ le16_to_cpu(cmd->command));
spin_unlock_irqrestore(&priv->driver_lock, flags);
lbtf_submit_command(priv, cmdnode);
} else
spin_unlock_irqrestore(&priv->driver_lock, flags);
- return 0;
+
+ ret = 0;
+done:
+ lbtf_deb_leave(LBTF_DEB_THREAD);
+ return ret;
}
static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
@@ -498,14 +610,22 @@ static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
{
struct cmd_ctrl_node *cmdnode;
- if (priv->surpriseremoved)
- return ERR_PTR(-ENOENT);
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
+ if (priv->surpriseremoved) {
+ lbtf_deb_host("PREP_CMD: card removed\n");
+ cmdnode = ERR_PTR(-ENOENT);
+ goto done;
+ }
cmdnode = lbtf_get_cmd_ctrl_node(priv);
if (cmdnode == NULL) {
+ lbtf_deb_host("PREP_CMD: cmdnode is NULL\n");
+
/* Wake up main thread to execute next command */
queue_work(lbtf_wq, &priv->cmd_work);
- return ERR_PTR(-ENOBUFS);
+ cmdnode = ERR_PTR(-ENOBUFS);
+ goto done;
}
cmdnode->callback = callback;
@@ -520,17 +640,24 @@ static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size);
cmdnode->cmdbuf->seqnum = cpu_to_le16(priv->seqnum);
cmdnode->cmdbuf->result = 0;
+
+ lbtf_deb_host("PREP_CMD: command 0x%04x\n", command);
+
cmdnode->cmdwaitqwoken = 0;
lbtf_queue_cmd(priv, cmdnode);
queue_work(lbtf_wq, &priv->cmd_work);
+ done:
+ lbtf_deb_leave_args(LBTF_DEB_HOST, "ret %p", cmdnode);
return cmdnode;
}
void lbtf_cmd_async(struct lbtf_private *priv, uint16_t command,
struct cmd_header *in_cmd, int in_cmd_size)
{
+ lbtf_deb_enter(LBTF_DEB_CMD);
__lbtf_cmd_async(priv, command, in_cmd, in_cmd_size, NULL, 0);
+ lbtf_deb_leave(LBTF_DEB_CMD);
}
int __lbtf_cmd(struct lbtf_private *priv, uint16_t command,
@@ -543,30 +670,35 @@ int __lbtf_cmd(struct lbtf_private *priv, uint16_t command,
unsigned long flags;
int ret = 0;
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
cmdnode = __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size,
callback, callback_arg);
- if (IS_ERR(cmdnode))
- return PTR_ERR(cmdnode);
+ if (IS_ERR(cmdnode)) {
+ ret = PTR_ERR(cmdnode);
+ goto done;
+ }
might_sleep();
ret = wait_event_interruptible(cmdnode->cmdwait_q,
cmdnode->cmdwaitqwoken);
- if (ret) {
- printk(KERN_DEBUG
- "libertastf: command 0x%04x interrupted by signal",
- command);
- return ret;
+ if (ret) {
+ pr_info("PREP_CMD: command 0x%04x interrupted by signal: %d\n",
+ command, ret);
+ goto done;
}
spin_lock_irqsave(&priv->driver_lock, flags);
ret = cmdnode->result;
if (ret)
- printk(KERN_DEBUG "libertastf: command 0x%04x failed: %d\n",
+ pr_info("PREP_CMD: command 0x%04x failed: %d\n",
command, ret);
__lbtf_cleanup_and_insert_cmd(priv, cmdnode);
spin_unlock_irqrestore(&priv->driver_lock, flags);
+done:
+ lbtf_deb_leave_args(LBTF_DEB_HOST, "ret %d", ret);
return ret;
}
EXPORT_SYMBOL_GPL(__lbtf_cmd);
@@ -587,6 +719,8 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
unsigned long flags;
uint16_t result;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
mutex_lock(&priv->lock);
spin_lock_irqsave(&priv->driver_lock, flags);
@@ -602,7 +736,7 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
result = le16_to_cpu(resp->result);
if (net_ratelimit())
- printk(KERN_DEBUG "libertastf: cmd response 0x%04x, seq %d, size %d\n",
+ pr_info("libertastf: cmd response 0x%04x, seq %d, size %d\n",
respcmd, le16_to_cpu(resp->seqnum),
le16_to_cpu(resp->size));
@@ -639,7 +773,7 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
switch (respcmd) {
case CMD_RET(CMD_GET_HW_SPEC):
case CMD_RET(CMD_802_11_RESET):
- printk(KERN_DEBUG "libertastf: reset failed\n");
+ pr_info("libertastf: reset failed\n");
break;
}
@@ -666,5 +800,6 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
done:
mutex_unlock(&priv->lock);
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", ret);
return ret;
}
diff --git a/drivers/net/wireless/libertas_tf/deb_defs.h b/drivers/net/wireless/libertas_tf/deb_defs.h
new file mode 100644
index 0000000..ae75396
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/deb_defs.h
@@ -0,0 +1,104 @@
+/**
+ * This header file contains global constant/enum definitions,
+ * global variable declaration.
+ */
+#ifndef _LBS_DEB_DEFS_H_
+#define _LBS_DEB_DEFS_H_
+
+#ifndef DRV_NAME
+#define DRV_NAME "libertas_tf"
+#endif
+
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_LIBERTAS_THINFIRM_DEBUG
+#define DEBUG
+#define PROC_DEBUG
+#endif
+
+#define LBTF_DEB_ENTER 0x00000001
+#define LBTF_DEB_LEAVE 0x00000002
+#define LBTF_DEB_MAIN 0x00000004
+#define LBTF_DEB_NET 0x00000008
+#define LBTF_DEB_MESH 0x00000010
+#define LBTF_DEB_WEXT 0x00000020
+#define LBTF_DEB_IOCTL 0x00000040
+#define LBTF_DEB_SCAN 0x00000080
+#define LBTF_DEB_ASSOC 0x00000100
+#define LBTF_DEB_JOIN 0x00000200
+#define LBTF_DEB_11D 0x00000400
+#define LBTF_DEB_DEBUGFS 0x00000800
+#define LBTF_DEB_ETHTOOL 0x00001000
+#define LBTF_DEB_HOST 0x00002000
+#define LBTF_DEB_CMD 0x00004000
+#define LBTF_DEB_RX 0x00008000
+#define LBTF_DEB_TX 0x00010000
+#define LBTF_DEB_USB 0x00020000
+#define LBTF_DEB_CS 0x00040000
+#define LBTF_DEB_FW 0x00080000
+#define LBTF_DEB_THREAD 0x00100000
+#define LBTF_DEB_HEX 0x00200000
+#define LBTF_DEB_SDIO 0x00400000
+#define LBTF_DEB_MACOPS 0x00800000
+
+extern unsigned int lbtf_debug;
+
+
+#ifdef DEBUG
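+/* Descriptive note (added for clarity): print a debug line only when every
+ * bit of @grp is enabled in lbtf_debug. */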
+#define LBTF_DEB_LL(grp, grpnam, fmt, args...) \
+do { if ((lbtf_debug & (grp)) == (grp)) \
+ printk(KERN_DEBUG DRV_NAME grpnam "%s: " fmt, \
+ in_interrupt() ? " (INT)" : "", ## args); } while (0)
+#else
+#define LBTF_DEB_LL(grp, grpnam, fmt, args...) do {} while (0)
+#endif
+
+#define lbtf_deb_enter(grp) \
+ LBTF_DEB_LL(grp | LBTF_DEB_ENTER, " enter", "%s()\n", __func__);
+#define lbtf_deb_enter_args(grp, fmt, args...) \
+ LBTF_DEB_LL(grp | LBTF_DEB_ENTER, " enter", "%s(" fmt ")\n", __func__, ## args);
+#define lbtf_deb_leave(grp) \
+ LBTF_DEB_LL(grp | LBTF_DEB_LEAVE, " leave", "%s()\n", __func__);
+#define lbtf_deb_leave_args(grp, fmt, args...) \
+ LBTF_DEB_LL(grp | LBTF_DEB_LEAVE, " leave", "%s(), " fmt "\n", \
+ __func__, ##args);
+#define lbtf_deb_main(fmt, args...) LBTF_DEB_LL(LBTF_DEB_MAIN, " main", fmt, ##args)
+#define lbtf_deb_net(fmt, args...) LBTF_DEB_LL(LBTF_DEB_NET, " net", fmt, ##args)
+#define lbtf_deb_mesh(fmt, args...) LBTF_DEB_LL(LBTF_DEB_MESH, " mesh", fmt, ##args)
+#define lbtf_deb_wext(fmt, args...) LBTF_DEB_LL(LBTF_DEB_WEXT, " wext", fmt, ##args)
+#define lbtf_deb_ioctl(fmt, args...) LBTF_DEB_LL(LBTF_DEB_IOCTL, " ioctl", fmt, ##args)
+#define lbtf_deb_scan(fmt, args...) LBTF_DEB_LL(LBTF_DEB_SCAN, " scan", fmt, ##args)
+#define lbtf_deb_assoc(fmt, args...) LBTF_DEB_LL(LBTF_DEB_ASSOC, " assoc", fmt, ##args)
+#define lbtf_deb_join(fmt, args...) LBTF_DEB_LL(LBTF_DEB_JOIN, " join", fmt, ##args)
+#define lbtf_deb_11d(fmt, args...) LBTF_DEB_LL(LBTF_DEB_11D, " 11d", fmt, ##args)
+#define lbtf_deb_debugfs(fmt, args...) LBTF_DEB_LL(LBTF_DEB_DEBUGFS, " debugfs", fmt, ##args)
+#define lbtf_deb_ethtool(fmt, args...) LBTF_DEB_LL(LBTF_DEB_ETHTOOL, " ethtool", fmt, ##args)
+#define lbtf_deb_host(fmt, args...) LBTF_DEB_LL(LBTF_DEB_HOST, " host", fmt, ##args)
+#define lbtf_deb_cmd(fmt, args...) LBTF_DEB_LL(LBTF_DEB_CMD, " cmd", fmt, ##args)
+#define lbtf_deb_rx(fmt, args...) LBTF_DEB_LL(LBTF_DEB_RX, " rx", fmt, ##args)
+#define lbtf_deb_tx(fmt, args...) LBTF_DEB_LL(LBTF_DEB_TX, " tx", fmt, ##args)
+#define lbtf_deb_fw(fmt, args...) LBTF_DEB_LL(LBTF_DEB_FW, " fw", fmt, ##args)
+#define lbtf_deb_usb(fmt, args...) LBTF_DEB_LL(LBTF_DEB_USB, " usb", fmt, ##args)
+#define lbtf_deb_usbd(dev, fmt, args...) LBTF_DEB_LL(LBTF_DEB_USB, " usbd", "%s:" fmt, dev_name(dev), ##args)
+#define lbtf_deb_cs(fmt, args...) LBTF_DEB_LL(LBTF_DEB_CS, " cs", fmt, ##args)
+#define lbtf_deb_thread(fmt, args...) LBTF_DEB_LL(LBTF_DEB_THREAD, " thread", fmt, ##args)
+#define lbtf_deb_sdio(fmt, args...)    LBTF_DEB_LL(LBTF_DEB_SDIO, " sdio", fmt, ##args)
+#define lbtf_deb_macops(fmt, args...)  LBTF_DEB_LL(LBTF_DEB_MACOPS, " macops", fmt, ##args)
+
+#ifdef DEBUG
+static inline void lbtf_deb_hex(unsigned int grp, const char *prompt, u8 *buf, int len)
+{
+ char newprompt[32];
+
+ if (len &&
+ (lbtf_debug & LBTF_DEB_HEX) &&
+ (lbtf_debug & grp)) {
+ snprintf(newprompt, sizeof(newprompt), DRV_NAME " %s: ", prompt);
+		print_hex_dump_bytes(newprompt, DUMP_PREFIX_NONE, buf, len);
+ }
+}
+#else
+#define lbtf_deb_hex(grp, prompt, buf, len) do {} while (0)
+#endif
+
+#endif
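
A minimal usage sketch for the macros above, assuming deb_defs.h is pulled in through libertas_tf.h; the function and its logic are hypothetical, not part of the patch. Group helpers print only when their group bit is set in lbtf_debug, and enter/leave tracing also needs the LBTF_DEB_ENTER/LBTF_DEB_LEAVE bits, so loading with the libertas_tf_debug module parameter (added in main.c later in this patch) set to e.g. 0x7 should trace the MAIN group:

static int lbtf_example_op(struct lbtf_private *priv, int arg)
{
	int ret = 0;

	lbtf_deb_enter_args(LBTF_DEB_MAIN, "arg %d", arg);

	if (!priv->fw_ready) {
		/* only printed when LBTF_DEB_MAIN is set in lbtf_debug */
		lbtf_deb_main("firmware not ready, ignoring arg %d\n", arg);
		ret = -1;
	}

	lbtf_deb_leave_args(LBTF_DEB_MAIN, "ret %d", ret);
	return ret;
}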
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 8cc9db6..c445500 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -7,6 +7,13 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
+#define DRV_NAME "lbtf_usb"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "libertas_tf.h"
+#include "if_usb.h"
+
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
@@ -14,10 +21,8 @@
#include <linux/slab.h>
#include <linux/usb.h>
-#define DRV_NAME "lbtf_usb"
-
-#include "libertas_tf.h"
-#include "if_usb.h"
+#define INSANEDEBUG 0
+#define lbtf_deb_usb2(...) do { if (INSANEDEBUG) lbtf_deb_usbd(__VA_ARGS__); } while (0)
#define MESSAGE_HEADER_LEN 4
@@ -53,9 +58,14 @@ static int if_usb_reset_device(struct if_usb_card *cardp);
*/
static void if_usb_write_bulk_callback(struct urb *urb)
{
- if (urb->status != 0)
- printk(KERN_INFO "libertastf: URB in failure status: %d\n",
- urb->status);
+ if (urb->status != 0) {
+ /* print the failure status number for debug */
+ pr_info("URB in failure status: %d\n", urb->status);
+ } else {
+ lbtf_deb_usb2(&urb->dev->dev, "URB status is successful\n");
+ lbtf_deb_usb2(&urb->dev->dev, "Actual length transmitted %d\n",
+ urb->actual_length);
+ }
}
/**
@@ -65,6 +75,8 @@ static void if_usb_write_bulk_callback(struct urb *urb)
*/
static void if_usb_free(struct if_usb_card *cardp)
{
+ lbtf_deb_enter(LBTF_DEB_USB);
+
/* Unlink tx & rx urb */
usb_kill_urb(cardp->tx_urb);
usb_kill_urb(cardp->rx_urb);
@@ -81,6 +93,8 @@ static void if_usb_free(struct if_usb_card *cardp)
kfree(cardp->ep_out_buf);
cardp->ep_out_buf = NULL;
+
+ lbtf_deb_leave(LBTF_DEB_USB);
}
static void if_usb_setup_firmware(struct lbtf_private *priv)
@@ -88,23 +102,33 @@ static void if_usb_setup_firmware(struct lbtf_private *priv)
struct if_usb_card *cardp = priv->card;
struct cmd_ds_set_boot2_ver b2_cmd;
+ lbtf_deb_enter(LBTF_DEB_USB);
+
if_usb_submit_rx_urb(cardp);
b2_cmd.hdr.size = cpu_to_le16(sizeof(b2_cmd));
b2_cmd.action = 0;
b2_cmd.version = cardp->boot2_version;
if (lbtf_cmd_with_response(priv, CMD_SET_BOOT2_VER, &b2_cmd))
- printk(KERN_INFO "libertastf: setting boot2 version failed\n");
+ lbtf_deb_usb("Setting boot2 version failed\n");
+
+ lbtf_deb_leave(LBTF_DEB_USB);
}
static void if_usb_fw_timeo(unsigned long priv)
{
struct if_usb_card *cardp = (void *)priv;
- if (!cardp->fwdnldover)
+ lbtf_deb_enter(LBTF_DEB_USB);
+ if (!cardp->fwdnldover) {
/* Download timed out */
cardp->priv->surpriseremoved = 1;
+ pr_err("Download timed out\n");
+ } else {
+ lbtf_deb_usb("Download complete, no event. Assuming success\n");
+ }
wake_up(&cardp->fw_wq);
+ lbtf_deb_leave(LBTF_DEB_USB);
}
/**
@@ -125,11 +149,14 @@ static int if_usb_probe(struct usb_interface *intf,
struct if_usb_card *cardp;
int i;
+ lbtf_deb_enter(LBTF_DEB_USB);
udev = interface_to_usbdev(intf);
cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL);
- if (!cardp)
+ if (!cardp) {
+ pr_err("Out of memory allocating private data.\n");
goto error;
+ }
setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp);
init_waitqueue_head(&cardp->fw_wq);
@@ -137,38 +164,62 @@ static int if_usb_probe(struct usb_interface *intf,
cardp->udev = udev;
iface_desc = intf->cur_altsetting;
+ lbtf_deb_usbd(&udev->dev, "bcdUSB = 0x%X bDeviceClass = 0x%X"
+ " bDeviceSubClass = 0x%X, bDeviceProtocol = 0x%X\n",
+ le16_to_cpu(udev->descriptor.bcdUSB),
+ udev->descriptor.bDeviceClass,
+ udev->descriptor.bDeviceSubClass,
+ udev->descriptor.bDeviceProtocol);
+
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
if (usb_endpoint_is_bulk_in(endpoint)) {
cardp->ep_in_size =
le16_to_cpu(endpoint->wMaxPacketSize);
cardp->ep_in = usb_endpoint_num(endpoint);
+
+ lbtf_deb_usbd(&udev->dev, "in_endpoint = %d\n", cardp->ep_in);
+ lbtf_deb_usbd(&udev->dev, "Bulk in size is %d\n", cardp->ep_in_size);
} else if (usb_endpoint_is_bulk_out(endpoint)) {
cardp->ep_out_size =
le16_to_cpu(endpoint->wMaxPacketSize);
cardp->ep_out = usb_endpoint_num(endpoint);
+
+ lbtf_deb_usbd(&udev->dev, "out_endpoint = %d\n", cardp->ep_out);
+ lbtf_deb_usbd(&udev->dev, "Bulk out size is %d\n",
+ cardp->ep_out_size);
}
}
- if (!cardp->ep_out_size || !cardp->ep_in_size)
+ if (!cardp->ep_out_size || !cardp->ep_in_size) {
+ lbtf_deb_usbd(&udev->dev, "Endpoints not found\n");
/* Endpoints not found */
goto dealloc;
+ }
cardp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!cardp->rx_urb)
+ if (!cardp->rx_urb) {
+ lbtf_deb_usbd(&udev->dev, "Rx URB allocation failed\n");
goto dealloc;
+ }
cardp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!cardp->tx_urb)
+ if (!cardp->tx_urb) {
+ lbtf_deb_usbd(&udev->dev, "Tx URB allocation failed\n");
goto dealloc;
+ }
cardp->cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!cardp->cmd_urb)
+ if (!cardp->cmd_urb) {
+ lbtf_deb_usbd(&udev->dev, "Cmd URB allocation failed\n");
goto dealloc;
+ }
cardp->ep_out_buf = kmalloc(MRVDRV_ETH_TX_PACKET_BUFFER_SIZE,
GFP_KERNEL);
- if (!cardp->ep_out_buf)
+ if (!cardp->ep_out_buf) {
+ lbtf_deb_usbd(&udev->dev, "Could not allocate buffer\n");
goto dealloc;
+ }
priv = lbtf_add_card(cardp, &udev->dev);
if (!priv)
@@ -189,6 +240,7 @@ static int if_usb_probe(struct usb_interface *intf,
dealloc:
if_usb_free(cardp);
error:
+	lbtf_deb_leave(LBTF_DEB_USB);
return -ENOMEM;
}
@@ -202,6 +254,8 @@ static void if_usb_disconnect(struct usb_interface *intf)
struct if_usb_card *cardp = usb_get_intfdata(intf);
struct lbtf_private *priv = (struct lbtf_private *) cardp->priv;
+ lbtf_deb_enter(LBTF_DEB_MAIN);
+
if_usb_reset_device(cardp);
if (priv)
@@ -212,6 +266,8 @@ static void if_usb_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
usb_put_dev(interface_to_usbdev(intf));
+
+ lbtf_deb_leave(LBTF_DEB_MAIN);
}
/**
@@ -226,6 +282,8 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
struct fwdata *fwdata = cardp->ep_out_buf;
u8 *firmware = (u8 *) cardp->fw->data;
+ lbtf_deb_enter(LBTF_DEB_FW);
+
/* If we got a CRC failure on the last block, back
up and retry it */
if (!cardp->CRC_OK) {
@@ -233,6 +291,9 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
cardp->fwseqnum--;
}
+ lbtf_deb_usb2(&cardp->udev->dev, "totalbytes = %d\n",
+ cardp->totalbytes);
+
/* struct fwdata (which we sent to the card) has an
extra __le32 field in between the header and the data,
which is not in the struct fwheader in the actual
@@ -246,18 +307,33 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
memcpy(fwdata->data, &firmware[cardp->totalbytes],
le32_to_cpu(fwdata->hdr.datalength));
+ lbtf_deb_usb2(&cardp->udev->dev, "Data length = %d\n",
+ le32_to_cpu(fwdata->hdr.datalength));
+
fwdata->seqnum = cpu_to_le32(++cardp->fwseqnum);
cardp->totalbytes += le32_to_cpu(fwdata->hdr.datalength);
usb_tx_block(cardp, cardp->ep_out_buf, sizeof(struct fwdata) +
le32_to_cpu(fwdata->hdr.datalength), 0);
- if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK))
+ if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_DATA_TO_RECV)) {
+ lbtf_deb_usb2(&cardp->udev->dev, "There are data to follow\n");
+ lbtf_deb_usb2(&cardp->udev->dev, "seqnum = %d totalbytes = %d\n",
+ cardp->fwseqnum, cardp->totalbytes);
+ } else if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) {
+ lbtf_deb_usb2(&cardp->udev->dev, "Host has finished FW downloading\n");
+		lbtf_deb_usb2(&cardp->udev->dev, "Downloading FW JUMP BLOCK\n");
+
/* Host has finished FW downloading
* Donwloading FW JUMP BLOCK
*/
cardp->fwfinalblk = 1;
+ }
+ lbtf_deb_usb2(&cardp->udev->dev, "Firmware download done; size %d\n",
+ cardp->totalbytes);
+
+ lbtf_deb_leave(LBTF_DEB_FW);
return 0;
}
@@ -266,6 +342,8 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
struct cmd_ds_802_11_reset *cmd = cardp->ep_out_buf + 4;
int ret;
+ lbtf_deb_enter(LBTF_DEB_USB);
+
*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
cmd->hdr.command = cpu_to_le16(CMD_802_11_RESET);
@@ -280,6 +358,8 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
ret = usb_reset_device(cardp->udev);
msleep(100);
+ lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret);
+
return ret;
}
EXPORT_SYMBOL_GPL(if_usb_reset_device);
@@ -297,11 +377,15 @@ EXPORT_SYMBOL_GPL(if_usb_reset_device);
static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
uint16_t nb, u8 data)
{
+ int ret = -1;
struct urb *urb;
+ lbtf_deb_enter(LBTF_DEB_USB);
/* check if device is removed */
- if (cardp->priv->surpriseremoved)
- return -1;
+ if (cardp->priv->surpriseremoved) {
+ lbtf_deb_usbd(&cardp->udev->dev, "Device removed\n");
+ goto tx_ret;
+ }
if (data)
urb = cardp->tx_urb;
@@ -315,19 +399,34 @@ static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
urb->transfer_flags |= URB_ZERO_PACKET;
- if (usb_submit_urb(urb, GFP_ATOMIC))
- return -1;
- return 0;
+ if (usb_submit_urb(urb, GFP_ATOMIC)) {
+ lbtf_deb_usbd(&cardp->udev->dev, "usb_submit_urb failed: %d\n", ret);
+ goto tx_ret;
+ }
+
+ lbtf_deb_usb2(&cardp->udev->dev, "usb_submit_urb success\n");
+
+ ret = 0;
+
+tx_ret:
+ lbtf_deb_leave(LBTF_DEB_USB);
+ return ret;
}
static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
void (*callbackfn)(struct urb *urb))
{
struct sk_buff *skb;
+ int ret = -1;
+
+ lbtf_deb_enter(LBTF_DEB_USB);
skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
- if (!skb)
+ if (!skb) {
+ pr_err("No free skb\n");
+ lbtf_deb_leave(LBTF_DEB_USB);
return -1;
+ }
cardp->rx_skb = skb;
@@ -339,12 +438,19 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
- if (usb_submit_urb(cardp->rx_urb, GFP_ATOMIC)) {
+ lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb);
+ ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC);
+ if (ret) {
+ lbtf_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret);
kfree_skb(skb);
cardp->rx_skb = NULL;
+ lbtf_deb_leave(LBTF_DEB_USB);
return -1;
- } else
+ } else {
+ lbtf_deb_usb2(&cardp->udev->dev, "Submit Rx URB success\n");
+ lbtf_deb_leave(LBTF_DEB_USB);
return 0;
+ }
}
static int if_usb_submit_rx_urb_fwload(struct if_usb_card *cardp)
@@ -364,8 +470,12 @@ static void if_usb_receive_fwload(struct urb *urb)
struct fwsyncheader *syncfwheader;
struct bootcmdresp bcmdresp;
+ lbtf_deb_enter(LBTF_DEB_USB);
if (urb->status) {
+ lbtf_deb_usbd(&cardp->udev->dev,
+			"URB failed during fw load\n");
kfree_skb(skb);
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
@@ -373,12 +483,17 @@ static void if_usb_receive_fwload(struct urb *urb)
__le32 *tmp = (__le32 *)(skb->data);
if (tmp[0] == cpu_to_le32(CMD_TYPE_INDICATION) &&
- tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY))
+ tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY)) {
/* Firmware ready event received */
+ pr_info("Firmware ready event received\n");
wake_up(&cardp->fw_wq);
- else
+ } else {
+ lbtf_deb_usb("Waiting for confirmation; got %x %x\n",
+ le32_to_cpu(tmp[0]), le32_to_cpu(tmp[1]));
if_usb_submit_rx_urb_fwload(cardp);
+ }
kfree_skb(skb);
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
if (cardp->bootcmdresp <= 0) {
@@ -389,34 +504,60 @@ static void if_usb_receive_fwload(struct urb *urb)
if_usb_submit_rx_urb_fwload(cardp);
cardp->bootcmdresp = 1;
/* Received valid boot command response */
+ lbtf_deb_usbd(&cardp->udev->dev,
+ "Received valid boot command response\n");
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
if (bcmdresp.magic != cpu_to_le32(BOOT_CMD_MAGIC_NUMBER)) {
if (bcmdresp.magic == cpu_to_le32(CMD_TYPE_REQUEST) ||
bcmdresp.magic == cpu_to_le32(CMD_TYPE_DATA) ||
- bcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION))
+ bcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION)) {
+ if (!cardp->bootcmdresp)
+ pr_info("Firmware already seems alive; resetting\n");
cardp->bootcmdresp = -1;
- } else if (bcmdresp.cmd == BOOT_CMD_FW_BY_USB &&
- bcmdresp.result == BOOT_CMD_RESP_OK)
+ } else {
+ pr_info("boot cmd response wrong magic number (0x%x)\n",
+ le32_to_cpu(bcmdresp.magic));
+ }
+ } else if (bcmdresp.cmd != BOOT_CMD_FW_BY_USB) {
+ pr_info("boot cmd response cmd_tag error (%d)\n",
+ bcmdresp.cmd);
+ } else if (bcmdresp.result != BOOT_CMD_RESP_OK) {
+ pr_info("boot cmd response result error (%d)\n",
+ bcmdresp.result);
+ } else {
cardp->bootcmdresp = 1;
+ lbtf_deb_usbd(&cardp->udev->dev,
+ "Received valid boot command response\n");
+ }
kfree_skb(skb);
if_usb_submit_rx_urb_fwload(cardp);
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC);
if (!syncfwheader) {
+ lbtf_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n");
kfree_skb(skb);
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
memcpy(syncfwheader, skb->data, sizeof(struct fwsyncheader));
- if (!syncfwheader->cmd)
+ if (!syncfwheader->cmd) {
+ lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n");
+ lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n",
+ le32_to_cpu(syncfwheader->seqnum));
cardp->CRC_OK = 1;
- else
+ } else {
+ lbtf_deb_usbd(&cardp->udev->dev, "FW received Blk with CRC error\n");
cardp->CRC_OK = 0;
+ }
+
kfree_skb(skb);
/* reschedule timer for 200ms hence */
@@ -434,7 +575,7 @@ static void if_usb_receive_fwload(struct urb *urb)
kfree(syncfwheader);
- return;
+ lbtf_deb_leave(LBTF_DEB_USB);
}
#define MRVDRV_MIN_PKT_LEN 30
@@ -445,6 +586,7 @@ static inline void process_cmdtypedata(int recvlength, struct sk_buff *skb,
{
if (recvlength > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + MESSAGE_HEADER_LEN
|| recvlength < MRVDRV_MIN_PKT_LEN) {
+		lbtf_deb_usbd(&cardp->udev->dev, "Packet length is invalid\n");
kfree_skb(skb);
return;
}
@@ -460,6 +602,8 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
struct lbtf_private *priv)
{
if (recvlength > LBS_CMD_BUFFER_SIZE) {
+ lbtf_deb_usbd(&cardp->udev->dev,
+			"The received command is too large\n");
kfree_skb(skb);
return;
}
@@ -489,16 +633,24 @@ static void if_usb_receive(struct urb *urb)
uint32_t recvtype = 0;
__le32 *pkt = (__le32 *) skb->data;
+ lbtf_deb_enter(LBTF_DEB_USB);
+
if (recvlength) {
if (urb->status) {
+ lbtf_deb_usbd(&cardp->udev->dev, "RX URB failed: %d\n",
+ urb->status);
kfree_skb(skb);
goto setup_for_next;
}
recvbuff = skb->data;
recvtype = le32_to_cpu(pkt[0]);
+ lbtf_deb_usbd(&cardp->udev->dev,
+ "Recv length = 0x%x, Recv type = 0x%X\n",
+ recvlength, recvtype);
} else if (urb->status) {
kfree_skb(skb);
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
@@ -515,6 +667,7 @@ static void if_usb_receive(struct urb *urb)
{
/* Event cause handling */
u32 event_cause = le32_to_cpu(pkt[1]);
+ lbtf_deb_usbd(&cardp->udev->dev, "**EVENT** 0x%X\n", event_cause);
/* Icky undocumented magic special case */
if (event_cause & 0xffff0000) {
@@ -529,21 +682,22 @@ static void if_usb_receive(struct urb *urb)
} else if (event_cause == LBTF_EVENT_BCN_SENT)
lbtf_bcn_sent(priv);
else
- printk(KERN_DEBUG
+ lbtf_deb_usbd(&cardp->udev->dev,
"Unsupported notification %d received\n",
event_cause);
kfree_skb(skb);
break;
}
default:
- printk(KERN_DEBUG "libertastf: unknown command type 0x%X\n",
- recvtype);
+ lbtf_deb_usbd(&cardp->udev->dev,
+ "libertastf: unknown command type 0x%X\n", recvtype);
kfree_skb(skb);
break;
}
setup_for_next:
if_usb_submit_rx_urb(cardp);
+ lbtf_deb_leave(LBTF_DEB_USB);
}
/**
@@ -562,6 +716,9 @@ static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type,
struct if_usb_card *cardp = priv->card;
u8 data = 0;
+ lbtf_deb_usbd(&cardp->udev->dev, "*** type = %u\n", type);
+ lbtf_deb_usbd(&cardp->udev->dev, "size after = %d\n", nb);
+
if (type == MVMS_CMD) {
*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
} else {
@@ -639,8 +796,10 @@ static int check_fwfile_format(const u8 *data, u32 totlen)
} while (!exit);
if (ret)
- printk(KERN_INFO
- "libertastf: firmware file format check failed\n");
+ pr_err("firmware file format check FAIL\n");
+ else
+ lbtf_deb_fw("firmware file format check PASS\n");
+
return ret;
}
@@ -651,10 +810,12 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp)
static int reset_count = 10;
int ret = 0;
+ lbtf_deb_enter(LBTF_DEB_USB);
+
ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev);
if (ret < 0) {
- printk(KERN_INFO "libertastf: firmware %s not found\n",
- lbtf_fw_name);
+ pr_err("request_firmware() failed with %#x\n", ret);
+ pr_err("firmware %s not found\n", lbtf_fw_name);
goto done;
}
@@ -663,6 +824,7 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp)
restart:
if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
+		lbtf_deb_usbd(&cardp->udev->dev, "URB submission failed\n");
ret = -1;
goto release_fw;
}
@@ -709,14 +871,13 @@ restart:
usb_kill_urb(cardp->rx_urb);
if (!cardp->fwdnldover) {
- printk(KERN_INFO "libertastf: failed to load fw,"
- " resetting device!\n");
+ pr_info("failed to load fw, resetting device!\n");
if (--reset_count >= 0) {
if_usb_reset_device(cardp);
goto restart;
}
- printk(KERN_INFO "libertastf: fw download failure\n");
+ pr_info("FW download failure, time = %d ms\n", i * 100);
ret = -1;
goto release_fw;
}
@@ -730,6 +891,7 @@ restart:
if_usb_setup_firmware(cardp->priv);
done:
+ lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret);
return ret;
}
EXPORT_SYMBOL_GPL(if_usb_prog_firmware);
@@ -751,13 +913,19 @@ static int __init if_usb_init_module(void)
{
int ret = 0;
+ lbtf_deb_enter(LBTF_DEB_MAIN);
+
ret = usb_register(&if_usb_driver);
+
+ lbtf_deb_leave_args(LBTF_DEB_MAIN, "ret %d", ret);
return ret;
}
static void __exit if_usb_exit_module(void)
{
+ lbtf_deb_enter(LBTF_DEB_MAIN);
usb_deregister(&if_usb_driver);
+ lbtf_deb_leave(LBTF_DEB_MAIN);
}
module_init(if_usb_init_module);
diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/libertas_tf/libertas_tf.h
index 4cc42dd..fbbaaae 100644
--- a/drivers/net/wireless/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/libertas_tf/libertas_tf.h
@@ -13,6 +13,8 @@
#include <linux/kthread.h>
#include <net/mac80211.h>
+#include "deb_defs.h"
+
#ifndef DRV_NAME
#define DRV_NAME "libertas_tf"
#endif
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 7945ff5..6a04c21 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -7,10 +7,12 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
+#include <linux/etherdevice.h>
#include "libertas_tf.h"
-#include "linux/etherdevice.h"
#define DRIVER_RELEASE_VERSION "004.p0"
/* thinfirm version: 5.132.X.pX */
@@ -18,7 +20,17 @@
#define LBTF_FW_VER_MAX 0x0584ffff
#define QOS_CONTROL_LEN 2
-static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION;
+/* Module parameters */
+unsigned int lbtf_debug;
+EXPORT_SYMBOL_GPL(lbtf_debug);
+module_param_named(libertas_tf_debug, lbtf_debug, int, 0644);
+
+static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION
+#ifdef DEBUG
+ "-dbg"
+#endif
+ "";
+
struct workqueue_struct *lbtf_wq;
static const struct ieee80211_channel lbtf_channels[] = {
@@ -81,6 +93,9 @@ static void lbtf_cmd_work(struct work_struct *work)
{
struct lbtf_private *priv = container_of(work, struct lbtf_private,
cmd_work);
+
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
spin_lock_irq(&priv->driver_lock);
/* command response? */
if (priv->cmd_response_rxed) {
@@ -108,11 +123,16 @@ static void lbtf_cmd_work(struct work_struct *work)
priv->cmd_timed_out = 0;
spin_unlock_irq(&priv->driver_lock);
- if (!priv->fw_ready)
+ if (!priv->fw_ready) {
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "fw not ready");
return;
+ }
+
/* Execute the next command */
if (!priv->cur_cmd)
lbtf_execute_next_command(priv);
+
+ lbtf_deb_leave(LBTF_DEB_CMD);
}
/**
@@ -126,6 +146,7 @@ static int lbtf_setup_firmware(struct lbtf_private *priv)
{
int ret = -1;
+ lbtf_deb_enter(LBTF_DEB_FW);
/*
* Read priv address from HW
*/
@@ -141,6 +162,7 @@ static int lbtf_setup_firmware(struct lbtf_private *priv)
ret = 0;
done:
+ lbtf_deb_leave_args(LBTF_DEB_FW, "ret: %d", ret);
return ret;
}
@@ -152,6 +174,7 @@ static void command_timer_fn(unsigned long data)
{
struct lbtf_private *priv = (struct lbtf_private *)data;
unsigned long flags;
+ lbtf_deb_enter(LBTF_DEB_CMD);
spin_lock_irqsave(&priv->driver_lock, flags);
@@ -168,10 +191,12 @@ static void command_timer_fn(unsigned long data)
queue_work(lbtf_wq, &priv->cmd_work);
out:
spin_unlock_irqrestore(&priv->driver_lock, flags);
+ lbtf_deb_leave(LBTF_DEB_CMD);
}
static int lbtf_init_adapter(struct lbtf_private *priv)
{
+ lbtf_deb_enter(LBTF_DEB_MAIN);
memset(priv->current_addr, 0xff, ETH_ALEN);
mutex_init(&priv->lock);
@@ -188,13 +213,16 @@ static int lbtf_init_adapter(struct lbtf_private *priv)
if (lbtf_allocate_cmd_buffer(priv))
return -1;
+ lbtf_deb_leave(LBTF_DEB_MAIN);
return 0;
}
static void lbtf_free_adapter(struct lbtf_private *priv)
{
+ lbtf_deb_enter(LBTF_DEB_MAIN);
lbtf_free_cmd_buffer(priv);
del_timer(&priv->command_timer);
+ lbtf_deb_leave(LBTF_DEB_MAIN);
}
static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
@@ -221,14 +249,18 @@ static void lbtf_tx_work(struct work_struct *work)
struct sk_buff *skb = NULL;
int err;
+ lbtf_deb_enter(LBTF_DEB_MACOPS | LBTF_DEB_TX);
+
if ((priv->vif->type == NL80211_IFTYPE_AP) &&
(!skb_queue_empty(&priv->bc_ps_buf)))
skb = skb_dequeue(&priv->bc_ps_buf);
else if (priv->skb_to_tx) {
skb = priv->skb_to_tx;
priv->skb_to_tx = NULL;
- } else
+ } else {
+ lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
return;
+ }
len = skb->len;
info = IEEE80211_SKB_CB(skb);
@@ -236,6 +268,7 @@ static void lbtf_tx_work(struct work_struct *work)
if (priv->surpriseremoved) {
dev_kfree_skb_any(skb);
+ lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
return;
}
@@ -249,6 +282,7 @@ static void lbtf_tx_work(struct work_struct *work)
ETH_ALEN);
txpd->tx_packet_length = cpu_to_le16(len);
txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));
+ lbtf_deb_hex(LBTF_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100));
BUG_ON(priv->tx_skb);
spin_lock_irq(&priv->driver_lock);
priv->tx_skb = skb;
@@ -257,7 +291,9 @@ static void lbtf_tx_work(struct work_struct *work)
if (err) {
dev_kfree_skb_any(skb);
priv->tx_skb = NULL;
+		pr_err("TX error: %d\n", err);
}
+ lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
}
static int lbtf_op_start(struct ieee80211_hw *hw)
@@ -266,6 +302,8 @@ static int lbtf_op_start(struct ieee80211_hw *hw)
void *card = priv->card;
int ret = -1;
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
+
if (!priv->fw_ready)
/* Upload firmware */
if (priv->hw_prog_firmware(card))
@@ -286,10 +324,12 @@ static int lbtf_op_start(struct ieee80211_hw *hw)
}
printk(KERN_INFO "libertastf: Marvell WLAN 802.11 thinfirm adapter\n");
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
return 0;
err_prog_firmware:
priv->hw_reset_device(card);
+	lbtf_deb_leave_args(LBTF_DEB_MACOPS, "error programming fw; ret=%d", ret);
return ret;
}
@@ -300,6 +340,9 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
struct sk_buff *skb;
struct cmd_ctrl_node *cmdnode;
+
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
+
/* Flush pending command nodes */
spin_lock_irqsave(&priv->driver_lock, flags);
list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
@@ -316,13 +359,14 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
priv->radioon = RADIO_OFF;
lbtf_set_radio_control(priv);
- return;
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
}
static int lbtf_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct lbtf_private *priv = hw->priv;
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
if (priv->vif != NULL)
return -EOPNOTSUPP;
@@ -340,6 +384,7 @@ static int lbtf_op_add_interface(struct ieee80211_hw *hw,
return -EOPNOTSUPP;
}
lbtf_set_mac_address(priv, (u8 *) vif->addr);
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
return 0;
}
@@ -347,6 +392,7 @@ static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct lbtf_private *priv = hw->priv;
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
if (priv->vif->type == NL80211_IFTYPE_AP ||
priv->vif->type == NL80211_IFTYPE_MESH_POINT)
@@ -354,37 +400,38 @@ static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
lbtf_set_mode(priv, LBTF_PASSIVE_MODE);
lbtf_set_bssid(priv, 0, NULL);
priv->vif = NULL;
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
}
static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct lbtf_private *priv = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
if (conf->channel->center_freq != priv->cur_freq) {
priv->cur_freq = conf->channel->center_freq;
lbtf_set_channel(priv, conf->channel->hw_value);
}
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
return 0;
}
static u64 lbtf_op_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
struct lbtf_private *priv = hw->priv;
int i;
+ struct netdev_hw_addr *ha;
+ int mc_count = netdev_hw_addr_list_count(mc_list);
if (!mc_count || mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE)
return mc_count;
priv->nr_of_multicastmacaddr = mc_count;
- for (i = 0; i < mc_count; i++) {
- if (!mclist)
- break;
- memcpy(&priv->multicastlist[i], mclist->da_addr,
- ETH_ALEN);
- mclist = mclist->next;
- }
+ i = 0;
+ netdev_hw_addr_list_for_each(ha, mc_list)
+ memcpy(&priv->multicastlist[i++], ha->addr, ETH_ALEN);
return mc_count;
}
@@ -397,11 +444,16 @@ static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
{
struct lbtf_private *priv = hw->priv;
int old_mac_control = priv->mac_control;
+
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
+
changed_flags &= SUPPORTED_FIF_FLAGS;
*new_flags &= SUPPORTED_FIF_FLAGS;
- if (!changed_flags)
+ if (!changed_flags) {
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
return;
+ }
if (*new_flags & (FIF_PROMISC_IN_BSS))
priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE;
@@ -427,6 +479,8 @@ static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
if (priv->mac_control != old_mac_control)
lbtf_set_mac_control(priv);
+
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
}
static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
@@ -436,6 +490,7 @@ static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
{
struct lbtf_private *priv = hw->priv;
struct sk_buff *beacon;
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_INT)) {
switch (priv->vif->type) {
@@ -466,6 +521,8 @@ static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
priv->preamble = CMD_TYPE_LONG_PREAMBLE;
lbtf_set_radio_control(priv);
}
+
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
}
static const struct ieee80211_ops lbtf_ops = {
@@ -488,6 +545,8 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
unsigned int flags;
struct ieee80211_hdr *hdr;
+ lbtf_deb_enter(LBTF_DEB_RX);
+
prxpd = (struct rxpd *) skb->data;
stats.flag = 0;
@@ -496,7 +555,6 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
stats.freq = priv->cur_freq;
stats.band = IEEE80211_BAND_2GHZ;
stats.signal = prxpd->snr;
- stats.noise = prxpd->nf;
/* Marvell rate index has a hole at value 4 */
if (prxpd->rx_rate > 4)
--prxpd->rx_rate;
@@ -518,7 +576,15 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
}
memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
+
+ lbtf_deb_rx("rx data: skb->len-sizeof(RxPd) = %d-%zd = %zd\n",
+ skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd));
+ lbtf_deb_hex(LBTF_DEB_RX, "RX Data", skb->data,
+ min_t(unsigned int, skb->len, 100));
+
ieee80211_rx_irqsafe(priv->hw, skb);
+
+ lbtf_deb_leave(LBTF_DEB_RX);
return 0;
}
EXPORT_SYMBOL_GPL(lbtf_rx);
@@ -535,6 +601,8 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
struct ieee80211_hw *hw;
struct lbtf_private *priv = NULL;
+ lbtf_deb_enter(LBTF_DEB_MAIN);
+
hw = ieee80211_alloc_hw(sizeof(struct lbtf_private), &lbtf_ops);
if (!hw)
goto done;
@@ -577,6 +645,7 @@ err_init_adapter:
priv = NULL;
done:
+ lbtf_deb_leave_args(LBTF_DEB_MAIN, "priv %p", priv);
return priv;
}
EXPORT_SYMBOL_GPL(lbtf_add_card);
@@ -586,6 +655,8 @@ int lbtf_remove_card(struct lbtf_private *priv)
{
struct ieee80211_hw *hw = priv->hw;
+ lbtf_deb_enter(LBTF_DEB_MAIN);
+
priv->surpriseremoved = 1;
del_timer(&priv->command_timer);
lbtf_free_adapter(priv);
@@ -593,6 +664,7 @@ int lbtf_remove_card(struct lbtf_private *priv)
ieee80211_unregister_hw(hw);
ieee80211_free_hw(hw);
+ lbtf_deb_leave(LBTF_DEB_MAIN);
return 0;
}
EXPORT_SYMBOL_GPL(lbtf_remove_card);
@@ -651,17 +723,21 @@ EXPORT_SYMBOL_GPL(lbtf_bcn_sent);
static int __init lbtf_init_module(void)
{
+ lbtf_deb_enter(LBTF_DEB_MAIN);
lbtf_wq = create_workqueue("libertastf");
if (lbtf_wq == NULL) {
printk(KERN_ERR "libertastf: couldn't create workqueue\n");
return -ENOMEM;
}
+ lbtf_deb_leave(LBTF_DEB_MAIN);
return 0;
}
static void __exit lbtf_exit_module(void)
{
+ lbtf_deb_enter(LBTF_DEB_MAIN);
destroy_workqueue(lbtf_wq);
+ lbtf_deb_leave(LBTF_DEB_MAIN);
}
module_init(lbtf_init_module);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7cd5f56..6f8cb3e 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -291,7 +291,8 @@ struct mac80211_hwsim_data {
struct ieee80211_channel *channel;
unsigned long beacon_int; /* in jiffies unit */
unsigned int rx_filter;
- bool started, idle;
+ bool started, idle, scanning;
+ struct mutex mutex;
struct timer_list beacon_timer;
enum ps_mode {
PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
@@ -651,17 +652,17 @@ static void mac80211_hwsim_beacon(unsigned long arg)
add_timer(&data->beacon_timer);
}
+static const char *hwsim_chantypes[] = {
+ [NL80211_CHAN_NO_HT] = "noht",
+ [NL80211_CHAN_HT20] = "ht20",
+ [NL80211_CHAN_HT40MINUS] = "ht40-",
+ [NL80211_CHAN_HT40PLUS] = "ht40+",
+};
static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
{
struct mac80211_hwsim_data *data = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
- static const char *chantypes[4] = {
- [NL80211_CHAN_NO_HT] = "noht",
- [NL80211_CHAN_HT20] = "ht20",
- [NL80211_CHAN_HT40MINUS] = "ht40-",
- [NL80211_CHAN_HT40PLUS] = "ht40+",
- };
static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
[IEEE80211_SMPS_AUTOMATIC] = "auto",
[IEEE80211_SMPS_OFF] = "off",
@@ -672,7 +673,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
printk(KERN_DEBUG "%s:%s (freq=%d/%s idle=%d ps=%d smps=%s)\n",
wiphy_name(hw->wiphy), __func__,
conf->channel->center_freq,
- chantypes[conf->channel_type],
+ hwsim_chantypes[conf->channel_type],
!!(conf->flags & IEEE80211_CONF_IDLE),
!!(conf->flags & IEEE80211_CONF_PS),
smps_modes[conf->smps_mode]);
@@ -760,9 +761,10 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_HT) {
- printk(KERN_DEBUG " %s: HT: op_mode=0x%x\n",
+ printk(KERN_DEBUG " %s: HT: op_mode=0x%x, chantype=%s\n",
wiphy_name(hw->wiphy),
- info->ht_operation_mode);
+ info->ht_operation_mode,
+ hwsim_chantypes[info->channel_type]);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
@@ -829,6 +831,33 @@ static int mac80211_hwsim_conf_tx(
return 0;
}
+static int mac80211_hwsim_get_survey(
+ struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ieee80211_conf *conf = &hw->conf;
+
+ printk(KERN_DEBUG "%s:%s (idx=%d)\n",
+ wiphy_name(hw->wiphy), __func__, idx);
+
+ if (idx != 0)
+ return -ENOENT;
+
+ /* Current channel */
+ survey->channel = conf->channel;
+
+ /*
+ * Magically conjured noise level --- this is only ok for simulated hardware.
+ *
+ * A real driver which cannot determine the real channel noise MUST NOT
+ * report any noise, especially not a magically conjured one :-)
+ */
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = -92;
+
+ return 0;
+}
+
#ifdef CONFIG_NL80211_TESTMODE
/*
* This section contains example code for using netlink
@@ -946,6 +975,7 @@ static void hw_scan_done(struct work_struct *work)
}
static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct hw_scan_done *hsd = kzalloc(sizeof(*hsd), GFP_KERNEL);
@@ -957,9 +987,9 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
hsd->hw = hw;
INIT_DELAYED_WORK(&hsd->w, hw_scan_done);
- printk(KERN_DEBUG "hwsim scan request\n");
+ printk(KERN_DEBUG "hwsim hw_scan request\n");
for (i = 0; i < req->n_channels; i++)
- printk(KERN_DEBUG "hwsim scan freq %d\n",
+ printk(KERN_DEBUG "hwsim hw_scan freq %d\n",
req->channels[i]->center_freq);
ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ);
@@ -967,6 +997,36 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
return 0;
}
+static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw)
+{
+ struct mac80211_hwsim_data *hwsim = hw->priv;
+
+ mutex_lock(&hwsim->mutex);
+
+ if (hwsim->scanning) {
+ printk(KERN_DEBUG "two hwsim sw_scans detected!\n");
+ goto out;
+ }
+
+ printk(KERN_DEBUG "hwsim sw_scan request, prepping stuff\n");
+ hwsim->scanning = true;
+
+out:
+ mutex_unlock(&hwsim->mutex);
+}
+
+static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct mac80211_hwsim_data *hwsim = hw->priv;
+
+ mutex_lock(&hwsim->mutex);
+
+ printk(KERN_DEBUG "hwsim sw_scan_complete\n");
+ hwsim->scanning = false;
+
+ mutex_unlock(&hwsim->mutex);
+}
+
static struct ieee80211_ops mac80211_hwsim_ops =
{
.tx = mac80211_hwsim_tx,
@@ -982,8 +1042,11 @@ static struct ieee80211_ops mac80211_hwsim_ops =
.sta_notify = mac80211_hwsim_sta_notify,
.set_tim = mac80211_hwsim_set_tim,
.conf_tx = mac80211_hwsim_conf_tx,
+ .get_survey = mac80211_hwsim_get_survey,
CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd)
.ampdu_action = mac80211_hwsim_ampdu_action,
+ .sw_scan_start = mac80211_hwsim_sw_scan,
+ .sw_scan_complete = mac80211_hwsim_sw_scan_complete,
.flush = mac80211_hwsim_flush,
};
@@ -1179,8 +1242,11 @@ static int __init init_mac80211_hwsim(void)
if (radios < 1 || radios > 100)
return -EINVAL;
- if (fake_hw_scan)
+ if (fake_hw_scan) {
mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
+ mac80211_hwsim_ops.sw_scan_start = NULL;
+ mac80211_hwsim_ops.sw_scan_complete = NULL;
+ }
spin_lock_init(&hwsim_radio_lock);
INIT_LIST_HEAD(&hwsim_radios);
@@ -1235,7 +1301,8 @@ static int __init init_mac80211_hwsim(void)
hw->flags = IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_STATIC_SMPS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_AMPDU_AGGREGATION;
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
@@ -1285,6 +1352,7 @@ static int __init init_mac80211_hwsim(void)
}
/* By default all radios are belonging to the first group */
data->group = 1;
+ mutex_init(&data->mutex);
/* Work to be done prior to ieee80211_register_hw() */
switch (regtest) {
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 12fdcb2..808adb9 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -750,7 +750,6 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
memset(status, 0, sizeof(*status));
status->signal = -rxd->rssi;
- status->noise = -rxd->noise_floor;
if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) {
status->flag |= RX_FLAG_HT;
@@ -852,7 +851,6 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
memset(status, 0, sizeof(*status));
status->signal = -rxd->rssi;
- status->noise = -rxd->noise_level;
status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info);
status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info);
@@ -1939,11 +1937,15 @@ struct mwl8k_cmd_mac_multicast_adr {
static struct mwl8k_cmd_pkt *
__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_cmd_mac_multicast_adr *cmd;
int size;
+ int mc_count = 0;
+
+ if (mc_list)
+ mc_count = netdev_hw_addr_list_count(mc_list);
if (allmulti || mc_count > priv->num_mcaddrs) {
allmulti = 1;
@@ -1964,17 +1966,13 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
if (allmulti) {
cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_ALL_MULTICAST);
} else if (mc_count) {
- int i;
+ struct netdev_hw_addr *ha;
+ int i = 0;
cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
cmd->numaddr = cpu_to_le16(mc_count);
- for (i = 0; i < mc_count && mclist; i++) {
- if (mclist->da_addrlen != ETH_ALEN) {
- kfree(cmd);
- return NULL;
- }
- memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN);
- mclist = mclist->next;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+			memcpy(cmd->addr[i++], ha->addr, ETH_ALEN);
}
}
@@ -3553,7 +3551,7 @@ mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
struct mwl8k_cmd_pkt *cmd;
@@ -3564,7 +3562,7 @@ static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
* we'll end up throwing this packet away and creating a new
* one in mwl8k_configure_filter().
*/
- cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_count, mclist);
+ cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_list);
return (unsigned long)cmd;
}
@@ -3687,7 +3685,7 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
*/
if (*total_flags & FIF_ALLMULTI) {
kfree(cmd);
- cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, 0, NULL);
+ cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, NULL);
}
if (cmd != NULL) {
@@ -3984,8 +3982,8 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
hw->queues = MWL8K_TX_QUEUES;
- /* Set rssi and noise values to dBm */
- hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
+ /* Set rssi values to dBm */
+ hw->flags |= IEEE80211_HW_SIGNAL_DBM;
hw->vif_data_size = sizeof(struct mwl8k_vif);
hw->sta_data_size = sizeof(struct mwl8k_sta);
diff --git a/drivers/net/wireless/orinoco/Kconfig b/drivers/net/wireless/orinoco/Kconfig
index e2a2c18..60819bc 100644
--- a/drivers/net/wireless/orinoco/Kconfig
+++ b/drivers/net/wireless/orinoco/Kconfig
@@ -27,6 +27,17 @@ config HERMES
configure your card and that /etc/pcmcia/wireless.opts works :
<http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>
+config HERMES_PRISM
+ bool "Support Prism 2/2.5 chipset"
+ depends on HERMES
+ ---help---
+
+ Say Y to enable support for Prism 2 and 2.5 chipsets. These
+	  chipsets are better handled by the hostap driver. This driver
+	  does not support WPA or firmware download for Prism chipsets.
+
+ If you are not sure, say N.
+
config HERMES_CACHE_FW_ON_INIT
bool "Cache Hermes firmware on driver initialisation"
depends on HERMES
@@ -86,7 +97,7 @@ config NORTEL_HERMES
config PCI_HERMES
tristate "Prism 2.5 PCI 802.11b adaptor support"
- depends on PCI && HERMES
+ depends on PCI && HERMES && HERMES_PRISM
help
Enable support for PCI and mini-PCI 802.11b wireless NICs based on
the Prism 2.5 chipset. These are true PCI cards, not the 802.11b
@@ -121,3 +132,10 @@ config PCMCIA_SPECTRUM
This driver requires firmware download on startup. Utilities
for downloading Symbol firmware are available at
<http://sourceforge.net/projects/orinoco/>
+
+config ORINOCO_USB
+ tristate "Agere Orinoco USB support"
+ depends on USB && HERMES
+ select FW_LOADER
+ ---help---
+ This driver is for USB versions of the Agere Orinoco card.
diff --git a/drivers/net/wireless/orinoco/Makefile b/drivers/net/wireless/orinoco/Makefile
index 9abd632..bfdefb8 100644
--- a/drivers/net/wireless/orinoco/Makefile
+++ b/drivers/net/wireless/orinoco/Makefile
@@ -11,3 +11,7 @@ obj-$(CONFIG_PCI_HERMES) += orinoco_pci.o
obj-$(CONFIG_TMD_HERMES) += orinoco_tmd.o
obj-$(CONFIG_NORTEL_HERMES) += orinoco_nortel.o
obj-$(CONFIG_PCMCIA_SPECTRUM) += spectrum_cs.o
+obj-$(CONFIG_ORINOCO_USB) += orinoco_usb.o
+
+# Orinoco should be endian clean.
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/orinoco/airport.c b/drivers/net/wireless/orinoco/airport.c
index c60df2c..9bcee10 100644
--- a/drivers/net/wireless/orinoco/airport.c
+++ b/drivers/net/wireless/orinoco/airport.c
@@ -77,9 +77,9 @@ airport_resume(struct macio_dev *mdev)
enable_irq(card->irq);
- spin_lock_irqsave(&priv->lock, flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, &flags);
err = orinoco_up(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
return err;
}
@@ -195,7 +195,7 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
ssleep(1);
/* Reset it before we get the interrupt */
- hermes_init(hw);
+ hw->ops->init(hw);
if (request_irq(card->irq, orinoco_interrupt, 0, DRIVER_NAME, priv)) {
printk(KERN_ERR PFX "Couldn't get IRQ %d\n", card->irq);
@@ -210,7 +210,7 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
}
/* Register an interface with the stack */
- if (orinoco_if_add(priv, phys_addr, card->irq) != 0) {
+ if (orinoco_if_add(priv, phys_addr, card->irq, NULL) != 0) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto failed;
}
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index 27f2d33..8c4169c 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -88,7 +88,9 @@ int orinoco_wiphy_register(struct wiphy *wiphy)
wiphy->rts_threshold = priv->rts_thresh;
if (!priv->has_mwo)
- wiphy->frag_threshold = priv->frag_thresh;
+ wiphy->frag_threshold = priv->frag_thresh + 1;
+ wiphy->retry_short = priv->short_retry_limit;
+ wiphy->retry_long = priv->long_retry_limit;
return wiphy_register(wiphy);
}
@@ -157,6 +159,7 @@ static int orinoco_scan(struct wiphy *wiphy, struct net_device *dev,
}
static int orinoco_set_channel(struct wiphy *wiphy,
+ struct net_device *netdev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
@@ -187,7 +190,7 @@ static int orinoco_set_channel(struct wiphy *wiphy,
if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
/* Fast channel change - no commit if successful */
hermes_t *hw = &priv->hw;
- err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
+ err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
HERMES_TEST_SET_CHANNEL,
channel, NULL);
}
@@ -196,8 +199,92 @@ static int orinoco_set_channel(struct wiphy *wiphy,
return err;
}
+static int orinoco_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct orinoco_private *priv = wiphy_priv(wiphy);
+ int frag_value = -1;
+ int rts_value = -1;
+ int err = 0;
+
+ if (changed & WIPHY_PARAM_RETRY_SHORT) {
+ /* Setting short retry not supported */
+ err = -EINVAL;
+ }
+
+ if (changed & WIPHY_PARAM_RETRY_LONG) {
+ /* Setting long retry not supported */
+ err = -EINVAL;
+ }
+
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
+ /* Set fragmentation */
+ if (priv->has_mwo) {
+ if (wiphy->frag_threshold < 0)
+ frag_value = 0;
+ else {
+ printk(KERN_WARNING "%s: Fixed fragmentation "
+ "is not supported on this firmware. "
+ "Using MWO robust instead.\n",
+ priv->ndev->name);
+ frag_value = 1;
+ }
+ } else {
+ if (wiphy->frag_threshold < 0)
+ frag_value = 2346;
+ else if ((wiphy->frag_threshold < 257) ||
+ (wiphy->frag_threshold > 2347))
+ err = -EINVAL;
+ else
+ /* cfg80211 value is 257-2347 (odd only)
+ * orinoco rid has range 256-2346 (even only) */
+ frag_value = wiphy->frag_threshold & ~0x1;
+ }
+ }
+
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
+ /* Set RTS.
+ *
+ * Prism documentation suggests default of 2432,
+ * and a range of 0-3000.
+ *
+ * Current implementation uses 2347 as the default and
+ * the upper limit.
+ */
+
+ if (wiphy->rts_threshold < 0)
+ rts_value = 2347;
+ else if (wiphy->rts_threshold > 2347)
+ err = -EINVAL;
+ else
+ rts_value = wiphy->rts_threshold;
+ }
+
+ if (!err) {
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ if (frag_value >= 0) {
+ if (priv->has_mwo)
+ priv->mwo_robust = frag_value;
+ else
+ priv->frag_thresh = frag_value;
+ }
+ if (rts_value >= 0)
+ priv->rts_thresh = rts_value;
+
+ err = orinoco_commit(priv);
+
+ orinoco_unlock(priv, &flags);
+ }
+
+ return err;
+}
+
const struct cfg80211_ops orinoco_cfg_ops = {
.change_virtual_intf = orinoco_change_vif,
.set_channel = orinoco_set_channel,
.scan = orinoco_scan,
+ .set_wiphy_params = orinoco_set_wiphy_params,
};
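
One detail of orinoco_set_wiphy_params() above worth spelling out: cfg80211 carries the fragmentation threshold as an odd value in 257-2347, while the firmware RID takes an even value in 256-2346, hence the & ~0x1 on the way in and the priv->frag_thresh + 1 when orinoco_wiphy_register() reports it back. A throwaway sketch of that round trip, assuming only <linux/types.h>:

static void example_frag_round_trip(void)
{
	u32 cfg_frag = 2347;		/* odd value set through cfg80211 */
	u16 rid_frag = cfg_frag & ~0x1;	/* 2346, what ends up in priv->frag_thresh */
	u32 reported = rid_frag + 1;	/* 2347, what orinoco_wiphy_register() exports */

	(void)reported;
}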
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 5ea0f7c..3e1947d 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -122,7 +122,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
dev_dbg(dev, "Attempting to download firmware %s\n", firmware);
/* Read current plug data */
- err = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 0);
+ err = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
dev_dbg(dev, "Read PDA returned %d\n", err);
if (err)
goto free;
@@ -149,7 +149,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
}
/* Enable aux port to allow programming */
- err = hermesi_program_init(hw, le32_to_cpu(hdr->entry_point));
+ err = hw->ops->program_init(hw, le32_to_cpu(hdr->entry_point));
dev_dbg(dev, "Program init returned %d\n", err);
if (err != 0)
goto abort;
@@ -177,7 +177,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
goto abort;
/* Tell card we've finished */
- err = hermesi_program_end(hw);
+ err = hw->ops->program_end(hw);
dev_dbg(dev, "Program end returned %d\n", err);
if (err != 0)
goto abort;
@@ -224,7 +224,7 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
if (!pda)
return -ENOMEM;
- ret = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 1);
+ ret = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
if (ret)
goto free;
}
@@ -260,7 +260,7 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
}
/* Reset hermes chip and make sure it responds */
- ret = hermes_init(hw);
+ ret = hw->ops->init(hw);
/* hermes_reset() should return 0 with the secondary firmware */
if (secondary && ret != 0)
diff --git a/drivers/net/wireless/orinoco/hermes.c b/drivers/net/wireless/orinoco/hermes.c
index 1a2fca7..6c6a23e 100644
--- a/drivers/net/wireless/orinoco/hermes.c
+++ b/drivers/net/wireless/orinoco/hermes.c
@@ -52,6 +52,26 @@
#define ALLOC_COMPL_TIMEOUT (1000) /* in iterations of ~10us */
/*
+ * AUX port access. To unlock the AUX port write the access keys to the
+ * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
+ * register. Then read it and make sure it's HERMES_AUX_ENABLED.
+ */
+#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
+#define HERMES_AUX_DISABLE	0x4000	/* Disable auxiliary port access */
+#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
+#define HERMES_AUX_DISABLED 0x0000 /* Auxiliary port is closed */
+
+#define HERMES_AUX_PW0 0xFE01
+#define HERMES_AUX_PW1 0xDC23
+#define HERMES_AUX_PW2 0xBA45
+
+/* HERMES_CMD_DOWNLD */
+#define HERMES_PROGRAM_DISABLE (0x0000 | HERMES_CMD_DOWNLD)
+#define HERMES_PROGRAM_ENABLE_VOLATILE (0x0100 | HERMES_CMD_DOWNLD)
+#define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD)
+#define HERMES_PROGRAM_NON_VOLATILE (0x0300 | HERMES_CMD_DOWNLD)
+
+/*
* Debugging helpers
*/
@@ -70,6 +90,7 @@
#endif /* ! HERMES_DEBUG */
+static const struct hermes_ops hermes_ops_local;
/*
* Internal functions
@@ -111,9 +132,9 @@ static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0,
*/
/* For doing cmds that wipe the magic constant in SWSUPPORT0 */
-int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
- u16 parm0, u16 parm1, u16 parm2,
- struct hermes_response *resp)
+static int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
+ u16 parm0, u16 parm1, u16 parm2,
+ struct hermes_response *resp)
{
int err = 0;
int k;
@@ -163,17 +184,18 @@ int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
out:
return err;
}
-EXPORT_SYMBOL(hermes_doicmd_wait);
void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing)
{
hw->iobase = address;
hw->reg_spacing = reg_spacing;
hw->inten = 0x0;
+ hw->eeprom_pda = false;
+ hw->ops = &hermes_ops_local;
}
EXPORT_SYMBOL(hermes_struct_init);
-int hermes_init(hermes_t *hw)
+static int hermes_init(hermes_t *hw)
{
u16 reg;
int err = 0;
@@ -217,7 +239,6 @@ int hermes_init(hermes_t *hw)
return err;
}
-EXPORT_SYMBOL(hermes_init);
/* Issue a command to the chip, and (busy!) wait for it to
* complete.
@@ -228,8 +249,8 @@ EXPORT_SYMBOL(hermes_init);
* > 0 on error returned by the firmware
*
* Callable from any context, but locking is your problem. */
-int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
- struct hermes_response *resp)
+static int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
+ struct hermes_response *resp)
{
int err;
int k;
@@ -291,9 +312,8 @@ int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
out:
return err;
}
-EXPORT_SYMBOL(hermes_docmd_wait);
-int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
+static int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
{
int err = 0;
int k;
@@ -333,7 +353,6 @@ int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
return 0;
}
-EXPORT_SYMBOL(hermes_allocate);
/* Set up a BAP to read a particular chunk of data from card's internal buffer.
*
@@ -403,8 +422,8 @@ static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
* 0 on success
* > 0 on error from firmware
*/
-int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
- u16 id, u16 offset)
+static int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
+ u16 id, u16 offset)
{
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
int err = 0;
@@ -422,7 +441,6 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
out:
return err;
}
-EXPORT_SYMBOL(hermes_bap_pread);
/* Write a block of data to the chip's buffer, via the
* BAP. Synchronization/serialization is the caller's problem.
@@ -432,8 +450,8 @@ EXPORT_SYMBOL(hermes_bap_pread);
* 0 on success
* > 0 on error from firmware
*/
-int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
- u16 id, u16 offset)
+static int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
+ u16 id, u16 offset)
{
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
int err = 0;
@@ -451,7 +469,6 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
out:
return err;
}
-EXPORT_SYMBOL(hermes_bap_pwrite);
/* Read a Length-Type-Value record from the card.
*
@@ -461,8 +478,8 @@ EXPORT_SYMBOL(hermes_bap_pwrite);
* practice.
*
* Callable from user or bh context. */
-int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
- u16 *length, void *buf)
+static int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
+ u16 *length, void *buf)
{
int err = 0;
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
@@ -505,10 +522,9 @@ int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
return 0;
}
-EXPORT_SYMBOL(hermes_read_ltv);
-int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
- u16 length, const void *value)
+static int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
+ u16 length, const void *value)
{
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
int err = 0;
@@ -533,4 +549,228 @@ int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
return err;
}
-EXPORT_SYMBOL(hermes_write_ltv);
+
+/*** Hermes AUX control ***/
+
+static inline void
+hermes_aux_setaddr(hermes_t *hw, u32 addr)
+{
+ hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
+ hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
+}
+
+static inline int
+hermes_aux_control(hermes_t *hw, int enabled)
+{
+ int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
+ int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
+ int i;
+
+ /* Already open? */
+ if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state)
+ return 0;
+
+ hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
+ hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
+ hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
+ hermes_write_reg(hw, HERMES_CONTROL, action);
+
+ for (i = 0; i < 20; i++) {
+ udelay(10);
+ if (hermes_read_reg(hw, HERMES_CONTROL) ==
+ desired_state)
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+/*** Hermes programming ***/
+
+/* About to start programming data (Hermes I)
+ * offset is the entry point
+ *
+ * Spectrum_cs' Symbol fw does not require this
+ * wl_lkm Agere fw does
+ * Don't know about intersil
+ */
+static int hermesi_program_init(hermes_t *hw, u32 offset)
+{
+ int err;
+
+ /* Disable interrupts?*/
+ /*hw->inten = 0x0;*/
+ /*hermes_write_regn(hw, INTEN, 0);*/
+ /*hermes_set_irqmask(hw, 0);*/
+
+ /* Acknowledge any outstanding command */
+ hermes_write_regn(hw, EVACK, 0xFFFF);
+
+ /* Using init_cmd_wait rather than cmd_wait */
+ err = hw->ops->init_cmd_wait(hw,
+ 0x0100 | HERMES_CMD_INIT,
+ 0, 0, 0, NULL);
+ if (err)
+ return err;
+
+ err = hw->ops->init_cmd_wait(hw,
+ 0x0000 | HERMES_CMD_INIT,
+ 0, 0, 0, NULL);
+ if (err)
+ return err;
+
+ err = hermes_aux_control(hw, 1);
+ pr_debug("AUX enable returned %d\n", err);
+
+ if (err)
+ return err;
+
+ pr_debug("Enabling volatile, EP 0x%08x\n", offset);
+ err = hw->ops->init_cmd_wait(hw,
+ HERMES_PROGRAM_ENABLE_VOLATILE,
+ offset & 0xFFFFu,
+ offset >> 16,
+ 0,
+ NULL);
+ pr_debug("PROGRAM_ENABLE returned %d\n", err);
+
+ return err;
+}
+
+/* Done programming data (Hermes I)
+ *
+ * Spectrum_cs' Symbol fw does not require this
+ * wl_lkm Agere fw does
+ * Don't know about intersil
+ */
+static int hermesi_program_end(hermes_t *hw)
+{
+ struct hermes_response resp;
+ int rc = 0;
+ int err;
+
+ rc = hw->ops->cmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp);
+
+ pr_debug("PROGRAM_DISABLE returned %d, "
+ "r0 0x%04x, r1 0x%04x, r2 0x%04x\n",
+ rc, resp.resp0, resp.resp1, resp.resp2);
+
+ if ((rc == 0) &&
+ ((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD))
+ rc = -EIO;
+
+ err = hermes_aux_control(hw, 0);
+ pr_debug("AUX disable returned %d\n", err);
+
+ /* Acknowledge any outstanding command */
+ hermes_write_regn(hw, EVACK, 0xFFFF);
+
+ /* Reinitialise, ignoring return */
+ (void) hw->ops->init_cmd_wait(hw, 0x0000 | HERMES_CMD_INIT,
+ 0, 0, 0, NULL);
+
+ return rc ? rc : err;
+}
+
+static int hermes_program_bytes(struct hermes *hw, const char *data,
+ u32 addr, u32 len)
+{
+ /* wl lkm splits the programming into chunks of 2000 bytes.
+ * This restriction appears to come from USB. The PCMCIA
+ * adapters can program the whole lot in one go */
+ hermes_aux_setaddr(hw, addr);
+ hermes_write_bytes(hw, HERMES_AUXDATA, data, len);
+ return 0;
+}
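The comment above notes that wl_lkm splits programming into 2000-byte chunks because of a USB transfer limit, while the PCMCIA adapters can write a whole block in one go. A bus with such a limit could supply a chunked .program hook instead; the following is only a minimal sketch, assuming a hypothetical 2000-byte cap and reusing the AUX helpers defined above:

#define HERMES_MAX_CHUNK 2000	/* hypothetical per-transfer limit */

static int hermes_program_bytes_chunked(struct hermes *hw, const char *data,
					u32 addr, u32 len)
{
	while (len) {
		u32 chunk = min_t(u32, len, HERMES_MAX_CHUNK);

		/* Aim the AUX window at the destination, then push one chunk */
		hermes_aux_setaddr(hw, addr);
		hermes_write_bytes(hw, HERMES_AUXDATA, data, chunk);

		data += chunk;
		addr += chunk;
		len  -= chunk;
	}
	return 0;
}

The single-shot version above remains the .program hook in the local-bus ops table further down.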
+
+/* Read PDA from the adapter */
+static int hermes_read_pda(hermes_t *hw, __le16 *pda, u32 pda_addr, u16 pda_len)
+{
+ int ret;
+ u16 pda_size;
+ u16 data_len = pda_len;
+ __le16 *data = pda;
+
+ if (hw->eeprom_pda) {
+ /* PDA of spectrum symbol is in eeprom */
+
+ /* Issue command to read EEPROM */
+ ret = hw->ops->cmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
+ if (ret)
+ return ret;
+ } else {
+ /* wl_lkm does not include PDA size in the PDA area.
+ * We will pad the information into pda, so other routines
+ * don't have to be modified */
+ pda[0] = cpu_to_le16(pda_len - 2);
+ /* Includes CFG_PROD_DATA but not itself */
+ pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
+ data_len = pda_len - 4;
+ data = pda + 2;
+ }
+
+ /* Open auxiliary port */
+ ret = hermes_aux_control(hw, 1);
+ pr_debug("AUX enable returned %d\n", ret);
+ if (ret)
+ return ret;
+
+ /* Read PDA */
+ hermes_aux_setaddr(hw, pda_addr);
+ hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2);
+
+ /* Close aux port */
+ ret = hermes_aux_control(hw, 0);
+ pr_debug("AUX disable returned %d\n", ret);
+
+ /* Check PDA length */
+ pda_size = le16_to_cpu(pda[0]);
+ pr_debug("Actual PDA length %d, Max allowed %d\n",
+ pda_size, pda_len);
+ if (pda_size > pda_len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void hermes_lock_irqsave(spinlock_t *lock,
+ unsigned long *flags) __acquires(lock)
+{
+ spin_lock_irqsave(lock, *flags);
+}
+
+static void hermes_unlock_irqrestore(spinlock_t *lock,
+ unsigned long *flags) __releases(lock)
+{
+ spin_unlock_irqrestore(lock, *flags);
+}
+
+static void hermes_lock_irq(spinlock_t *lock) __acquires(lock)
+{
+ spin_lock_irq(lock);
+}
+
+static void hermes_unlock_irq(spinlock_t *lock) __releases(lock)
+{
+ spin_unlock_irq(lock);
+}
+
+/* Hermes operations for local buses */
+static const struct hermes_ops hermes_ops_local = {
+ .init = hermes_init,
+ .cmd_wait = hermes_docmd_wait,
+ .init_cmd_wait = hermes_doicmd_wait,
+ .allocate = hermes_allocate,
+ .read_ltv = hermes_read_ltv,
+ .write_ltv = hermes_write_ltv,
+ .bap_pread = hermes_bap_pread,
+ .bap_pwrite = hermes_bap_pwrite,
+ .read_pda = hermes_read_pda,
+ .program_init = hermesi_program_init,
+ .program_end = hermesi_program_end,
+ .program = hermes_program_bytes,
+ .lock_irqsave = hermes_lock_irqsave,
+ .unlock_irqrestore = hermes_unlock_irqrestore,
+ .lock_irq = hermes_lock_irq,
+ .unlock_irq = hermes_unlock_irq,
+};
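This table is the point of the ops indirection: a different bus front-end can supply its own implementations and point hw->ops at them before the core starts issuing commands. A rough sketch with purely hypothetical mybus_* names, not functions defined by this patch:

/* Hypothetical sketch only: the mybus_* symbols are placeholders. */
static const struct hermes_ops hermes_ops_mybus = {
	.init      = mybus_init,
	.cmd_wait  = mybus_docmd_wait,
	/* ... remaining hooks filled in the same way ... */
	.program   = mybus_program_bytes,
};

static void mybus_attach_hermes(struct hermes *hw, void *bus_priv)
{
	hw->ops  = &hermes_ops_mybus;	/* bus-specific implementations */
	hw->priv = bus_priv;		/* back-pointer for those hooks */
}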
diff --git a/drivers/net/wireless/orinoco/hermes.h b/drivers/net/wireless/orinoco/hermes.h
index 2dddbb5..9ca34e7 100644
--- a/drivers/net/wireless/orinoco/hermes.h
+++ b/drivers/net/wireless/orinoco/hermes.h
@@ -374,6 +374,37 @@ struct hermes_multicast {
/* Timeouts */
#define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */
+struct hermes;
+
+/* Functions to access hardware */
+struct hermes_ops {
+ int (*init)(struct hermes *hw);
+ int (*cmd_wait)(struct hermes *hw, u16 cmd, u16 parm0,
+ struct hermes_response *resp);
+ int (*init_cmd_wait)(struct hermes *hw, u16 cmd,
+ u16 parm0, u16 parm1, u16 parm2,
+ struct hermes_response *resp);
+ int (*allocate)(struct hermes *hw, u16 size, u16 *fid);
+ int (*read_ltv)(struct hermes *hw, int bap, u16 rid, unsigned buflen,
+ u16 *length, void *buf);
+ int (*write_ltv)(struct hermes *hw, int bap, u16 rid,
+ u16 length, const void *value);
+ int (*bap_pread)(struct hermes *hw, int bap, void *buf, int len,
+ u16 id, u16 offset);
+ int (*bap_pwrite)(struct hermes *hw, int bap, const void *buf,
+ int len, u16 id, u16 offset);
+ int (*read_pda)(struct hermes *hw, __le16 *pda,
+ u32 pda_addr, u16 pda_len);
+ int (*program_init)(struct hermes *hw, u32 entry_point);
+ int (*program_end)(struct hermes *hw);
+ int (*program)(struct hermes *hw, const char *buf,
+ u32 addr, u32 len);
+ void (*lock_irqsave)(spinlock_t *lock, unsigned long *flags);
+ void (*unlock_irqrestore)(spinlock_t *lock, unsigned long *flags);
+ void (*lock_irq)(spinlock_t *lock);
+ void (*unlock_irq)(spinlock_t *lock);
+};
+
/* Basic control structure */
typedef struct hermes {
void __iomem *iobase;
@@ -381,6 +412,9 @@ typedef struct hermes {
#define HERMES_16BIT_REGSPACING 0
#define HERMES_32BIT_REGSPACING 1
u16 inten; /* Which interrupts should be enabled? */
+ bool eeprom_pda;
+ const struct hermes_ops *ops;
+ void *priv;
} hermes_t;
/* Register access convenience macros */
@@ -394,22 +428,6 @@ typedef struct hermes {
/* Function prototypes */
void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing);
-int hermes_init(hermes_t *hw);
-int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
- struct hermes_response *resp);
-int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
- u16 parm0, u16 parm1, u16 parm2,
- struct hermes_response *resp);
-int hermes_allocate(hermes_t *hw, u16 size, u16 *fid);
-
-int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
- u16 id, u16 offset);
-int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
- u16 id, u16 offset);
-int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen,
- u16 *length, void *buf);
-int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
- u16 length, const void *value);
/* Inline functions */
@@ -426,13 +444,13 @@ static inline void hermes_set_irqmask(hermes_t *hw, u16 events)
static inline int hermes_enable_port(hermes_t *hw, int port)
{
- return hermes_docmd_wait(hw, HERMES_CMD_ENABLE | (port << 8),
+ return hw->ops->cmd_wait(hw, HERMES_CMD_ENABLE | (port << 8),
0, NULL);
}
static inline int hermes_disable_port(hermes_t *hw, int port)
{
- return hermes_docmd_wait(hw, HERMES_CMD_DISABLE | (port << 8),
+ return hw->ops->cmd_wait(hw, HERMES_CMD_DISABLE | (port << 8),
0, NULL);
}
@@ -440,7 +458,7 @@ static inline int hermes_disable_port(hermes_t *hw, int port)
* information frame in __orinoco_ev_info() */
static inline int hermes_inquire(hermes_t *hw, u16 rid)
{
- return hermes_docmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL);
+ return hw->ops->cmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL);
}
#define HERMES_BYTES_TO_RECLEN(n) ((((n)+1)/2) + 1)
@@ -475,10 +493,10 @@ static inline void hermes_clear_words(struct hermes *hw, int off,
}
#define HERMES_READ_RECORD(hw, bap, rid, buf) \
- (hermes_read_ltv((hw), (bap), (rid), sizeof(*buf), NULL, (buf)))
+ (hw->ops->read_ltv((hw), (bap), (rid), sizeof(*buf), NULL, (buf)))
#define HERMES_WRITE_RECORD(hw, bap, rid, buf) \
- (hermes_write_ltv((hw), (bap), (rid), \
- HERMES_BYTES_TO_RECLEN(sizeof(*buf)), (buf)))
+ (hw->ops->write_ltv((hw), (bap), (rid), \
+ HERMES_BYTES_TO_RECLEN(sizeof(*buf)), (buf)))
static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
{
diff --git a/drivers/net/wireless/orinoco/hermes_dld.c b/drivers/net/wireless/orinoco/hermes_dld.c
index fb157eb..6da85e7 100644
--- a/drivers/net/wireless/orinoco/hermes_dld.c
+++ b/drivers/net/wireless/orinoco/hermes_dld.c
@@ -46,37 +46,11 @@
#define PFX "hermes_dld: "
-/*
- * AUX port access. To unlock the AUX port write the access keys to the
- * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
- * register. Then read it and make sure it's HERMES_AUX_ENABLED.
- */
-#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
-#define HERMES_AUX_DISABLE 0x4000 /* Disable to auxiliary port access */
-#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
-#define HERMES_AUX_DISABLED 0x0000 /* Auxiliary port is closed */
-
-#define HERMES_AUX_PW0 0xFE01
-#define HERMES_AUX_PW1 0xDC23
-#define HERMES_AUX_PW2 0xBA45
-
-/* HERMES_CMD_DOWNLD */
-#define HERMES_PROGRAM_DISABLE (0x0000 | HERMES_CMD_DOWNLD)
-#define HERMES_PROGRAM_ENABLE_VOLATILE (0x0100 | HERMES_CMD_DOWNLD)
-#define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD)
-#define HERMES_PROGRAM_NON_VOLATILE (0x0300 | HERMES_CMD_DOWNLD)
-
/* End markers used in dblocks */
#define PDI_END 0x00000000 /* End of PDA */
#define BLOCK_END 0xFFFFFFFF /* Last image block */
#define TEXT_END 0x1A /* End of text header */
-/* Limit the amout we try to download in a single shot.
- * Size is in bytes.
- */
-#define MAX_DL_SIZE 1024
-#define LIMIT_PROGRAM_SIZE 0
-
/*
* The following structures have little-endian fields denoted by
* the leading underscore. Don't access them directly - use inline
@@ -165,41 +139,6 @@ pdi_len(const struct pdi *pdi)
return 2 * (le16_to_cpu(pdi->len) - 1);
}
-/*** Hermes AUX control ***/
-
-static inline void
-hermes_aux_setaddr(hermes_t *hw, u32 addr)
-{
- hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
- hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
-}
-
-static inline int
-hermes_aux_control(hermes_t *hw, int enabled)
-{
- int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
- int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
- int i;
-
- /* Already open? */
- if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state)
- return 0;
-
- hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
- hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
- hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
- hermes_write_reg(hw, HERMES_CONTROL, action);
-
- for (i = 0; i < 20; i++) {
- udelay(10);
- if (hermes_read_reg(hw, HERMES_CONTROL) ==
- desired_state)
- return 0;
- }
-
- return -EBUSY;
-}
-
/*** Plug Data Functions ***/
/*
@@ -271,62 +210,7 @@ hermes_plug_pdi(hermes_t *hw, const struct pdr *first_pdr,
return -EINVAL;
/* do the actual plugging */
- hermes_aux_setaddr(hw, pdr_addr(pdr));
- hermes_write_bytes(hw, HERMES_AUXDATA, pdi->data, pdi_len(pdi));
-
- return 0;
-}
-
-/* Read PDA from the adapter */
-int hermes_read_pda(hermes_t *hw,
- __le16 *pda,
- u32 pda_addr,
- u16 pda_len,
- int use_eeprom) /* can we get this into hw? */
-{
- int ret;
- u16 pda_size;
- u16 data_len = pda_len;
- __le16 *data = pda;
-
- if (use_eeprom) {
- /* PDA of spectrum symbol is in eeprom */
-
- /* Issue command to read EEPROM */
- ret = hermes_docmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
- if (ret)
- return ret;
- } else {
- /* wl_lkm does not include PDA size in the PDA area.
- * We will pad the information into pda, so other routines
- * don't have to be modified */
- pda[0] = cpu_to_le16(pda_len - 2);
- /* Includes CFG_PROD_DATA but not itself */
- pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
- data_len = pda_len - 4;
- data = pda + 2;
- }
-
- /* Open auxiliary port */
- ret = hermes_aux_control(hw, 1);
- pr_debug(PFX "AUX enable returned %d\n", ret);
- if (ret)
- return ret;
-
- /* read PDA from EEPROM */
- hermes_aux_setaddr(hw, pda_addr);
- hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2);
-
- /* Close aux port */
- ret = hermes_aux_control(hw, 0);
- pr_debug(PFX "AUX disable returned %d\n", ret);
-
- /* Check PDA length */
- pda_size = le16_to_cpu(pda[0]);
- pr_debug(PFX "Actual PDA length %d, Max allowed %d\n",
- pda_size, pda_len);
- if (pda_size > pda_len)
- return -EINVAL;
+ hw->ops->program(hw, pdi->data, pdr_addr(pdr), pdi_len(pdi));
return 0;
}
@@ -389,101 +273,13 @@ hermes_blocks_length(const char *first_block, const void *end)
/*** Hermes programming ***/
-/* About to start programming data (Hermes I)
- * offset is the entry point
- *
- * Spectrum_cs' Symbol fw does not require this
- * wl_lkm Agere fw does
- * Don't know about intersil
- */
-int hermesi_program_init(hermes_t *hw, u32 offset)
-{
- int err;
-
- /* Disable interrupts?*/
- /*hw->inten = 0x0;*/
- /*hermes_write_regn(hw, INTEN, 0);*/
- /*hermes_set_irqmask(hw, 0);*/
-
- /* Acknowledge any outstanding command */
- hermes_write_regn(hw, EVACK, 0xFFFF);
-
- /* Using doicmd_wait rather than docmd_wait */
- err = hermes_doicmd_wait(hw,
- 0x0100 | HERMES_CMD_INIT,
- 0, 0, 0, NULL);
- if (err)
- return err;
-
- err = hermes_doicmd_wait(hw,
- 0x0000 | HERMES_CMD_INIT,
- 0, 0, 0, NULL);
- if (err)
- return err;
-
- err = hermes_aux_control(hw, 1);
- pr_debug(PFX "AUX enable returned %d\n", err);
-
- if (err)
- return err;
-
- pr_debug(PFX "Enabling volatile, EP 0x%08x\n", offset);
- err = hermes_doicmd_wait(hw,
- HERMES_PROGRAM_ENABLE_VOLATILE,
- offset & 0xFFFFu,
- offset >> 16,
- 0,
- NULL);
- pr_debug(PFX "PROGRAM_ENABLE returned %d\n", err);
-
- return err;
-}
-
-/* Done programming data (Hermes I)
- *
- * Spectrum_cs' Symbol fw does not require this
- * wl_lkm Agere fw does
- * Don't know about intersil
- */
-int hermesi_program_end(hermes_t *hw)
-{
- struct hermes_response resp;
- int rc = 0;
- int err;
-
- rc = hermes_docmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp);
-
- pr_debug(PFX "PROGRAM_DISABLE returned %d, "
- "r0 0x%04x, r1 0x%04x, r2 0x%04x\n",
- rc, resp.resp0, resp.resp1, resp.resp2);
-
- if ((rc == 0) &&
- ((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD))
- rc = -EIO;
-
- err = hermes_aux_control(hw, 0);
- pr_debug(PFX "AUX disable returned %d\n", err);
-
- /* Acknowledge any outstanding command */
- hermes_write_regn(hw, EVACK, 0xFFFF);
-
- /* Reinitialise, ignoring return */
- (void) hermes_doicmd_wait(hw, 0x0000 | HERMES_CMD_INIT,
- 0, 0, 0, NULL);
-
- return rc ? rc : err;
-}
-
/* Program the data blocks */
int hermes_program(hermes_t *hw, const char *first_block, const void *end)
{
const struct dblock *blk;
u32 blkaddr;
u32 blklen;
-#if LIMIT_PROGRAM_SIZE
- u32 addr;
- u32 len;
-#endif
+ int err = 0;
blk = (const struct dblock *) first_block;
@@ -498,30 +294,10 @@ int hermes_program(hermes_t *hw, const char *first_block, const void *end)
pr_debug(PFX "Programming block of length %d "
"to address 0x%08x\n", blklen, blkaddr);
-#if !LIMIT_PROGRAM_SIZE
- /* wl_lkm driver splits this into writes of 2000 bytes */
- hermes_aux_setaddr(hw, blkaddr);
- hermes_write_bytes(hw, HERMES_AUXDATA, blk->data,
- blklen);
-#else
- len = (blklen < MAX_DL_SIZE) ? blklen : MAX_DL_SIZE;
- addr = blkaddr;
-
- while (addr < (blkaddr + blklen)) {
- pr_debug(PFX "Programming subblock of length %d "
- "to address 0x%08x. Data @ %p\n",
- len, addr, &blk->data[addr - blkaddr]);
-
- hermes_aux_setaddr(hw, addr);
- hermes_write_bytes(hw, HERMES_AUXDATA,
- &blk->data[addr - blkaddr],
- len);
-
- addr += len;
- len = ((blkaddr + blklen - addr) < MAX_DL_SIZE) ?
- (blkaddr + blklen - addr) : MAX_DL_SIZE;
- }
-#endif
+ err = hw->ops->program(hw, blk->data, blkaddr, blklen);
+ if (err)
+ break;
+
blk = (const struct dblock *) &blk->data[blklen];
if ((void *) blk > (end - sizeof(*blk)))
@@ -530,7 +306,7 @@ int hermes_program(hermes_t *hw, const char *first_block, const void *end)
blkaddr = dblock_addr(blk);
blklen = dblock_len(blk);
}
- return 0;
+ return err;
}
/*** Default plugging data for Hermes I ***/
@@ -690,9 +466,8 @@ int hermes_apply_pda_with_defaults(hermes_t *hw,
if ((pdi_len(pdi) == pdr_len(pdr)) &&
((void *) pdi->data + pdi_len(pdi) < pda_end)) {
/* do the actual plugging */
- hermes_aux_setaddr(hw, pdr_addr(pdr));
- hermes_write_bytes(hw, HERMES_AUXDATA,
- pdi->data, pdi_len(pdi));
+ hw->ops->program(hw, pdi->data, pdr_addr(pdr),
+ pdi_len(pdi));
}
}
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index e636924..6fbd788 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -177,9 +177,9 @@ int determine_fw_capabilities(struct orinoco_private *priv,
/* 3Com MAC : 00:50:DA:* */
memset(tmp, 0, sizeof(tmp));
/* Get the Symbol firmware version */
- err = hermes_read_ltv(hw, USER_BAP,
- HERMES_RID_SECONDARYVERSION_SYMBOL,
- SYMBOL_MAX_VER_LEN, NULL, &tmp);
+ err = hw->ops->read_ltv(hw, USER_BAP,
+ HERMES_RID_SECONDARYVERSION_SYMBOL,
+ SYMBOL_MAX_VER_LEN, NULL, &tmp);
if (err) {
dev_warn(dev, "Error %d reading Symbol firmware info. "
"Wildly guessing capabilities...\n", err);
@@ -262,6 +262,13 @@ int determine_fw_capabilities(struct orinoco_private *priv,
if (fw_name)
dev_info(dev, "Firmware determined as %s\n", fw_name);
+#ifndef CONFIG_HERMES_PRISM
+ if (priv->firmware_type == FIRMWARE_TYPE_INTERSIL) {
+ dev_err(dev, "Support for Prism chipset is not enabled\n");
+ return -ENODEV;
+ }
+#endif
+
return 0;
}
@@ -279,8 +286,8 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
u16 reclen;
/* Get the MAC address */
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
- ETH_ALEN, NULL, dev_addr);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
+ ETH_ALEN, NULL, dev_addr);
if (err) {
dev_warn(dev, "Failed to read MAC address!\n");
goto out;
@@ -289,8 +296,8 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
dev_dbg(dev, "MAC address %pM\n", dev_addr);
/* Get the station name */
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
- sizeof(nickbuf), &reclen, &nickbuf);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
+ sizeof(nickbuf), &reclen, &nickbuf);
if (err) {
dev_err(dev, "failed to read station name\n");
goto out;
@@ -367,6 +374,32 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
err = hermes_read_wordrec(hw, USER_BAP,
HERMES_RID_CNFPREAMBLE_SYMBOL,
&priv->preamble);
+ if (err) {
+ dev_err(dev, "Failed to read preamble setup\n");
+ goto out;
+ }
+ }
+
+ /* Retry settings */
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
+ &priv->short_retry_limit);
+ if (err) {
+ dev_err(dev, "Failed to read short retry limit\n");
+ goto out;
+ }
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
+ &priv->long_retry_limit);
+ if (err) {
+ dev_err(dev, "Failed to read long retry limit\n");
+ goto out;
+ }
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
+ &priv->retry_lifetime);
+ if (err) {
+ dev_err(dev, "Failed to read max retry lifetime\n");
+ goto out;
}
out:
@@ -380,11 +413,11 @@ int orinoco_hw_allocate_fid(struct orinoco_private *priv)
struct hermes *hw = &priv->hw;
int err;
- err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid);
if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) {
/* Try workaround for old Symbol firmware bug */
priv->nicbuf_size = TX_NICBUF_SIZE_BUG;
- err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid);
dev_warn(dev, "Firmware ALLOC bug detected "
"(old Symbol firmware?). Work around %s\n",
@@ -430,8 +463,9 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
struct hermes_idstring idbuf;
/* Set the MAC address */
- err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
- HERMES_BYTES_TO_RECLEN(ETH_ALEN), dev->dev_addr);
+ err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
+ HERMES_BYTES_TO_RECLEN(ETH_ALEN),
+ dev->dev_addr);
if (err) {
printk(KERN_ERR "%s: Error %d setting MAC address\n",
dev->name, err);
@@ -494,7 +528,7 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
idbuf.len = cpu_to_le16(strlen(priv->desired_essid));
memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val));
/* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */
- err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID,
+ err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID,
HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
&idbuf);
if (err) {
@@ -502,7 +536,7 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
dev->name, err);
return err;
}
- err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
+ err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
&idbuf);
if (err) {
@@ -514,9 +548,9 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
/* Set the station name */
idbuf.len = cpu_to_le16(strlen(priv->nick));
memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val));
- err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
- HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2),
- &idbuf);
+ err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
+ HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2),
+ &idbuf);
if (err) {
printk(KERN_ERR "%s: Error %d setting nickname\n",
dev->name, err);
@@ -631,12 +665,12 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
/* Enable monitor mode */
dev->type = ARPHRD_IEEE80211;
- err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
+ err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
HERMES_TEST_MONITOR, 0, NULL);
} else {
/* Disable monitor mode */
dev->type = ARPHRD_ETHER;
- err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
+ err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
HERMES_TEST_STOP, 0, NULL);
}
if (err)
@@ -662,8 +696,8 @@ int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc)
if ((key < 0) || (key >= 4))
return -EINVAL;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
- sizeof(tsc_arr), NULL, &tsc_arr);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
+ sizeof(tsc_arr), NULL, &tsc_arr);
if (!err)
memcpy(tsc, &tsc_arr[key][0], sizeof(tsc_arr[0]));
@@ -842,7 +876,7 @@ int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv)
memcpy(key, priv->keys[i].key,
priv->keys[i].key_len);
- err = hermes_write_ltv(hw, USER_BAP,
+ err = hw->ops->write_ltv(hw, USER_BAP,
HERMES_RID_CNFDEFAULTKEY0 + i,
HERMES_BYTES_TO_RECLEN(keylen),
key);
@@ -1049,17 +1083,17 @@ int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
* group address if either we want to multicast, or if we were
* multicasting and want to stop */
if (!promisc && (mc_count || priv->mc_count)) {
- struct dev_mc_list *p;
+ struct netdev_hw_addr *ha;
struct hermes_multicast mclist;
int i = 0;
- netdev_for_each_mc_addr(p, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (i == mc_count)
break;
- memcpy(mclist.addr[i++], p->dmi_addr, ETH_ALEN);
+ memcpy(mclist.addr[i++], ha->addr, ETH_ALEN);
}
- err = hermes_write_ltv(hw, USER_BAP,
+ err = hw->ops->write_ltv(hw, USER_BAP,
HERMES_RID_CNFGROUPADDRESSES,
HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
&mclist);
@@ -1101,15 +1135,15 @@ int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID :
HERMES_RID_CNFDESIREDSSID;
- err = hermes_read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
- NULL, &essidbuf);
+ err = hw->ops->read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
+ NULL, &essidbuf);
if (err)
goto fail_unlock;
} else {
*active = 0;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
- sizeof(essidbuf), NULL, &essidbuf);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
+ sizeof(essidbuf), NULL, &essidbuf);
if (err)
goto fail_unlock;
}
@@ -1180,8 +1214,8 @@ int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
- sizeof(list), NULL, &list);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
+ sizeof(list), NULL, &list);
orinoco_unlock(priv, &flags);
if (err)
@@ -1248,7 +1282,7 @@ int orinoco_hw_trigger_scan(struct orinoco_private *priv,
idbuf.len = cpu_to_le16(len);
memcpy(idbuf.val, ssid->ssid, len);
- err = hermes_write_ltv(hw, USER_BAP,
+ err = hw->ops->write_ltv(hw, USER_BAP,
HERMES_RID_CNFSCANSSID_AGERE,
HERMES_BYTES_TO_RECLEN(len + 2),
&idbuf);
@@ -1312,8 +1346,8 @@ int orinoco_hw_get_current_bssid(struct orinoco_private *priv,
hermes_t *hw = &priv->hw;
int err;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
- ETH_ALEN, NULL, addr);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
+ ETH_ALEN, NULL, addr);
return err;
}
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index 9799a1d..97af71e 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -22,7 +22,6 @@
/* Forward declarations */
struct orinoco_private;
-struct dev_addr_list;
int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name,
size_t fw_name_len, u32 *hw_ver);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 413e9ab..ca71f08 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -254,7 +254,7 @@ void set_port_type(struct orinoco_private *priv)
/* Device methods */
/********************************************************************/
-static int orinoco_open(struct net_device *dev)
+int orinoco_open(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
unsigned long flags;
@@ -272,8 +272,9 @@ static int orinoco_open(struct net_device *dev)
return err;
}
+EXPORT_SYMBOL(orinoco_open);
-static int orinoco_stop(struct net_device *dev)
+int orinoco_stop(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
int err = 0;
@@ -281,25 +282,27 @@ static int orinoco_stop(struct net_device *dev)
/* We mustn't use orinoco_lock() here, because we need to be
able to close the interface even if hw_unavailable is set
(e.g. as we're released after a PC Card removal) */
- spin_lock_irq(&priv->lock);
+ orinoco_lock_irq(priv);
priv->open = 0;
err = __orinoco_down(priv);
- spin_unlock_irq(&priv->lock);
+ orinoco_unlock_irq(priv);
return err;
}
+EXPORT_SYMBOL(orinoco_stop);
-static struct net_device_stats *orinoco_get_stats(struct net_device *dev)
+struct net_device_stats *orinoco_get_stats(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
return &priv->stats;
}
+EXPORT_SYMBOL(orinoco_get_stats);
-static void orinoco_set_multicast_list(struct net_device *dev)
+void orinoco_set_multicast_list(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
unsigned long flags;
@@ -313,8 +316,9 @@ static void orinoco_set_multicast_list(struct net_device *dev)
__orinoco_set_multicast_list(dev);
orinoco_unlock(priv, &flags);
}
+EXPORT_SYMBOL(orinoco_set_multicast_list);
-static int orinoco_change_mtu(struct net_device *dev, int new_mtu)
+int orinoco_change_mtu(struct net_device *dev, int new_mtu)
{
struct orinoco_private *priv = ndev_priv(dev);
@@ -330,23 +334,115 @@ static int orinoco_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+EXPORT_SYMBOL(orinoco_change_mtu);
/********************************************************************/
/* Tx path */
/********************************************************************/
+/* Add encapsulation and MIC to the existing SKB.
+ * The main xmit routine will then send the whole lot to the card.
+ * Need 8 bytes headroom
+ * Need 8 bytes tailroom
+ *
+ *                          With encapsulated ethernet II frame
+ *                          --------
+ *                          802.3 header (14 bytes)
+ *                           dst[6]
+ * --------                  src[6]
+ * 802.3 header (14 bytes)   len[2]
+ *  dst[6]                  802.2 header (8 bytes)
+ *  src[6]                   encaps[6]
+ *  len[2] <- leave alone -> len[2]
+ * --------                 -------- <-- 0
+ * Payload                  Payload
+ * ...                      ...
+ *
+ * --------                 --------
+ *                          MIC (8 bytes)
+ *                          --------
+ *
+ * returns 0 on success, -ENOMEM on error.
+ */
+int orinoco_process_xmit_skb(struct sk_buff *skb,
+ struct net_device *dev,
+ struct orinoco_private *priv,
+ int *tx_control,
+ u8 *mic_buf)
+{
+ struct orinoco_tkip_key *key;
+ struct ethhdr *eh;
+ int do_mic;
+
+ key = (struct orinoco_tkip_key *) priv->keys[priv->tx_key].key;
+
+ do_mic = ((priv->encode_alg == ORINOCO_ALG_TKIP) &&
+ (key != NULL));
+
+ if (do_mic)
+ *tx_control |= (priv->tx_key << HERMES_MIC_KEY_ID_SHIFT) |
+ HERMES_TXCTRL_MIC;
+
+ eh = (struct ethhdr *)skb->data;
+
+ /* Encapsulate Ethernet-II frames */
+ if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
+ struct header_struct {
+ struct ethhdr eth; /* 802.3 header */
+ u8 encap[6]; /* 802.2 header */
+ } __attribute__ ((packed)) hdr;
+ int len = skb->len + sizeof(encaps_hdr) - (2 * ETH_ALEN);
+
+ if (skb_headroom(skb) < ENCAPS_OVERHEAD) {
+ if (net_ratelimit())
+ printk(KERN_ERR
+ "%s: Not enough headroom for 802.2 headers %d\n",
+ dev->name, skb_headroom(skb));
+ return -ENOMEM;
+ }
+
+ /* Fill in new header */
+ memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
+ hdr.eth.h_proto = htons(len);
+ memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
+
+ /* Make room for the new header, and copy it in */
+ eh = (struct ethhdr *) skb_push(skb, ENCAPS_OVERHEAD);
+ memcpy(eh, &hdr, sizeof(hdr));
+ }
+
+ /* Calculate Michael MIC */
+ if (do_mic) {
+ size_t len = skb->len - ETH_HLEN;
+ u8 *mic = &mic_buf[0];
+
+ /* Have to write to an even address, so copy the spare
+ * byte across */
+ if (skb->len % 2) {
+ *mic = skb->data[skb->len - 1];
+ mic++;
+ }
+
+ orinoco_mic(priv->tx_tfm_mic, key->tx_mic,
+ eh->h_dest, eh->h_source, 0 /* priority */,
+ skb->data + ETH_HLEN,
+ len, mic);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(orinoco_process_xmit_skb);
+
static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &priv->stats;
- struct orinoco_tkip_key *key;
hermes_t *hw = &priv->hw;
int err = 0;
u16 txfid = priv->txfid;
- struct ethhdr *eh;
int tx_control;
unsigned long flags;
- int do_mic;
+ u8 mic_buf[MICHAEL_MIC_LEN+1];
if (!netif_running(dev)) {
printk(KERN_ERR "%s: Tx on stopped device!\n",
@@ -378,16 +474,12 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->len < ETH_HLEN)
goto drop;
- key = (struct orinoco_tkip_key *) priv->keys[priv->tx_key].key;
-
- do_mic = ((priv->encode_alg == ORINOCO_ALG_TKIP) &&
- (key != NULL));
-
tx_control = HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX;
- if (do_mic)
- tx_control |= (priv->tx_key << HERMES_MIC_KEY_ID_SHIFT) |
- HERMES_TXCTRL_MIC;
+ err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
+ &mic_buf[0]);
+ if (err)
+ goto drop;
if (priv->has_alt_txcntl) {
/* WPA enabled firmwares have tx_cntl at the end of
@@ -400,8 +492,8 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
memset(&desc, 0, sizeof(desc));
*txcntl = cpu_to_le16(tx_control);
- err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
- txfid, 0);
+ err = hw->ops->bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
+ txfid, 0);
if (err) {
if (net_ratelimit())
printk(KERN_ERR "%s: Error %d writing Tx "
@@ -414,8 +506,8 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
memset(&desc, 0, sizeof(desc));
desc.tx_control = cpu_to_le16(tx_control);
- err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
- txfid, 0);
+ err = hw->ops->bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
+ txfid, 0);
if (err) {
if (net_ratelimit())
printk(KERN_ERR "%s: Error %d writing Tx "
@@ -430,68 +522,24 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
}
- eh = (struct ethhdr *)skb->data;
-
- /* Encapsulate Ethernet-II frames */
- if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
- struct header_struct {
- struct ethhdr eth; /* 802.3 header */
- u8 encap[6]; /* 802.2 header */
- } __attribute__ ((packed)) hdr;
-
- /* Strip destination and source from the data */
- skb_pull(skb, 2 * ETH_ALEN);
-
- /* And move them to a separate header */
- memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
- hdr.eth.h_proto = htons(sizeof(encaps_hdr) + skb->len);
- memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
-
- /* Insert the SNAP header */
- if (skb_headroom(skb) < sizeof(hdr)) {
- printk(KERN_ERR
- "%s: Not enough headroom for 802.2 headers %d\n",
- dev->name, skb_headroom(skb));
- goto drop;
- }
- eh = (struct ethhdr *) skb_push(skb, sizeof(hdr));
- memcpy(eh, &hdr, sizeof(hdr));
- }
-
- err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len,
- txfid, HERMES_802_3_OFFSET);
+ err = hw->ops->bap_pwrite(hw, USER_BAP, skb->data, skb->len,
+ txfid, HERMES_802_3_OFFSET);
if (err) {
printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
dev->name, err);
goto busy;
}
- /* Calculate Michael MIC */
- if (do_mic) {
- u8 mic_buf[MICHAEL_MIC_LEN + 1];
- u8 *mic;
- size_t offset;
- size_t len;
+ if (tx_control & HERMES_TXCTRL_MIC) {
+ size_t offset = HERMES_802_3_OFFSET + skb->len;
+ size_t len = MICHAEL_MIC_LEN;
- if (skb->len % 2) {
- /* MIC start is on an odd boundary */
- mic_buf[0] = skb->data[skb->len - 1];
- mic = &mic_buf[1];
- offset = skb->len - 1;
- len = MICHAEL_MIC_LEN + 1;
- } else {
- mic = &mic_buf[0];
- offset = skb->len;
- len = MICHAEL_MIC_LEN;
+ if (offset % 2) {
+ offset--;
+ len++;
}
-
- orinoco_mic(priv->tx_tfm_mic, key->tx_mic,
- eh->h_dest, eh->h_source, 0 /* priority */,
- skb->data + ETH_HLEN, skb->len - ETH_HLEN, mic);
-
- /* Write the MIC */
- err = hermes_bap_pwrite(hw, USER_BAP, &mic_buf[0], len,
- txfid, HERMES_802_3_OFFSET + offset);
+ err = hw->ops->bap_pwrite(hw, USER_BAP, &mic_buf[0], len,
+ txfid, offset);
if (err) {
printk(KERN_ERR "%s: Error %d writing MIC to BAP\n",
dev->name, err);
@@ -502,7 +550,7 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
/* Finally, we actually initiate the send */
netif_stop_queue(dev);
- err = hermes_docmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL,
+ err = hw->ops->cmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL,
txfid, NULL);
if (err) {
netif_start_queue(dev);
@@ -512,7 +560,6 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
goto busy;
}
- dev->trans_start = jiffies;
stats->tx_bytes += HERMES_802_3_OFFSET + skb->len;
goto ok;
@@ -572,9 +619,9 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
return; /* Nothing's really happened */
/* Read part of the frame header - we need status and addr1 */
- err = hermes_bap_pread(hw, IRQ_BAP, &hdr,
- sizeof(struct hermes_txexc_data),
- fid, 0);
+ err = hw->ops->bap_pread(hw, IRQ_BAP, &hdr,
+ sizeof(struct hermes_txexc_data),
+ fid, 0);
hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
stats->tx_errors++;
@@ -615,7 +662,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
netif_wake_queue(dev);
}
-static void orinoco_tx_timeout(struct net_device *dev)
+void orinoco_tx_timeout(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &priv->stats;
@@ -630,6 +677,7 @@ static void orinoco_tx_timeout(struct net_device *dev)
schedule_work(&priv->reset_work);
}
+EXPORT_SYMBOL(orinoco_tx_timeout);
/********************************************************************/
/* Rx path (data frames) */
@@ -764,9 +812,9 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
/* If any, copy the data from the card to the skb */
if (datalen > 0) {
- err = hermes_bap_pread(hw, IRQ_BAP, skb_put(skb, datalen),
- ALIGN(datalen, 2), rxfid,
- HERMES_802_2_OFFSET);
+ err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, datalen),
+ ALIGN(datalen, 2), rxfid,
+ HERMES_802_2_OFFSET);
if (err) {
printk(KERN_ERR "%s: error %d reading monitor frame\n",
dev->name, err);
@@ -792,7 +840,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
stats->rx_dropped++;
}
-static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
+void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
{
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &priv->stats;
@@ -814,8 +862,8 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
rxfid = hermes_read_regn(hw, RXFID);
- err = hermes_bap_pread(hw, IRQ_BAP, desc, sizeof(*desc),
- rxfid, 0);
+ err = hw->ops->bap_pread(hw, IRQ_BAP, desc, sizeof(*desc),
+ rxfid, 0);
if (err) {
printk(KERN_ERR "%s: error %d reading Rx descriptor. "
"Frame dropped.\n", dev->name, err);
@@ -882,9 +930,9 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
nothing is removed. 2 is for aligning the IP header. */
skb_reserve(skb, ETH_HLEN + 2);
- err = hermes_bap_pread(hw, IRQ_BAP, skb_put(skb, length),
- ALIGN(length, 2), rxfid,
- HERMES_802_2_OFFSET);
+ err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, length),
+ ALIGN(length, 2), rxfid,
+ HERMES_802_2_OFFSET);
if (err) {
printk(KERN_ERR "%s: error %d reading frame. "
"Frame dropped.\n", dev->name, err);
@@ -913,6 +961,7 @@ update_stats:
out:
kfree(desc);
}
+EXPORT_SYMBOL(__orinoco_ev_rx);
static void orinoco_rx(struct net_device *dev,
struct hermes_rx_descriptor *desc,
@@ -1145,9 +1194,9 @@ static void orinoco_join_ap(struct work_struct *work)
goto out;
/* Read scan results from the firmware */
- err = hermes_read_ltv(hw, USER_BAP,
- HERMES_RID_SCANRESULTSTABLE,
- MAX_SCAN_LEN, &len, buf);
+ err = hw->ops->read_ltv(hw, USER_BAP,
+ HERMES_RID_SCANRESULTSTABLE,
+ MAX_SCAN_LEN, &len, buf);
if (err) {
printk(KERN_ERR "%s: Cannot read scan results\n",
dev->name);
@@ -1194,8 +1243,8 @@ static void orinoco_send_bssid_wevent(struct orinoco_private *priv)
union iwreq_data wrqu;
int err;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
- ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
+ ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
if (err != 0)
return;
@@ -1217,8 +1266,8 @@ static void orinoco_send_assocreqie_wevent(struct orinoco_private *priv)
if (!priv->has_wpa)
return;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO,
- sizeof(buf), NULL, &buf);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO,
+ sizeof(buf), NULL, &buf);
if (err != 0)
return;
@@ -1247,8 +1296,9 @@ static void orinoco_send_assocrespie_wevent(struct orinoco_private *priv)
if (!priv->has_wpa)
return;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_RESP_INFO,
- sizeof(buf), NULL, &buf);
+ err = hw->ops->read_ltv(hw, USER_BAP,
+ HERMES_RID_CURRENT_ASSOC_RESP_INFO,
+ sizeof(buf), NULL, &buf);
if (err != 0)
return;
@@ -1353,7 +1403,7 @@ static void orinoco_process_scan_results(struct work_struct *work)
spin_unlock_irqrestore(&priv->scan_lock, flags);
}
-static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
+void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
{
struct orinoco_private *priv = ndev_priv(dev);
u16 infofid;
@@ -1371,8 +1421,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
infofid = hermes_read_regn(hw, INFOFID);
/* Read the info frame header - don't try too hard */
- err = hermes_bap_pread(hw, IRQ_BAP, &info, sizeof(info),
- infofid, 0);
+ err = hw->ops->bap_pread(hw, IRQ_BAP, &info, sizeof(info),
+ infofid, 0);
if (err) {
printk(KERN_ERR "%s: error %d reading info frame. "
"Frame dropped.\n", dev->name, err);
@@ -1393,8 +1443,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
len = sizeof(tallies);
}
- err = hermes_bap_pread(hw, IRQ_BAP, &tallies, len,
- infofid, sizeof(info));
+ err = hw->ops->bap_pread(hw, IRQ_BAP, &tallies, len,
+ infofid, sizeof(info));
if (err)
break;
@@ -1429,8 +1479,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
break;
}
- err = hermes_bap_pread(hw, IRQ_BAP, &linkstatus, len,
- infofid, sizeof(info));
+ err = hw->ops->bap_pread(hw, IRQ_BAP, &linkstatus, len,
+ infofid, sizeof(info));
if (err)
break;
newstatus = le16_to_cpu(linkstatus.linkstatus);
@@ -1494,8 +1544,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
}
/* Read scan data */
- err = hermes_bap_pread(hw, IRQ_BAP, (void *) buf, len,
- infofid, sizeof(info));
+ err = hw->ops->bap_pread(hw, IRQ_BAP, (void *) buf, len,
+ infofid, sizeof(info));
if (err) {
kfree(buf);
qabort_scan(priv);
@@ -1547,8 +1597,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
break;
/* Read scan data */
- err = hermes_bap_pread(hw, IRQ_BAP, (void *) bss, len,
- infofid, sizeof(info));
+ err = hw->ops->bap_pread(hw, IRQ_BAP, (void *) bss, len,
+ infofid, sizeof(info));
if (err)
kfree(bss);
else
@@ -1568,9 +1618,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
/* We don't actually do anything about it */
break;
}
-
- return;
}
+EXPORT_SYMBOL(__orinoco_ev_info);
static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw)
{
@@ -1647,7 +1696,7 @@ static int orinoco_reinit_firmware(struct orinoco_private *priv)
struct hermes *hw = &priv->hw;
int err;
- err = hermes_init(hw);
+ err = hw->ops->init(hw);
if (priv->do_fw_download && !err) {
err = orinoco_download(priv);
if (err)
@@ -1735,7 +1784,7 @@ void orinoco_reset(struct work_struct *work)
}
/* This has to be called from user context */
- spin_lock_irq(&priv->lock);
+ orinoco_lock_irq(priv);
priv->hw_unavailable--;
@@ -1750,7 +1799,7 @@ void orinoco_reset(struct work_struct *work)
dev->trans_start = jiffies;
}
- spin_unlock_irq(&priv->lock);
+ orinoco_unlock_irq(priv);
return;
disable:
@@ -1984,7 +2033,7 @@ int orinoco_init(struct orinoco_private *priv)
priv->nicbuf_size = IEEE80211_MAX_FRAME_LEN + ETH_HLEN;
/* Initialize the firmware */
- err = hermes_init(hw);
+ err = hw->ops->init(hw);
if (err != 0) {
dev_err(dev, "Failed to initialize firmware (err = %d)\n",
err);
@@ -2067,9 +2116,9 @@ int orinoco_init(struct orinoco_private *priv)
/* Make the hardware available, as long as it hasn't been
* removed elsewhere (e.g. by PCMCIA hot unplug) */
- spin_lock_irq(&priv->lock);
+ orinoco_lock_irq(priv);
priv->hw_unavailable--;
- spin_unlock_irq(&priv->lock);
+ orinoco_unlock_irq(priv);
dev_dbg(dev, "Ready\n");
@@ -2192,7 +2241,8 @@ EXPORT_SYMBOL(alloc_orinocodev);
*/
int orinoco_if_add(struct orinoco_private *priv,
unsigned long base_addr,
- unsigned int irq)
+ unsigned int irq,
+ const struct net_device_ops *ops)
{
struct wiphy *wiphy = priv_to_wiphy(priv);
struct wireless_dev *wdev;
@@ -2211,16 +2261,21 @@ int orinoco_if_add(struct orinoco_private *priv,
/* Setup / override net_device fields */
dev->ieee80211_ptr = wdev;
- dev->netdev_ops = &orinoco_netdev_ops;
dev->watchdog_timeo = HZ; /* 1 second timeout */
dev->wireless_handlers = &orinoco_handler_def;
#ifdef WIRELESS_SPY
dev->wireless_data = &priv->wireless_data;
#endif
+ /* Default to standard ops if not set */
+ if (ops)
+ dev->netdev_ops = ops;
+ else
+ dev->netdev_ops = &orinoco_netdev_ops;
+
/* we use the default eth_mac_addr for setting the MAC addr */
/* Reserve space in skb for the SNAP header */
- dev->hard_header_len += ENCAPS_OVERHEAD;
+ dev->needed_headroom = ENCAPS_OVERHEAD;
netif_carrier_off(dev);
@@ -2305,7 +2360,7 @@ int orinoco_up(struct orinoco_private *priv)
unsigned long flags;
int err;
- spin_lock_irqsave(&priv->lock, flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, &flags);
err = orinoco_reinit_firmware(priv);
if (err) {
@@ -2325,7 +2380,7 @@ int orinoco_up(struct orinoco_private *priv)
}
exit:
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
return 0;
}
@@ -2337,7 +2392,7 @@ void orinoco_down(struct orinoco_private *priv)
unsigned long flags;
int err;
- spin_lock_irqsave(&priv->lock, flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, &flags);
err = __orinoco_down(priv);
if (err)
printk(KERN_WARNING "%s: Error %d downing interface\n",
@@ -2345,7 +2400,7 @@ void orinoco_down(struct orinoco_private *priv)
netif_device_detach(dev);
priv->hw_unavailable++;
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
}
EXPORT_SYMBOL(orinoco_down);
diff --git a/drivers/net/wireless/orinoco/main.h b/drivers/net/wireless/orinoco/main.h
index 21ab36c..4dadf98 100644
--- a/drivers/net/wireless/orinoco/main.h
+++ b/drivers/net/wireless/orinoco/main.h
@@ -33,18 +33,6 @@ int orinoco_commit(struct orinoco_private *priv);
void orinoco_reset(struct work_struct *work);
/* Information element helpers - find a home for these... */
-static inline u8 *orinoco_get_ie(u8 *data, size_t len,
- enum ieee80211_eid eid)
-{
- u8 *p = data;
- while ((p + 2) < (data + len)) {
- if (p[0] == eid)
- return p;
- p += p[1] + 2;
- }
- return NULL;
-}
-
#define WPA_OUI_TYPE "\x00\x50\xF2\x01"
#define WPA_SELECTOR_LEN 4
static inline u8 *orinoco_get_wpa_ie(u8 *data, size_t len)
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 665ef56..a6da86e 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -131,6 +131,8 @@ struct orinoco_private {
u16 ap_density, rts_thresh;
u16 pm_on, pm_mcast, pm_period, pm_timeout;
u16 preamble;
+ u16 short_retry_limit, long_retry_limit;
+ u16 retry_lifetime;
#ifdef WIRELESS_SPY
struct iw_spy_data spy_data; /* iwspy support */
struct iw_public_data wireless_data;
@@ -188,12 +190,30 @@ extern void free_orinocodev(struct orinoco_private *priv);
extern int orinoco_init(struct orinoco_private *priv);
extern int orinoco_if_add(struct orinoco_private *priv,
unsigned long base_addr,
- unsigned int irq);
+ unsigned int irq,
+ const struct net_device_ops *ops);
extern void orinoco_if_del(struct orinoco_private *priv);
extern int orinoco_up(struct orinoco_private *priv);
extern void orinoco_down(struct orinoco_private *priv);
extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
+extern void __orinoco_ev_info(struct net_device *dev, hermes_t *hw);
+extern void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw);
+
+int orinoco_process_xmit_skb(struct sk_buff *skb,
+ struct net_device *dev,
+ struct orinoco_private *priv,
+ int *tx_control,
+ u8 *mic);
+
+/* Common ndo functions exported for reuse by orinoco_usb */
+int orinoco_open(struct net_device *dev);
+int orinoco_stop(struct net_device *dev);
+struct net_device_stats *orinoco_get_stats(struct net_device *dev);
+void orinoco_set_multicast_list(struct net_device *dev);
+int orinoco_change_mtu(struct net_device *dev, int new_mtu);
+void orinoco_tx_timeout(struct net_device *dev);
+
/********************************************************************/
/* Locking and synchronization functions */
/********************************************************************/
@@ -201,11 +221,11 @@ extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
static inline int orinoco_lock(struct orinoco_private *priv,
unsigned long *flags)
{
- spin_lock_irqsave(&priv->lock, *flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, flags);
if (priv->hw_unavailable) {
DEBUG(1, "orinoco_lock() called with hw_unavailable (dev=%p)\n",
priv->ndev);
- spin_unlock_irqrestore(&priv->lock, *flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, flags);
return -EBUSY;
}
return 0;
@@ -214,7 +234,17 @@ static inline int orinoco_lock(struct orinoco_private *priv,
static inline void orinoco_unlock(struct orinoco_private *priv,
unsigned long *flags)
{
- spin_unlock_irqrestore(&priv->lock, *flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, flags);
+}
+
+static inline void orinoco_lock_irq(struct orinoco_private *priv)
+{
+ priv->hw.ops->lock_irq(&priv->lock);
+}
+
+static inline void orinoco_unlock_irq(struct orinoco_private *priv)
+{
+ priv->hw.ops->unlock_irq(&priv->lock);
}
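Routing the lock primitives through hw->ops lets a bus whose callbacks never run in hard-IRQ context substitute lighter locking. A minimal sketch of such an alternative, again with hypothetical mybus_* names rather than anything defined by this patch:

/* Hypothetical alternative for a bus that only takes the lock from
 * process or softirq context; the flags word is simply unused. */
static void mybus_lock_irqsave(spinlock_t *lock, unsigned long *flags)
{
	spin_lock_bh(lock);
	*flags = 0;
}

static void mybus_unlock_irqrestore(spinlock_t *lock, unsigned long *flags)
{
	spin_unlock_bh(lock);
}

static void mybus_lock_irq(spinlock_t *lock)
{
	spin_lock_bh(lock);
}

static void mybus_unlock_irq(spinlock_t *lock)
{
	spin_unlock_bh(lock);
}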
/*** Navigate from net_device to orinoco_private ***/
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 03056ab..b16d5db 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -281,7 +281,7 @@ orinoco_cs_config(struct pcmcia_device *link)
/* Register an interface with the stack */
if (orinoco_if_add(priv, link->io.BasePort1,
- link->irq) != 0) {
+ link->irq, NULL) != 0) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto failed;
}
@@ -306,9 +306,9 @@ orinoco_cs_release(struct pcmcia_device *link)
/* We're committed to taking the device away now, so mark the
* hardware as unavailable */
- spin_lock_irqsave(&priv->lock, flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, &flags);
priv->hw_unavailable++;
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
pcmcia_disable_device(link);
if (priv->hw.iobase)
@@ -353,87 +353,90 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
"Pavel Roskin <proski@gnu.org>, et al)";
static struct pcmcia_device_id orinoco_cs_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
- PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */
- PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */
PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */
- PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */
- PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */
PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */
PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */
PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */
- PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */
- PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */
PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0003), /* ARtem Onair Comcard 11 */
PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */
- PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
- PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
- PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */
- PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */
PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */
PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x3021), /* SpeedStream Wireless Adapter */
PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */
+ PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
+ PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
+ PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
+ PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
+ PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169),
+ PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb),
+ PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
+ PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916),
+ PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
+ PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c),
+ PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077),
+ PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
+ PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
+ PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410),
+ PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3),
+ PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a),
+ PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
+ PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
+ PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
+ PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
+ PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
+ PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
+#ifdef CONFIG_HERMES_PRISM
+ /* Only entries that certainly identify Prism chipset */
+ PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
+ PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */
+ PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */
+ PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */
+ PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */
+ PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */
+ PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
+ PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
+ PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
+ PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */
+ PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */
PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */
PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0008), /* CONTEC FLEXSCAN/FX-DDS110-PCC */
PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), /* Conceptronic CON11Cpro, EMTAC A2424i */
PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), /* Safeway 802.11b, ZCOMAX AirRunner/XI-300 */
PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), /* D-Link DCF660, Sandisk Connect SDWCFB-000 */
- PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9),
- PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
+ PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5),
PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2),
- PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
- PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
- PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
- PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169),
- PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb),
- PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3),
PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18),
- PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
+ PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3),
PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9),
PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae),
+ PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146),
PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac),
PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab),
- PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916),
- PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146),
- PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
- PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c),
- PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
- PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077),
+ PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9),
PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18),
PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77),
PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf),
- PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395),
- PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
- PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410),
- PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3),
PCMCIA_DEVICE_PROD_ID12("Microsoft", "Wireless Notebook Adapter MN-520", 0x5961bf85, 0x6eec8c01),
- PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a),
- PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1),
PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401RA Wireless PC", "Card", 0x0306467f, 0x9762e8f1),
- PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
+ PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1),
PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6),
- PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264),
PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178),
- PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
- PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
- PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2532W-B EliteConnect Wireless Adapter", 0xc4f8b18b, 0x196bd757),
PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a),
- PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee),
PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092),
PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
+#endif
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids);
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index 075f446..bc3ea0b 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -220,7 +220,7 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
goto fail;
}
- err = orinoco_if_add(priv, 0, 0);
+ err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index bda5317..468197f 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -170,7 +170,7 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
goto fail;
}
- err = orinoco_if_add(priv, 0, 0);
+ err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index e0d5874..9358f4d 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -259,7 +259,7 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
goto fail;
}
- err = orinoco_if_add(priv, 0, 0);
+ err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index 88cbc79..784605f 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -156,7 +156,7 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
goto fail;
}
- err = orinoco_if_add(priv, 0, 0);
+ err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
new file mode 100644
index 0000000..78f089b
--- /dev/null
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -0,0 +1,1795 @@
+/*
+ * USB Orinoco driver
+ *
+ * Copyright (c) 2003 Manuel Estrada Sainz
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ *
+ * Queueing code based on linux-wlan-ng 0.2.1-pre5
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ *
+ * The license is the same as above.
+ *
+ * Initially based on USB Skeleton driver - 0.7
+ *
+ * Copyright (c) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * NOTE: The original USB Skeleton driver is GPL, but all that code is
+ * gone so MPL/GPL applies.
+ */
+
+#define DRIVER_NAME "orinoco_usb"
+#define PFX DRIVER_NAME ": "
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/fcntl.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/smp_lock.h>
+#include <linux/usb.h>
+#include <linux/timer.h>
+
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+
+#include "mic.h"
+#include "orinoco.h"
+
+#ifndef URB_ASYNC_UNLINK
+#define URB_ASYNC_UNLINK 0
+#endif
+
+/* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */
+static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
+
+struct header_struct {
+ /* 802.3 */
+ u8 dest[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ __be16 len;
+ /* 802.2 */
+ u8 dsap;
+ u8 ssap;
+ u8 ctrl;
+ /* SNAP */
+ u8 oui[3];
+ __be16 ethertype;
+} __attribute__ ((packed));
+
+struct ez_usb_fw {
+ u16 size;
+ const u8 *code;
+};
+
+static struct ez_usb_fw firmware = {
+ .size = 0,
+ .code = NULL,
+};
+
+#ifdef CONFIG_USB_DEBUG
+static int debug = 1;
+#else
+static int debug;
+#endif
+
+/* Debugging macros */
+#undef dbg
+#define dbg(format, arg...) \
+ do { if (debug) printk(KERN_DEBUG PFX "%s: " format "\n", \
+ __func__ , ## arg); } while (0)
+#undef err
+#define err(format, arg...) \
+ do { printk(KERN_ERR PFX format "\n", ## arg); } while (0)
+
+/* Module parameters */
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug enabled or not");
+
+MODULE_FIRMWARE("orinoco_ezusb_fw");
+
+/*
+ * Under some conditions, the card gets stuck and stops paying attention
+ * to the world (i.e. data communication stalls) until we do something to
+ * it. Sending an INQ_TALLIES command seems to be enough and should be
+ * harmless otherwise. This behaviour has been observed when using the
+ * driver on a systemimager client during installation. In the past a
+ * timer was used to send INQ_TALLIES commands when there was no other
+ * activity, but it was troublesome and was removed.
+ */
+
+#define USB_COMPAQ_VENDOR_ID 0x049f /* Compaq Computer Corp. */
+#define USB_COMPAQ_WL215_ID 0x001f /* Compaq WL215 USB Adapter */
+#define USB_COMPAQ_W200_ID 0x0076 /* Compaq W200 USB Adapter */
+#define USB_HP_WL215_ID 0x0082 /* Compaq WL215 USB Adapter */
+
+#define USB_MELCO_VENDOR_ID 0x0411
+#define USB_BUFFALO_L11_ID 0x0006 /* BUFFALO WLI-USB-L11 */
+#define USB_BUFFALO_L11G_WR_ID 0x000B /* BUFFALO WLI-USB-L11G-WR */
+#define USB_BUFFALO_L11G_ID 0x000D /* BUFFALO WLI-USB-L11G */
+
+#define USB_LUCENT_VENDOR_ID 0x047E /* Lucent Technologies */
+#define USB_LUCENT_ORINOCO_ID 0x0300 /* Lucent/Agere Orinoco USB Client */
+
+#define USB_AVAYA8_VENDOR_ID 0x0D98
+#define USB_AVAYAE_VENDOR_ID 0x0D9E
+#define USB_AVAYA_WIRELESS_ID 0x0300 /* Avaya Wireless USB Card */
+
+#define USB_AGERE_VENDOR_ID 0x0D4E /* Agere Systems */
+#define USB_AGERE_MODEL0801_ID 0x1000 /* Wireless USB Card Model 0801 */
+#define USB_AGERE_MODEL0802_ID 0x1001 /* Wireless USB Card Model 0802 */
+#define USB_AGERE_REBRANDED_ID 0x047A /* WLAN USB Card */
+
+#define USB_ELSA_VENDOR_ID 0x05CC
+#define USB_ELSA_AIRLANCER_ID 0x3100 /* ELSA AirLancer USB-11 */
+
+#define USB_LEGEND_VENDOR_ID 0x0E7C
+#define USB_LEGEND_JOYNET_ID 0x0300 /* Joynet WLAN USB Card */
+
+#define USB_SAMSUNG_VENDOR_ID 0x04E8
+#define USB_SAMSUNG_SEW2001U1_ID 0x5002 /* Samsung SEW-2001u Card */
+#define USB_SAMSUNG_SEW2001U2_ID 0x5B11 /* Samsung SEW-2001u Card */
+#define USB_SAMSUNG_SEW2003U_ID 0x7011 /* Samsung SEW-2003U Card */
+
+#define USB_IGATE_VENDOR_ID 0x0681
+#define USB_IGATE_IGATE_11M_ID 0x0012 /* I-GATE 11M USB Card */
+
+#define USB_FUJITSU_VENDOR_ID 0x0BF8
+#define USB_FUJITSU_E1100_ID 0x1002 /* connect2AIR WLAN E-1100 USB */
+
+#define USB_2WIRE_VENDOR_ID 0x1630
+#define USB_2WIRE_WIRELESS_ID 0xff81 /* 2Wire Wireless USB adapter */
+
+
+#define EZUSB_REQUEST_FW_TRANS 0xA0
+#define EZUSB_REQUEST_TRIGER 0xAA
+#define EZUSB_REQUEST_TRIG_AC 0xAC
+#define EZUSB_CPUCS_REG 0x7F92
+
+#define EZUSB_RID_TX 0x0700
+#define EZUSB_RID_RX 0x0701
+#define EZUSB_RID_INIT1 0x0702
+#define EZUSB_RID_ACK 0x0710
+#define EZUSB_RID_READ_PDA 0x0800
+#define EZUSB_RID_PROG_INIT 0x0852
+#define EZUSB_RID_PROG_SET_ADDR 0x0853
+#define EZUSB_RID_PROG_BYTES 0x0854
+#define EZUSB_RID_PROG_END 0x0855
+#define EZUSB_RID_DOCMD 0x0860
+
+/* Recognize info frames */
+#define EZUSB_IS_INFO(id) (((id) >= 0xF000) && ((id) <= 0xF2FF))
+
+#define EZUSB_MAGIC 0x0210
+
+#define EZUSB_FRAME_DATA 1
+#define EZUSB_FRAME_CONTROL 2
+
+#define DEF_TIMEOUT (3*HZ)
+
+#define BULK_BUF_SIZE 2048
+
+#define MAX_DL_SIZE (BULK_BUF_SIZE - sizeof(struct ezusb_packet))
+
+#define FW_BUF_SIZE 64
+#define FW_VAR_OFFSET_PTR 0x359
+#define FW_VAR_VALUE 0
+#define FW_HOLE_START 0x100
+#define FW_HOLE_END 0x300
+
+struct ezusb_packet {
+ __le16 magic; /* 0x0210 */
+ u8 req_reply_count;
+ u8 ans_reply_count;
+ __le16 frame_type; /* 0x01 for data frames, 0x02 otherwise */
+ __le16 size; /* transport size */
+ __le16 crc; /* CRC up to here */
+ __le16 hermes_len;
+ __le16 hermes_rid;
+ u8 data[0];
+} __attribute__ ((packed));
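+
+/*
+ * Every bulk transfer to or from the adapter is one of these 14-byte
+ * headers followed by the Hermes payload in ->data.  For example, a
+ * request carrying an 8-byte payload goes out as a 22-byte transfer
+ * with ->size set to 12 (payload length + 4) and ->crc covering the
+ * first 8 header bytes (magic through size); see ezusb_fill_req() and
+ * build_crc() below.
+ */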
+
+/* Table of devices that work or may work with this driver */
+static struct usb_device_id ezusb_table[] = {
+ {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_WL215_ID)},
+ {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_HP_WL215_ID)},
+ {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_W200_ID)},
+ {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11_ID)},
+ {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11G_WR_ID)},
+ {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11G_ID)},
+ {USB_DEVICE(USB_LUCENT_VENDOR_ID, USB_LUCENT_ORINOCO_ID)},
+ {USB_DEVICE(USB_AVAYA8_VENDOR_ID, USB_AVAYA_WIRELESS_ID)},
+ {USB_DEVICE(USB_AVAYAE_VENDOR_ID, USB_AVAYA_WIRELESS_ID)},
+ {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_MODEL0801_ID)},
+ {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_MODEL0802_ID)},
+ {USB_DEVICE(USB_ELSA_VENDOR_ID, USB_ELSA_AIRLANCER_ID)},
+ {USB_DEVICE(USB_LEGEND_VENDOR_ID, USB_LEGEND_JOYNET_ID)},
+ {USB_DEVICE_VER(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2001U1_ID,
+ 0, 0)},
+ {USB_DEVICE(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2001U2_ID)},
+ {USB_DEVICE(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2003U_ID)},
+ {USB_DEVICE(USB_IGATE_VENDOR_ID, USB_IGATE_IGATE_11M_ID)},
+ {USB_DEVICE(USB_FUJITSU_VENDOR_ID, USB_FUJITSU_E1100_ID)},
+ {USB_DEVICE(USB_2WIRE_VENDOR_ID, USB_2WIRE_WIRELESS_ID)},
+ {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_REBRANDED_ID)},
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, ezusb_table);
+
+/* Structure to hold all of our device specific stuff */
+struct ezusb_priv {
+ struct usb_device *udev;
+ struct net_device *dev;
+ struct mutex mtx;
+ spinlock_t req_lock;
+ struct list_head req_pending;
+ struct list_head req_active;
+ spinlock_t reply_count_lock;
+ u16 hermes_reg_fake[0x40];
+ u8 *bap_buf;
+ struct urb *read_urb;
+ int read_pipe;
+ int write_pipe;
+ u8 reply_count;
+};
+
+enum ezusb_state {
+ EZUSB_CTX_START,
+ EZUSB_CTX_QUEUED,
+ EZUSB_CTX_REQ_SUBMITTED,
+ EZUSB_CTX_REQ_COMPLETE,
+ EZUSB_CTX_RESP_RECEIVED,
+ EZUSB_CTX_REQ_TIMEOUT,
+ EZUSB_CTX_REQ_FAILED,
+ EZUSB_CTX_RESP_TIMEOUT,
+ EZUSB_CTX_REQSUBMIT_FAIL,
+ EZUSB_CTX_COMPLETE,
+};
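+
+/*
+ * Sketch of the usual lifecycle for a request that expects a reply:
+ * START -> QUEUED -> REQ_SUBMITTED -> REQ_COMPLETE -> COMPLETE, with
+ * RESP_RECEIVED covering the case where the IN reply arrives before
+ * the OUT URB completion.  The *_TIMEOUT, *_FAILED and REQSUBMIT_FAIL
+ * states are the error exits; all terminating states are handled in
+ * ezusb_ctx_complete().
+ */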
+
+struct request_context {
+ struct list_head list;
+ atomic_t refcount;
+ struct completion done; /* Signals that CTX is dead */
+ int killed;
+ struct urb *outurb; /* OUT for req pkt */
+ struct ezusb_priv *upriv;
+ struct ezusb_packet *buf;
+ int buf_length;
+ struct timer_list timer; /* Timeout handling */
+ enum ezusb_state state; /* Current state */
+ /* the RID that we will wait for */
+ u16 out_rid;
+ u16 in_rid;
+};
+
+
+/* Forward declarations */
+static void ezusb_ctx_complete(struct request_context *ctx);
+static void ezusb_req_queue_run(struct ezusb_priv *upriv);
+static void ezusb_bulk_in_callback(struct urb *urb);
+
+static inline u8 ezusb_reply_inc(u8 count)
+{
+ if (count < 0x7F)
+ return count + 1;
+ else
+ return 1;
+}
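+
+/*
+ * The sequence number sent in req_reply_count is taken from
+ * upriv->reply_count; the IN callback expects the adapter to echo it
+ * back incremented (wrapping within 1..0x7F, never 0) in
+ * ans_reply_count, and uses the match to pair an IN URB with the
+ * context that requested it.
+ */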
+
+static void ezusb_request_context_put(struct request_context *ctx)
+{
+ if (!atomic_dec_and_test(&ctx->refcount))
+ return;
+
+ WARN_ON(!ctx->done.done);
+ BUG_ON(ctx->outurb->status == -EINPROGRESS);
+ BUG_ON(timer_pending(&ctx->timer));
+ usb_free_urb(ctx->outurb);
+ kfree(ctx->buf);
+ kfree(ctx);
+}
+
+static inline void ezusb_mod_timer(struct ezusb_priv *upriv,
+ struct timer_list *timer,
+ unsigned long expire)
+{
+ if (!upriv->udev)
+ return;
+ mod_timer(timer, expire);
+}
+
+static void ezusb_request_timerfn(u_long _ctx)
+{
+ struct request_context *ctx = (void *) _ctx;
+
+ ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
+ if (usb_unlink_urb(ctx->outurb) == -EINPROGRESS) {
+ ctx->state = EZUSB_CTX_REQ_TIMEOUT;
+ } else {
+ ctx->state = EZUSB_CTX_RESP_TIMEOUT;
+ dbg("couldn't unlink");
+ atomic_inc(&ctx->refcount);
+ ctx->killed = 1;
+ ezusb_ctx_complete(ctx);
+ ezusb_request_context_put(ctx);
+ }
+};
+
+static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
+ u16 out_rid, u16 in_rid)
+{
+ struct request_context *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (!ctx)
+ return NULL;
+
+ ctx->buf = kmalloc(BULK_BUF_SIZE, GFP_ATOMIC);
+ if (!ctx->buf) {
+ kfree(ctx);
+ return NULL;
+ }
+ ctx->outurb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!ctx->outurb) {
+ kfree(ctx->buf);
+ kfree(ctx);
+ return NULL;
+ }
+
+ ctx->upriv = upriv;
+ ctx->state = EZUSB_CTX_START;
+ ctx->out_rid = out_rid;
+ ctx->in_rid = in_rid;
+
+ atomic_set(&ctx->refcount, 1);
+ init_completion(&ctx->done);
+
+ init_timer(&ctx->timer);
+ ctx->timer.function = ezusb_request_timerfn;
+ ctx->timer.data = (u_long) ctx;
+ return ctx;
+}
+
+
+/* Hopefully the real complete_all will soon be exported; in the
+ * meantime this should work. */
+static inline void ezusb_complete_all(struct completion *comp)
+{
+ complete(comp);
+ complete(comp);
+ complete(comp);
+ complete(comp);
+}
+
+static void ezusb_ctx_complete(struct request_context *ctx)
+{
+ struct ezusb_priv *upriv = ctx->upriv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&upriv->req_lock, flags);
+
+ list_del_init(&ctx->list);
+ if (upriv->udev) {
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ ezusb_req_queue_run(upriv);
+ spin_lock_irqsave(&upriv->req_lock, flags);
+ }
+
+ switch (ctx->state) {
+ case EZUSB_CTX_COMPLETE:
+ case EZUSB_CTX_REQSUBMIT_FAIL:
+ case EZUSB_CTX_REQ_FAILED:
+ case EZUSB_CTX_REQ_TIMEOUT:
+ case EZUSB_CTX_RESP_TIMEOUT:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ if ((ctx->out_rid == EZUSB_RID_TX) && upriv->dev) {
+ struct net_device *dev = upriv->dev;
+ struct orinoco_private *priv = ndev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+
+ if (ctx->state != EZUSB_CTX_COMPLETE)
+ stats->tx_errors++;
+ else
+ stats->tx_packets++;
+
+ netif_wake_queue(dev);
+ }
+ ezusb_complete_all(&ctx->done);
+ ezusb_request_context_put(ctx);
+ break;
+
+ default:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ if (!upriv->udev) {
+ /* This is normal, as all request contexts get flushed
+ * when the device is disconnected */
+ err("Called, CTX not terminating, but device gone");
+ ezusb_complete_all(&ctx->done);
+ ezusb_request_context_put(ctx);
+ break;
+ }
+
+ err("Called, CTX not in terminating state.");
+ /* Things are really bad if this happens. Just leak
+ * the CTX because it may still be linked to the
+ * queue or the OUT urb may still be active.
+ * Just leaking at least prevents an Oops or Panic.
+ */
+ break;
+ }
+}
+
+/**
+ * ezusb_req_queue_run:
+ * Description:
+ * Note: Only one active CTX at any one time, because there's no
+ * other (reliable) way to match the response URB to the correct
+ * CTX.
+ **/
+static void ezusb_req_queue_run(struct ezusb_priv *upriv)
+{
+ unsigned long flags;
+ struct request_context *ctx;
+ int result;
+
+ spin_lock_irqsave(&upriv->req_lock, flags);
+
+ if (!list_empty(&upriv->req_active))
+ goto unlock;
+
+ if (list_empty(&upriv->req_pending))
+ goto unlock;
+
+ ctx =
+ list_entry(upriv->req_pending.next, struct request_context,
+ list);
+
+ if (!ctx->upriv->udev)
+ goto unlock;
+
+ /* We need to split this off to avoid a race condition */
+ list_move_tail(&ctx->list, &upriv->req_active);
+
+ if (ctx->state == EZUSB_CTX_QUEUED) {
+ atomic_inc(&ctx->refcount);
+ result = usb_submit_urb(ctx->outurb, GFP_ATOMIC);
+ if (result) {
+ ctx->state = EZUSB_CTX_REQSUBMIT_FAIL;
+
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ err("Fatal, failed to submit command urb."
+ " error=%d\n", result);
+
+ ezusb_ctx_complete(ctx);
+ ezusb_request_context_put(ctx);
+ goto done;
+ }
+
+ ctx->state = EZUSB_CTX_REQ_SUBMITTED;
+ ezusb_mod_timer(ctx->upriv, &ctx->timer,
+ jiffies + DEF_TIMEOUT);
+ }
+
+ unlock:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ done:
+ return;
+}
+
+static void ezusb_req_enqueue_run(struct ezusb_priv *upriv,
+ struct request_context *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&upriv->req_lock, flags);
+
+ if (!ctx->upriv->udev) {
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ goto done;
+ }
+ atomic_inc(&ctx->refcount);
+ list_add_tail(&ctx->list, &upriv->req_pending);
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ ctx->state = EZUSB_CTX_QUEUED;
+ ezusb_req_queue_run(upriv);
+
+ done:
+ return;
+}
+
+static void ezusb_request_out_callback(struct urb *urb)
+{
+ unsigned long flags;
+ enum ezusb_state state;
+ struct request_context *ctx = urb->context;
+ struct ezusb_priv *upriv = ctx->upriv;
+
+ spin_lock_irqsave(&upriv->req_lock, flags);
+
+ del_timer(&ctx->timer);
+
+ if (ctx->killed) {
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ pr_warning("interrupt called with dead ctx");
+ goto out;
+ }
+
+ state = ctx->state;
+
+ if (urb->status == 0) {
+ switch (state) {
+ case EZUSB_CTX_REQ_SUBMITTED:
+ if (ctx->in_rid) {
+ ctx->state = EZUSB_CTX_REQ_COMPLETE;
+ /* reply URB still pending */
+ ezusb_mod_timer(upriv, &ctx->timer,
+ jiffies + DEF_TIMEOUT);
+ spin_unlock_irqrestore(&upriv->req_lock,
+ flags);
+ break;
+ }
+ /* fall through */
+ case EZUSB_CTX_RESP_RECEIVED:
+ /* IN already received before this OUT-ACK */
+ ctx->state = EZUSB_CTX_COMPLETE;
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ ezusb_ctx_complete(ctx);
+ break;
+
+ default:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ err("Unexpected state(0x%x, %d) in OUT URB",
+ state, urb->status);
+ break;
+ }
+ } else {
+ /* If someone cancels the OUT URB then its status
+ * should be either -ECONNRESET or -ENOENT.
+ */
+ switch (state) {
+ case EZUSB_CTX_REQ_SUBMITTED:
+ case EZUSB_CTX_RESP_RECEIVED:
+ ctx->state = EZUSB_CTX_REQ_FAILED;
+ /* fall through */
+
+ case EZUSB_CTX_REQ_FAILED:
+ case EZUSB_CTX_REQ_TIMEOUT:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ ezusb_ctx_complete(ctx);
+ break;
+
+ default:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ err("Unexpected state(0x%x, %d) in OUT URB",
+ state, urb->status);
+ break;
+ }
+ }
+ out:
+ ezusb_request_context_put(ctx);
+}
+
+static void ezusb_request_in_callback(struct ezusb_priv *upriv,
+ struct urb *urb)
+{
+ struct ezusb_packet *ans = urb->transfer_buffer;
+ struct request_context *ctx = NULL;
+ enum ezusb_state state;
+ unsigned long flags;
+
+ /* Find the CTX on the active queue that requested this URB */
+ spin_lock_irqsave(&upriv->req_lock, flags);
+ if (upriv->udev) {
+ struct list_head *item;
+
+ list_for_each(item, &upriv->req_active) {
+ struct request_context *c;
+ int reply_count;
+
+ c = list_entry(item, struct request_context, list);
+ reply_count =
+ ezusb_reply_inc(c->buf->req_reply_count);
+ if ((ans->ans_reply_count == reply_count)
+ && (le16_to_cpu(ans->hermes_rid) == c->in_rid)) {
+ ctx = c;
+ break;
+ }
+ dbg("Skipped (0x%x/0x%x) (%d/%d)",
+ le16_to_cpu(ans->hermes_rid),
+ c->in_rid, ans->ans_reply_count, reply_count);
+ }
+ }
+
+ if (ctx == NULL) {
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ err("%s: got unexpected RID: 0x%04X", __func__,
+ le16_to_cpu(ans->hermes_rid));
+ ezusb_req_queue_run(upriv);
+ return;
+ }
+
+ /* The data we want is in the in buffer, exchange */
+ urb->transfer_buffer = ctx->buf;
+ ctx->buf = (void *) ans;
+ ctx->buf_length = urb->actual_length;
+
+ state = ctx->state;
+ switch (state) {
+ case EZUSB_CTX_REQ_SUBMITTED:
+ /* We have received our response URB before
+ * our request has been acknowledged. Do NOT
+ * destroy our CTX yet, because our OUT URB
+ * is still alive ...
+ */
+ ctx->state = EZUSB_CTX_RESP_RECEIVED;
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ /* Let the machine continue running. */
+ break;
+
+ case EZUSB_CTX_REQ_COMPLETE:
+ /* This is the usual path: our request
+ * has already been acknowledged, and
+ * we have now received the reply.
+ */
+ ctx->state = EZUSB_CTX_COMPLETE;
+
+ /* Stop the reply timeout timer */
+ del_timer(&ctx->timer);
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ /* Call the completion handler */
+ ezusb_ctx_complete(ctx);
+ break;
+
+ default:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ pr_warning("Matched IN URB, unexpected context state(0x%x)",
+ state);
+ /* Throw this CTX away and try submitting another */
+ del_timer(&ctx->timer);
+ ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
+ usb_unlink_urb(ctx->outurb);
+ ezusb_req_queue_run(upriv);
+ break;
+ } /* switch */
+}
+
+
+static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
+ struct request_context *ctx)
+{
+ switch (ctx->state) {
+ case EZUSB_CTX_QUEUED:
+ case EZUSB_CTX_REQ_SUBMITTED:
+ case EZUSB_CTX_REQ_COMPLETE:
+ case EZUSB_CTX_RESP_RECEIVED:
+ if (in_softirq()) {
+ /* If we get called from a timer, timeout timers don't
+ * get the chance to run themselves. So we make sure
+ * that we don't sleep forever */
+ int msecs = DEF_TIMEOUT * (1000 / HZ);
+ while (!ctx->done.done && msecs--)
+ udelay(1000);
+ } else {
+ wait_event_interruptible(ctx->done.wait,
+ ctx->done.done);
+ }
+ break;
+ default:
+ /* Done or failed - nothing to wait for */
+ break;
+ }
+}
+
+static inline u16 build_crc(struct ezusb_packet *data)
+{
+ u16 crc = 0;
+ u8 *bytes = (u8 *)data;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) + bytes[i];
+
+ return crc;
+}
+
+/**
+ * ezusb_fill_req:
+ *
+ * If data == NULL and length > 0, the payload is assumed to already be
+ * in the target buffer and only the header is filled in.
+ *
+ */
+static int ezusb_fill_req(struct ezusb_packet *req, u16 length, u16 rid,
+ const void *data, u16 frame_type, u8 reply_count)
+{
+ int total_size = sizeof(*req) + length;
+
+ BUG_ON(total_size > BULK_BUF_SIZE);
+
+ req->magic = cpu_to_le16(EZUSB_MAGIC);
+ req->req_reply_count = reply_count;
+ req->ans_reply_count = 0;
+ req->frame_type = cpu_to_le16(frame_type);
+ req->size = cpu_to_le16(length + 4);
+ req->crc = cpu_to_le16(build_crc(req));
+ req->hermes_len = cpu_to_le16(HERMES_BYTES_TO_RECLEN(length));
+ req->hermes_rid = cpu_to_le16(rid);
+ if (data)
+ memcpy(req->data, data, length);
+ return total_size;
+}
+
+static int ezusb_submit_in_urb(struct ezusb_priv *upriv)
+{
+ int retval = 0;
+ void *cur_buf = upriv->read_urb->transfer_buffer;
+
+ if (upriv->read_urb->status == -EINPROGRESS) {
+ dbg("urb busy, not resubmiting");
+ retval = -EBUSY;
+ goto exit;
+ }
+ usb_fill_bulk_urb(upriv->read_urb, upriv->udev, upriv->read_pipe,
+ cur_buf, BULK_BUF_SIZE,
+ ezusb_bulk_in_callback, upriv);
+ upriv->read_urb->transfer_flags = 0;
+ retval = usb_submit_urb(upriv->read_urb, GFP_ATOMIC);
+ if (retval)
+ err("%s submit failed %d", __func__, retval);
+
+ exit:
+ return retval;
+}
+
+static inline int ezusb_8051_cpucs(struct ezusb_priv *upriv, int reset)
+{
+ u8 res_val = reset; /* avoid argument promotion */
+
+ if (!upriv->udev) {
+ err("%s: !upriv->udev", __func__);
+ return -EFAULT;
+ }
+ return usb_control_msg(upriv->udev,
+ usb_sndctrlpipe(upriv->udev, 0),
+ EZUSB_REQUEST_FW_TRANS,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE |
+ USB_DIR_OUT, EZUSB_CPUCS_REG, 0, &res_val,
+ sizeof(res_val), DEF_TIMEOUT);
+}
+
+static int ezusb_firmware_download(struct ezusb_priv *upriv,
+ struct ez_usb_fw *fw)
+{
+ u8 fw_buffer[FW_BUF_SIZE];
+ int retval, addr;
+ int variant_offset;
+
+ /*
+ * This byte is 1 and should be replaced with 0. The offset is
+ * 0x10AD in version 0.0.6. The byte in question should follow
+ * the end of the code pointed to by the jump at the beginning
+ * of the firmware. Also, it is read by code located at 0x358.
+ */
+ variant_offset = be16_to_cpup((__be16 *) &fw->code[FW_VAR_OFFSET_PTR]);
+ if (variant_offset >= fw->size) {
+ printk(KERN_ERR PFX "Invalid firmware variant offset: "
+ "0x%04x\n", variant_offset);
+ retval = -EINVAL;
+ goto fail;
+ }
+
+ retval = ezusb_8051_cpucs(upriv, 1);
+ if (retval < 0)
+ goto fail;
+ for (addr = 0; addr < fw->size; addr += FW_BUF_SIZE) {
+ /* 0x100-0x300 should be left alone; it contains card-specific
+ * data, like USB enumeration information */
+ if ((addr >= FW_HOLE_START) && (addr < FW_HOLE_END))
+ continue;
+
+ memcpy(fw_buffer, &fw->code[addr], FW_BUF_SIZE);
+ if (variant_offset >= addr &&
+ variant_offset < addr + FW_BUF_SIZE) {
+ dbg("Patching card_variant byte at 0x%04X",
+ variant_offset);
+ fw_buffer[variant_offset - addr] = FW_VAR_VALUE;
+ }
+ retval = usb_control_msg(upriv->udev,
+ usb_sndctrlpipe(upriv->udev, 0),
+ EZUSB_REQUEST_FW_TRANS,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE
+ | USB_DIR_OUT,
+ addr, 0x0,
+ fw_buffer, FW_BUF_SIZE,
+ DEF_TIMEOUT);
+
+ if (retval < 0)
+ goto fail;
+ }
+ retval = ezusb_8051_cpucs(upriv, 0);
+ if (retval < 0)
+ goto fail;
+
+ goto exit;
+ fail:
+ printk(KERN_ERR PFX "Firmware download failed, error %d\n",
+ retval);
+ exit:
+ return retval;
+}
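+
+/*
+ * Worked example with the 0.0.6 image: the big-endian word at
+ * FW_VAR_OFFSET_PTR (0x359) yields variant_offset 0x10AD, so while the
+ * 64-byte block covering 0x1080-0x10BF is streamed the byte at 0x10AD
+ * is overwritten with FW_VAR_VALUE (0); blocks inside the 0x100-0x2FF
+ * hole are never transferred at all.
+ */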
+
+static int ezusb_access_ltv(struct ezusb_priv *upriv,
+ struct request_context *ctx,
+ u16 length, const void *data, u16 frame_type,
+ void *ans_buff, int ans_size, u16 *ans_length)
+{
+ int req_size;
+ int retval = 0;
+ enum ezusb_state state;
+
+ BUG_ON(in_irq());
+
+ if (!upriv->udev) {
+ dbg("Device disconnected");
+ return -ENODEV;
+ }
+
+ if (upriv->read_urb->status != -EINPROGRESS)
+ err("%s: in urb not pending", __func__);
+
+ /* protect upriv->reply_count, guarantee sequential numbers */
+ spin_lock_bh(&upriv->reply_count_lock);
+ req_size = ezusb_fill_req(ctx->buf, length, ctx->out_rid, data,
+ frame_type, upriv->reply_count);
+ usb_fill_bulk_urb(ctx->outurb, upriv->udev, upriv->write_pipe,
+ ctx->buf, req_size,
+ ezusb_request_out_callback, ctx);
+
+ if (ctx->in_rid)
+ upriv->reply_count = ezusb_reply_inc(upriv->reply_count);
+
+ ezusb_req_enqueue_run(upriv, ctx);
+
+ spin_unlock_bh(&upriv->reply_count_lock);
+
+ if (ctx->in_rid)
+ ezusb_req_ctx_wait(upriv, ctx);
+
+ state = ctx->state;
+ switch (state) {
+ case EZUSB_CTX_COMPLETE:
+ retval = ctx->outurb->status;
+ break;
+
+ case EZUSB_CTX_QUEUED:
+ case EZUSB_CTX_REQ_SUBMITTED:
+ if (!ctx->in_rid)
+ break;
+ default:
+ err("%s: Unexpected context state %d", __func__,
+ state);
+ /* fall through */
+ case EZUSB_CTX_REQ_TIMEOUT:
+ case EZUSB_CTX_REQ_FAILED:
+ case EZUSB_CTX_RESP_TIMEOUT:
+ case EZUSB_CTX_REQSUBMIT_FAIL:
+ printk(KERN_ERR PFX "Access failed, resetting (state %d,"
+ " reply_count %d)\n", state, upriv->reply_count);
+ upriv->reply_count = 0;
+ if (state == EZUSB_CTX_REQ_TIMEOUT
+ || state == EZUSB_CTX_RESP_TIMEOUT) {
+ printk(KERN_ERR PFX "ctx timed out\n");
+ retval = -ETIMEDOUT;
+ } else {
+ printk(KERN_ERR PFX "ctx failed\n");
+ retval = -EFAULT;
+ }
+ goto exit;
+ break;
+ }
+ if (ctx->in_rid) {
+ struct ezusb_packet *ans = ctx->buf;
+ int exp_len;
+
+ if (ans->hermes_len != 0)
+ exp_len = le16_to_cpu(ans->hermes_len) * 2 + 12;
+ else
+ exp_len = 14;
+
+ if (exp_len != ctx->buf_length) {
+ err("%s: length mismatch for RID 0x%04x: "
+ "expected %d, got %d", __func__,
+ ctx->in_rid, exp_len, ctx->buf_length);
+ retval = -EIO;
+ goto exit;
+ }
+
+ if (ans_buff)
+ memcpy(ans_buff, ans->data,
+ min_t(int, exp_len, ans_size));
+ if (ans_length)
+ *ans_length = le16_to_cpu(ans->hermes_len);
+ }
+ exit:
+ ezusb_request_context_put(ctx);
+ return retval;
+}
+
+static int ezusb_write_ltv(hermes_t *hw, int bap, u16 rid,
+ u16 length, const void *data)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ u16 frame_type;
+ struct request_context *ctx;
+
+ if (length == 0)
+ return -EINVAL;
+
+ length = HERMES_RECLEN_TO_BYTES(length);
+
+ /* On memory mapped devices HERMES_RID_CNFGROUPADDRESSES can be
+ * set to be empty, but the USB bridge doesn't like it */
+ if (length == 0)
+ return 0;
+
+ ctx = ezusb_alloc_ctx(upriv, rid, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (rid == EZUSB_RID_TX)
+ frame_type = EZUSB_FRAME_DATA;
+ else
+ frame_type = EZUSB_FRAME_CONTROL;
+
+ return ezusb_access_ltv(upriv, ctx, length, data, frame_type,
+ NULL, 0, NULL);
+}
+
+static int ezusb_read_ltv(hermes_t *hw, int bap, u16 rid,
+ unsigned bufsize, u16 *length, void *buf)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+
+ if ((bufsize < 0) || (bufsize % 2))
+ return -EINVAL;
+
+ ctx = ezusb_alloc_ctx(upriv, rid, rid);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, 0, NULL, EZUSB_FRAME_CONTROL,
+ buf, bufsize, length);
+}
+
+static int ezusb_doicmd_wait(hermes_t *hw, u16 cmd, u16 parm0, u16 parm1,
+ u16 parm2, struct hermes_response *resp)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+
+ __le16 data[4] = {
+ cpu_to_le16(cmd),
+ cpu_to_le16(parm0),
+ cpu_to_le16(parm1),
+ cpu_to_le16(parm2),
+ };
+ dbg("0x%04X, parm0 0x%04X, parm1 0x%04X, parm2 0x%04X",
+ cmd, parm0, parm1, parm2);
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+}
+
+static int ezusb_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
+ struct hermes_response *resp)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+
+ __le16 data[4] = {
+ cpu_to_le16(cmd),
+ cpu_to_le16(parm0),
+ 0,
+ 0,
+ };
+ dbg("0x%04X, parm0 0x%04X", cmd, parm0);
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+}
+
+static int ezusb_bap_pread(struct hermes *hw, int bap,
+ void *buf, int len, u16 id, u16 offset)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct ezusb_packet *ans = (void *) upriv->read_urb->transfer_buffer;
+ int actual_length = upriv->read_urb->actual_length;
+
+ if (id == EZUSB_RID_RX) {
+ if ((sizeof(*ans) + offset + len) > actual_length) {
+ printk(KERN_ERR PFX "BAP read beyond buffer end "
+ "in rx frame\n");
+ return -EINVAL;
+ }
+ memcpy(buf, ans->data + offset, len);
+ return 0;
+ }
+
+ if (EZUSB_IS_INFO(id)) {
+ /* Include 4 bytes for length/type */
+ if ((sizeof(*ans) + offset + len - 4) > actual_length) {
+ printk(KERN_ERR PFX "BAP read beyond buffer end "
+ "in info frame\n");
+ return -EFAULT;
+ }
+ memcpy(buf, ans->data + offset - 4, len);
+ } else {
+ printk(KERN_ERR PFX "Unexpected fid 0x%04x\n", id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ezusb_read_pda(struct hermes *hw, __le16 *pda,
+ u32 pda_addr, u16 pda_len)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+ __le16 data[] = {
+ cpu_to_le16(pda_addr & 0xffff),
+ cpu_to_le16(pda_len - 4)
+ };
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_READ_PDA, EZUSB_RID_READ_PDA);
+ if (!ctx)
+ return -ENOMEM;
+
+ /* wl_lkm does not include PDA size in the PDA area.
+ * We will pad the information into pda, so other routines
+ * don't have to be modified */
+ pda[0] = cpu_to_le16(pda_len - 2);
+ /* Includes CFG_PROD_DATA but not itself */
+ pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
+
+ return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
+ EZUSB_FRAME_CONTROL, &pda[2], pda_len - 4,
+ NULL);
+}
+
+static int ezusb_program_init(struct hermes *hw, u32 entry_point)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+ __le32 data = cpu_to_le32(entry_point);
+
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_INIT, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+}
+
+static int ezusb_program_end(struct hermes *hw)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_END, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, 0, NULL,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+}
+
+static int ezusb_program_bytes(struct hermes *hw, const char *buf,
+ u32 addr, u32 len)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+ __le32 data = cpu_to_le32(addr);
+ int err;
+
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_SET_ADDR, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ err = ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+ if (err)
+ return err;
+
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_BYTES, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, len, buf,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+}
+
+static int ezusb_program(struct hermes *hw, const char *buf,
+ u32 addr, u32 len)
+{
+ u32 ch_addr;
+ u32 ch_len;
+ int err = 0;
+
+ /* We can only send 2048 bytes out of the bulk xmit at a time,
+ * so we have to split any programming into chunks of <2048
+ * bytes. */
+
+ ch_len = (len < MAX_DL_SIZE) ? len : MAX_DL_SIZE;
+ ch_addr = addr;
+
+ while (ch_addr < (addr + len)) {
+ pr_debug("Programming subblock of length %d "
+ "to address 0x%08x. Data @ %p\n",
+ ch_len, ch_addr, &buf[ch_addr - addr]);
+
+ err = ezusb_program_bytes(hw, &buf[ch_addr - addr],
+ ch_addr, ch_len);
+ if (err)
+ break;
+
+ ch_addr += ch_len;
+ ch_len = ((addr + len - ch_addr) < MAX_DL_SIZE) ?
+ (addr + len - ch_addr) : MAX_DL_SIZE;
+ }
+
+ return err;
+}
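+
+/*
+ * Chunking example: with BULK_BUF_SIZE at 2048 and a 14-byte ezusb
+ * header, MAX_DL_SIZE is 2034, so a 5000-byte download starting at
+ * address 0 is sent as chunks of 2034, 2034 and 932 bytes, each one
+ * preceded by an EZUSB_RID_PROG_SET_ADDR request for its start
+ * address (see ezusb_program_bytes() above).
+ */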
+
+static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct orinoco_private *priv = ndev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+ struct ezusb_priv *upriv = priv->card;
+ u8 mic[MICHAEL_MIC_LEN+1];
+ int err = 0;
+ int tx_control;
+ unsigned long flags;
+ struct request_context *ctx;
+ u8 *buf;
+ int tx_size;
+
+ if (!netif_running(dev)) {
+ printk(KERN_ERR "%s: Tx on stopped device!\n",
+ dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (netif_queue_stopped(dev)) {
+ printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
+ dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (orinoco_lock(priv, &flags) != 0) {
+ printk(KERN_ERR
+ "%s: ezusb_xmit() called while hw_unavailable\n",
+ dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (!netif_carrier_ok(dev) ||
+ (priv->iw_mode == NL80211_IFTYPE_MONITOR)) {
+ /* Oops, the firmware hasn't established a connection,
+ silently drop the packet (this seems to be the
+ safest approach). */
+ goto drop;
+ }
+
+ /* Check packet length */
+ if (skb->len < ETH_HLEN)
+ goto drop;
+
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0);
+ if (!ctx)
+ goto busy;
+
+ memset(ctx->buf, 0, BULK_BUF_SIZE);
+ buf = ctx->buf->data;
+
+ tx_control = 0;
+
+ err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
+ &mic[0]);
+ if (err)
+ goto drop;
+
+ {
+ __le16 *tx_cntl = (__le16 *)buf;
+ *tx_cntl = cpu_to_le16(tx_control);
+ buf += sizeof(*tx_cntl);
+ }
+
+ memcpy(buf, skb->data, skb->len);
+ buf += skb->len;
+
+ if (tx_control & HERMES_TXCTRL_MIC) {
+ u8 *m = mic;
+ /* The MIC has been offset so it can be copied to an even
+ * address. We're copying everything anyway, so we
+ * don't need to copy that first byte. */
+ if (skb->len % 2)
+ m++;
+ memcpy(buf, m, MICHAEL_MIC_LEN);
+ buf += MICHAEL_MIC_LEN;
+ }
+
+ /* Finally, we actually initiate the send */
+ netif_stop_queue(dev);
+
+ /* The card may behave better if we send evenly sized usb transfers */
+ tx_size = ALIGN(buf - ctx->buf->data, 2);
+
+ err = ezusb_access_ltv(upriv, ctx, tx_size, NULL,
+ EZUSB_FRAME_DATA, NULL, 0, NULL);
+
+ if (err) {
+ netif_start_queue(dev);
+ if (net_ratelimit())
+ printk(KERN_ERR "%s: Error %d transmitting packet\n",
+ dev->name, err);
+ goto busy;
+ }
+
+ dev->trans_start = jiffies;
+ stats->tx_bytes += skb->len;
+ goto ok;
+
+ drop:
+ stats->tx_errors++;
+ stats->tx_dropped++;
+
+ ok:
+ orinoco_unlock(priv, &flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+
+ busy:
+ orinoco_unlock(priv, &flags);
+ return NETDEV_TX_BUSY;
+}
+
+static int ezusb_allocate(struct hermes *hw, u16 size, u16 *fid)
+{
+ *fid = EZUSB_RID_TX;
+ return 0;
+}
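+
+/*
+ * There is no real FID allocation on the USB bridge: every transmit
+ * simply goes out as an EZUSB_RID_TX request, so ezusb_allocate()
+ * only has to hand back that pseudo-FID for the orinoco core to use.
+ */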
+
+
+static int ezusb_hard_reset(struct orinoco_private *priv)
+{
+ struct ezusb_priv *upriv = priv->card;
+ int retval = ezusb_8051_cpucs(upriv, 1);
+
+ if (retval < 0) {
+ err("Failed to reset");
+ return retval;
+ }
+
+ retval = ezusb_8051_cpucs(upriv, 0);
+ if (retval < 0) {
+ err("Failed to unreset");
+ return retval;
+ }
+
+ dbg("sending control message");
+ retval = usb_control_msg(upriv->udev,
+ usb_sndctrlpipe(upriv->udev, 0),
+ EZUSB_REQUEST_TRIGER,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE |
+ USB_DIR_OUT, 0x0, 0x0, NULL, 0,
+ DEF_TIMEOUT);
+ if (retval < 0) {
+ err("EZUSB_REQUEST_TRIGER failed retval %d", retval);
+ return retval;
+ }
+#if 0
+ dbg("Sending EZUSB_REQUEST_TRIG_AC");
+ retval = usb_control_msg(upriv->udev,
+ usb_sndctrlpipe(upriv->udev, 0),
+ EZUSB_REQUEST_TRIG_AC,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE |
+ USB_DIR_OUT, 0x00FA, 0x0, NULL, 0,
+ DEF_TIMEOUT);
+ if (retval < 0) {
+ err("EZUSB_REQUEST_TRIG_AC failed retval %d", retval);
+ return retval;
+ }
+#endif
+
+ return 0;
+}
+
+
+static int ezusb_init(hermes_t *hw)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ int retval;
+
+ BUG_ON(in_interrupt());
+ BUG_ON(!upriv);
+
+ upriv->reply_count = 0;
+ /* Write the MAGIC number on the simulated registers to keep
+ * orinoco.c happy */
+ hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
+ hermes_write_regn(hw, RXFID, EZUSB_RID_RX);
+
+ usb_kill_urb(upriv->read_urb);
+ ezusb_submit_in_urb(upriv);
+
+ retval = ezusb_write_ltv(hw, 0, EZUSB_RID_INIT1,
+ HERMES_BYTES_TO_RECLEN(2), "\x10\x00");
+ if (retval < 0) {
+ printk(KERN_ERR PFX "EZUSB_RID_INIT1 error %d\n", retval);
+ return retval;
+ }
+
+ retval = ezusb_docmd_wait(hw, HERMES_CMD_INIT, 0, NULL);
+ if (retval < 0) {
+ printk(KERN_ERR PFX "HERMES_CMD_INIT error %d\n", retval);
+ return retval;
+ }
+
+ return 0;
+}
+
+static void ezusb_bulk_in_callback(struct urb *urb)
+{
+ struct ezusb_priv *upriv = (struct ezusb_priv *) urb->context;
+ struct ezusb_packet *ans = urb->transfer_buffer;
+ u16 crc;
+ u16 hermes_rid;
+
+ if (upriv->udev == NULL) {
+ dbg("disconnected");
+ return;
+ }
+
+ if (urb->status == -ETIMEDOUT) {
+ /* When a device gets unplugged we get this every time
+ * we resubmit, flooding the logs. Since we don't use
+ * USB timeouts, it shouldn't happen any other time */
+ pr_warning("%s: urb timed out, not resubmitting", __func__);
+ return;
+ }
+ if (urb->status == -ECONNABORTED) {
+ pr_warning("%s: connection abort, resubmiting urb",
+ __func__);
+ goto resubmit;
+ }
+ if ((urb->status == -EILSEQ)
+ || (urb->status == -ENOENT)
+ || (urb->status == -ECONNRESET)) {
+ dbg("status %d, not resubmiting", urb->status);
+ return;
+ }
+ if (urb->status)
+ dbg("status: %d length: %d",
+ urb->status, urb->actual_length);
+ if (urb->actual_length < sizeof(*ans)) {
+ err("%s: short read, ignoring", __func__);
+ goto resubmit;
+ }
+ crc = build_crc(ans);
+ if (le16_to_cpu(ans->crc) != crc) {
+ err("CRC error, ignoring packet");
+ goto resubmit;
+ }
+
+ hermes_rid = le16_to_cpu(ans->hermes_rid);
+ if ((hermes_rid != EZUSB_RID_RX) && !EZUSB_IS_INFO(hermes_rid)) {
+ ezusb_request_in_callback(upriv, urb);
+ } else if (upriv->dev) {
+ struct net_device *dev = upriv->dev;
+ struct orinoco_private *priv = ndev_priv(dev);
+ hermes_t *hw = &priv->hw;
+
+ if (hermes_rid == EZUSB_RID_RX) {
+ __orinoco_ev_rx(dev, hw);
+ } else {
+ hermes_write_regn(hw, INFOFID,
+ le16_to_cpu(ans->hermes_rid));
+ __orinoco_ev_info(dev, hw);
+ }
+ }
+
+ resubmit:
+ if (upriv->udev)
+ ezusb_submit_in_urb(upriv);
+}
+
+static inline void ezusb_delete(struct ezusb_priv *upriv)
+{
+ struct net_device *dev;
+ struct list_head *item;
+ struct list_head *tmp_item;
+ unsigned long flags;
+
+ BUG_ON(in_interrupt());
+ BUG_ON(!upriv);
+
+ dev = upriv->dev;
+ mutex_lock(&upriv->mtx);
+
+ upriv->udev = NULL; /* No timer will be rearmed from here */
+
+ usb_kill_urb(upriv->read_urb);
+
+ spin_lock_irqsave(&upriv->req_lock, flags);
+ list_for_each_safe(item, tmp_item, &upriv->req_active) {
+ struct request_context *ctx;
+ int err;
+
+ ctx = list_entry(item, struct request_context, list);
+ atomic_inc(&ctx->refcount);
+
+ ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
+ err = usb_unlink_urb(ctx->outurb);
+
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ if (err == -EINPROGRESS)
+ wait_for_completion(&ctx->done);
+
+ del_timer_sync(&ctx->timer);
+ /* FIXME: there is a slight chance that the irq handler is
+ * still running */
+ if (!list_empty(&ctx->list))
+ ezusb_ctx_complete(ctx);
+
+ ezusb_request_context_put(ctx);
+ spin_lock_irqsave(&upriv->req_lock, flags);
+ }
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ list_for_each_safe(item, tmp_item, &upriv->req_pending)
+ ezusb_ctx_complete(list_entry(item,
+ struct request_context, list));
+
+ if (upriv->read_urb->status == -EINPROGRESS)
+ printk(KERN_ERR PFX "Some URB in progress\n");
+
+ mutex_unlock(&upriv->mtx);
+
+ if (upriv->read_urb) {
+ kfree(upriv->read_urb->transfer_buffer);
+ usb_free_urb(upriv->read_urb);
+ }
+ kfree(upriv->bap_buf);
+ if (upriv->dev) {
+ struct orinoco_private *priv = ndev_priv(upriv->dev);
+ orinoco_if_del(priv);
+ free_orinocodev(priv);
+ }
+}
+
+static void ezusb_lock_irqsave(spinlock_t *lock,
+ unsigned long *flags) __acquires(lock)
+{
+ spin_lock_bh(lock);
+}
+
+static void ezusb_unlock_irqrestore(spinlock_t *lock,
+ unsigned long *flags) __releases(lock)
+{
+ spin_unlock_bh(lock);
+}
+
+static void ezusb_lock_irq(spinlock_t *lock) __acquires(lock)
+{
+ spin_lock_bh(lock);
+}
+
+static void ezusb_unlock_irq(spinlock_t *lock) __releases(lock)
+{
+ spin_unlock_bh(lock);
+}
+
+static const struct hermes_ops ezusb_ops = {
+ .init = ezusb_init,
+ .cmd_wait = ezusb_docmd_wait,
+ .init_cmd_wait = ezusb_doicmd_wait,
+ .allocate = ezusb_allocate,
+ .read_ltv = ezusb_read_ltv,
+ .write_ltv = ezusb_write_ltv,
+ .bap_pread = ezusb_bap_pread,
+ .read_pda = ezusb_read_pda,
+ .program_init = ezusb_program_init,
+ .program_end = ezusb_program_end,
+ .program = ezusb_program,
+ .lock_irqsave = ezusb_lock_irqsave,
+ .unlock_irqrestore = ezusb_unlock_irqrestore,
+ .lock_irq = ezusb_lock_irq,
+ .unlock_irq = ezusb_unlock_irq,
+};
+
+static const struct net_device_ops ezusb_netdev_ops = {
+ .ndo_open = orinoco_open,
+ .ndo_stop = orinoco_stop,
+ .ndo_start_xmit = ezusb_xmit,
+ .ndo_set_multicast_list = orinoco_set_multicast_list,
+ .ndo_change_mtu = orinoco_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = orinoco_tx_timeout,
+ .ndo_get_stats = orinoco_get_stats,
+};
+
+static int ezusb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct orinoco_private *priv;
+ hermes_t *hw;
+ struct ezusb_priv *upriv = NULL;
+ struct usb_interface_descriptor *iface_desc;
+ struct usb_endpoint_descriptor *ep;
+ const struct firmware *fw_entry = NULL;
+ int retval = 0;
+ int i;
+
+ priv = alloc_orinocodev(sizeof(*upriv), &udev->dev,
+ ezusb_hard_reset, NULL);
+ if (!priv) {
+ err("Couldn't allocate orinocodev");
+ goto exit;
+ }
+
+ hw = &priv->hw;
+
+ upriv = priv->card;
+
+ mutex_init(&upriv->mtx);
+ spin_lock_init(&upriv->reply_count_lock);
+
+ spin_lock_init(&upriv->req_lock);
+ INIT_LIST_HEAD(&upriv->req_pending);
+ INIT_LIST_HEAD(&upriv->req_active);
+
+ upriv->udev = udev;
+
+ hw->iobase = (void __force __iomem *) &upriv->hermes_reg_fake;
+ hw->reg_spacing = HERMES_16BIT_REGSPACING;
+ hw->priv = upriv;
+ hw->ops = &ezusb_ops;
+
+ /* set up the endpoint information */
+ /* check out the endpoints */
+
+ iface_desc = &interface->altsetting[0].desc;
+ for (i = 0; i < iface_desc->bNumEndpoints; ++i) {
+ ep = &interface->altsetting[0].endpoint[i].desc;
+
+ if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ == USB_DIR_IN) &&
+ ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_BULK)) {
+ /* we found a bulk in endpoint */
+ if (upriv->read_urb != NULL) {
+ pr_warning("Found a second bulk in ep, ignored");
+ continue;
+ }
+
+ upriv->read_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!upriv->read_urb) {
+ err("No free urbs available");
+ goto error;
+ }
+ if (le16_to_cpu(ep->wMaxPacketSize) != 64)
+ pr_warning("bulk in: wMaxPacketSize!= 64");
+ if (ep->bEndpointAddress != (2 | USB_DIR_IN))
+ pr_warning("bulk in: bEndpointAddress: %d",
+ ep->bEndpointAddress);
+ upriv->read_pipe = usb_rcvbulkpipe(udev,
+ ep->
+ bEndpointAddress);
+ upriv->read_urb->transfer_buffer =
+ kmalloc(BULK_BUF_SIZE, GFP_KERNEL);
+ if (!upriv->read_urb->transfer_buffer) {
+ err("Couldn't allocate IN buffer");
+ goto error;
+ }
+ }
+
+ if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ == USB_DIR_OUT) &&
+ ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_BULK)) {
+ /* we found a bulk out endpoint */
+ if (upriv->bap_buf != NULL) {
+ pr_warning("Found a second bulk out ep, ignored");
+ continue;
+ }
+
+ if (le16_to_cpu(ep->wMaxPacketSize) != 64)
+ pr_warning("bulk out: wMaxPacketSize != 64");
+ if (ep->bEndpointAddress != 2)
+ pr_warning("bulk out: bEndpointAddress: %d",
+ ep->bEndpointAddress);
+ upriv->write_pipe = usb_sndbulkpipe(udev,
+ ep->
+ bEndpointAddress);
+ upriv->bap_buf = kmalloc(BULK_BUF_SIZE, GFP_KERNEL);
+ if (!upriv->bap_buf) {
+ err("Couldn't allocate bulk_out_buffer");
+ goto error;
+ }
+ }
+ }
+ if (!upriv->bap_buf || !upriv->read_urb) {
+ err("Didn't find the required bulk endpoints");
+ goto error;
+ }
+
+ if (request_firmware(&fw_entry, "orinoco_ezusb_fw",
+ &interface->dev) == 0) {
+ firmware.size = fw_entry->size;
+ firmware.code = fw_entry->data;
+ }
+ if (firmware.size && firmware.code) {
+ ezusb_firmware_download(upriv, &firmware);
+ } else {
+ err("No firmware to download");
+ goto error;
+ }
+
+ if (ezusb_hard_reset(priv) < 0) {
+ err("Cannot reset the device");
+ goto error;
+ }
+
+ /* If the firmware is already downloaded, orinoco.c will call
+ * ezusb_init; but if the firmware is not there yet, that would make
+ * the kernel very unstable, so we try initializing here and quit in
+ * case of error */
+ if (ezusb_init(hw) < 0) {
+ err("Couldn't initialize the device");
+ err("Firmware may not be downloaded or may be wrong.");
+ goto error;
+ }
+
+ /* Initialise the main driver */
+ if (orinoco_init(priv) != 0) {
+ err("orinoco_init() failed\n");
+ goto error;
+ }
+
+ if (orinoco_if_add(priv, 0, 0, &ezusb_netdev_ops) != 0) {
+ upriv->dev = NULL;
+ err("%s: orinoco_if_add() failed", __func__);
+ goto error;
+ }
+ upriv->dev = priv->ndev;
+
+ goto exit;
+
+ error:
+ ezusb_delete(upriv);
+ if (!upriv->dev) {
+ /* upriv->dev was NULL, so ezusb_delete() didn't free it */
+ free_orinocodev(priv);
+ }
+ upriv = NULL;
+ retval = -EFAULT;
+ exit:
+ if (fw_entry) {
+ firmware.code = NULL;
+ firmware.size = 0;
+ release_firmware(fw_entry);
+ }
+ usb_set_intfdata(interface, upriv);
+ return retval;
+}
+
+
+static void ezusb_disconnect(struct usb_interface *intf)
+{
+ struct ezusb_priv *upriv = usb_get_intfdata(intf);
+ usb_set_intfdata(intf, NULL);
+ ezusb_delete(upriv);
+ printk(KERN_INFO PFX "Disconnected\n");
+}
+
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver orinoco_driver = {
+ .name = DRIVER_NAME,
+ .probe = ezusb_probe,
+ .disconnect = ezusb_disconnect,
+ .id_table = ezusb_table,
+};
+
+/* Can't be declared "const" or the whole __initdata section will
+ * become const */
+static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
+ " (Manuel Estrada Sainz)";
+
+static int __init ezusb_module_init(void)
+{
+ int err;
+
+ printk(KERN_DEBUG "%s\n", version);
+
+ /* register this driver with the USB subsystem */
+ err = usb_register(&orinoco_driver);
+ if (err < 0) {
+ printk(KERN_ERR PFX "usb_register failed, error %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit ezusb_module_exit(void)
+{
+ /* deregister this driver with the USB subsystem */
+ usb_deregister(&orinoco_driver);
+}
+
+
+module_init(ezusb_module_init);
+module_exit(ezusb_module_exit);
+
+MODULE_AUTHOR("Manuel Estrada Sainz");
+MODULE_DESCRIPTION
+ ("Driver for Orinoco wireless LAN cards using EZUSB bridge");
+MODULE_LICENSE("Dual MPL/GPL");
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index 330d42d..4300d9d 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -127,7 +127,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
{
struct wiphy *wiphy = priv_to_wiphy(priv);
struct ieee80211_channel *channel;
- u8 *ie;
+ const u8 *ie;
u64 timestamp;
s32 signal;
u16 capability;
@@ -136,7 +136,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
int chan, freq;
ie_len = len - sizeof(*bss);
- ie = orinoco_get_ie(bss->data, ie_len, WLAN_EID_DS_PARAMS);
+ ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len);
chan = ie ? ie[2] : 0;
freq = ieee80211_dsss_chan_to_freq(chan);
channel = ieee80211_get_channel(wiphy, freq);
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index 41b9ce4..b51a9ad 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -337,6 +337,7 @@ spectrum_cs_config(struct pcmcia_device *link)
goto failed;
hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
+ hw->eeprom_pda = true;
/*
* This actually configures the PCMCIA socket -- setting up
@@ -359,7 +360,7 @@ spectrum_cs_config(struct pcmcia_device *link)
/* Register an interface with the stack */
if (orinoco_if_add(priv, link->io.BasePort1,
- link->irq) != 0) {
+ link->irq, NULL) != 0) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto failed;
}
@@ -384,9 +385,9 @@ spectrum_cs_release(struct pcmcia_device *link)
/* We're committed to taking the device away now, so mark the
* hardware as unavailable */
- spin_lock_irqsave(&priv->lock, flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, &flags);
priv->hw_unavailable++;
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
pcmcia_disable_device(link);
if (priv->hw.iobase)
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index fbcc6e1..5775124 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -458,7 +458,7 @@ static int orinoco_ioctl_setfreq(struct net_device *dev,
if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
/* Fast channel change - no commit if successful */
hermes_t *hw = &priv->hw;
- err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
+ err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
HERMES_TEST_SET_CHANNEL,
chan, NULL);
}
@@ -538,125 +538,6 @@ static int orinoco_ioctl_setsens(struct net_device *dev,
return -EINPROGRESS; /* Call commit handler */
}
-static int orinoco_ioctl_setrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rrq,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- int val = rrq->value;
- unsigned long flags;
-
- if (rrq->disabled)
- val = 2347;
-
- if ((val < 0) || (val > 2347))
- return -EINVAL;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- priv->rts_thresh = val;
- orinoco_unlock(priv, &flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int orinoco_ioctl_getrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rrq,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
-
- rrq->value = priv->rts_thresh;
- rrq->disabled = (rrq->value == 2347);
- rrq->fixed = 1;
-
- return 0;
-}
-
-static int orinoco_ioctl_setfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *frq,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- int err = -EINPROGRESS; /* Call commit handler */
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- if (priv->has_mwo) {
- if (frq->disabled)
- priv->mwo_robust = 0;
- else {
- if (frq->fixed)
- printk(KERN_WARNING "%s: Fixed fragmentation "
- "is not supported on this firmware. "
- "Using MWO robust instead.\n",
- dev->name);
- priv->mwo_robust = 1;
- }
- } else {
- if (frq->disabled)
- priv->frag_thresh = 2346;
- else {
- if ((frq->value < 256) || (frq->value > 2346))
- err = -EINVAL;
- else
- /* must be even */
- priv->frag_thresh = frq->value & ~0x1;
- }
- }
-
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
-static int orinoco_ioctl_getfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *frq,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- hermes_t *hw = &priv->hw;
- int err;
- u16 val;
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- if (priv->has_mwo) {
- err = hermes_read_wordrec(hw, USER_BAP,
- HERMES_RID_CNFMWOROBUST_AGERE,
- &val);
- if (err)
- val = 0;
-
- frq->value = val ? 2347 : 0;
- frq->disabled = !val;
- frq->fixed = 0;
- } else {
- err = hermes_read_wordrec(hw, USER_BAP,
- HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
- &val);
- if (err)
- val = 0;
-
- frq->value = val;
- frq->disabled = (val >= 2346);
- frq->fixed = 1;
- }
-
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
static int orinoco_ioctl_setrate(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rrq,
@@ -1201,60 +1082,6 @@ static int orinoco_ioctl_set_mlme(struct net_device *dev,
return ret;
}
-static int orinoco_ioctl_getretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rrq,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- hermes_t *hw = &priv->hw;
- int err = 0;
- u16 short_limit, long_limit, lifetime;
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
- &short_limit);
- if (err)
- goto out;
-
- err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
- &long_limit);
- if (err)
- goto out;
-
- err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
- &lifetime);
- if (err)
- goto out;
-
- rrq->disabled = 0; /* Can't be disabled */
-
- /* Note : by default, display the retry number */
- if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
- rrq->flags = IW_RETRY_LIFETIME;
- rrq->value = lifetime * 1000; /* ??? */
- } else {
- /* By default, display the min number */
- if ((rrq->flags & IW_RETRY_LONG)) {
- rrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
- rrq->value = long_limit;
- } else {
- rrq->flags = IW_RETRY_LIMIT;
- rrq->value = short_limit;
- if (short_limit != long_limit)
- rrq->flags |= IW_RETRY_SHORT;
- }
- }
-
- out:
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
static int orinoco_ioctl_reset(struct net_device *dev,
struct iw_request_info *info,
void *wrqu,
@@ -1446,8 +1273,8 @@ static int orinoco_ioctl_getrid(struct net_device *dev,
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
- err = hermes_read_ltv(hw, USER_BAP, rid, MAX_RID_LEN, &length,
- extra);
+ err = hw->ops->read_ltv(hw, USER_BAP, rid, MAX_RID_LEN, &length,
+ extra);
if (err)
goto out;
@@ -1506,46 +1333,44 @@ static const struct iw_priv_args orinoco_privtab[] = {
* Structures to export the Wireless Handlers
*/
-#define STD_IW_HANDLER(id, func) \
- [IW_IOCTL_IDX(id)] = (iw_handler) func
static const iw_handler orinoco_handler[] = {
- STD_IW_HANDLER(SIOCSIWCOMMIT, orinoco_ioctl_commit),
- STD_IW_HANDLER(SIOCGIWNAME, cfg80211_wext_giwname),
- STD_IW_HANDLER(SIOCSIWFREQ, orinoco_ioctl_setfreq),
- STD_IW_HANDLER(SIOCGIWFREQ, orinoco_ioctl_getfreq),
- STD_IW_HANDLER(SIOCSIWMODE, cfg80211_wext_siwmode),
- STD_IW_HANDLER(SIOCGIWMODE, cfg80211_wext_giwmode),
- STD_IW_HANDLER(SIOCSIWSENS, orinoco_ioctl_setsens),
- STD_IW_HANDLER(SIOCGIWSENS, orinoco_ioctl_getsens),
- STD_IW_HANDLER(SIOCGIWRANGE, cfg80211_wext_giwrange),
- STD_IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
- STD_IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
- STD_IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
- STD_IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
- STD_IW_HANDLER(SIOCSIWAP, orinoco_ioctl_setwap),
- STD_IW_HANDLER(SIOCGIWAP, orinoco_ioctl_getwap),
- STD_IW_HANDLER(SIOCSIWSCAN, cfg80211_wext_siwscan),
- STD_IW_HANDLER(SIOCGIWSCAN, cfg80211_wext_giwscan),
- STD_IW_HANDLER(SIOCSIWESSID, orinoco_ioctl_setessid),
- STD_IW_HANDLER(SIOCGIWESSID, orinoco_ioctl_getessid),
- STD_IW_HANDLER(SIOCSIWRATE, orinoco_ioctl_setrate),
- STD_IW_HANDLER(SIOCGIWRATE, orinoco_ioctl_getrate),
- STD_IW_HANDLER(SIOCSIWRTS, orinoco_ioctl_setrts),
- STD_IW_HANDLER(SIOCGIWRTS, orinoco_ioctl_getrts),
- STD_IW_HANDLER(SIOCSIWFRAG, orinoco_ioctl_setfrag),
- STD_IW_HANDLER(SIOCGIWFRAG, orinoco_ioctl_getfrag),
- STD_IW_HANDLER(SIOCGIWRETRY, orinoco_ioctl_getretry),
- STD_IW_HANDLER(SIOCSIWENCODE, orinoco_ioctl_setiwencode),
- STD_IW_HANDLER(SIOCGIWENCODE, orinoco_ioctl_getiwencode),
- STD_IW_HANDLER(SIOCSIWPOWER, orinoco_ioctl_setpower),
- STD_IW_HANDLER(SIOCGIWPOWER, orinoco_ioctl_getpower),
- STD_IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie),
- STD_IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie),
- STD_IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme),
- STD_IW_HANDLER(SIOCSIWAUTH, orinoco_ioctl_set_auth),
- STD_IW_HANDLER(SIOCGIWAUTH, orinoco_ioctl_get_auth),
- STD_IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext),
- STD_IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext),
+ IW_HANDLER(SIOCSIWCOMMIT, (iw_handler)orinoco_ioctl_commit),
+ IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
+ IW_HANDLER(SIOCSIWFREQ, (iw_handler)orinoco_ioctl_setfreq),
+ IW_HANDLER(SIOCGIWFREQ, (iw_handler)orinoco_ioctl_getfreq),
+ IW_HANDLER(SIOCSIWMODE, (iw_handler)cfg80211_wext_siwmode),
+ IW_HANDLER(SIOCGIWMODE, (iw_handler)cfg80211_wext_giwmode),
+ IW_HANDLER(SIOCSIWSENS, (iw_handler)orinoco_ioctl_setsens),
+ IW_HANDLER(SIOCGIWSENS, (iw_handler)orinoco_ioctl_getsens),
+ IW_HANDLER(SIOCGIWRANGE, (iw_handler)cfg80211_wext_giwrange),
+ IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+ IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
+ IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
+ IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
+ IW_HANDLER(SIOCSIWAP, (iw_handler)orinoco_ioctl_setwap),
+ IW_HANDLER(SIOCGIWAP, (iw_handler)orinoco_ioctl_getwap),
+ IW_HANDLER(SIOCSIWSCAN, (iw_handler)cfg80211_wext_siwscan),
+ IW_HANDLER(SIOCGIWSCAN, (iw_handler)cfg80211_wext_giwscan),
+ IW_HANDLER(SIOCSIWESSID, (iw_handler)orinoco_ioctl_setessid),
+ IW_HANDLER(SIOCGIWESSID, (iw_handler)orinoco_ioctl_getessid),
+ IW_HANDLER(SIOCSIWRATE, (iw_handler)orinoco_ioctl_setrate),
+ IW_HANDLER(SIOCGIWRATE, (iw_handler)orinoco_ioctl_getrate),
+ IW_HANDLER(SIOCSIWRTS, (iw_handler)cfg80211_wext_siwrts),
+ IW_HANDLER(SIOCGIWRTS, (iw_handler)cfg80211_wext_giwrts),
+ IW_HANDLER(SIOCSIWFRAG, (iw_handler)cfg80211_wext_siwfrag),
+ IW_HANDLER(SIOCGIWFRAG, (iw_handler)cfg80211_wext_giwfrag),
+ IW_HANDLER(SIOCGIWRETRY, (iw_handler)cfg80211_wext_giwretry),
+ IW_HANDLER(SIOCSIWENCODE, (iw_handler)orinoco_ioctl_setiwencode),
+ IW_HANDLER(SIOCGIWENCODE, (iw_handler)orinoco_ioctl_getiwencode),
+ IW_HANDLER(SIOCSIWPOWER, (iw_handler)orinoco_ioctl_setpower),
+ IW_HANDLER(SIOCGIWPOWER, (iw_handler)orinoco_ioctl_getpower),
+ IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie),
+ IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie),
+ IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme),
+ IW_HANDLER(SIOCSIWAUTH, orinoco_ioctl_set_auth),
+ IW_HANDLER(SIOCGIWAUTH, orinoco_ioctl_get_auth),
+ IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext),
+ IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext),
};
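
Both tables move from the driver-local STD_IW_HANDLER macro to the generic IW_HANDLER helper from iw_handler.h; to a first approximation both expand to the same designated-initializer indexing (sketch with hypothetical macro names, not quoted from the header):

        /* Index a handler by its ioctl number, relative to the first WEXT ioctl. */
        #define MY_IW_IOCTL_IDX(cmd)    ((cmd) - SIOCIWFIRST)
        #define MY_IW_HANDLER(id, fn)   [MY_IW_IOCTL_IDX(id)] = (fn)

        /* e.g. MY_IW_HANDLER(SIOCSIWCOMMIT, orinoco_ioctl_commit) places the
         * handler at slot SIOCSIWCOMMIT - SIOCIWFIRST in the array. */
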
@@ -1553,15 +1378,15 @@ static const iw_handler orinoco_handler[] = {
Added typecasting since we no longer use iwreq_data -- Moustafa
*/
static const iw_handler orinoco_private_handler[] = {
- [0] = (iw_handler) orinoco_ioctl_reset,
- [1] = (iw_handler) orinoco_ioctl_reset,
- [2] = (iw_handler) orinoco_ioctl_setport3,
- [3] = (iw_handler) orinoco_ioctl_getport3,
- [4] = (iw_handler) orinoco_ioctl_setpreamble,
- [5] = (iw_handler) orinoco_ioctl_getpreamble,
- [6] = (iw_handler) orinoco_ioctl_setibssport,
- [7] = (iw_handler) orinoco_ioctl_getibssport,
- [9] = (iw_handler) orinoco_ioctl_getrid,
+ [0] = (iw_handler)orinoco_ioctl_reset,
+ [1] = (iw_handler)orinoco_ioctl_reset,
+ [2] = (iw_handler)orinoco_ioctl_setport3,
+ [3] = (iw_handler)orinoco_ioctl_getport3,
+ [4] = (iw_handler)orinoco_ioctl_setpreamble,
+ [5] = (iw_handler)orinoco_ioctl_getpreamble,
+ [6] = (iw_handler)orinoco_ioctl_setibssport,
+ [7] = (iw_handler)orinoco_ioctl_getibssport,
+ [9] = (iw_handler)orinoco_ioctl_getrid,
};
const struct iw_handler_def orinoco_handler_def = {
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index a7cb9eb..c072f41 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -546,7 +546,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_BEACON_FILTER |
- IEEE80211_HW_NOISE_DBM;
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index c24067f..07c4528 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -132,7 +132,7 @@ static int p54p_upload_firmware(struct ieee80211_hw *dev)
static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
int ring_index, struct p54p_desc *ring, u32 ring_limit,
- struct sk_buff **rx_buf)
+ struct sk_buff **rx_buf, u32 index)
{
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
@@ -140,7 +140,7 @@ static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
idx = le32_to_cpu(ring_control->host_idx[ring_index]);
limit = idx;
- limit -= le32_to_cpu(ring_control->device_idx[ring_index]);
+ limit -= index;
limit = ring_limit - limit;
i = idx % ring_limit;
@@ -232,7 +232,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
i %= ring_limit;
}
- p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf);
+ p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index);
}
static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
@@ -445,10 +445,10 @@ static int p54p_open(struct ieee80211_hw *dev)
priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;
p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
- ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data);
+ ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data, 0);
p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
- ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt);
+ ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt, 0);
P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
P54P_READ(ring_control_base);
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 743a6c6..d5b197b 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -875,7 +875,6 @@ static void p54u_stop(struct ieee80211_hw *dev)
the hardware is still usable next time we want to start it.
until then, we just stop listening to the hardware.. */
p54u_free_urbs(dev);
- return;
}
static int __devinit p54u_probe(struct usb_interface *intf,
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 6605799..4e68910 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -38,7 +38,7 @@ static void p54_dump_tx_queue(struct p54_common *priv)
u32 largest_hole = 0, free;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
- printk(KERN_DEBUG "%s: / --- tx queue dump (%d entries) --- \n",
+ printk(KERN_DEBUG "%s: / --- tx queue dump (%d entries) ---\n",
wiphy_name(priv->hw->wiphy), skb_queue_len(&priv->tx_queue));
prev_addr = priv->rx_start;
@@ -350,7 +350,6 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
rx_status->flag |= RX_FLAG_MMIC_ERROR;
rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi);
- rx_status->noise = priv->noise;
if (hdr->rate & 0x10)
rx_status->flag |= RX_FLAG_SHORTPRE;
if (priv->hw->conf.channel->band == IEEE80211_BAND_5GHZ)
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index a45818e..8d1190c 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -210,8 +210,6 @@ prism54_update_stats(struct work_struct *work)
priv->local_iwstatistics.discard.retries = r.u;
mutex_unlock(&priv->stats_lock);
-
- return;
}
struct iw_statistics *
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 689d59a..2c8cc95 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -228,14 +228,14 @@ islpci_interrupt(int irq, void *config)
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_FUNCTION_CALLS,
- "IRQ: Identification register 0x%p 0x%x \n", device, reg);
+ "IRQ: Identification register 0x%p 0x%x\n", device, reg);
#endif
/* check for each bit in the register separately */
if (reg & ISL38XX_INT_IDENT_UPDATE) {
#if VERBOSE > SHOW_ERROR_MESSAGES
/* Queue has been updated */
- DEBUG(SHOW_TRACING, "IRQ: Update flag \n");
+ DEBUG(SHOW_TRACING, "IRQ: Update flag\n");
DEBUG(SHOW_QUEUE_INDEXES,
"CB drv Qs: [%i][%i][%i][%i][%i][%i]\n",
@@ -301,7 +301,7 @@ islpci_interrupt(int irq, void *config)
ISL38XX_CB_RX_DATA_LQ) != 0) {
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_TRACING,
- "Received frame in Data Low Queue \n");
+ "Received frame in Data Low Queue\n");
#endif
islpci_eth_receive(priv);
}
@@ -326,7 +326,7 @@ islpci_interrupt(int irq, void *config)
/* Device has been initialized */
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_TRACING,
- "IRQ: Init flag, device initialized \n");
+ "IRQ: Init flag, device initialized\n");
#endif
wake_up(&priv->reset_done);
}
@@ -334,7 +334,7 @@ islpci_interrupt(int irq, void *config)
if (reg & ISL38XX_INT_IDENT_SLEEP) {
/* Device intends to move to powersave state */
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_TRACING, "IRQ: Sleep flag \n");
+ DEBUG(SHOW_TRACING, "IRQ: Sleep flag\n");
#endif
isl38xx_handle_sleep_request(priv->control_block,
&powerstate,
@@ -344,7 +344,7 @@ islpci_interrupt(int irq, void *config)
if (reg & ISL38XX_INT_IDENT_WAKEUP) {
/* Device has been woken up to active state */
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_TRACING, "IRQ: Wakeup flag \n");
+ DEBUG(SHOW_TRACING, "IRQ: Wakeup flag\n");
#endif
isl38xx_handle_wakeup(priv->control_block,
@@ -635,7 +635,7 @@ islpci_alloc_memory(islpci_private *priv)
ioremap(pci_resource_start(priv->pdev, 0),
ISL38XX_PCI_MEM_SIZE))) {
/* error in remapping the PCI device memory address range */
- printk(KERN_ERR "PCI memory remapping failed \n");
+ printk(KERN_ERR "PCI memory remapping failed\n");
return -1;
}
@@ -902,7 +902,7 @@ islpci_setup(struct pci_dev *pdev)
if (register_netdev(ndev)) {
DEBUG(SHOW_ERROR_MESSAGES,
- "ERROR: register_netdev() failed \n");
+ "ERROR: register_netdev() failed\n");
goto do_islpci_free_memory;
}
@@ -946,7 +946,7 @@ islpci_set_state(islpci_private *priv, islpci_state_t new_state)
if (!priv->state_off)
priv->state = new_state;
break;
- };
+ }
#if 0
printk(KERN_DEBUG "%s: state transition %d -> %d (off#%d)\n",
priv->ndev->name, old_state, new_state, priv->state_off);
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index ac99eaa..2fc52bc 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -90,7 +90,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
u32 curr_frag;
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit \n");
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit\n");
#endif
/* lock the driver code */
@@ -141,7 +141,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
}
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_TRACING, "memmove %p %p %i \n", skb->data,
+ DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data,
src, skb->len);
#endif
} else {
@@ -224,8 +224,6 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
priv->data_low_tx_full = 1;
}
- /* set the transmission time */
- ndev->trans_start = jiffies;
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
@@ -320,7 +318,7 @@ islpci_eth_receive(islpci_private *priv)
int discard = 0;
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive \n");
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive\n");
#endif
/* the device has written an Ethernet frame in the data area
@@ -432,7 +430,7 @@ islpci_eth_receive(islpci_private *priv)
skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
if (unlikely(skb == NULL)) {
/* error allocating an sk_buff structure elements */
- DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb \n");
+ DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb\n");
break;
}
skb_reserve(skb, (4 - (long) skb->data) & 0x03);
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index adb2897..a5224f6 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -114,7 +114,7 @@ islpci_mgmt_rx_fill(struct net_device *ndev)
u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill \n");
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n");
#endif
while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
@@ -212,7 +212,7 @@ islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
{
pimfor_header_t *h = buf.mem;
DEBUG(SHOW_PIMFOR_FRAMES,
- "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x \n",
+ "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x\n",
h->operation, oid, h->device_id, h->flags, length);
/* display the buffer contents for debugging */
@@ -280,7 +280,7 @@ islpci_mgt_receive(struct net_device *ndev)
u32 curr_frag;
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive \n");
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive\n");
#endif
/* Only once per interrupt, determine fragment range to
@@ -339,7 +339,7 @@ islpci_mgt_receive(struct net_device *ndev)
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_PIMFOR_FRAMES,
- "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x \n",
+ "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n",
header->operation, header->oid, header->device_id,
header->flags, header->length);
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index d66933d..9b796ca 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -820,7 +820,7 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
k = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", list->nr);
for (i = 0; i < list->nr; i++)
k += snprintf(str + k, PRIV_STR_SIZE - k,
- "bss[%u] : \nage=%u\nchannel=%u\n"
+ "bss[%u] :\nage=%u\nchannel=%u\n"
"capinfo=0x%X\nrates=0x%X\n"
"basic_rates=0x%X\n",
i, list->bsslist[i].age,
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index f7d2a34..abff893 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -546,7 +546,7 @@ static int ray_init(struct net_device *dev)
local->fw_ver = local->startup_res.firmware_version[0];
local->fw_bld = local->startup_res.firmware_version[1];
local->fw_var = local->startup_res.firmware_version[2];
- dev_dbg(&link->dev, "ray_init firmware version %d.%d \n", local->fw_ver,
+ dev_dbg(&link->dev, "ray_init firmware version %d.%d\n", local->fw_ver,
local->fw_bld);
local->tib_length = 0x20;
@@ -726,8 +726,6 @@ static void verify_dl_startup(u_long data)
start_net((u_long) local);
else
join_net((u_long) local);
-
- return;
} /* end verify_dl_startup */
/*===========================================================================*/
@@ -755,7 +753,6 @@ static void start_net(u_long data)
return;
}
local->card_status = CARD_DOING_ACQ;
- return;
} /* end start_net */
/*===========================================================================*/
@@ -786,7 +783,6 @@ static void join_net(u_long data)
return;
}
local->card_status = CARD_DOING_ACQ;
- return;
}
/*============================================================================
@@ -932,7 +928,6 @@ static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb,
case XMIT_MSG_BAD:
case XMIT_OK:
default:
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
}
@@ -1103,10 +1098,10 @@ static const struct ethtool_ops netdev_ethtool_ops = {
/*
* Wireless Handler : get protocol name
*/
-static int ray_get_name(struct net_device *dev,
- struct iw_request_info *info, char *cwrq, char *extra)
+static int ray_get_name(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
- strcpy(cwrq, "IEEE 802.11-FH");
+ strcpy(wrqu->name, "IEEE 802.11-FH");
return 0;
}
@@ -1114,9 +1109,8 @@ static int ray_get_name(struct net_device *dev,
/*
* Wireless Handler : set frequency
*/
-static int ray_set_freq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *fwrq, char *extra)
+static int ray_set_freq(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
int err = -EINPROGRESS; /* Call commit handler */
@@ -1126,10 +1120,10 @@ static int ray_set_freq(struct net_device *dev,
return -EBUSY;
/* Setting by channel number */
- if ((fwrq->m > USA_HOP_MOD) || (fwrq->e > 0))
+ if ((wrqu->freq.m > USA_HOP_MOD) || (wrqu->freq.e > 0))
err = -EOPNOTSUPP;
else
- local->sparm.b5.a_hop_pattern = fwrq->m;
+ local->sparm.b5.a_hop_pattern = wrqu->freq.m;
return err;
}
@@ -1138,14 +1132,13 @@ static int ray_set_freq(struct net_device *dev,
/*
* Wireless Handler : get frequency
*/
-static int ray_get_freq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *fwrq, char *extra)
+static int ray_get_freq(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- fwrq->m = local->sparm.b5.a_hop_pattern;
- fwrq->e = 0;
+ wrqu->freq.m = local->sparm.b5.a_hop_pattern;
+ wrqu->freq.e = 0;
return 0;
}
@@ -1153,9 +1146,8 @@ static int ray_get_freq(struct net_device *dev,
/*
* Wireless Handler : set ESSID
*/
-static int ray_set_essid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+static int ray_set_essid(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
@@ -1164,19 +1156,17 @@ static int ray_set_essid(struct net_device *dev,
return -EBUSY;
/* Check if we asked for `any' */
- if (dwrq->flags == 0) {
+ if (wrqu->essid.flags == 0)
/* Corey : can you do that ? */
return -EOPNOTSUPP;
- } else {
- /* Check the size of the string */
- if (dwrq->length > IW_ESSID_MAX_SIZE) {
- return -E2BIG;
- }
- /* Set the ESSID in the card */
- memset(local->sparm.b5.a_current_ess_id, 0, IW_ESSID_MAX_SIZE);
- memcpy(local->sparm.b5.a_current_ess_id, extra, dwrq->length);
- }
+ /* Check the size of the string */
+ if (wrqu->essid.length > IW_ESSID_MAX_SIZE)
+ return -E2BIG;
+
+ /* Set the ESSID in the card */
+ memset(local->sparm.b5.a_current_ess_id, 0, IW_ESSID_MAX_SIZE);
+ memcpy(local->sparm.b5.a_current_ess_id, extra, wrqu->essid.length);
return -EINPROGRESS; /* Call commit handler */
}
@@ -1185,9 +1175,8 @@ static int ray_set_essid(struct net_device *dev,
/*
* Wireless Handler : get ESSID
*/
-static int ray_get_essid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+static int ray_get_essid(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
@@ -1195,8 +1184,8 @@ static int ray_get_essid(struct net_device *dev,
memcpy(extra, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE);
/* Push it out ! */
- dwrq->length = strlen(extra);
- dwrq->flags = 1; /* active */
+ wrqu->essid.length = strlen(extra);
+ wrqu->essid.flags = 1; /* active */
return 0;
}
@@ -1205,14 +1194,13 @@ static int ray_get_essid(struct net_device *dev,
/*
* Wireless Handler : get AP address
*/
-static int ray_get_wap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *awrq, char *extra)
+static int ray_get_wap(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- memcpy(awrq->sa_data, local->bss_id, ETH_ALEN);
- awrq->sa_family = ARPHRD_ETHER;
+ memcpy(wrqu->ap_addr.sa_data, local->bss_id, ETH_ALEN);
+ wrqu->ap_addr.sa_family = ARPHRD_ETHER;
return 0;
}
@@ -1221,9 +1209,8 @@ static int ray_get_wap(struct net_device *dev,
/*
* Wireless Handler : set Bit-Rate
*/
-static int ray_set_rate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_set_rate(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
@@ -1232,15 +1219,15 @@ static int ray_set_rate(struct net_device *dev,
return -EBUSY;
/* Check if rate is in range */
- if ((vwrq->value != 1000000) && (vwrq->value != 2000000))
+ if ((wrqu->bitrate.value != 1000000) && (wrqu->bitrate.value != 2000000))
return -EINVAL;
/* Hack for 1.5 Mb/s instead of 2 Mb/s */
if ((local->fw_ver == 0x55) && /* Please check */
- (vwrq->value == 2000000))
+ (wrqu->bitrate.value == 2000000))
local->net_default_tx_rate = 3;
else
- local->net_default_tx_rate = vwrq->value / 500000;
+ local->net_default_tx_rate = wrqu->bitrate.value / 500000;
return 0;
}
@@ -1249,17 +1236,16 @@ static int ray_set_rate(struct net_device *dev,
/*
* Wireless Handler : get Bit-Rate
*/
-static int ray_get_rate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_get_rate(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
if (local->net_default_tx_rate == 3)
- vwrq->value = 2000000; /* Hum... */
+ wrqu->bitrate.value = 2000000; /* Hum... */
else
- vwrq->value = local->net_default_tx_rate * 500000;
- vwrq->fixed = 0; /* We are in auto mode */
+ wrqu->bitrate.value = local->net_default_tx_rate * 500000;
+ wrqu->bitrate.fixed = 0; /* We are in auto mode */
return 0;
}
@@ -1268,19 +1254,18 @@ static int ray_get_rate(struct net_device *dev,
/*
* Wireless Handler : set RTS threshold
*/
-static int ray_set_rts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_set_rts(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- int rthr = vwrq->value;
+ int rthr = wrqu->rts.value;
/* Reject if card is already initialised */
if (local->card_status != CARD_AWAITING_PARAM)
return -EBUSY;
/* if(wrq->u.rts.fixed == 0) we should complain */
- if (vwrq->disabled)
+ if (wrqu->rts.disabled)
rthr = 32767;
else {
if ((rthr < 0) || (rthr > 2347)) /* What's the max packet size ??? */
@@ -1296,16 +1281,15 @@ static int ray_set_rts(struct net_device *dev,
/*
* Wireless Handler : get RTS threshold
*/
-static int ray_get_rts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_get_rts(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- vwrq->value = (local->sparm.b5.a_rts_threshold[0] << 8)
+ wrqu->rts.value = (local->sparm.b5.a_rts_threshold[0] << 8)
+ local->sparm.b5.a_rts_threshold[1];
- vwrq->disabled = (vwrq->value == 32767);
- vwrq->fixed = 1;
+ wrqu->rts.disabled = (wrqu->rts.value == 32767);
+ wrqu->rts.fixed = 1;
return 0;
}
@@ -1314,19 +1298,18 @@ static int ray_get_rts(struct net_device *dev,
/*
* Wireless Handler : set Fragmentation threshold
*/
-static int ray_set_frag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_set_frag(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- int fthr = vwrq->value;
+ int fthr = wrqu->frag.value;
/* Reject if card is already initialised */
if (local->card_status != CARD_AWAITING_PARAM)
return -EBUSY;
/* if(wrq->u.frag.fixed == 0) should complain */
- if (vwrq->disabled)
+ if (wrqu->frag.disabled)
fthr = 32767;
else {
if ((fthr < 256) || (fthr > 2347)) /* To check out ! */
@@ -1342,16 +1325,15 @@ static int ray_set_frag(struct net_device *dev,
/*
* Wireless Handler : get Fragmentation threshold
*/
-static int ray_get_frag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_get_frag(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- vwrq->value = (local->sparm.b5.a_frag_threshold[0] << 8)
+ wrqu->frag.value = (local->sparm.b5.a_frag_threshold[0] << 8)
+ local->sparm.b5.a_frag_threshold[1];
- vwrq->disabled = (vwrq->value == 32767);
- vwrq->fixed = 1;
+ wrqu->frag.disabled = (wrqu->frag.value == 32767);
+ wrqu->frag.fixed = 1;
return 0;
}
@@ -1360,8 +1342,8 @@ static int ray_get_frag(struct net_device *dev,
/*
* Wireless Handler : set Mode of Operation
*/
-static int ray_set_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 *uwrq, char *extra)
+static int ray_set_mode(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
int err = -EINPROGRESS; /* Call commit handler */
@@ -1371,7 +1353,7 @@ static int ray_set_mode(struct net_device *dev,
if (local->card_status != CARD_AWAITING_PARAM)
return -EBUSY;
- switch (*uwrq) {
+ switch (wrqu->mode) {
case IW_MODE_ADHOC:
card_mode = 0;
/* Fall through */
@@ -1389,15 +1371,15 @@ static int ray_set_mode(struct net_device *dev,
/*
* Wireless Handler : get Mode of Operation
*/
-static int ray_get_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 *uwrq, char *extra)
+static int ray_get_mode(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
if (local->sparm.b5.a_network_type)
- *uwrq = IW_MODE_INFRA;
+ wrqu->mode = IW_MODE_INFRA;
else
- *uwrq = IW_MODE_ADHOC;
+ wrqu->mode = IW_MODE_ADHOC;
return 0;
}
@@ -1406,16 +1388,15 @@ static int ray_get_mode(struct net_device *dev,
/*
* Wireless Handler : get range info
*/
-static int ray_get_range(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+static int ray_get_range(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct iw_range *range = (struct iw_range *)extra;
- memset((char *)range, 0, sizeof(struct iw_range));
+ memset(range, 0, sizeof(struct iw_range));
/* Set the length (very important for backward compatibility) */
- dwrq->length = sizeof(struct iw_range);
+ wrqu->data.length = sizeof(struct iw_range);
/* Set the Wireless Extension versions */
range->we_version_compiled = WIRELESS_EXT;
@@ -1438,8 +1419,7 @@ static int ray_get_range(struct net_device *dev,
/*
* Wireless Private Handler : set framing mode
*/
-static int ray_set_framing(struct net_device *dev,
- struct iw_request_info *info,
+static int ray_set_framing(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
translate = *(extra); /* Set framing mode */
@@ -1451,8 +1431,7 @@ static int ray_set_framing(struct net_device *dev,
/*
* Wireless Private Handler : get framing mode
*/
-static int ray_get_framing(struct net_device *dev,
- struct iw_request_info *info,
+static int ray_get_framing(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
*(extra) = translate;
@@ -1464,8 +1443,7 @@ static int ray_get_framing(struct net_device *dev,
/*
* Wireless Private Handler : get country
*/
-static int ray_get_country(struct net_device *dev,
- struct iw_request_info *info,
+static int ray_get_country(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
*(extra) = country;
@@ -1477,10 +1455,9 @@ static int ray_get_country(struct net_device *dev,
/*
* Commit handler : called after a bunch of SET operations
*/
-static int ray_commit(struct net_device *dev, struct iw_request_info *info, /* NULL */
- void *zwrq, /* NULL */
- char *extra)
-{ /* NULL */
+static int ray_commit(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
return 0;
}
@@ -1521,28 +1498,28 @@ static iw_stats *ray_get_wireless_stats(struct net_device *dev)
*/
static const iw_handler ray_handler[] = {
- [SIOCSIWCOMMIT - SIOCIWFIRST] = (iw_handler) ray_commit,
- [SIOCGIWNAME - SIOCIWFIRST] = (iw_handler) ray_get_name,
- [SIOCSIWFREQ - SIOCIWFIRST] = (iw_handler) ray_set_freq,
- [SIOCGIWFREQ - SIOCIWFIRST] = (iw_handler) ray_get_freq,
- [SIOCSIWMODE - SIOCIWFIRST] = (iw_handler) ray_set_mode,
- [SIOCGIWMODE - SIOCIWFIRST] = (iw_handler) ray_get_mode,
- [SIOCGIWRANGE - SIOCIWFIRST] = (iw_handler) ray_get_range,
+ IW_HANDLER(SIOCSIWCOMMIT, ray_commit),
+ IW_HANDLER(SIOCGIWNAME, ray_get_name),
+ IW_HANDLER(SIOCSIWFREQ, ray_set_freq),
+ IW_HANDLER(SIOCGIWFREQ, ray_get_freq),
+ IW_HANDLER(SIOCSIWMODE, ray_set_mode),
+ IW_HANDLER(SIOCGIWMODE, ray_get_mode),
+ IW_HANDLER(SIOCGIWRANGE, ray_get_range),
#ifdef WIRELESS_SPY
- [SIOCSIWSPY - SIOCIWFIRST] = (iw_handler) iw_handler_set_spy,
- [SIOCGIWSPY - SIOCIWFIRST] = (iw_handler) iw_handler_get_spy,
- [SIOCSIWTHRSPY - SIOCIWFIRST] = (iw_handler) iw_handler_set_thrspy,
- [SIOCGIWTHRSPY - SIOCIWFIRST] = (iw_handler) iw_handler_get_thrspy,
+ IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+ IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
+ IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
+ IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
#endif /* WIRELESS_SPY */
- [SIOCGIWAP - SIOCIWFIRST] = (iw_handler) ray_get_wap,
- [SIOCSIWESSID - SIOCIWFIRST] = (iw_handler) ray_set_essid,
- [SIOCGIWESSID - SIOCIWFIRST] = (iw_handler) ray_get_essid,
- [SIOCSIWRATE - SIOCIWFIRST] = (iw_handler) ray_set_rate,
- [SIOCGIWRATE - SIOCIWFIRST] = (iw_handler) ray_get_rate,
- [SIOCSIWRTS - SIOCIWFIRST] = (iw_handler) ray_set_rts,
- [SIOCGIWRTS - SIOCIWFIRST] = (iw_handler) ray_get_rts,
- [SIOCSIWFRAG - SIOCIWFIRST] = (iw_handler) ray_set_frag,
- [SIOCGIWFRAG - SIOCIWFIRST] = (iw_handler) ray_get_frag,
+ IW_HANDLER(SIOCGIWAP, ray_get_wap),
+ IW_HANDLER(SIOCSIWESSID, ray_set_essid),
+ IW_HANDLER(SIOCGIWESSID, ray_get_essid),
+ IW_HANDLER(SIOCSIWRATE, ray_set_rate),
+ IW_HANDLER(SIOCGIWRATE, ray_get_rate),
+ IW_HANDLER(SIOCSIWRTS, ray_set_rts),
+ IW_HANDLER(SIOCGIWRTS, ray_get_rts),
+ IW_HANDLER(SIOCSIWFRAG, ray_set_frag),
+ IW_HANDLER(SIOCGIWFRAG, ray_get_frag),
};
#define SIOCSIPFRAMING SIOCIWFIRSTPRIV /* Set framing mode */
@@ -1550,9 +1527,9 @@ static const iw_handler ray_handler[] = {
#define SIOCGIPCOUNTRY SIOCIWFIRSTPRIV + 3 /* Get country code */
static const iw_handler ray_private_handler[] = {
- [0] = (iw_handler) ray_set_framing,
- [1] = (iw_handler) ray_get_framing,
- [3] = (iw_handler) ray_get_country,
+ [0] = ray_set_framing,
+ [1] = ray_get_framing,
+ [3] = ray_get_country,
};
static const struct iw_priv_args ray_private_args[] = {
@@ -1636,7 +1613,6 @@ static int ray_dev_close(struct net_device *dev)
static void ray_reset(struct net_device *dev)
{
pr_debug("ray_reset entered\n");
- return;
}
/*===========================================================================*/
@@ -1883,17 +1859,17 @@ static void ray_update_multi_list(struct net_device *dev, int all)
writeb(0xff, &pccs->var);
local->num_multi = 0xff;
} else {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i = 0;
/* Copy the kernel's list of MC addresses to card */
- netdev_for_each_mc_addr(dmi, dev) {
- memcpy_toio(p, dmi->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy_toio(p, ha->addr, ETH_ALEN);
dev_dbg(&link->dev,
"ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n",
- dmi->dmi_addr[0], dmi->dmi_addr[1],
- dmi->dmi_addr[2], dmi->dmi_addr[3],
- dmi->dmi_addr[4], dmi->dmi_addr[5]);
+ ha->addr[0], ha->addr[1],
+ ha->addr[2], ha->addr[3],
+ ha->addr[4], ha->addr[5]);
p += ETH_ALEN;
i++;
}
@@ -2242,7 +2218,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
(dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
FCS_LEN)) {
pr_debug(
- "ray_cs invalid packet length %d received \n",
+ "ray_cs invalid packet length %d received\n",
rx_len);
return;
}
@@ -2253,7 +2229,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
(dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
FCS_LEN)) {
pr_debug(
- "ray_cs invalid packet length %d received \n",
+ "ray_cs invalid packet length %d received\n",
rx_len);
return;
}
@@ -2761,11 +2737,11 @@ static int ray_cs_proc_show(struct seq_file *m, void *v)
seq_printf(m, "Hop dwell = %d Kus\n",
pfh->dwell_time[0] +
256 * pfh->dwell_time[1]);
- seq_printf(m, "Hop set = %d \n",
+ seq_printf(m, "Hop set = %d\n",
pfh->hop_set);
- seq_printf(m, "Hop pattern = %d \n",
+ seq_printf(m, "Hop pattern = %d\n",
pfh->hop_pattern);
- seq_printf(m, "Hop index = %d \n",
+ seq_printf(m, "Hop index = %d\n",
pfh->hop_index);
p += p[1] + 2;
} else {
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 1de5b22..2d28908 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -118,6 +118,7 @@ MODULE_PARM_DESC(workaround_interval,
#define OID_802_11_ADD_KEY cpu_to_le32(0x0d01011d)
#define OID_802_11_REMOVE_KEY cpu_to_le32(0x0d01011e)
#define OID_802_11_ASSOCIATION_INFORMATION cpu_to_le32(0x0d01011f)
+#define OID_802_11_CAPABILITY cpu_to_le32(0x0d010122)
#define OID_802_11_PMKID cpu_to_le32(0x0d010123)
#define OID_802_11_NETWORK_TYPES_SUPPORTED cpu_to_le32(0x0d010203)
#define OID_802_11_NETWORK_TYPE_IN_USE cpu_to_le32(0x0d010204)
@@ -359,6 +360,30 @@ struct ndis_80211_assoc_info {
__le32 offset_resp_ies;
} __attribute__((packed));
+struct ndis_80211_auth_encr_pair {
+ __le32 auth_mode;
+ __le32 encr_mode;
+} __attribute__((packed));
+
+struct ndis_80211_capability {
+ __le32 length;
+ __le32 version;
+ __le32 num_pmkids;
+ __le32 num_auth_encr_pair;
+ struct ndis_80211_auth_encr_pair auth_encr_pair[0];
+} __attribute__((packed));
+
+struct ndis_80211_bssid_info {
+ u8 bssid[6];
+ u8 pmkid[16];
+};
+
+struct ndis_80211_pmkid {
+ __le32 length;
+ __le32 bssid_info_count;
+ struct ndis_80211_bssid_info bssid_info[0];
+};
+
/*
* private data
*/
@@ -477,13 +502,7 @@ struct rndis_wlan_private {
/* encryption stuff */
int encr_tx_key_index;
struct rndis_wlan_encr_key encr_keys[4];
- enum nl80211_auth_type wpa_auth_type;
int wpa_version;
- int wpa_keymgmt;
- int wpa_ie_len;
- u8 *wpa_ie;
- int wpa_cipher_pair;
- int wpa_cipher_group;
u8 command_buffer[COMMAND_BUFFER_SIZE];
};
@@ -516,7 +535,7 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev);
-static int rndis_set_channel(struct wiphy *wiphy,
+static int rndis_set_channel(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan, enum nl80211_channel_type channel_type);
static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
@@ -535,6 +554,14 @@ static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo);
+static int rndis_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa);
+
+static int rndis_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa);
+
+static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev);
+
static struct cfg80211_ops rndis_config_ops = {
.change_virtual_intf = rndis_change_virtual_intf,
.scan = rndis_scan,
@@ -551,6 +578,9 @@ static struct cfg80211_ops rndis_config_ops = {
.set_default_key = rndis_set_default_key,
.get_station = rndis_get_station,
.dump_station = rndis_dump_station,
+ .set_pmksa = rndis_set_pmksa,
+ .del_pmksa = rndis_del_pmksa,
+ .flush_pmksa = rndis_flush_pmksa,
};
static void *rndis_wiphy_privid = &rndis_wiphy_privid;
@@ -705,6 +735,7 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
struct rndis_query_c *get_c;
} u;
int ret, buflen;
+ int resplen, respoffs, copylen;
buflen = *len + sizeof(*u.get);
if (buflen < CONTROL_BUFFER_SIZE)
@@ -734,11 +765,34 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
le32_to_cpu(u.get_c->status));
if (ret == 0) {
- memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len);
+ resplen = le32_to_cpu(u.get_c->len);
+ respoffs = le32_to_cpu(u.get_c->offset) + 8;
- ret = le32_to_cpu(u.get_c->len);
- if (ret > *len)
- *len = ret;
+ if (respoffs > buflen) {
+ /* Device returned data offset outside buffer, error. */
+ netdev_dbg(dev->net, "%s(%s): received invalid "
+ "data offset: %d > %d\n", __func__,
+ oid_to_string(oid), respoffs, buflen);
+
+ ret = -EINVAL;
+ goto exit_unlock;
+ }
+
+ if ((resplen + respoffs) > buflen) {
+ /* The device would have returned more data if the buffer had
+ * been big enough. Copy just the bits that we got.
+ */
+ copylen = buflen - respoffs;
+ } else {
+ copylen = resplen;
+ }
+
+ if (copylen > *len)
+ copylen = *len;
+
+ memcpy(data, u.buf + respoffs, copylen);
+
+ *len = resplen;
ret = rndis_error_status(u.get_c->status);
if (ret < 0)
@@ -747,6 +801,7 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
le32_to_cpu(u.get_c->status), ret);
}
+exit_unlock:
mutex_unlock(&priv->command_lock);
if (u.buf != priv->command_buffer)
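
A rough, standalone sketch of the response-bounds handling introduced above (hypothetical helper and names, not part of the patch):

        #include <errno.h>
        #include <string.h>

        /* Clamp a device-reported response so it never reads past the control
         * buffer or writes past the caller's buffer; mirrors the logic above. */
        static int clamp_response(const char *buf, int buflen,
                                  int respoffs, int resplen,
                                  char *dst, int dstlen)
        {
                int copylen;

                if (respoffs > buflen)          /* offset points outside the buffer */
                        return -EINVAL;

                if (respoffs + resplen > buflen)
                        copylen = buflen - respoffs;    /* device had more than fits */
                else
                        copylen = resplen;

                if (copylen > dstlen)
                        copylen = dstlen;       /* honour the caller's buffer size */

                memcpy(dst, buf + respoffs, copylen);
                return resplen;                 /* report the length the device claimed */
        }
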
@@ -1092,8 +1147,6 @@ static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version,
}
priv->wpa_version = wpa_version;
- priv->wpa_auth_type = auth_type;
- priv->wpa_keymgmt = keymgmt;
return 0;
}
@@ -1118,7 +1171,6 @@ static int set_priv_filter(struct usbnet *usbdev)
static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
__le32 tmp;
int encr_mode, ret;
@@ -1147,8 +1199,6 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
return ret;
}
- priv->wpa_cipher_pair = pairwise;
- priv->wpa_cipher_group = groupwise;
return 0;
}
@@ -1496,7 +1546,7 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
static void set_multicast_list(struct usbnet *usbdev)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
__le32 filter, basefilter;
int ret;
char *mc_addrs = NULL;
@@ -1535,9 +1585,9 @@ static void set_multicast_list(struct usbnet *usbdev)
return;
}
- netdev_for_each_mc_addr(mclist, usbdev->net)
+ netdev_for_each_mc_addr(ha, usbdev->net)
memcpy(mc_addrs + i++ * ETH_ALEN,
- mclist->dmi_addr, ETH_ALEN);
+ ha->addr, ETH_ALEN);
}
netif_addr_unlock_bh(usbdev->net);
@@ -1569,6 +1619,194 @@ set_filter:
le32_to_cpu(filter), ret);
}
+#ifdef DEBUG
+static void debug_print_pmkids(struct usbnet *usbdev,
+ struct ndis_80211_pmkid *pmkids,
+ const char *func_str)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ int i, len, count, max_pmkids, entry_len;
+
+ max_pmkids = priv->wdev.wiphy->max_num_pmkids;
+ len = le32_to_cpu(pmkids->length);
+ count = le32_to_cpu(pmkids->bssid_info_count);
+
+ entry_len = (count > 0) ? (len - sizeof(*pmkids)) / count : -1;
+
+ netdev_dbg(usbdev->net, "%s(): %d PMKIDs (data len: %d, entry len: "
+ "%d)\n", func_str, count, len, entry_len);
+
+ if (count > max_pmkids)
+ count = max_pmkids;
+
+ for (i = 0; i < count; i++) {
+ u32 *tmp = (u32 *)pmkids->bssid_info[i].pmkid;
+
+ netdev_dbg(usbdev->net, "%s(): bssid: %pM, "
+ "pmkid: %08X:%08X:%08X:%08X\n",
+ func_str, pmkids->bssid_info[i].bssid,
+ cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
+ cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
+ }
+}
+#else
+static void debug_print_pmkids(struct usbnet *usbdev,
+ struct ndis_80211_pmkid *pmkids,
+ const char *func_str)
+{
+ return;
+}
+#endif
+
+static struct ndis_80211_pmkid *get_device_pmkids(struct usbnet *usbdev)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ struct ndis_80211_pmkid *pmkids;
+ int len, ret, max_pmkids;
+
+ max_pmkids = priv->wdev.wiphy->max_num_pmkids;
+ len = sizeof(*pmkids) + max_pmkids * sizeof(pmkids->bssid_info[0]);
+
+ pmkids = kzalloc(len, GFP_KERNEL);
+ if (!pmkids)
+ return ERR_PTR(-ENOMEM);
+
+ pmkids->length = cpu_to_le32(len);
+ pmkids->bssid_info_count = cpu_to_le32(max_pmkids);
+
+ ret = rndis_query_oid(usbdev, OID_802_11_PMKID, pmkids, &len);
+ if (ret < 0) {
+ netdev_dbg(usbdev->net, "%s(): OID_802_11_PMKID(%d, %d)"
+ " -> %d\n", __func__, len, max_pmkids, ret);
+
+ kfree(pmkids);
+ return ERR_PTR(ret);
+ }
+
+ if (le32_to_cpu(pmkids->bssid_info_count) > max_pmkids)
+ pmkids->bssid_info_count = cpu_to_le32(max_pmkids);
+
+ debug_print_pmkids(usbdev, pmkids, __func__);
+
+ return pmkids;
+}
+
+static int set_device_pmkids(struct usbnet *usbdev,
+ struct ndis_80211_pmkid *pmkids)
+{
+ int ret, len, num_pmkids;
+
+ num_pmkids = le32_to_cpu(pmkids->bssid_info_count);
+ len = sizeof(*pmkids) + num_pmkids * sizeof(pmkids->bssid_info[0]);
+ pmkids->length = cpu_to_le32(len);
+
+ debug_print_pmkids(usbdev, pmkids, __func__);
+
+ ret = rndis_set_oid(usbdev, OID_802_11_PMKID, pmkids,
+ le32_to_cpu(pmkids->length));
+ if (ret < 0) {
+ netdev_dbg(usbdev->net, "%s(): OID_802_11_PMKID(%d, %d) -> %d"
+ "\n", __func__, len, num_pmkids, ret);
+ }
+
+ kfree(pmkids);
+ return ret;
+}
+
+static struct ndis_80211_pmkid *remove_pmkid(struct usbnet *usbdev,
+ struct ndis_80211_pmkid *pmkids,
+ struct cfg80211_pmksa *pmksa,
+ int max_pmkids)
+{
+ int i, len, count, newlen, err;
+
+ len = le32_to_cpu(pmkids->length);
+ count = le32_to_cpu(pmkids->bssid_info_count);
+
+ if (count > max_pmkids)
+ count = max_pmkids;
+
+ for (i = 0; i < count; i++)
+ if (!compare_ether_addr(pmkids->bssid_info[i].bssid,
+ pmksa->bssid))
+ break;
+
+ /* pmkid not found */
+ if (i == count) {
+ netdev_dbg(usbdev->net, "%s(): bssid not found (%pM)\n",
+ __func__, pmksa->bssid);
+ err = -ENOENT;
+ goto error;
+ }
+
+ for (; i + 1 < count; i++)
+ pmkids->bssid_info[i] = pmkids->bssid_info[i + 1];
+
+ count--;
+ newlen = sizeof(*pmkids) + count * sizeof(pmkids->bssid_info[0]);
+
+ pmkids->length = cpu_to_le32(newlen);
+ pmkids->bssid_info_count = cpu_to_le32(count);
+
+ return pmkids;
+error:
+ kfree(pmkids);
+ return ERR_PTR(err);
+}
+
+static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,
+ struct ndis_80211_pmkid *pmkids,
+ struct cfg80211_pmksa *pmksa,
+ int max_pmkids)
+{
+ int i, err, len, count, newlen;
+
+ len = le32_to_cpu(pmkids->length);
+ count = le32_to_cpu(pmkids->bssid_info_count);
+
+ if (count > max_pmkids)
+ count = max_pmkids;
+
+ /* update with new pmkid */
+ for (i = 0; i < count; i++) {
+ if (compare_ether_addr(pmkids->bssid_info[i].bssid,
+ pmksa->bssid))
+ continue;
+
+ memcpy(pmkids->bssid_info[i].pmkid, pmksa->pmkid,
+ WLAN_PMKID_LEN);
+
+ return pmkids;
+ }
+
+ /* out of space, return error */
+ if (i == max_pmkids) {
+ netdev_dbg(usbdev->net, "%s(): out of space\n", __func__);
+ err = -ENOSPC;
+ goto error;
+ }
+
+ /* add new pmkid */
+ newlen = sizeof(*pmkids) + (count + 1) * sizeof(pmkids->bssid_info[0]);
+
+ pmkids = krealloc(pmkids, newlen, GFP_KERNEL);
+ if (!pmkids) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ pmkids->length = cpu_to_le32(newlen);
+ pmkids->bssid_info_count = cpu_to_le32(count + 1);
+
+ memcpy(pmkids->bssid_info[count].bssid, pmksa->bssid, ETH_ALEN);
+ memcpy(pmkids->bssid_info[count].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
+
+ return pmkids;
+error:
+ kfree(pmkids);
+ return ERR_PTR(err);
+}
+
/*
* cfg80211 ops
*/
@@ -2053,7 +2291,7 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
return deauthenticate(usbdev);
}
-static int rndis_set_channel(struct wiphy *wiphy,
+static int rndis_set_channel(struct wiphy *wiphy, struct net_device *netdev,
struct ieee80211_channel *chan, enum nl80211_channel_type channel_type)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
@@ -2179,6 +2417,78 @@ static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
+static int rndis_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct rndis_wlan_private *priv = wiphy_priv(wiphy);
+ struct usbnet *usbdev = priv->usbdev;
+ struct ndis_80211_pmkid *pmkids;
+ u32 *tmp = (u32 *)pmksa->pmkid;
+
+ netdev_dbg(usbdev->net, "%s(%pM, %08X:%08X:%08X:%08X)\n", __func__,
+ pmksa->bssid,
+ cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
+ cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
+
+ pmkids = get_device_pmkids(usbdev);
+ if (IS_ERR(pmkids)) {
+ /* couldn't read PMKID cache from device */
+ return PTR_ERR(pmkids);
+ }
+
+ pmkids = update_pmkid(usbdev, pmkids, pmksa, wiphy->max_num_pmkids);
+ if (IS_ERR(pmkids)) {
+ /* not found, list full, etc */
+ return PTR_ERR(pmkids);
+ }
+
+ return set_device_pmkids(usbdev, pmkids);
+}
+
+static int rndis_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct rndis_wlan_private *priv = wiphy_priv(wiphy);
+ struct usbnet *usbdev = priv->usbdev;
+ struct ndis_80211_pmkid *pmkids;
+ u32 *tmp = (u32 *)pmksa->pmkid;
+
+ netdev_dbg(usbdev->net, "%s(%pM, %08X:%08X:%08X:%08X)\n", __func__,
+ pmksa->bssid,
+ cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
+ cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
+
+ pmkids = get_device_pmkids(usbdev);
+ if (IS_ERR(pmkids)) {
+ /* Couldn't read PMKID cache from device */
+ return PTR_ERR(pmkids);
+ }
+
+ pmkids = remove_pmkid(usbdev, pmkids, pmksa, wiphy->max_num_pmkids);
+ if (IS_ERR(pmkids)) {
+ /* not found, etc */
+ return PTR_ERR(pmkids);
+ }
+
+ return set_device_pmkids(usbdev, pmkids);
+}
+
+static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
+{
+ struct rndis_wlan_private *priv = wiphy_priv(wiphy);
+ struct usbnet *usbdev = priv->usbdev;
+ struct ndis_80211_pmkid pmkid;
+
+ netdev_dbg(usbdev->net, "%s()\n", __func__);
+
+ memset(&pmkid, 0, sizeof(pmkid));
+
+ pmkid.length = cpu_to_le32(sizeof(pmkid));
+ pmkid.bssid_info_count = cpu_to_le32(0);
+
+ return rndis_set_oid(usbdev, OID_802_11_PMKID, &pmkid, sizeof(pmkid));
+}
+
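
The PMKID helpers above all derive the blob size from the entry count; in the driver the fields stay little-endian via cpu_to_le32(). A minimal host-order sketch of that bookkeeping (hypothetical names, assuming the same layout):

        #include <stddef.h>
        #include <stdint.h>

        struct bssid_info {
                uint8_t bssid[6];
                uint8_t pmkid[16];
        };

        struct pmkid_blob {
                uint32_t length;                /* total size in bytes */
                uint32_t bssid_info_count;      /* entries that follow */
                struct bssid_info bssid_info[];
        };

        /* Keep the length field consistent after adding or removing an entry. */
        static inline void pmkid_blob_set_count(struct pmkid_blob *p, uint32_t count)
        {
                p->bssid_info_count = count;
                p->length = sizeof(*p) + count * sizeof(p->bssid_info[0]);
        }
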
/*
* workers, indication handlers, device poller
*/
@@ -2523,12 +2833,14 @@ static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
}
}
-static int rndis_wlan_get_caps(struct usbnet *usbdev)
+static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
{
struct {
__le32 num_items;
__le32 items[8];
} networks_supported;
+ struct ndis_80211_capability *caps;
+ u8 caps_buf[sizeof(*caps) + sizeof(caps->auth_encr_pair) * 16];
int len, retval, i, n;
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
@@ -2556,6 +2868,21 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev)
}
}
+ /* get device 802.11 capabilities, number of PMKIDs */
+ caps = (struct ndis_80211_capability *)caps_buf;
+ len = sizeof(caps_buf);
+ retval = rndis_query_oid(usbdev, OID_802_11_CAPABILITY, caps, &len);
+ if (retval >= 0) {
+ netdev_dbg(usbdev->net, "OID_802_11_CAPABILITY -> len %d, "
+ "ver %d, pmkids %d, auth-encr-pairs %d\n",
+ le32_to_cpu(caps->length),
+ le32_to_cpu(caps->version),
+ le32_to_cpu(caps->num_pmkids),
+ le32_to_cpu(caps->num_auth_encr_pair));
+ wiphy->max_num_pmkids = le32_to_cpu(caps->num_pmkids);
+ } else
+ wiphy->max_num_pmkids = 0;
+
return retval;
}
@@ -2803,7 +3130,7 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
wiphy->max_scan_ssids = 1;
/* TODO: fill-out band/encr information based on priv->caps */
- rndis_wlan_get_caps(usbdev);
+ rndis_wlan_get_caps(usbdev, wiphy);
memcpy(priv->channels, rndis_channels, sizeof(rndis_channels));
memcpy(priv->rates, rndis_rates, sizeof(rndis_rates));
@@ -2863,9 +3190,6 @@ static void rndis_wlan_unbind(struct usbnet *usbdev, struct usb_interface *intf)
flush_workqueue(priv->workqueue);
destroy_workqueue(priv->workqueue);
- if (priv && priv->wpa_ie_len)
- kfree(priv->wpa_ie);
-
rndis_unbind(usbdev, intf);
wiphy_unregister(priv->wdev.wiphy);
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 5239e08..eea1ef2 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -87,7 +87,7 @@ if RT2800PCI
config RT2800PCI_RT30XX
bool "rt2800pci - Include support for rt30xx (PCI/PCIe/PCMCIA) devices"
- default n
+ default y
---help---
This adds support for rt30xx wireless chipset family to the
rt2800pci driver.
@@ -156,7 +156,7 @@ if RT2800USB
config RT2800USB_RT30XX
bool "rt2800usb - Include support for rt30xx (USB) devices"
- default n
+ default y
---help---
This adds support for rt30xx wireless chipset family to the
rt2800usb driver.
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 5f5204b..4ba7b038 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -526,6 +526,10 @@ static void rt2400pci_config_ps(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
rt2x00pci_register_write(rt2x00dev, CSR20, reg);
+ } else {
+ rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
+ rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR20, reg);
}
rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
@@ -1003,19 +1007,19 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
- __le32 *txd = skbdesc->desc;
+ __le32 *txd = entry_priv->desc;
u32 word;
/*
* Start writing the descriptor words.
*/
- rt2x00_desc_read(entry_priv->desc, 1, &word);
+ rt2x00_desc_read(txd, 1, &word);
rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
- rt2x00_desc_write(entry_priv->desc, 1, word);
+ rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
- rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, skb->len);
- rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, skb->len);
+ rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, txdesc->length);
+ rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, txdesc->length);
rt2x00_desc_write(txd, 2, word);
rt2x00_desc_read(txd, 3, &word);
@@ -1036,6 +1040,11 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1);
rt2x00_desc_write(txd, 4, word);
+ /*
+ * Writing TXD word 0 must be the last to prevent a race condition with
+ * the device, whereby the device may take hold of the TXD before we
+ * have finished updating it.
+ */
rt2x00_desc_read(txd, 0, &word);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
rt2x00_set_field32(&word, TXD_W0_VALID, 1);
@@ -1051,12 +1060,19 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
rt2x00_desc_write(txd, 0, word);
+
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txd;
+ skbdesc->desc_len = TXD_DESC_SIZE;
}
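
The comment added in this hunk states the ordering rule: every other descriptor word is written before word 0, since setting the OWNER_NIC bit in word 0 is what hands the descriptor to the hardware. A simplified sketch of that pattern (hypothetical bit names, assuming the kernel's wmb() barrier; the real driver goes through rt2x00_desc_read()/rt2x00_desc_write()):

        #include <stdint.h>

        #define TXD_VALID       (1u << 0)       /* hypothetical bit positions */
        #define TXD_OWNER_NIC   (1u << 1)

        static void write_tx_descriptor(volatile uint32_t *txd,
                                        uint32_t dma_addr, uint32_t len)
        {
                txd[1] = dma_addr;              /* buffer address */
                txd[2] = len;                   /* byte count */
                /* ... words 3..N: PLCP, IFS, retry mode, ... */

                wmb();                          /* make the other words visible first */
                txd[0] = TXD_OWNER_NIC | TXD_VALID;     /* hand off to the device last */
        }
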
/*
* TX data initialization
*/
-static void rt2400pci_write_beacon(struct queue_entry *entry)
+static void rt2400pci_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
@@ -1072,20 +1088,19 @@ static void rt2400pci_write_beacon(struct queue_entry *entry)
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
- /*
- * Replace rt2x00lib allocated descriptor with the
- * pointer to the _real_ hardware descriptor.
- * After that, map the beacon to DMA and update the
- * descriptor.
- */
- memcpy(entry_priv->desc, skbdesc->desc, skbdesc->desc_len);
- skbdesc->desc = entry_priv->desc;
-
rt2x00queue_map_txskb(rt2x00dev, entry->skb);
rt2x00_desc_read(entry_priv->desc, 1, &word);
rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
rt2x00_desc_write(entry_priv->desc, 1, word);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
+ rt2x00_set_field32(&reg, CSR14_TBCN, 1);
+ rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
+ rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
@@ -1093,17 +1108,6 @@ static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
{
u32 reg;
- if (queue == QID_BEACON) {
- rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
- if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) {
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
- rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
- rt2x00pci_register_write(rt2x00dev, CSR14, reg);
- }
- return;
- }
-
rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE));
rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK));
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 2a73f59..89d132d 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -574,6 +574,10 @@ static void rt2500pci_config_ps(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
rt2x00pci_register_write(rt2x00dev, CSR20, reg);
+ } else {
+ rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
+ rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR20, reg);
}
rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
@@ -1161,15 +1165,15 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
- __le32 *txd = skbdesc->desc;
+ __le32 *txd = entry_priv->desc;
u32 word;
/*
* Start writing the descriptor words.
*/
- rt2x00_desc_read(entry_priv->desc, 1, &word);
+ rt2x00_desc_read(txd, 1, &word);
rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
- rt2x00_desc_write(entry_priv->desc, 1, word);
+ rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER);
@@ -1190,6 +1194,11 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags));
rt2x00_desc_write(txd, 10, word);
+ /*
+ * Writing TXD word 0 must be the last to prevent a race condition with
+ * the device, whereby the device may take hold of the TXD before we
+ * have finished updating it.
+ */
rt2x00_desc_read(txd, 0, &word);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
rt2x00_set_field32(&word, TXD_W0_VALID, 1);
@@ -1205,15 +1214,22 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
+ rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE);
rt2x00_desc_write(txd, 0, word);
+
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txd;
+ skbdesc->desc_len = TXD_DESC_SIZE;
}
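
The reordering above matters because setting TXD_W0_OWNER_NIC hands the descriptor over to the hardware: every other word has to be stable before that bit is written. A condensed sketch of the pattern (hypothetical helper name; the field list is abbreviated and the accessors are the in-tree rt2x00 ones):

static void example_finish_txd(__le32 *txd, dma_addr_t skb_dma, u32 length)
{
	u32 word;

	/* All other descriptor words are written first ... */
	rt2x00_desc_read(txd, 1, &word);
	rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skb_dma);
	rt2x00_desc_write(txd, 1, word);

	/*
	 * ... and word 0 goes last: once OWNER_NIC is set the device may
	 * start reading the descriptor, so nothing may be written to it
	 * afterwards.
	 */
	rt2x00_desc_read(txd, 0, &word);
	rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, length);
	rt2x00_set_field32(&word, TXD_W0_VALID, 1);
	rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
	rt2x00_desc_write(txd, 0, word);
}
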
/*
* TX data initialization
*/
-static void rt2500pci_write_beacon(struct queue_entry *entry)
+static void rt2500pci_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
@@ -1229,20 +1245,19 @@ static void rt2500pci_write_beacon(struct queue_entry *entry)
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
- /*
- * Replace rt2x00lib allocated descriptor with the
- * pointer to the _real_ hardware descriptor.
- * After that, map the beacon to DMA and update the
- * descriptor.
- */
- memcpy(entry_priv->desc, skbdesc->desc, skbdesc->desc_len);
- skbdesc->desc = entry_priv->desc;
-
rt2x00queue_map_txskb(rt2x00dev, entry->skb);
rt2x00_desc_read(entry_priv->desc, 1, &word);
rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
rt2x00_desc_write(entry_priv->desc, 1, word);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
+ rt2x00_set_field32(&reg, CSR14_TBCN, 1);
+ rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
+ rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
@@ -1250,17 +1265,6 @@ static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
{
u32 reg;
- if (queue == QID_BEACON) {
- rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
- if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) {
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
- rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
- rt2x00pci_register_write(rt2x00dev, CSR14, reg);
- }
- return;
- }
-
rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE));
rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK));
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 8ebb705..9ae96a6 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -649,6 +649,10 @@ static void rt2500usb_config_ps(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 1);
rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
+ } else {
+ rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg);
+ rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0);
+ rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
}
rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
@@ -1030,12 +1034,30 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- __le32 *txd = skbdesc->desc;
+ __le32 *txd = (__le32 *)(skb->data - TXD_DESC_SIZE);
u32 word;
/*
* Start writing the descriptor words.
*/
+ rt2x00_desc_read(txd, 0, &word);
+ rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit);
+ rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
+ test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_ACK,
+ test_bit(ENTRY_TXD_ACK, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
+ test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_OFDM,
+ (txdesc->rate_mode == RATE_MODE_OFDM));
+ rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
+ test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
+ rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
+ rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
+ rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
+ rt2x00_desc_write(txd, 0, word);
+
rt2x00_desc_read(txd, 1, &word);
rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
rt2x00_set_field32(&word, TXD_W1_AIFS, txdesc->aifs);
@@ -1055,23 +1077,11 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
_rt2x00_desc_write(txd, 4, skbdesc->iv[1]);
}
- rt2x00_desc_read(txd, 0, &word);
- rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit);
- rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
- test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_ACK,
- test_bit(ENTRY_TXD_ACK, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
- test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_OFDM,
- (txdesc->rate_mode == RATE_MODE_OFDM));
- rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
- test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
- rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
- rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
- rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
- rt2x00_desc_write(txd, 0, word);
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txd;
+ skbdesc->desc_len = TXD_DESC_SIZE;
}
/*
@@ -1079,22 +1089,15 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
*/
static void rt2500usb_beacondone(struct urb *urb);
-static void rt2500usb_write_beacon(struct queue_entry *entry)
+static void rt2500usb_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
int pipe = usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint);
int length;
- u16 reg;
-
- /*
- * Add the descriptor in front of the skb.
- */
- skb_push(entry->skb, entry->queue->desc_size);
- memcpy(entry->skb->data, skbdesc->desc, skbdesc->desc_len);
- skbdesc->desc = entry->skb->data;
+ u16 reg, reg0;
/*
* Disable beaconing while we are reloading the beacon data,
@@ -1105,6 +1108,11 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
/*
+ * Take the descriptor in front of the skb into account.
+ */
+ skb_push(entry->skb, TXD_DESC_SIZE);
+
+ /*
* USB devices cannot blindly pass the skb->len as the
* length of the data to usb_fill_bulk_urb. Pass the skb
* to the driver to determine what the length should be.
@@ -1129,6 +1137,26 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
* Send out the guardian byte.
*/
usb_submit_urb(bcn_priv->guardian_urb, GFP_ATOMIC);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
+ rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
+ reg0 = reg;
+ rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1);
+ /*
+ * Beacon generation will fail initially.
+ * To prevent this we need to change the TXRX_CSR19
+ * register several times (reg0 is the same as reg
+ * except for TXRX_CSR19_BEACON_GEN, which is 0 in reg0
+ * and 1 in reg).
+ */
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
}
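
With the enable sequence folded into write_beacon, rt2500usb no longer needs its own kick_tx_queue and the ops table below can switch to the generic rt2x00usb_kick_tx_queue. The TXRX_CSR19 toggle could equally live in a small helper; a sketch under that assumption (hypothetical name, same register sequence as above):

static void example_enable_beacon(struct rt2x00_dev *rt2x00dev)
{
	u16 on, off;

	rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &off);
	rt2x00_set_field16(&off, TXRX_CSR19_TSF_COUNT, 1);
	rt2x00_set_field16(&off, TXRX_CSR19_TBCN, 1);
	on = off;
	rt2x00_set_field16(&on, TXRX_CSR19_BEACON_GEN, 1);

	/* Toggle BEACON_GEN several times; a single write is not reliable. */
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, on);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, off);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, on);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, off);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, on);
}
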
static int rt2500usb_get_tx_data_len(struct queue_entry *entry)
@@ -1145,37 +1173,6 @@ static int rt2500usb_get_tx_data_len(struct queue_entry *entry)
return length;
}
-static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
- const enum data_queue_qid queue)
-{
- u16 reg, reg0;
-
- if (queue != QID_BEACON) {
- rt2x00usb_kick_tx_queue(rt2x00dev, queue);
- return;
- }
-
- rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
- if (!rt2x00_get_field16(reg, TXRX_CSR19_BEACON_GEN)) {
- rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
- rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
- reg0 = reg;
- rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1);
- /*
- * Beacon generation will fail initially.
- * To prevent this we need to change the TXRX_CSR19
- * register several times (reg0 is the same as reg
- * except for TXRX_CSR19_BEACON_GEN, which is 0 in reg0
- * and 1 in reg).
- */
- rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
- rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
- rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
- rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
- rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
- }
-}
-
/*
* RX control handlers
*/
@@ -1210,11 +1207,9 @@ static void rt2500usb_fill_rxdone(struct queue_entry *entry,
if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC;
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
- rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER);
- if (rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR))
- rxdesc->cipher_status = RX_CRYPTO_FAIL_KEY;
- }
+ rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER);
+ if (rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR))
+ rxdesc->cipher_status = RX_CRYPTO_FAIL_KEY;
if (rxdesc->cipher != CIPHER_NONE) {
_rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]);
@@ -1644,11 +1639,6 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
unsigned int i;
/*
- * Disable powersaving as default.
- */
- rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- /*
* Initialize all hw fields.
*/
rt2x00dev->hw->flags =
@@ -1781,7 +1771,7 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
.write_tx_data = rt2x00usb_write_tx_data,
.write_beacon = rt2500usb_write_beacon,
.get_tx_data_len = rt2500usb_get_tx_data_len,
- .kick_tx_queue = rt2500usb_kick_tx_queue,
+ .kick_tx_queue = rt2x00usb_kick_tx_queue,
.kill_tx_queue = rt2x00usb_kill_tx_queue,
.fill_rxdone = rt2500usb_fill_rxdone,
.config_shared_key = rt2500usb_config_key,
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 74c0433..2aa0375 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -56,15 +56,20 @@
#define RF3021 0x0007
#define RF3022 0x0008
#define RF3052 0x0009
+#define RF3320 0x000b
/*
- * Chipset version.
+ * Chipset revisions.
*/
-#define RT2860C_VERSION 0x0100
-#define RT2860D_VERSION 0x0101
-#define RT2880E_VERSION 0x0200
-#define RT2883_VERSION 0x0300
-#define RT3070_VERSION 0x0200
+#define REV_RT2860C 0x0100
+#define REV_RT2860D 0x0101
+#define REV_RT2870D 0x0101
+#define REV_RT2872E 0x0200
+#define REV_RT3070E 0x0200
+#define REV_RT3070F 0x0201
+#define REV_RT3071E 0x0211
+#define REV_RT3090E 0x0211
+#define REV_RT3390E 0x0211
/*
* Signal information.
@@ -90,13 +95,19 @@
#define NUM_TX_QUEUES 4
/*
- * USB registers.
+ * Registers.
*/
/*
+ * OPT_14: Unknown register used by rt3xxx devices.
+ */
+#define OPT_14_CSR 0x0114
+#define OPT_14_CSR_BIT0 FIELD32(0x00000001)
+
+/*
* INT_SOURCE_CSR: Interrupt source register.
* Write one to clear corresponding bit.
- * TX_FIFO_STATUS: FIFO Statistics is full, sw should read 0x171c
+ * TX_FIFO_STATUS: FIFO statistics full, sw should read TX_STA_FIFO
*/
#define INT_SOURCE_CSR 0x0200
#define INT_SOURCE_CSR_RXDELAYINT FIELD32(0x00000001)
@@ -398,6 +409,31 @@
#define EFUSE_DATA3 0x059c
/*
+ * LDO_CFG0
+ */
+#define LDO_CFG0 0x05d4
+#define LDO_CFG0_DELAY3 FIELD32(0x000000ff)
+#define LDO_CFG0_DELAY2 FIELD32(0x0000ff00)
+#define LDO_CFG0_DELAY1 FIELD32(0x00ff0000)
+#define LDO_CFG0_BGSEL FIELD32(0x03000000)
+#define LDO_CFG0_LDO_CORE_VLEVEL FIELD32(0x1c000000)
+#define LDO_CFG0_LDO25_LEVEL FIELD32(0x60000000)
+#define LDO_CFG0_LDO25_LARGEA FIELD32(0x80000000)
+
+/*
+ * GPIO_SWITCH
+ */
+#define GPIO_SWITCH 0x05dc
+#define GPIO_SWITCH_0 FIELD32(0x00000001)
+#define GPIO_SWITCH_1 FIELD32(0x00000002)
+#define GPIO_SWITCH_2 FIELD32(0x00000004)
+#define GPIO_SWITCH_3 FIELD32(0x00000008)
+#define GPIO_SWITCH_4 FIELD32(0x00000010)
+#define GPIO_SWITCH_5 FIELD32(0x00000020)
+#define GPIO_SWITCH_6 FIELD32(0x00000040)
+#define GPIO_SWITCH_7 FIELD32(0x00000080)
+
+/*
* MAC Control/Status Registers(CSR).
* Some values are set in TU, whereas 1 TU == 1024 us.
*/
@@ -809,7 +845,7 @@
* TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz
*/
#define TX_BAND_CFG 0x132c
-#define TX_BAND_CFG_HT40_PLUS FIELD32(0x00000001)
+#define TX_BAND_CFG_HT40_MINUS FIELD32(0x00000001)
#define TX_BAND_CFG_A FIELD32(0x00000002)
#define TX_BAND_CFG_BG FIELD32(0x00000004)
@@ -1483,7 +1519,7 @@ struct mac_iveiv_entry {
* BBP 3: RX Antenna
*/
#define BBP3_RX_ANTENNA FIELD8(0x18)
-#define BBP3_HT40_PLUS FIELD8(0x20)
+#define BBP3_HT40_MINUS FIELD8(0x20)
/*
* BBP 4: Bandwidth
@@ -1492,14 +1528,32 @@ struct mac_iveiv_entry {
#define BBP4_BANDWIDTH FIELD8(0x18)
/*
+ * BBP 138: Unknown
+ */
+#define BBP138_RX_ADC1 FIELD8(0x02)
+#define BBP138_RX_ADC2 FIELD8(0x04)
+#define BBP138_TX_DAC1 FIELD8(0x20)
+#define BBP138_TX_DAC2 FIELD8(0x40)
+
+/*
* RFCSR registers
* The wordsize of the RFCSR is 8 bits.
*/
/*
+ * RFCSR 1:
+ */
+#define RFCSR1_RF_BLOCK_EN FIELD8(0x01)
+#define RFCSR1_RX0_PD FIELD8(0x04)
+#define RFCSR1_TX0_PD FIELD8(0x08)
+#define RFCSR1_RX1_PD FIELD8(0x10)
+#define RFCSR1_TX1_PD FIELD8(0x20)
+
+/*
* RFCSR 6:
*/
-#define RFCSR6_R FIELD8(0x03)
+#define RFCSR6_R1 FIELD8(0x03)
+#define RFCSR6_R2 FIELD8(0x40)
/*
* RFCSR 7:
@@ -1512,6 +1566,33 @@ struct mac_iveiv_entry {
#define RFCSR12_TX_POWER FIELD8(0x1f)
/*
+ * RFCSR 13:
+ */
+#define RFCSR13_TX_POWER FIELD8(0x1f)
+
+/*
+ * RFCSR 15:
+ */
+#define RFCSR15_TX_LO2_EN FIELD8(0x08)
+
+/*
+ * RFCSR 17:
+ */
+#define RFCSR17_TXMIXER_GAIN FIELD8(0x07)
+#define RFCSR17_TX_LO1_EN FIELD8(0x08)
+#define RFCSR17_R FIELD8(0x20)
+
+/*
+ * RFCSR 20:
+ */
+#define RFCSR20_RX_LO1_EN FIELD8(0x08)
+
+/*
+ * RFCSR 21:
+ */
+#define RFCSR21_RX_LO2_EN FIELD8(0x08)
+
+/*
* RFCSR 22:
*/
#define RFCSR22_BASEBAND_LOOPBACK FIELD8(0x01)
@@ -1522,6 +1603,14 @@ struct mac_iveiv_entry {
#define RFCSR23_FREQ_OFFSET FIELD8(0x7f)
/*
+ * RFCSR 27:
+ */
+#define RFCSR27_R1 FIELD8(0x03)
+#define RFCSR27_R2 FIELD8(0x04)
+#define RFCSR27_R3 FIELD8(0x30)
+#define RFCSR27_R4 FIELD8(0x40)
+
+/*
* RFCSR 30:
*/
#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
@@ -1603,6 +1692,8 @@ struct mac_iveiv_entry {
#define EEPROM_NIC_WPS_PBC FIELD16(0x0080)
#define EEPROM_NIC_BW40M_BG FIELD16(0x0100)
#define EEPROM_NIC_BW40M_A FIELD16(0x0200)
+#define EEPROM_NIC_ANT_DIVERSITY FIELD16(0x0800)
+#define EEPROM_NIC_DAC_TEST FIELD16(0x8000)
/*
* EEPROM frequency
@@ -1659,6 +1750,12 @@ struct mac_iveiv_entry {
#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00)
/*
+ * EEPROM TXMIXER GAIN BG offset (note overlaps with EEPROM RSSI BG2).
+ */
+#define EEPROM_TXMIXER_GAIN_BG 0x0024
+#define EEPROM_TXMIXER_GAIN_BG_VAL FIELD16(0x0007)
+
+/*
* EEPROM RSSI A offset
*/
#define EEPROM_RSSI_A 0x0025
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index c015ce9..db4250d 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -41,9 +41,6 @@
#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
#include "rt2x00usb.h"
#endif
-#if defined(CONFIG_RT2X00_LIB_PCI) || defined(CONFIG_RT2X00_LIB_PCI_MODULE)
-#include "rt2x00pci.h"
-#endif
#include "rt2800lib.h"
#include "rt2800.h"
#include "rt2800usb.h"
@@ -76,6 +73,23 @@ MODULE_LICENSE("GPL");
rt2800_regbusy_read((__dev), H2M_MAILBOX_CSR, \
H2M_MAILBOX_CSR_OWNER, (__reg))
+static inline bool rt2800_is_305x_soc(struct rt2x00_dev *rt2x00dev)
+{
+ /* check for rt2872 on SoC */
+ if (!rt2x00_is_soc(rt2x00dev) ||
+ !rt2x00_rt(rt2x00dev, RT2872))
+ return false;
+
+ /* we know for sure that these rf chipsets are used on rt305x boards */
+ if (rt2x00_rf(rt2x00dev, RF3020) ||
+ rt2x00_rf(rt2x00dev, RF3021) ||
+ rt2x00_rf(rt2x00dev, RF3022))
+ return true;
+
+ NOTICE(rt2x00dev, "Unknown RF chipset on rt305x\n");
+ return false;
+}
+
static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev,
const unsigned int word, const u8 value)
{
@@ -268,6 +282,104 @@ int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
}
EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
+void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc)
+{
+ __le32 *txwi = (__le32 *)(skb->data - TXWI_DESC_SIZE);
+ u32 word;
+
+ /*
+ * Initialize TX Info descriptor
+ */
+ rt2x00_desc_read(txwi, 0, &word);
+ rt2x00_set_field32(&word, TXWI_W0_FRAG,
+ test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
+ rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
+ rt2x00_set_field32(&word, TXWI_W0_TS,
+ test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_AMPDU,
+ test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
+ rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->txop);
+ rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
+ rt2x00_set_field32(&word, TXWI_W0_BW,
+ test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
+ test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
+ rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
+ rt2x00_desc_write(txwi, 0, word);
+
+ rt2x00_desc_read(txwi, 1, &word);
+ rt2x00_set_field32(&word, TXWI_W1_ACK,
+ test_bit(ENTRY_TXD_ACK, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W1_NSEQ,
+ test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
+ rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
+ test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
+ txdesc->key_idx : 0xff);
+ rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
+ txdesc->length);
+ rt2x00_set_field32(&word, TXWI_W1_PACKETID, txdesc->queue + 1);
+ rt2x00_desc_write(txwi, 1, word);
+
+ /*
+ * Always write 0 to IV/EIV fields, hardware will insert the IV
+ * from the IVEIV register when TXD_W3_WIV is set to 0.
+ * When TXD_W3_WIV is set to 1 it will use the IV data
+ * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
+ * crypto entry in the registers should be used to encrypt the frame.
+ */
+ _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
+ _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
+}
+EXPORT_SYMBOL_GPL(rt2800_write_txwi);
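
rt2800_write_txwi() is now the shared TXWI builder for all rt2800 buses. It fills the TXWI sitting directly in front of skb->data, assuming the rt2x00 queue layer has already reserved the driver's extra_tx_headroom, so a bus driver only has to call it after queueing the frame. A sketch of that wiring, modelled on the PCI wrapper further down in this patch (hypothetical function name):

static int example_write_tx_data(struct queue_entry *entry,
				 struct txentry_desc *txdesc)
{
	int ret;

	/* Let the generic PCI path map the frame and reserve headroom. */
	ret = rt2x00pci_write_tx_data(entry, txdesc);
	if (ret)
		return ret;

	/* Fill the TXWI that sits directly in front of skb->data. */
	rt2800_write_txwi(entry->skb, txdesc);

	return 0;
}
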
+
+void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *rxdesc)
+{
+ __le32 *rxwi = (__le32 *) skb->data;
+ u32 word;
+
+ rt2x00_desc_read(rxwi, 0, &word);
+
+ rxdesc->cipher = rt2x00_get_field32(word, RXWI_W0_UDF);
+ rxdesc->size = rt2x00_get_field32(word, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
+
+ rt2x00_desc_read(rxwi, 1, &word);
+
+ if (rt2x00_get_field32(word, RXWI_W1_SHORT_GI))
+ rxdesc->flags |= RX_FLAG_SHORT_GI;
+
+ if (rt2x00_get_field32(word, RXWI_W1_BW))
+ rxdesc->flags |= RX_FLAG_40MHZ;
+
+ /*
+ * Detect RX rate, always use MCS as signal type.
+ */
+ rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
+ rxdesc->signal = rt2x00_get_field32(word, RXWI_W1_MCS);
+ rxdesc->rate_mode = rt2x00_get_field32(word, RXWI_W1_PHYMODE);
+
+ /*
+ * Mask out the 0x8 bit to remove the short preamble flag.
+ */
+ if (rxdesc->rate_mode == RATE_MODE_CCK)
+ rxdesc->signal &= ~0x8;
+
+ rt2x00_desc_read(rxwi, 2, &word);
+
+ rxdesc->rssi =
+ (rt2x00_get_field32(word, RXWI_W2_RSSI0) +
+ rt2x00_get_field32(word, RXWI_W2_RSSI1)) / 2;
+
+ /*
+ * Remove RXWI descriptor from start of buffer.
+ */
+ skb_pull(skb, RXWI_DESC_SIZE);
+}
+EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
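
rt2800_process_rxwi() is the RX-side counterpart: it parses the RXWI and strips it from the buffer, leaving skb->data at the 802.11 frame. An illustrative fill_rxdone shape, assuming the bus-specific RX descriptor has already been handled (hypothetical function name):

static void example_fill_rxdone(struct queue_entry *entry,
				struct rxdone_entry_desc *rxdesc)
{
	/* Bus specific RXD parsing/stripping would happen here first. */

	/* Then hand the remaining RXWI + frame to the shared helper. */
	rt2800_process_rxwi(entry->skb, rxdesc);
}
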
+
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
const struct rt2x00debug rt2800_rt2x00debug = {
.owner = THIS_MODULE,
@@ -360,11 +472,6 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
rt2800_register_read(led->rt2x00dev, LED_CFG, &reg);
rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on);
rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
- rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
- rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
- rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3);
- rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
- rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
return 0;
@@ -610,10 +717,6 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp)
{
u32 reg;
- rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
- rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 0x20);
- rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
-
rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
!!erp->short_preamble);
@@ -632,15 +735,10 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp)
rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time);
- rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, erp->sifs);
- rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, erp->sifs);
- rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
- rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
@@ -718,10 +816,10 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
rt2x00dev->lna_gain = lna_gain;
}
-static void rt2800_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_conf *conf,
- struct rf_channel *rf,
- struct channel_info *info)
+static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
{
rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
@@ -787,10 +885,10 @@ static void rt2800_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
rt2800_rf_write(rt2x00dev, 4, rf->rf4);
}
-static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_conf *conf,
- struct rf_channel *rf,
- struct channel_info *info)
+static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
{
u8 rfcsr;
@@ -798,7 +896,7 @@ static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3);
rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR6_R, rf->rf2);
+ rt2x00_set_field8(&rfcsr, RFCSR6_R1, rf->rf2);
rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
@@ -806,6 +904,11 @@ static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
TXPOWER_G_TO_DEV(info->tx_power1));
rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
+ rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
+ TXPOWER_G_TO_DEV(info->tx_power2));
+ rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
+
rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
@@ -827,15 +930,13 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
unsigned int tx_pin;
u8 bbp;
- if ((rt2x00_rt(rt2x00dev, RT3070) ||
- rt2x00_rt(rt2x00dev, RT3090)) &&
- (rt2x00_rf(rt2x00dev, RF2020) ||
- rt2x00_rf(rt2x00dev, RF3020) ||
- rt2x00_rf(rt2x00dev, RF3021) ||
- rt2x00_rf(rt2x00dev, RF3022)))
- rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info);
+ if (rt2x00_rf(rt2x00dev, RF2020) ||
+ rt2x00_rf(rt2x00dev, RF3020) ||
+ rt2x00_rf(rt2x00dev, RF3021) ||
+ rt2x00_rf(rt2x00dev, RF3022))
+ rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
else
- rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info);
+ rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
/*
* Change BBP settings
@@ -863,7 +964,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
}
rt2800_register_read(rt2x00dev, TX_BAND_CFG, &reg);
- rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_PLUS, conf_is_ht40_plus(conf));
+ rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_MINUS, conf_is_ht40_minus(conf));
rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14);
rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
rt2800_register_write(rt2x00dev, TX_BAND_CFG, reg);
@@ -896,11 +997,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 4, bbp);
rt2800_bbp_read(rt2x00dev, 3, &bbp);
- rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
+ rt2x00_set_field8(&bbp, BBP3_HT40_MINUS, conf_is_ht40_minus(conf));
rt2800_bbp_write(rt2x00dev, 3, bbp);
- if (rt2x00_rt(rt2x00dev, RT2860) &&
- (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) {
+ if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
if (conf_is_ht40(conf)) {
rt2800_bbp_write(rt2x00dev, 69, 0x1a);
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
@@ -988,10 +1088,6 @@ static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev,
libconf->conf->short_frame_max_tx_count);
rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
libconf->conf->long_frame_max_tx_count);
- rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
- rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
- rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
- rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
}
@@ -1015,13 +1111,13 @@ static void rt2800_config_ps(struct rt2x00_dev *rt2x00dev,
rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
} else {
- rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
-
rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
+
+ rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
}
}
@@ -1062,9 +1158,10 @@ EXPORT_SYMBOL_GPL(rt2800_link_stats);
static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
{
if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
- if (rt2x00_is_usb(rt2x00dev) &&
- rt2x00_rt(rt2x00dev, RT3070) &&
- (rt2x00_rev(rt2x00dev) == RT3070_VERSION))
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390))
return 0x1c + (2 * rt2x00dev->lna_gain);
else
return 0x2e + rt2x00dev->lna_gain;
@@ -1095,8 +1192,7 @@ EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
const u32 count)
{
- if (rt2x00_rt(rt2x00dev, RT2860) &&
- (rt2x00_rev(rt2x00dev) == RT2860C_VERSION))
+ if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
return;
/*
@@ -1114,8 +1210,17 @@ EXPORT_SYMBOL_GPL(rt2800_link_tuner);
int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
+ u16 eeprom;
unsigned int i;
+ rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
+ rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+
if (rt2x00_is_usb(rt2x00dev)) {
/*
* Wait until BBP and RF are ready.
@@ -1135,8 +1240,25 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL,
reg & ~0x00002000);
- } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
+ } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
+ /*
+ * Reset DMA indexes
+ */
+ rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
+ rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
+
+ rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
+ rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
+ }
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
@@ -1181,12 +1303,42 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- if (rt2x00_is_usb(rt2x00dev) &&
- rt2x00_rt(rt2x00dev, RT3070) &&
- (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) {
+ rt2800_config_filter(rt2x00dev, FIF_ALLMULTI);
+
+ rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
+ rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, 9);
+ rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
+ rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
+
+ if (rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
- rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_DAC_TEST))
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2,
+ 0x0000002c);
+ else
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2,
+ 0x0000000f);
+ } else {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
+ }
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, reg);
+ } else if (rt2x00_rt(rt2x00dev, RT3070)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
+
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000002c);
+ } else {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
+ }
} else {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -1205,19 +1357,15 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9);
+ rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 32);
rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10);
rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
- if ((rt2x00_rt(rt2x00dev, RT2872) &&
- (rt2x00_rev(rt2x00dev) >= RT2880E_VERSION)) ||
- rt2x00_rt(rt2x00dev, RT2880) ||
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT2872, REV_RT2872E) ||
rt2x00_rt(rt2x00dev, RT2883) ||
- rt2x00_rt(rt2x00dev, RT2890) ||
- rt2x00_rt(rt2x00dev, RT3052) ||
- (rt2x00_rt(rt2x00dev, RT3070) &&
- (rt2x00_rev(rt2x00dev) < RT3070_VERSION)))
+ rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E))
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
else
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
@@ -1225,38 +1373,61 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
+ rt2800_register_read(rt2x00dev, LED_CFG, &reg);
+ rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, 70);
+ rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, 30);
+ rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
+ rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
+ rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3);
+ rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
+ rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
+ rt2800_register_write(rt2x00dev, LED_CFG, reg);
+
rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);
+ rt2800_register_read(rt2x00dev, TX_RTY_CFG, &reg);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT, 15);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 31);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
+ rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
+
rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1);
+ rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY, 1);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0);
+ rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE, 1);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
- rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 8);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 3);
rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
- rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1);
- rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, 1);
rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
- rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 8);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 3);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
- rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1);
- rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, 1);
rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
@@ -1269,11 +1440,13 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
- rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL,
+ !rt2x00_is_usb(rt2x00dev));
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
@@ -1281,6 +1454,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
@@ -1293,6 +1467,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
@@ -1305,6 +1480,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
if (rt2x00_is_usb(rt2x00dev)) {
@@ -1334,6 +1510,22 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg);
rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
+
+ /*
+ * Usually the CCK SIFS time should be set to 10 and the OFDM SIFS
+ * time should be set to 16. However, the original Ralink driver uses
+ * 16 for both and indeed using a value of 10 for CCK SIFS results in
+ * connection problems with 11g + CTS protection. Hence, use the same
+ * defaults as the Ralink driver: 16 for both CCK and OFDM SIFS.
+ */
+ rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, 16);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, 16);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, 314);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
+ rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
+
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
/*
@@ -1481,45 +1673,79 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_wait_bbp_ready(rt2x00dev)))
return -EACCES;
+ if (rt2800_is_305x_soc(rt2x00dev))
+ rt2800_bbp_write(rt2x00dev, 31, 0x08);
+
rt2800_bbp_write(rt2x00dev, 65, 0x2c);
rt2800_bbp_write(rt2x00dev, 66, 0x38);
- rt2800_bbp_write(rt2x00dev, 69, 0x12);
+
+ if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
+ rt2800_bbp_write(rt2x00dev, 69, 0x16);
+ rt2800_bbp_write(rt2x00dev, 73, 0x12);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
+ rt2800_bbp_write(rt2x00dev, 73, 0x10);
+ }
+
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
- rt2800_bbp_write(rt2x00dev, 73, 0x10);
- rt2800_bbp_write(rt2x00dev, 81, 0x37);
+
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2800_bbp_write(rt2x00dev, 79, 0x13);
+ rt2800_bbp_write(rt2x00dev, 80, 0x05);
+ rt2800_bbp_write(rt2x00dev, 81, 0x33);
+ } else if (rt2800_is_305x_soc(rt2x00dev)) {
+ rt2800_bbp_write(rt2x00dev, 78, 0x0e);
+ rt2800_bbp_write(rt2x00dev, 80, 0x08);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 81, 0x37);
+ }
+
rt2800_bbp_write(rt2x00dev, 82, 0x62);
rt2800_bbp_write(rt2x00dev, 83, 0x6a);
- rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
+ if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D) ||
+ rt2x00_rt_rev(rt2x00dev, RT2870, REV_RT2870D))
+ rt2800_bbp_write(rt2x00dev, 84, 0x19);
+ else
+ rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
rt2800_bbp_write(rt2x00dev, 86, 0x00);
rt2800_bbp_write(rt2x00dev, 91, 0x04);
rt2800_bbp_write(rt2x00dev, 92, 0x00);
- rt2800_bbp_write(rt2x00dev, 103, 0x00);
- rt2800_bbp_write(rt2x00dev, 105, 0x05);
- if (rt2x00_rt(rt2x00dev, RT2860) &&
- (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) {
- rt2800_bbp_write(rt2x00dev, 69, 0x16);
- rt2800_bbp_write(rt2x00dev, 73, 0x12);
- }
-
- if (rt2x00_rt(rt2x00dev, RT2860) &&
- (rt2x00_rev(rt2x00dev) > RT2860D_VERSION))
- rt2800_bbp_write(rt2x00dev, 84, 0x19);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
+ rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
+ rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
+ rt2800_is_305x_soc(rt2x00dev))
+ rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+ else
+ rt2800_bbp_write(rt2x00dev, 103, 0x00);
- if (rt2x00_is_usb(rt2x00dev) &&
- rt2x00_rt(rt2x00dev, RT3070) &&
- (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) {
- rt2800_bbp_write(rt2x00dev, 70, 0x0a);
- rt2800_bbp_write(rt2x00dev, 84, 0x99);
+ if (rt2800_is_305x_soc(rt2x00dev))
+ rt2800_bbp_write(rt2x00dev, 105, 0x01);
+ else
rt2800_bbp_write(rt2x00dev, 105, 0x05);
- }
+ rt2800_bbp_write(rt2x00dev, 106, 0x35);
- if (rt2x00_rt(rt2x00dev, RT3052)) {
- rt2800_bbp_write(rt2x00dev, 31, 0x08);
- rt2800_bbp_write(rt2x00dev, 78, 0x0e);
- rt2800_bbp_write(rt2x00dev, 80, 0x08);
+ if (rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2800_bbp_read(rt2x00dev, 138, &value);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) == 1)
+ value |= 0x20;
+ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) == 1)
+ value &= ~0x02;
+
+ rt2800_bbp_write(rt2x00dev, 138, value);
}
+
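
For reference, the raw masks in the BBP 138 block above correspond to the BBP138_* fields added to rt2800.h earlier in this patch; a behaviour-equivalent form using the field helpers would look like the sketch below (hypothetical helper, shown only to tie the constants together):

static void example_set_bbp138(struct rt2x00_dev *rt2x00dev, u16 eeprom)
{
	u8 value;

	rt2800_bbp_read(rt2x00dev, 138, &value);

	/* Mirrors "value |= 0x20" above (BBP138_TX_DAC1 is 0x20). */
	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) == 1)
		rt2x00_set_field8(&value, BBP138_TX_DAC1, 1);

	/* Mirrors "value &= ~0x02" above (BBP138_RX_ADC1 is 0x02). */
	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) == 1)
		rt2x00_set_field8(&value, BBP138_RX_ADC1, 0);

	rt2800_bbp_write(rt2x00dev, 138, value);
}
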
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
@@ -1598,19 +1824,16 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
{
u8 rfcsr;
u8 bbp;
+ u32 reg;
+ u16 eeprom;
- if (rt2x00_is_usb(rt2x00dev) &&
- rt2x00_rt(rt2x00dev, RT3070) &&
- (rt2x00_rev(rt2x00dev) != RT3070_VERSION))
+ if (!rt2x00_rt(rt2x00dev, RT3070) &&
+ !rt2x00_rt(rt2x00dev, RT3071) &&
+ !rt2x00_rt(rt2x00dev, RT3090) &&
+ !rt2x00_rt(rt2x00dev, RT3390) &&
+ !rt2800_is_305x_soc(rt2x00dev))
return 0;
- if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
- if (!rt2x00_rf(rt2x00dev, RF3020) &&
- !rt2x00_rf(rt2x00dev, RF3021) &&
- !rt2x00_rf(rt2x00dev, RF3022))
- return 0;
- }
-
/*
* Init RF calibration.
*/
@@ -1621,13 +1844,15 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
- if (rt2x00_is_usb(rt2x00dev)) {
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090)) {
rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
rt2800_rfcsr_write(rt2x00dev, 7, 0x70);
rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
- rt2800_rfcsr_write(rt2x00dev, 10, 0x71);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x41);
rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
rt2800_rfcsr_write(rt2x00dev, 12, 0x7b);
rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
@@ -1640,9 +1865,41 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
- rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
- } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
+ } else if (rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2800_rfcsr_write(rt2x00dev, 0, 0xa0);
+ rt2800_rfcsr_write(rt2x00dev, 1, 0xe1);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0xf1);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x62);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x8b);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x42);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x34);
+ rt2800_rfcsr_write(rt2x00dev, 8, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0xc0);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x61);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x3b);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0xe0);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0xe0);
+ rt2800_rfcsr_write(rt2x00dev, 17, 0x94);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x5c);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x4a);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0xb2);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0xf6);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x14);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x3d);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x85);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x41);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x8f);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x0f);
+ } else if (rt2800_is_305x_soc(rt2x00dev)) {
rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
rt2800_rfcsr_write(rt2x00dev, 2, 0xf7);
@@ -1673,15 +1930,57 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 27, 0x23);
rt2800_rfcsr_write(rt2x00dev, 28, 0x13);
rt2800_rfcsr_write(rt2x00dev, 29, 0x83);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
+ return 0;
+ }
+
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
+ rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
+ rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
+ rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+ } else if (rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090)) {
+ rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
+ rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
+
+ rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) {
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_DAC_TEST))
+ rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
+ else
+ rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
+ }
+ rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+ } else if (rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+ rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
+ rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
}
/*
* Set RX Filter calibration for 20MHz and 40MHz
*/
- rt2x00dev->calibration[0] =
- rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
- rt2x00dev->calibration[1] =
- rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
+ if (rt2x00_rt(rt2x00dev, RT3070)) {
+ rt2x00dev->calibration[0] =
+ rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
+ rt2x00dev->calibration[1] =
+ rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
+ } else if (rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2x00dev->calibration[0] =
+ rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x13);
+ rt2x00dev->calibration[1] =
+ rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
+ }
/*
* Set back to initial state
@@ -1699,6 +1998,81 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
rt2800_bbp_write(rt2x00dev, 4, bbp);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E))
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
+
+ rt2800_register_read(rt2x00dev, OPT_14_CSR, &reg);
+ rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
+ rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
+
+ rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
+ rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
+ }
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
+ rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
+ rt2x00_get_field16(eeprom,
+ EEPROM_TXMIXER_GAIN_BG_VAL));
+ rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+
+ if (rt2x00_rt(rt2x00dev, RT3090)) {
+ rt2800_bbp_read(rt2x00dev, 138, &bbp);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) == 1)
+ rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
+ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) == 1)
+ rt2x00_set_field8(&bbp, BBP138_TX_DAC1, 1);
+
+ rt2800_bbp_write(rt2x00dev, 138, bbp);
+ }
+
+ if (rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 15, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR15_TX_LO2_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 15, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 20, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR20_RX_LO1_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 20, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR21_RX_LO2_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
+ }
+
+ if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071)) {
+ rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E))
+ rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR27_R2, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR27_R3, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR27_R4, 0);
+ rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_init_rfcsr);
@@ -1774,10 +2148,7 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
} else if (rt2x00_rt(rt2x00dev, RT2860) ||
rt2x00_rt(rt2x00dev, RT2870) ||
- rt2x00_rt(rt2x00dev, RT2872) ||
- rt2x00_rt(rt2x00dev, RT2880) ||
- (rt2x00_rt(rt2x00dev, RT2883) &&
- (rt2x00_rev(rt2x00dev) < RT2883_VERSION))) {
+ rt2x00_rt(rt2x00dev, RT2872)) {
/*
* There is a max of 2 RX streams for RT28x0 series
*/
@@ -1882,10 +2253,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
if (!rt2x00_rt(rt2x00dev, RT2860) &&
!rt2x00_rt(rt2x00dev, RT2870) &&
!rt2x00_rt(rt2x00dev, RT2872) &&
- !rt2x00_rt(rt2x00dev, RT2880) &&
!rt2x00_rt(rt2x00dev, RT2883) &&
- !rt2x00_rt(rt2x00dev, RT2890) &&
- !rt2x00_rt(rt2x00dev, RT3052) &&
!rt2x00_rt(rt2x00dev, RT3070) &&
!rt2x00_rt(rt2x00dev, RT3071) &&
!rt2x00_rt(rt2x00dev, RT3090) &&
@@ -1954,7 +2322,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
/*
- * RF value list for rt28x0
+ * RF value list for rt28xx
* Supports: 2.4 GHz (all) & 5.2 GHz (RF2850 & RF2750)
*/
static const struct rf_channel rf_vals[] = {
@@ -2029,10 +2397,10 @@ static const struct rf_channel rf_vals[] = {
};
/*
- * RF value list for rt3070
- * Supports: 2.4 GHz
+ * RF value list for rt3xxx
+ * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052)
*/
-static const struct rf_channel rf_vals_302x[] = {
+static const struct rf_channel rf_vals_3x[] = {
{1, 241, 2, 2 },
{2, 241, 2, 7 },
{3, 242, 2, 2 },
@@ -2047,6 +2415,51 @@ static const struct rf_channel rf_vals_302x[] = {
{12, 246, 2, 7 },
{13, 247, 2, 2 },
{14, 248, 2, 4 },
+
+ /* 802.11 UNI / HyperLan 2 */
+ {36, 0x56, 0, 4},
+ {38, 0x56, 0, 6},
+ {40, 0x56, 0, 8},
+ {44, 0x57, 0, 0},
+ {46, 0x57, 0, 2},
+ {48, 0x57, 0, 4},
+ {52, 0x57, 0, 8},
+ {54, 0x57, 0, 10},
+ {56, 0x58, 0, 0},
+ {60, 0x58, 0, 4},
+ {62, 0x58, 0, 6},
+ {64, 0x58, 0, 8},
+
+ /* 802.11 HyperLan 2 */
+ {100, 0x5b, 0, 8},
+ {102, 0x5b, 0, 10},
+ {104, 0x5c, 0, 0},
+ {108, 0x5c, 0, 4},
+ {110, 0x5c, 0, 6},
+ {112, 0x5c, 0, 8},
+ {116, 0x5d, 0, 0},
+ {118, 0x5d, 0, 2},
+ {120, 0x5d, 0, 4},
+ {124, 0x5d, 0, 8},
+ {126, 0x5d, 0, 10},
+ {128, 0x5e, 0, 0},
+ {132, 0x5e, 0, 4},
+ {134, 0x5e, 0, 6},
+ {136, 0x5e, 0, 8},
+ {140, 0x5f, 0, 0},
+
+ /* 802.11 UNII */
+ {149, 0x5f, 0, 9},
+ {151, 0x5f, 0, 11},
+ {153, 0x60, 0, 1},
+ {157, 0x60, 0, 5},
+ {159, 0x60, 0, 7},
+ {161, 0x60, 0, 9},
+ {165, 0x61, 0, 1},
+ {167, 0x61, 0, 3},
+ {169, 0x61, 0, 5},
+ {171, 0x61, 0, 7},
+ {173, 0x61, 0, 9},
};
int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
@@ -2087,11 +2500,11 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
if (rt2x00_rf(rt2x00dev, RF2820) ||
- rt2x00_rf(rt2x00dev, RF2720) ||
- rt2x00_rf(rt2x00dev, RF3052)) {
+ rt2x00_rf(rt2x00dev, RF2720)) {
spec->num_channels = 14;
spec->channels = rf_vals;
- } else if (rt2x00_rf(rt2x00dev, RF2850) || rt2x00_rf(rt2x00dev, RF2750)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2850) ||
+ rt2x00_rf(rt2x00dev, RF2750)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals);
spec->channels = rf_vals;
@@ -2099,8 +2512,12 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00_rf(rt2x00dev, RF2020) ||
rt2x00_rf(rt2x00dev, RF3021) ||
rt2x00_rf(rt2x00dev, RF3022)) {
- spec->num_channels = ARRAY_SIZE(rf_vals_302x);
- spec->channels = rf_vals_302x;
+ spec->num_channels = 14;
+ spec->channels = rf_vals_3x;
+ } else if (rt2x00_rf(rt2x00dev, RF3052)) {
+ spec->supported_bands |= SUPPORT_BAND_5GHZ;
+ spec->num_channels = ARRAY_SIZE(rf_vals_3x);
+ spec->channels = rf_vals_3x;
}
/*
@@ -2111,8 +2528,11 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
else
spec->ht.ht_supported = false;
+ /*
+ * Don't set IEEE80211_HT_CAP_SUP_WIDTH_20_40 for now as it causes
+ * reception problems with HT40 capable 11n APs
+ */
spec->ht.cap =
- IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index ebabeae..94de999 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -111,6 +111,9 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
const u8 command, const u8 token,
const u8 arg0, const u8 arg1);
+void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc);
+void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *rxdesc);
+
extern const struct rt2x00debug rt2800_rt2x00debug;
int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 91cce2d..b2f2327 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -60,6 +60,12 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
unsigned int i;
u32 reg;
+ /*
+ * SOC devices don't support MCU requests.
+ */
+ if (rt2x00_is_soc(rt2x00dev))
+ return;
+
for (i = 0; i < 200; i++) {
rt2800_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg);
@@ -341,19 +347,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
struct queue_entry_priv_pci *entry_priv;
u32 reg;
- rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
- rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
-
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
-
/*
* Initialize registers.
*/
@@ -620,64 +613,31 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
/*
* TX descriptor initialization
*/
+static int rt2800pci_write_tx_data(struct queue_entry* entry,
+ struct txentry_desc *txdesc)
+{
+ int ret;
+
+ ret = rt2x00pci_write_tx_data(entry, txdesc);
+ if (ret)
+ return ret;
+
+ rt2800_write_txwi(entry->skb, txdesc);
+
+ return 0;
+}
+
+
static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- __le32 *txd = skbdesc->desc;
- __le32 *txwi = (__le32 *)(skb->data - rt2x00dev->ops->extra_tx_headroom);
+ struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
+ __le32 *txd = entry_priv->desc;
u32 word;
/*
- * Initialize TX Info descriptor
- */
- rt2x00_desc_read(txwi, 0, &word);
- rt2x00_set_field32(&word, TXWI_W0_FRAG,
- test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
- rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
- rt2x00_set_field32(&word, TXWI_W0_TS,
- test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_AMPDU,
- test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
- rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->ifs);
- rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
- rt2x00_set_field32(&word, TXWI_W0_BW,
- test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
- test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
- rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
- rt2x00_desc_write(txwi, 0, word);
-
- rt2x00_desc_read(txwi, 1, &word);
- rt2x00_set_field32(&word, TXWI_W1_ACK,
- test_bit(ENTRY_TXD_ACK, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W1_NSEQ,
- test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
- rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
- test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
- txdesc->key_idx : 0xff);
- rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
- skb->len - txdesc->l2pad);
- rt2x00_set_field32(&word, TXWI_W1_PACKETID,
- skbdesc->entry->queue->qid + 1);
- rt2x00_desc_write(txwi, 1, word);
-
- /*
- * Always write 0 to IV/EIV fields, hardware will insert the IV
- * from the IVEIV register when TXD_W3_WIV is set to 0.
- * When TXD_W3_WIV is set to 1 it will use the IV data
- * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
- * crypto entry in the registers should be used to encrypt the frame.
- */
- _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
- _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
-
- /*
* The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
* must contain a TXWI structure + 802.11 header + padding + 802.11
* data. We choose to have SD_PTR0/SD_LEN0 only contain the TXWI and
@@ -698,15 +658,14 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
!test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W1_BURST,
test_bit(ENTRY_TXD_BURST, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W1_SD_LEN0,
- rt2x00dev->ops->extra_tx_headroom);
+ rt2x00_set_field32(&word, TXD_W1_SD_LEN0, TXWI_DESC_SIZE);
rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
- skbdesc->skb_dma + rt2x00dev->ops->extra_tx_headroom);
+ skbdesc->skb_dma + TXWI_DESC_SIZE);
rt2x00_desc_write(txd, 2, word);
rt2x00_desc_read(txd, 3, &word);
@@ -714,15 +673,21 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
!test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
rt2x00_desc_write(txd, 3, word);
+
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txd;
+ skbdesc->desc_len = TXD_DESC_SIZE;
}
/*
* TX data initialization
*/
-static void rt2800pci_write_beacon(struct queue_entry *entry)
+static void rt2800pci_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
unsigned int beacon_base;
u32 reg;
@@ -735,15 +700,25 @@ static void rt2800pci_write_beacon(struct queue_entry *entry)
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
/*
- * Write entire beacon with descriptor to register.
+ * Add the TXWI for the beacon to the skb.
+ */
+ rt2800_write_txwi(entry->skb, txdesc);
+ skb_push(entry->skb, TXWI_DESC_SIZE);
+
+ /*
+ * Write entire beacon with TXWI to register.
*/
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
- rt2800_register_multiwrite(rt2x00dev,
- beacon_base,
- skbdesc->desc, skbdesc->desc_len);
- rt2800_register_multiwrite(rt2x00dev,
- beacon_base + skbdesc->desc_len,
- entry->skb->data, entry->skb->len);
+ rt2800_register_multiwrite(rt2x00dev, beacon_base,
+ entry->skb->data, entry->skb->len);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
/*
* Clean up beacon skb.
@@ -757,18 +732,6 @@ static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
{
struct data_queue *queue;
unsigned int idx, qidx = 0;
- u32 reg;
-
- if (queue_idx == QID_BEACON) {
- rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- }
- return;
- }
if (queue_idx > QID_HCCA && queue_idx != QID_MGMT)
return;
@@ -811,34 +774,21 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
__le32 *rxd = entry_priv->desc;
- __le32 *rxwi = (__le32 *)entry->skb->data;
- u32 rxd3;
- u32 rxwi0;
- u32 rxwi1;
- u32 rxwi2;
- u32 rxwi3;
-
- rt2x00_desc_read(rxd, 3, &rxd3);
- rt2x00_desc_read(rxwi, 0, &rxwi0);
- rt2x00_desc_read(rxwi, 1, &rxwi1);
- rt2x00_desc_read(rxwi, 2, &rxwi2);
- rt2x00_desc_read(rxwi, 3, &rxwi3);
-
- if (rt2x00_get_field32(rxd3, RXD_W3_CRC_ERROR))
+ u32 word;
+
+ rt2x00_desc_read(rxd, 3, &word);
+
+ if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
- /*
- * Unfortunately we don't know the cipher type used during
- * decryption. This prevents us from correct providing
- * correct statistics through debugfs.
- */
- rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
- rxdesc->cipher_status =
- rt2x00_get_field32(rxd3, RXD_W3_CIPHER_ERROR);
- }
+ /*
+ * Unfortunately we don't know the cipher type used during
+ * decryption. This prevents us from providing
+ * correct statistics through debugfs.
+ */
+ rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
- if (rt2x00_get_field32(rxd3, RXD_W3_DECRYPTED)) {
+ if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
/*
* Hardware has stripped IV/EIV data from 802.11 frame during
* decryption. Unfortunately the descriptor doesn't contain
@@ -853,51 +803,22 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
rxdesc->flags |= RX_FLAG_MMIC_ERROR;
}
- if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS))
+ if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
rxdesc->dev_flags |= RXDONE_MY_BSS;
- if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD))
+ if (rt2x00_get_field32(word, RXD_W3_L2PAD))
rxdesc->dev_flags |= RXDONE_L2PAD;
- if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
- rxdesc->flags |= RX_FLAG_SHORT_GI;
-
- if (rt2x00_get_field32(rxwi1, RXWI_W1_BW))
- rxdesc->flags |= RX_FLAG_40MHZ;
-
/*
- * Detect RX rate, always use MCS as signal type.
+ * Process the RXWI structure that is at the start of the buffer.
*/
- rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
- rxdesc->rate_mode = rt2x00_get_field32(rxwi1, RXWI_W1_PHYMODE);
- rxdesc->signal = rt2x00_get_field32(rxwi1, RXWI_W1_MCS);
-
- /*
- * Mask of 0x8 bit to remove the short preamble flag.
- */
- if (rxdesc->rate_mode == RATE_MODE_CCK)
- rxdesc->signal &= ~0x8;
-
- rxdesc->rssi =
- (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
- rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;
-
- rxdesc->noise =
- (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
- rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;
-
- rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
+ rt2800_process_rxwi(entry->skb, rxdesc);
/*
* Set RX IDX in register to inform hardware that we have handled
* this entry and it is available for reuse again.
*/
rt2800_register_write(rt2x00dev, RX_CRX_IDX, entry->entry_idx);
-
- /*
- * Remove TXWI descriptor from start of buffer.
- */
- skb_pull(entry->skb, RXWI_DESC_SIZE);
}
/*
@@ -907,14 +828,12 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
struct queue_entry *entry;
- struct queue_entry *entry_done;
- struct queue_entry_priv_pci *entry_priv;
+ __le32 *txwi;
struct txdone_entry_desc txdesc;
u32 word;
u32 reg;
u32 old_reg;
- unsigned int type;
- unsigned int index;
+ int wcid, ack, pid, tx_wcid, tx_ack, tx_pid;
u16 mcs, real_mcs;
/*
@@ -936,76 +855,89 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
break;
old_reg = reg;
+ wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
+ ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
+ pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
+
/*
* Skip this entry when it contains an invalid
* queue identification number.
*/
- type = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE) - 1;
- if (type >= QID_RX)
+ if (pid <= 0 || pid > QID_RX)
continue;
- queue = rt2x00queue_get_queue(rt2x00dev, type);
+ queue = rt2x00queue_get_queue(rt2x00dev, pid - 1);
if (unlikely(!queue))
continue;
/*
- * Skip this entry when it contains an invalid
- * index number.
+ * Inside each queue, we process each entry in a chronological
+ * order. We first check that the queue is not empty.
*/
- index = rt2x00_get_field32(reg, TX_STA_FIFO_WCID) - 1;
- if (unlikely(index >= queue->limit))
+ if (rt2x00queue_empty(queue))
continue;
+ entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- entry = &queue->entries[index];
- entry_priv = entry->priv_data;
- rt2x00_desc_read((__le32 *)entry->skb->data, 0, &word);
+ /* Check if we got a match by looking at WCID/ACK/PID
+ * fields */
+ txwi = (__le32 *)(entry->skb->data -
+ rt2x00dev->ops->extra_tx_headroom);
- entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- while (entry != entry_done) {
- /*
- * Catch up.
- * Just report any entries we missed as failed.
- */
- WARNING(rt2x00dev,
- "TX status report missed for entry %d\n",
- entry_done->entry_idx);
+ rt2x00_desc_read(txwi, 1, &word);
+ tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
+ tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
+ tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
- txdesc.flags = 0;
- __set_bit(TXDONE_UNKNOWN, &txdesc.flags);
- txdesc.retry = 0;
-
- rt2x00lib_txdone(entry_done, &txdesc);
- entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- }
+ if ((wcid != tx_wcid) || (ack != tx_ack) || (pid != tx_pid))
+ WARNING(rt2x00dev, "invalid TX_STA_FIFO content\n");
/*
* Obtain the status about this packet.
*/
txdesc.flags = 0;
- if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS))
- __set_bit(TXDONE_SUCCESS, &txdesc.flags);
- else
- __set_bit(TXDONE_FAILURE, &txdesc.flags);
+ rt2x00_desc_read(txwi, 0, &word);
+ mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
+ real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
/*
* Ralink has a retry mechanism using a global fallback
- * table. We setup this fallback table to try immediate
- * lower rate for all rates. In the TX_STA_FIFO,
- * the MCS field contains the MCS used for the successfull
- * transmission. If the first transmission succeed,
- * we have mcs == tx_mcs. On the second transmission,
- * we have mcs = tx_mcs - 1. So the number of
- * retry is (tx_mcs - mcs).
+ * table. We set up this fallback table to try the immediately
+ * lower rate for all rates. In the TX_STA_FIFO, the MCS field
+ * always contains the MCS used for the last transmission, be
+ * it successful or not.
*/
- mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
- real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
+ if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS)) {
+ /*
+ * Transmission succeeded. The number of retries is
+ * mcs - real_mcs
+ */
+ __set_bit(TXDONE_SUCCESS, &txdesc.flags);
+ txdesc.retry = ((mcs > real_mcs) ? mcs - real_mcs : 0);
+ } else {
+ /*
+ * Transmission failed. The number of retries is
+ * always 7 in this case (for a total number of 8
+ * frames sent).
+ */
+ __set_bit(TXDONE_FAILURE, &txdesc.flags);
+ txdesc.retry = 7;
+ }
+
__set_bit(TXDONE_FALLBACK, &txdesc.flags);
- txdesc.retry = mcs - min(mcs, real_mcs);
+
rt2x00lib_txdone(entry, &txdesc);
}
}
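The rewritten TX status handling above derives the retry count from Ralink's rate-fallback table: each retry drops one MCS step, so on success the count is the difference between the MCS programmed in the TXWI and the MCS reported in TX_STA_FIFO, while a failed frame has always used all 7 retries. A minimal standalone sketch of that accounting (struct and helper names are illustrative, not part of the driver):

/* Sketch of the fallback-based retry accounting; names are hypothetical. */
struct txstatus_sketch {
	int success;	/* TX_STA_FIFO_TX_SUCCESS */
	int mcs;	/* MCS written to TXWI word 0 */
	int real_mcs;	/* MCS reported in TX_STA_FIFO */
};

static int sketch_retry_count(const struct txstatus_sketch *st)
{
	if (st->success)
		/* one fallback step per retry */
		return (st->mcs > st->real_mcs) ? st->mcs - st->real_mcs : 0;
	/* failed frames are always retried 7 times (8 transmissions total) */
	return 7;
}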
+static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
+{
+ struct ieee80211_conf conf = { .flags = 0 };
+ struct rt2x00lib_conf libconf = { .conf = &conf };
+
+ rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
+}
+
static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
@@ -1030,6 +962,9 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
rt2800pci_txdone(rt2x00dev);
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
+ rt2800pci_wakeup(rt2x00dev);
+
return IRQ_HANDLED;
}
@@ -1128,7 +1063,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.reset_tuner = rt2800_reset_tuner,
.link_tuner = rt2800_link_tuner,
.write_tx_desc = rt2800pci_write_tx_desc,
- .write_tx_data = rt2x00pci_write_tx_data,
+ .write_tx_data = rt2800pci_write_tx_data,
.write_beacon = rt2800pci_write_beacon,
.kick_tx_queue = rt2800pci_kick_tx_queue,
.kill_tx_queue = rt2800pci_kill_tx_queue,
@@ -1184,6 +1119,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
/*
* RT2800pci module information.
*/
+#ifdef CONFIG_RT2800PCI_PCI
static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1208,9 +1144,11 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) },
#endif
{ 0, }
};
+#endif /* CONFIG_RT2800PCI_PCI */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index d27d7d5..0f8b84b 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -400,60 +400,16 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- __le32 *txi = skbdesc->desc;
- __le32 *txwi = &txi[TXINFO_DESC_SIZE / sizeof(__le32)];
+ __le32 *txi = (__le32 *)(skb->data - TXWI_DESC_SIZE - TXINFO_DESC_SIZE);
u32 word;
/*
- * Initialize TX Info descriptor
+ * Initialize TXWI descriptor
*/
- rt2x00_desc_read(txwi, 0, &word);
- rt2x00_set_field32(&word, TXWI_W0_FRAG,
- test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
- rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
- rt2x00_set_field32(&word, TXWI_W0_TS,
- test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_AMPDU,
- test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
- rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->ifs);
- rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
- rt2x00_set_field32(&word, TXWI_W0_BW,
- test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
- test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
- rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
- rt2x00_desc_write(txwi, 0, word);
-
- rt2x00_desc_read(txwi, 1, &word);
- rt2x00_set_field32(&word, TXWI_W1_ACK,
- test_bit(ENTRY_TXD_ACK, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W1_NSEQ,
- test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
- rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
- test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
- txdesc->key_idx : 0xff);
- rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
- skb->len - txdesc->l2pad);
- rt2x00_set_field32(&word, TXWI_W1_PACKETID,
- skbdesc->entry->queue->qid + 1);
- rt2x00_desc_write(txwi, 1, word);
+ rt2800_write_txwi(skb, txdesc);
/*
- * Always write 0 to IV/EIV fields, hardware will insert the IV
- * from the IVEIV register when TXINFO_W0_WIV is set to 0.
- * When TXINFO_W0_WIV is set to 1 it will use the IV data
- * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
- * crypto entry in the registers should be used to encrypt the frame.
- */
- _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
- _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
-
- /*
- * Initialize TX descriptor
+ * Initialize TXINFO descriptor
*/
rt2x00_desc_read(txi, 0, &word);
rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
@@ -466,26 +422,25 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_BURST,
test_bit(ENTRY_TXD_BURST, &txdesc->flags));
rt2x00_desc_write(txi, 0, word);
+
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txi;
+ skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
}
/*
* TX data initialization
*/
-static void rt2800usb_write_beacon(struct queue_entry *entry)
+static void rt2800usb_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
unsigned int beacon_base;
u32 reg;
/*
- * Add the descriptor in front of the skb.
- */
- skb_push(entry->skb, entry->queue->desc_size);
- memcpy(entry->skb->data, skbdesc->desc, skbdesc->desc_len);
- skbdesc->desc = entry->skb->data;
-
- /*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
@@ -494,6 +449,12 @@ static void rt2800usb_write_beacon(struct queue_entry *entry)
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
/*
+ * Add the TXWI for the beacon to the skb.
+ */
+ rt2800_write_txwi(entry->skb, txdesc);
+ skb_push(entry->skb, TXWI_DESC_SIZE);
+
+ /*
* Write entire beacon with descriptor to register.
*/
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
@@ -503,6 +464,14 @@ static void rt2800usb_write_beacon(struct queue_entry *entry)
REGISTER_TIMEOUT32(entry->skb->len));
/*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ /*
* Clean up the beacon skb.
*/
dev_kfree_skb(entry->skb);
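As in the PCI variant above, beaconing is now re-enabled inside write_beacon() itself rather than from kick_tx_queue(). A minimal sketch of the BCN_TIME_CFG handling around a beacon reload, using assumed bit masks rather than the driver's register field definitions:

/* Sketch only: bit positions are assumed, not the real BCN_TIME_CFG layout. */
#define SK_BCN_TSF_TICKING	(1u << 0)
#define SK_BCN_TBTT_ENABLE	(1u << 1)
#define SK_BCN_BEACON_GEN	(1u << 2)

/* Clear beacon generation while the beacon buffer is being rewritten. */
static unsigned int sketch_disable_beaconing(unsigned int bcn_time_cfg)
{
	return bcn_time_cfg & ~SK_BCN_BEACON_GEN;
}

/* Re-enable TSF ticking, TBTT and beacon generation after the reload. */
static unsigned int sketch_enable_beaconing(unsigned int bcn_time_cfg)
{
	return bcn_time_cfg | SK_BCN_TSF_TICKING | SK_BCN_TBTT_ENABLE |
	       SK_BCN_BEACON_GEN;
}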
@@ -524,84 +493,53 @@ static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
return length;
}
-static void rt2800usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
- const enum data_queue_qid queue)
-{
- u32 reg;
-
- if (queue != QID_BEACON) {
- rt2x00usb_kick_tx_queue(rt2x00dev, queue);
- return;
- }
-
- rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- }
-}
-
/*
* RX control handlers
*/
static void rt2800usb_fill_rxdone(struct queue_entry *entry,
struct rxdone_entry_desc *rxdesc)
{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
__le32 *rxi = (__le32 *)entry->skb->data;
- __le32 *rxwi;
__le32 *rxd;
- u32 rxi0;
- u32 rxwi0;
- u32 rxwi1;
- u32 rxwi2;
- u32 rxwi3;
- u32 rxd0;
+ u32 word;
int rx_pkt_len;
/*
+ * Copy descriptor to the skbdesc->desc buffer, making it safe from
+ * moving of frame data in rt2x00usb.
+ */
+ memcpy(skbdesc->desc, rxi, skbdesc->desc_len);
+
+ /*
* RX frame format is :
* | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |
* |<------------ rx_pkt_len -------------->|
*/
- rt2x00_desc_read(rxi, 0, &rxi0);
- rx_pkt_len = rt2x00_get_field32(rxi0, RXINFO_W0_USB_DMA_RX_PKT_LEN);
-
- rxwi = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE);
+ rt2x00_desc_read(rxi, 0, &word);
+ rx_pkt_len = rt2x00_get_field32(word, RXINFO_W0_USB_DMA_RX_PKT_LEN);
/*
- * FIXME : we need to check for rx_pkt_len validity
+ * Remove the RXINFO structure from the skb.
*/
- rxd = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE + rx_pkt_len);
+ skb_pull(entry->skb, RXINFO_DESC_SIZE);
/*
- * Copy descriptor to the skbdesc->desc buffer, making it safe from
- * moving of frame data in rt2x00usb.
+ * FIXME: we need to check for rx_pkt_len validity
*/
- memcpy(skbdesc->desc, rxi, skbdesc->desc_len);
+ rxd = (__le32 *)(entry->skb->data + rx_pkt_len);
/*
* It is now safe to read the descriptor on all architectures.
*/
- rt2x00_desc_read(rxwi, 0, &rxwi0);
- rt2x00_desc_read(rxwi, 1, &rxwi1);
- rt2x00_desc_read(rxwi, 2, &rxwi2);
- rt2x00_desc_read(rxwi, 3, &rxwi3);
- rt2x00_desc_read(rxd, 0, &rxd0);
+ rt2x00_desc_read(rxd, 0, &word);
- if (rt2x00_get_field32(rxd0, RXD_W0_CRC_ERROR))
+ if (rt2x00_get_field32(word, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
- rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
- rxdesc->cipher_status =
- rt2x00_get_field32(rxd0, RXD_W0_CIPHER_ERROR);
- }
+ rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W0_CIPHER_ERROR);
- if (rt2x00_get_field32(rxd0, RXD_W0_DECRYPTED)) {
+ if (rt2x00_get_field32(word, RXD_W0_DECRYPTED)) {
/*
* Hardware has stripped IV/EIV data from 802.11 frame during
* decryption. Unfortunately the descriptor doesn't contain
@@ -616,45 +554,21 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
rxdesc->flags |= RX_FLAG_MMIC_ERROR;
}
- if (rt2x00_get_field32(rxd0, RXD_W0_MY_BSS))
+ if (rt2x00_get_field32(word, RXD_W0_MY_BSS))
rxdesc->dev_flags |= RXDONE_MY_BSS;
- if (rt2x00_get_field32(rxd0, RXD_W0_L2PAD))
+ if (rt2x00_get_field32(word, RXD_W0_L2PAD))
rxdesc->dev_flags |= RXDONE_L2PAD;
- if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
- rxdesc->flags |= RX_FLAG_SHORT_GI;
-
- if (rt2x00_get_field32(rxwi1, RXWI_W1_BW))
- rxdesc->flags |= RX_FLAG_40MHZ;
-
/*
- * Detect RX rate, always use MCS as signal type.
+ * Remove RXD descriptor from end of buffer.
*/
- rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
- rxdesc->rate_mode = rt2x00_get_field32(rxwi1, RXWI_W1_PHYMODE);
- rxdesc->signal = rt2x00_get_field32(rxwi1, RXWI_W1_MCS);
+ skb_trim(entry->skb, rx_pkt_len);
/*
- * Mask of 0x8 bit to remove the short preamble flag.
+ * Process the RXWI structure.
*/
- if (rxdesc->rate_mode == RATE_MODE_CCK)
- rxdesc->signal &= ~0x8;
-
- rxdesc->rssi =
- (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
- rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;
-
- rxdesc->noise =
- (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
- rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;
-
- rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
-
- /*
- * Remove RXWI descriptor from start of buffer.
- */
- skb_pull(entry->skb, skbdesc->desc_len);
+ rt2800_process_rxwi(entry->skb, rxdesc);
}
/*
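The USB RX path above works through the buffer layout | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |: it pulls the RXINFO, reads the RXD that sits rx_pkt_len bytes further on, trims the buffer to rx_pkt_len and only then hands the RXWI to rt2800_process_rxwi(). A self-contained sketch of those offset calculations (plain pointers instead of skb operations, names hypothetical):

#include <stddef.h>
#include <stdint.h>

#define SK_RXINFO_SIZE	4	/* 1 * sizeof(__le32) */

/* Sketch: locate the RXWI and RXD within a received USB buffer. */
struct usb_rx_layout {
	const uint8_t *rxwi;	/* start of RXWI, frame data follows it */
	const uint8_t *rxd;	/* RX descriptor appended after the frame */
	size_t frame_len;	/* rx_pkt_len: RXWI + frame, without RXD */
};

static void sketch_parse_rx_buffer(const uint8_t *buf, size_t rx_pkt_len,
				   struct usb_rx_layout *out)
{
	/* skb_pull(RXINFO_DESC_SIZE) in the driver */
	out->rxwi = buf + SK_RXINFO_SIZE;
	/* the RXD follows rx_pkt_len bytes of RXWI + frame data */
	out->rxd = out->rxwi + rx_pkt_len;
	/* skb_trim(rx_pkt_len) drops the RXD and USB padding */
	out->frame_len = rx_pkt_len;
}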
@@ -747,7 +661,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.write_tx_data = rt2x00usb_write_tx_data,
.write_beacon = rt2800usb_write_beacon,
.get_tx_data_len = rt2800usb_get_tx_data_len,
- .kick_tx_queue = rt2800usb_kick_tx_queue,
+ .kick_tx_queue = rt2x00usb_kick_tx_queue,
.kill_tx_queue = rt2x00usb_kill_tx_queue,
.fill_rxdone = rt2800usb_fill_rxdone,
.config_shared_key = rt2800_config_shared_key,
@@ -806,6 +720,10 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Allwin */
+ { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Amit */
{ USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Askey */
@@ -841,13 +759,18 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
/* EnGenius */
- { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gigabyte */
{ USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Hawking */
{ USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x0013), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x0018), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Linksys */
{ USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -876,6 +799,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
/* SMC */
{ USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -905,8 +830,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
/* AirTies */
{ USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Allwin */
+ { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* ASUS */
+ { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
/* AzureWave */
{ USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3307), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3321), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Conceptronic */
{ USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Corega */
@@ -916,20 +850,46 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Draytek */
+ { USB_DEVICE(0x07fa, 0x7712), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Encore */
{ USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
/* EnGenius */
{ USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gigabyte */
{ USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
/* I-O DATA */
{ USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Logitec */
+ { USB_DEVICE(0x0789, 0x0166), USB_DEVICE_DATA(&rt2800usb_ops) },
/* MSI */
{ USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x822b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x822c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x871b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x871c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Para */
+ { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Pegatron */
{ USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -944,14 +904,22 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
/* SMC */
{ USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xa703), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Zinwell */
{ USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
#endif
#ifdef CONFIG_RT2800USB_RT35XX
+ /* Allwin */
+ { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Askey */
{ USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Cisco */
@@ -966,37 +934,27 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0050), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Zinwell */
{ USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
#endif
#ifdef CONFIG_RT2800USB_UNKNOWN
/*
* Unclear what kind of devices these are (they aren't supported by the
- * vendor driver).
+ * vendor Linux driver).
*/
- /* Allwin */
- { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Amigo */
{ USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Askey */
- { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
/* ASUS */
{ USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
/* AzureWave */
{ USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3322), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Belkin */
{ USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Buffalo */
@@ -1015,24 +973,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Encore */
{ USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* EnGenius */
- { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gemtek */
{ USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gigabyte */
{ USB_DEVICE(0x1044, 0x800c), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Hawking */
- { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* I-O DATA */
- { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
/* LevelOne */
{ USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -1042,43 +989,23 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Motorola */
{ USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* MSI */
- { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Ovislink */
+ { USB_DEVICE(0x1b75, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Para */
- { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Pegatron */
{ USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Planex */
{ USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Qcom */
{ USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Sitecom */
- { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x004a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x004d), USB_DEVICE_DATA(&rt2800usb_ops) },
/* SMC */
{ USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0xd522), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xf511), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Sweex */
{ USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index d1d8ae9..2bca6a7 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -79,8 +79,6 @@
*/
#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
-#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
-#define RXD_DESC_SIZE ( 1 * sizeof(__le32) )
/*
* TX Info structure
@@ -113,44 +111,6 @@
#define RXINFO_W0_USB_DMA_RX_PKT_LEN FIELD32(0x0000ffff)
/*
- * RX WI structure
- */
-
-/*
- * Word0
- */
-#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
-#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
-#define RXWI_W0_BSSID FIELD32(0x00001c00)
-#define RXWI_W0_UDF FIELD32(0x0000e000)
-#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
-#define RXWI_W0_TID FIELD32(0xf0000000)
-
-/*
- * Word1
- */
-#define RXWI_W1_FRAG FIELD32(0x0000000f)
-#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
-#define RXWI_W1_MCS FIELD32(0x007f0000)
-#define RXWI_W1_BW FIELD32(0x00800000)
-#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
-#define RXWI_W1_STBC FIELD32(0x06000000)
-#define RXWI_W1_PHYMODE FIELD32(0xc0000000)
-
-/*
- * Word2
- */
-#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
-#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
-#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)
-
-/*
- * Word3
- */
-#define RXWI_W3_SNR0 FIELD32(0x000000ff)
-#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
-
-/*
* RX descriptor format for RX Ring.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index d9daa9c..6c1ff4c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -177,16 +177,15 @@ struct rt2x00_chip {
#define RT2573 0x2573
#define RT2860 0x2860 /* 2.4GHz PCI/CB */
#define RT2870 0x2870
-#define RT2872 0x2872
-#define RT2880 0x2880 /* WSOC */
+#define RT2872 0x2872 /* WSOC */
#define RT2883 0x2883 /* WSOC */
-#define RT2890 0x2890 /* 2.4GHz PCIe */
-#define RT3052 0x3052 /* WSOC */
#define RT3070 0x3070
#define RT3071 0x3071
#define RT3090 0x3090 /* 2.4GHz PCIe */
#define RT3390 0x3390
#define RT3572 0x3572
+#define RT3593 0x3593 /* PCIe */
+#define RT3883 0x3883 /* WSOC */
u16 rf;
u16 rev;
@@ -550,8 +549,10 @@ struct rt2x00lib_ops {
void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
struct txentry_desc *txdesc);
- int (*write_tx_data) (struct queue_entry *entry);
- void (*write_beacon) (struct queue_entry *entry);
+ int (*write_tx_data) (struct queue_entry *entry,
+ struct txentry_desc *txdesc);
+ void (*write_beacon) (struct queue_entry *entry,
+ struct txentry_desc *txdesc);
int (*get_tx_data_len) (struct queue_entry *entry);
void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue);
@@ -930,12 +931,12 @@ static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev,
rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev);
}
-static inline char rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
+static inline bool rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
{
return (rt2x00dev->chip.rt == rt);
}
-static inline char rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
+static inline bool rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
{
return (rt2x00dev->chip.rf == rf);
}
@@ -945,6 +946,24 @@ static inline u16 rt2x00_rev(struct rt2x00_dev *rt2x00dev)
return rt2x00dev->chip.rev;
}
+static inline bool rt2x00_rt_rev(struct rt2x00_dev *rt2x00dev,
+ const u16 rt, const u16 rev)
+{
+ return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) == rev);
+}
+
+static inline bool rt2x00_rt_rev_lt(struct rt2x00_dev *rt2x00dev,
+ const u16 rt, const u16 rev)
+{
+ return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) < rev);
+}
+
+static inline bool rt2x00_rt_rev_gte(struct rt2x00_dev *rt2x00dev,
+ const u16 rt, const u16 rev)
+{
+ return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) >= rev);
+}
+
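The new revision helpers above combine the RT chipset check with a revision comparison so callers can gate chip-specific workarounds in one test. A self-contained sketch of how they behave, modelled on a reduced chip structure (the real driver reads rt/rev from struct rt2x00_chip; the chip id and revision below are purely illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Reduced model of struct rt2x00_chip for illustration only. */
struct chip_sketch {
	uint16_t rt;
	uint16_t rev;
};

static bool sketch_rt_rev_gte(const struct chip_sketch *chip,
			      uint16_t rt, uint16_t rev)
{
	/* true only when the RT type matches and the revision is new enough */
	return chip->rt == rt && chip->rev >= rev;
}

/* Example: gate a code path on RT3070 revision 0x0201 or later. */
static bool sketch_needs_new_path(const struct chip_sketch *chip)
{
	return sketch_rt_rev_gte(chip, 0x3070, 0x0201);
}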
static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
enum rt2x00_chip_intf intf)
{
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index d291c78..583dacd 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -128,6 +128,7 @@ void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, struct txentry_desc *txdesc)
/* Pull buffer to correct size */
skb_pull(skb, txdesc->iv_len);
+ txdesc->length -= txdesc->iv_len;
/* IV/EIV data has officially been stripped */
skbdesc->flags |= SKBDESC_IV_STRIPPED;
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 9569fb4..e9fe93f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -156,10 +156,11 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
enum rt2x00_dump_type type, struct sk_buff *skb)
{
struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf;
- struct skb_frame_desc *desc = get_skb_frame_desc(skb);
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
struct sk_buff *skbcopy;
struct rt2x00dump_hdr *dump_hdr;
struct timeval timestamp;
+ u32 data_len;
do_gettimeofday(&timestamp);
@@ -171,7 +172,11 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
return;
}
- skbcopy = alloc_skb(sizeof(*dump_hdr) + desc->desc_len + skb->len,
+ data_len = skb->len;
+ if (skbdesc->flags & SKBDESC_DESC_IN_SKB)
+ data_len -= skbdesc->desc_len;
+
+ skbcopy = alloc_skb(sizeof(*dump_hdr) + skbdesc->desc_len + data_len,
GFP_ATOMIC);
if (!skbcopy) {
DEBUG(rt2x00dev, "Failed to copy skb for dump.\n");
@@ -181,18 +186,20 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
dump_hdr = (struct rt2x00dump_hdr *)skb_put(skbcopy, sizeof(*dump_hdr));
dump_hdr->version = cpu_to_le32(DUMP_HEADER_VERSION);
dump_hdr->header_length = cpu_to_le32(sizeof(*dump_hdr));
- dump_hdr->desc_length = cpu_to_le32(desc->desc_len);
- dump_hdr->data_length = cpu_to_le32(skb->len);
+ dump_hdr->desc_length = cpu_to_le32(skbdesc->desc_len);
+ dump_hdr->data_length = cpu_to_le32(data_len);
dump_hdr->chip_rt = cpu_to_le16(rt2x00dev->chip.rt);
dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf);
dump_hdr->chip_rev = cpu_to_le16(rt2x00dev->chip.rev);
dump_hdr->type = cpu_to_le16(type);
- dump_hdr->queue_index = desc->entry->queue->qid;
- dump_hdr->entry_index = desc->entry->entry_idx;
+ dump_hdr->queue_index = skbdesc->entry->queue->qid;
+ dump_hdr->entry_index = skbdesc->entry->entry_idx;
dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec);
dump_hdr->timestamp_usec = cpu_to_le32(timestamp.tv_usec);
- memcpy(skb_put(skbcopy, desc->desc_len), desc->desc, desc->desc_len);
+ if (!(skbdesc->flags & SKBDESC_DESC_IN_SKB))
+ memcpy(skb_put(skbcopy, skbdesc->desc_len), skbdesc->desc,
+ skbdesc->desc_len);
memcpy(skb_put(skbcopy, skb->len), skb->data, skb->len);
skb_queue_tail(&intf->frame_dump_skbqueue, skbcopy);
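With SKBDESC_DESC_IN_SKB the descriptor now lives at the front of the frame itself, so the dump code above subtracts its length from the data portion and skips the separate descriptor copy. A small standalone sketch of that size bookkeeping (names hypothetical):

#include <stddef.h>
#include <stdbool.h>

/* Sketch: split a dumped frame into descriptor and data lengths. */
struct dump_sizes {
	size_t desc_len;	/* bytes reported as descriptor */
	size_t data_len;	/* bytes reported as frame data */
	bool copy_desc;		/* copy the desc separately into the dump? */
};

static void sketch_dump_sizes(size_t skb_len, size_t desc_len,
			      bool desc_in_skb, struct dump_sizes *out)
{
	out->desc_len = desc_len;
	/* when the descriptor is inside the skb it is already counted */
	out->data_len = desc_in_skb ? skb_len - desc_len : skb_len;
	/* a separate copy is only needed when it is stored out of band */
	out->copy_desc = !desc_in_skb;
}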
@@ -700,8 +707,6 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
exit:
rt2x00debug_deregister(rt2x00dev);
ERROR(rt2x00dev, "Failed to register debug handler.\n");
-
- return;
}
void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index eda73ba..3ae468c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -435,7 +435,6 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
rx_status->mactime = rxdesc.timestamp;
rx_status->rate_idx = rate_idx;
rx_status->signal = rxdesc.rssi;
- rx_status->noise = rxdesc.noise;
rx_status->flag = rxdesc.flags;
rx_status->antenna = rt2x00dev->link.ant.active.rx;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/rt2x00/rt2x00dump.h
index 727019a..ed303b4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dump.h
+++ b/drivers/net/wireless/rt2x00/rt2x00dump.h
@@ -62,11 +62,14 @@
* the tx event which has either succeeded or failed. A frame
* with this type should also have been reported as a
* %DUMP_FRAME_TX frame.
+ * @DUMP_FRAME_BEACON: This beacon frame is queued for transmission to the
+ * hardware.
*/
enum rt2x00_dump_type {
DUMP_FRAME_RXDONE = 1,
DUMP_FRAME_TX = 2,
DUMP_FRAME_TXDONE = 3,
+ DUMP_FRAME_BEACON = 4,
};
/**
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index 34beb00..b818a43 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -79,7 +79,7 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
ERROR(rt2x00dev,
"Current firmware does not support detected chipset.\n");
goto exit;
- };
+ }
rt2x00dev->fw = fw;
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index 1056c92..5a40760 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -35,6 +35,7 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
if (tx_info->control.sta)
txdesc->mpdu_density =
@@ -66,4 +67,20 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
+
+ /*
+ * Determine IFS values
+ * - Use TXOP_BACKOFF for management frames
+ * - Use TXOP_SIFS for fragment bursts
+ * - Use TXOP_HTTXOP for everything else
+ *
+ * Note: rt2800 devices won't use CTS protection (if used)
+ * for frames not transmitted with TXOP_HTTXOP
+ */
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ txdesc->txop = TXOP_BACKOFF;
+ else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
+ txdesc->txop = TXOP_SIFS;
+ else
+ txdesc->txop = TXOP_HTTXOP;
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index cf3f1c0..a016f7c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -63,11 +63,10 @@ EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
/*
* TX data handlers.
*/
-int rt2x00pci_write_tx_data(struct queue_entry *entry)
+int rt2x00pci_write_tx_data(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct queue_entry_priv_pci *entry_priv = entry->priv_data;
- struct skb_frame_desc *skbdesc;
/*
* This should not happen, we already checked the entry
@@ -82,13 +81,6 @@ int rt2x00pci_write_tx_data(struct queue_entry *entry)
return -EINVAL;
}
- /*
- * Fill in skb descriptor
- */
- skbdesc = get_skb_frame_desc(entry->skb);
- skbdesc->desc = entry_priv->desc;
- skbdesc->desc_len = entry->queue->desc_size;
-
return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 8149ff6..51bcef3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -92,7 +92,8 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
* This function will initialize the DMA and skb descriptor
* to prepare the entry for the actual TX operation.
*/
-int rt2x00pci_write_tx_data(struct queue_entry *entry);
+int rt2x00pci_write_tx_data(struct queue_entry *entry,
+ struct txentry_desc *txdesc);
/**
* struct queue_entry_priv_pci: Per entry PCI specific information
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index a0bd36f..20dbdd6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -334,12 +334,10 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
txdesc->aifs = entry->queue->aifs;
/*
- * Header and alignment information.
+ * Header and frame information.
*/
+ txdesc->length = entry->skb->len;
txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
- if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags) &&
- (entry->skb->len > txdesc->header_length))
- txdesc->l2pad = L2PAD_SIZE(txdesc->header_length);
/*
* Check whether this frame is to be acked.
@@ -423,6 +421,7 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
{
struct data_queue *queue = entry->queue;
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ enum rt2x00_dump_type dump_type;
rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);
@@ -430,21 +429,26 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
* All processing on the frame has been completed, this means
* it is now ready to be dumped to userspace through debugfs.
*/
- rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);
+ dump_type = (txdesc->queue == QID_BEACON) ?
+ DUMP_FRAME_BEACON : DUMP_FRAME_TX;
+ rt2x00debug_dump_frame(rt2x00dev, dump_type, entry->skb);
+}
+
+static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
+{
+ struct data_queue *queue = entry->queue;
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
/*
* Check if we need to kick the queue, there are however a few rules
- * 1) Don't kick beacon queue
- * 2) Don't kick unless this is the last in frame in a burst.
+ * 1) Don't kick unless this is the last frame in a burst.
* When the burst flag is set, this frame is always followed
* by another frame that is in some way related to it.
* This is true for fragments, RTS or CTS-to-self frames.
- * 3) Rule 2 can be broken when the available entries
+ * 2) Rule 1 can be broken when the available entries
* in the queue are less than a certain threshold.
*/
- if (entry->queue->qid == QID_BEACON)
- return;
-
if (rt2x00queue_threshold(queue) ||
!test_bit(ENTRY_TXD_BURST, &txdesc->flags))
rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
@@ -526,7 +530,8 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
* call failed. Since we always return NETDEV_TX_OK to mac80211,
* this frame will simply be dropped.
*/
- if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
+ if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry,
+ &txdesc))) {
clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
entry->skb = NULL;
return -EIO;
@@ -539,6 +544,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
rt2x00queue_index_inc(queue, Q_INDEX);
rt2x00queue_write_tx_descriptor(entry, &txdesc);
+ rt2x00queue_kick_tx_queue(entry, &txdesc);
return 0;
}
@@ -550,7 +556,6 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
struct rt2x00_intf *intf = vif_to_intf(vif);
struct skb_frame_desc *skbdesc;
struct txentry_desc txdesc;
- __le32 desc[16];
if (unlikely(!intf->beacon))
return -ENOBUFS;
@@ -583,19 +588,10 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
/*
- * For the descriptor we use a local array from where the
- * driver can move it to the correct location required for
- * the hardware.
- */
- memset(desc, 0, sizeof(desc));
-
- /*
* Fill in skb descriptor
*/
skbdesc = get_skb_frame_desc(intf->beacon->skb);
memset(skbdesc, 0, sizeof(*skbdesc));
- skbdesc->desc = desc;
- skbdesc->desc_len = intf->beacon->queue->desc_size;
skbdesc->entry = intf->beacon;
/*
@@ -604,12 +600,9 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
/*
- * Send beacon to hardware.
- * Also enable beacon generation, which might have been disabled
- * by the driver during the config_beacon() callback function.
+ * Send beacon to hardware and enable beacon generation.
*/
- rt2x00dev->ops->lib->write_beacon(intf->beacon);
- rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);
+ rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
mutex_unlock(&intf->beacon_skb_mutex);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index c1e482b..f791708 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -94,12 +94,15 @@ enum data_queue_qid {
* mac80211 but was stripped for processing by the driver.
* @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
* don't try to pass it back.
+ * @SKBDESC_DESC_IN_SKB: The descriptor is at the start of the
+ * skb, instead of in the desc field.
*/
enum skb_frame_desc_flags {
SKBDESC_DMA_MAPPED_RX = 1 << 0,
SKBDESC_DMA_MAPPED_TX = 1 << 1,
SKBDESC_IV_STRIPPED = 1 << 2,
SKBDESC_NOT_MAC80211 = 1 << 3,
+ SKBDESC_DESC_IN_SKB = 1 << 4,
};
/**
@@ -183,7 +186,6 @@ enum rxdone_entry_desc_flags {
* @timestamp: RX Timestamp
* @signal: Signal of the received frame.
* @rssi: RSSI of the received frame.
- * @noise: Measured noise during frame reception.
* @size: Data size of the received frame.
* @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
* @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
@@ -197,7 +199,6 @@ struct rxdone_entry_desc {
u64 timestamp;
int signal;
int rssi;
- int noise;
int size;
int flags;
int dev_flags;
@@ -287,8 +288,8 @@ enum txentry_desc_flags {
*
* @flags: Descriptor flags (See &enum queue_entry_flags).
* @queue: Queue identification (See &enum data_queue_qid).
+ * @length: Length of the entire frame.
* @header_length: Length of 802.11 header.
- * @l2pad: Amount of padding to align 802.11 payload to 4-byte boundrary.
* @length_high: PLCP length high word.
* @length_low: PLCP length low word.
* @signal: PLCP signal.
@@ -301,6 +302,7 @@ enum txentry_desc_flags {
* @retry_limit: Max number of retries.
* @aifs: AIFS value.
* @ifs: IFS value.
+ * @txop: IFS value for 11n capable chips.
* @cw_min: cwmin value.
* @cw_max: cwmax value.
* @cipher: Cipher type used for encryption.
@@ -313,8 +315,8 @@ struct txentry_desc {
enum data_queue_qid queue;
+ u16 length;
u16 header_length;
- u16 l2pad;
u16 length_high;
u16 length_low;
@@ -330,6 +332,7 @@ struct txentry_desc {
short retry_limit;
short aifs;
short ifs;
+ short txop;
short cw_min;
short cw_max;
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index 603bfc0..b9fe948 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -101,6 +101,16 @@ enum ifs {
};
/*
+ * IFS backoff values for HT devices
+ */
+enum txop {
+ TXOP_HTTXOP = 0,
+ TXOP_PIFS = 1,
+ TXOP_SIFS = 2,
+ TXOP_BACKOFF = 3,
+};
+
+/*
* Cipher types for hardware encryption
*/
enum cipher {
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index f9a7f8b..bd1546b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -216,12 +216,12 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
rt2x00lib_txdone(entry, &txdesc);
}
-int rt2x00usb_write_tx_data(struct queue_entry *entry)
+int rt2x00usb_write_tx_data(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
struct queue_entry_priv_usb *entry_priv = entry->priv_data;
- struct skb_frame_desc *skbdesc;
u32 length;
/*
@@ -231,13 +231,6 @@ int rt2x00usb_write_tx_data(struct queue_entry *entry)
memset(entry->skb->data, 0, entry->queue->desc_size);
/*
- * Fill in skb descriptor
- */
- skbdesc = get_skb_frame_desc(entry->skb);
- skbdesc->desc = entry->skb->data;
- skbdesc->desc_len = entry->queue->desc_size;
-
- /*
* USB devices cannot blindly pass the skb->len as the
* length of the data to usb_fill_bulk_urb. Pass the skb
* to the driver to determine what the length should be.
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 3da6841..621d0f8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -376,7 +376,8 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev);
* This function will initialize the URB and skb descriptor
* to prepare the entry for the actual TX operation.
*/
-int rt2x00usb_write_tx_data(struct queue_entry *entry);
+int rt2x00usb_write_tx_data(struct queue_entry *entry,
+ struct txentry_desc *txdesc);
/**
* struct queue_entry_priv_usb: Per entry USB specific information
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 432e75f..2e3076f 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1764,7 +1764,8 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- __le32 *txd = skbdesc->desc;
+ struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
+ __le32 *txd = entry_priv->desc;
u32 word;
/*
@@ -1802,17 +1803,23 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
rt2x00_desc_write(txd, 5, word);
- rt2x00_desc_read(txd, 6, &word);
- rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
- skbdesc->skb_dma);
- rt2x00_desc_write(txd, 6, word);
+ if (txdesc->queue != QID_BEACON) {
+ rt2x00_desc_read(txd, 6, &word);
+ rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
+ skbdesc->skb_dma);
+ rt2x00_desc_write(txd, 6, word);
- if (skbdesc->desc_len > TXINFO_SIZE) {
rt2x00_desc_read(txd, 11, &word);
- rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0, skb->len);
+ rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0,
+ txdesc->length);
rt2x00_desc_write(txd, 11, word);
}
+ /*
+ * Writing TXD word 0 must be the last to prevent a race condition with
+ * the device, whereby the device may take hold of the TXD before we
+ * finished updating it.
+ */
rt2x00_desc_read(txd, 0, &word);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
rt2x00_set_field32(&word, TXD_W0_VALID, 1);
@@ -1832,20 +1839,28 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
- rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
+ rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
rt2x00_set_field32(&word, TXD_W0_BURST,
test_bit(ENTRY_TXD_BURST, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
rt2x00_desc_write(txd, 0, word);
+
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txd;
+ skbdesc->desc_len =
+ (txdesc->queue == QID_BEACON) ? TXINFO_SIZE : TXD_DESC_SIZE;
}
/*
* TX data initialization
*/
-static void rt61pci_write_beacon(struct queue_entry *entry)
+static void rt61pci_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+ struct queue_entry_priv_pci *entry_priv = entry->priv_data;
unsigned int beacon_base;
u32 reg;
@@ -1861,14 +1876,25 @@ static void rt61pci_write_beacon(struct queue_entry *entry)
* Write entire beacon with descriptor to register.
*/
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
- rt2x00pci_register_multiwrite(rt2x00dev,
- beacon_base,
- skbdesc->desc, skbdesc->desc_len);
- rt2x00pci_register_multiwrite(rt2x00dev,
- beacon_base + skbdesc->desc_len,
+ rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
+ entry_priv->desc, TXINFO_SIZE);
+ rt2x00pci_register_multiwrite(rt2x00dev, beacon_base + TXINFO_SIZE,
entry->skb->data, entry->skb->len);
/*
+ * Enable beaconing again.
+ *
+ * For fair beacon generation between participating Wi-Fi stations,
+ * set the TBTT phase adaptive adjustment step to 8us.
+ */
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
+
+ rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+ /*
* Clean up beacon skb.
*/
dev_kfree_skb_any(entry->skb);
@@ -1880,23 +1906,6 @@ static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
{
u32 reg;
- if (queue == QID_BEACON) {
- /*
- * For Wi-Fi faily generated beacons between participating
- * stations. Set TBTT phase adaptive adjustment step to 8us.
- */
- rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
-
- rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
- if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) {
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
- rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
- }
- return;
- }
-
rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, (queue == QID_AC_BE));
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, (queue == QID_AC_BK));
@@ -1968,12 +1977,8 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
- rxdesc->cipher =
- rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
- rxdesc->cipher_status =
- rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
- }
+ rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
+ rxdesc->cipher_status = rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
if (rxdesc->cipher != CIPHER_NONE) {
_rt2x00_desc_read(entry_priv->desc, 2, &rxdesc->iv[0]);
@@ -2118,6 +2123,14 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
}
}
+static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
+{
+ struct ieee80211_conf conf = { .flags = 0 };
+ struct rt2x00lib_conf libconf = { .conf = &conf };
+
+ rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
+}
+
static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
@@ -2165,6 +2178,12 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
rt2x00pci_register_write(rt2x00dev,
M2H_CMD_DONE_CSR, 0xffffffff);
+ /*
+ * 4 - MCU Autowakeup interrupt.
+ */
+ if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
+ rt61pci_wakeup(rt2x00dev);
+
return IRQ_HANDLED;
}
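
The comment added to rt61pci_write_tx_desc() spells out the usual DMA descriptor ownership rule: every other word is filled first, and only then is word 0, which carries the OWNER_NIC bit, written, so the device can never pick up a half-built descriptor. An illustrative sketch of that ordering (the explicit barrier is not part of this patch and is shown only to make the dependency visible):

	/* 1. fill the descriptor body first (words 1..11) */
	rt2x00_desc_write(txd, 1, word1);
	/* ... remaining body words ... */

	/* 2. ensure the body is visible before ownership is transferred */
	wmb();

	/* 3. hand the descriptor to the NIC last */
	rt2x00_desc_read(txd, 0, &word);
	rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
	rt2x00_set_field32(&word, TXD_W0_VALID, 1);
	rt2x00_desc_write(txd, 0, word);
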
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index bb58d79..e35bd19 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -861,15 +861,15 @@ static void rt73usb_config_ps(struct rt2x00_dev *rt2x00dev,
rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
USB_MODE_SLEEP, REGISTER_TIMEOUT);
} else {
- rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
- USB_MODE_WAKEUP, REGISTER_TIMEOUT);
-
rt2x00usb_register_read(rt2x00dev, MAC_CSR11, &reg);
rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0);
rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0);
rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0);
rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 0);
rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg);
+
+ rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
+ USB_MODE_WAKEUP, REGISTER_TIMEOUT);
}
}
@@ -1441,12 +1441,38 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- __le32 *txd = skbdesc->desc;
+ __le32 *txd = (__le32 *)(skb->data - TXD_DESC_SIZE);
u32 word;
/*
* Start writing the descriptor words.
*/
+ rt2x00_desc_read(txd, 0, &word);
+ rt2x00_set_field32(&word, TXD_W0_BURST,
+ test_bit(ENTRY_TXD_BURST, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_VALID, 1);
+ rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
+ test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_ACK,
+ test_bit(ENTRY_TXD_ACK, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
+ test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_OFDM,
+ (txdesc->rate_mode == RATE_MODE_OFDM));
+ rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
+ rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
+ test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
+ test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
+ test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
+ rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
+ rt2x00_set_field32(&word, TXD_W0_BURST2,
+ test_bit(ENTRY_TXD_BURST, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
+ rt2x00_desc_write(txd, 0, word);
+
rt2x00_desc_read(txd, 1, &word);
rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->queue);
rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
@@ -1475,51 +1501,24 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
rt2x00_desc_write(txd, 5, word);
- rt2x00_desc_read(txd, 0, &word);
- rt2x00_set_field32(&word, TXD_W0_BURST,
- test_bit(ENTRY_TXD_BURST, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_VALID, 1);
- rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
- test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_ACK,
- test_bit(ENTRY_TXD_ACK, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
- test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_OFDM,
- (txdesc->rate_mode == RATE_MODE_OFDM));
- rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
- rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
- test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
- test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
- test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
- rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
- rt2x00_set_field32(&word, TXD_W0_BURST2,
- test_bit(ENTRY_TXD_BURST, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
- rt2x00_desc_write(txd, 0, word);
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txd;
+ skbdesc->desc_len = TXD_DESC_SIZE;
}
/*
* TX data initialization
*/
-static void rt73usb_write_beacon(struct queue_entry *entry)
+static void rt73usb_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
unsigned int beacon_base;
u32 reg;
/*
- * Add the descriptor in front of the skb.
- */
- skb_push(entry->skb, entry->queue->desc_size);
- memcpy(entry->skb->data, skbdesc->desc, skbdesc->desc_len);
- skbdesc->desc = entry->skb->data;
-
- /*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
@@ -1528,6 +1527,11 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
/*
+ * Take the descriptor in front of the skb into account.
+ */
+ skb_push(entry->skb, TXD_DESC_SIZE);
+
+ /*
* Write entire beacon with descriptor to register.
*/
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
@@ -1537,6 +1541,19 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
REGISTER_TIMEOUT32(entry->skb->len));
/*
+ * Enable beaconing again.
+ *
+ * For fair beacon generation between participating Wi-Fi stations,
+ * set the TBTT phase adaptive adjustment step to 8us (default 16us).
+ */
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
+
+ rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+ /*
* Clean up the beacon skb.
*/
dev_kfree_skb(entry->skb);
@@ -1557,31 +1574,6 @@ static int rt73usb_get_tx_data_len(struct queue_entry *entry)
return length;
}
-static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
- const enum data_queue_qid queue)
-{
- u32 reg;
-
- if (queue != QID_BEACON) {
- rt2x00usb_kick_tx_queue(rt2x00dev, queue);
- return;
- }
-
- /*
- * For Wi-Fi faily generated beacons between participating stations.
- * Set TBTT phase adaptive adjustment step to 8us (default 16us)
- */
- rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
-
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
- if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) {
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
- rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
- }
-}
-
/*
* RX control handlers
*/
@@ -1645,12 +1637,8 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
- rxdesc->cipher =
- rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
- rxdesc->cipher_status =
- rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
- }
+ rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
+ rxdesc->cipher_status = rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
if (rxdesc->cipher != CIPHER_NONE) {
_rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]);
@@ -2266,7 +2254,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
.write_tx_data = rt2x00usb_write_tx_data,
.write_beacon = rt73usb_write_beacon,
.get_tx_data_len = rt73usb_get_tx_data_len,
- .kick_tx_queue = rt73usb_kick_tx_queue,
+ .kick_tx_queue = rt2x00usb_kick_tx_queue,
.kill_tx_queue = rt2x00usb_kill_tx_queue,
.fill_rxdone = rt73usb_fill_rxdone,
.config_shared_key = rt73usb_config_shared_key,
diff --git a/drivers/net/wireless/rtl818x/Kconfig b/drivers/net/wireless/rtl818x/Kconfig
new file mode 100644
index 0000000..17d80fe
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/Kconfig
@@ -0,0 +1,88 @@
+#
+# RTL818X Wireless LAN device configuration
+#
+config RTL8180
+ tristate "Realtek 8180/8185 PCI support"
+ depends on MAC80211 && PCI && EXPERIMENTAL
+ select EEPROM_93CX6
+ ---help---
+ This is a driver for RTL8180 and RTL8185 based cards.
+ These are PCI based chips found in cards such as:
+
+ (RTL8185 802.11g)
+ A-Link WL54PC
+
+ (RTL8180 802.11b)
+ Belkin F5D6020 v3
+ Dlink DWL-610
+ Dlink DWL-510
+ Netgear MA521
+ Level-One WPC-0101
+ Acer Aspire 1357 LMi
+ VCTnet PC-11B1
+ Ovislink AirLive WL-1120PCM
+ Mentor WL-PCI
+ Linksys WPC11 v4
+ TrendNET TEW-288PI
+ D-Link DWL-520 Rev D
+ Repotec RP-WP7126
+ TP-Link TL-WN250/251
+ Zonet ZEW1000
+ Longshine LCS-8031-R
+ HomeLine HLW-PCC200
+ GigaFast WF721-AEX
+ Planet WL-3553
+ Encore ENLWI-PCI1-NT
+ TrendNET TEW-266PC
+ Gigabyte GN-WLMR101
+ Siemens-fujitsu Amilo D1840W
+ Edimax EW-7126
+ PheeNet WL-11PCIR
+ Tonze PC-2100T
+ Planet WL-8303
+ Dlink DWL-650 v M1
+ Edimax EW-7106
+ Q-Tec 770WC
+ Topcom Skyr@cer 4011b
+ Roper FreeLan 802.11b (edition 2004)
+ Wistron Neweb Corp CB-200B
+ Pentagram HorNET
+ QTec 775WC
+ TwinMOS Booming B Series
+ Micronet SP906BB
+ Sweex LC700010
+ Surecom EP-9428
+ Safecom SWLCR-1100
+
+ Thanks to Realtek for their support!
+
+config RTL8187
+ tristate "Realtek 8187 and 8187B USB support"
+ depends on MAC80211 && USB
+ select EEPROM_93CX6
+ ---help---
+ This is a driver for RTL8187 and RTL8187B based cards.
+ These are USB based chips found in devices such as:
+
+ Netgear WG111v2
+ Level 1 WNC-0301USB
+ Micronet SP907GK V5
+ Encore ENUWI-G2
+ Trendnet TEW-424UB
+ ASUS P5B Deluxe/P5K Premium motherboards
+ Toshiba Satellite Pro series of laptops
+ Asus Wireless Link
+ Linksys WUSB54GC-EU v2
+ (v1 = rt73usb; v3 is rt2070-based,
+ use staging/rt3070 or try rt2800usb)
+
+ Thanks to Realtek for their support!
+
+# If possible, automatically enable LEDs for RTL8187.
+
+config RTL8187_LEDS
+ bool
+ depends on RTL8187 && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = RTL8187)
+ default y
+
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h
index de3844f..4baf0cf 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180.h
@@ -55,6 +55,14 @@ struct rtl8180_tx_ring {
struct sk_buff_head queue;
};
+struct rtl8180_vif {
+ struct ieee80211_hw *dev;
+
+ /* beaconing */
+ struct delayed_work beacon_work;
+ bool enable_beacon;
+};
+
struct rtl8180_priv {
/* common between rtl818x drivers */
struct rtl818x_csr __iomem *map;
@@ -78,6 +86,9 @@ struct rtl8180_priv {
u32 anaparam;
u16 rfparam;
u8 csthreshold;
+
+ /* sequence # */
+ u16 seqno;
};
void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
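
struct rtl8180_vif is overlaid on the drv_priv area that mac80211 reserves inside each ieee80211_vif once the driver sets dev->vif_data_size (done in rtl8180_probe() below). Converting between the two pointers works in both directions; a condensed sketch with hypothetical helper names:

	static inline struct rtl8180_vif *to_vif_priv(struct ieee80211_vif *vif)
	{
		return (struct rtl8180_vif *)vif->drv_priv;
	}

	/* the reverse mapping, used from the delayed beacon work */
	static inline struct ieee80211_vif *to_vif(struct rtl8180_vif *vif_priv)
	{
		return container_of((void *)vif_priv, struct ieee80211_vif, drv_priv);
	}
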
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 2131a44..515817d 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -188,6 +188,7 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.rates[0].count = (flags & 0xFF) + 1;
+ info->status.rates[1].idx = -1;
ieee80211_tx_status_irqsafe(dev, skb);
if (ring->entries - skb_queue_len(&ring->queue) == 2)
@@ -233,6 +234,7 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct rtl8180_priv *priv = dev->priv;
struct rtl8180_tx_ring *ring;
struct rtl8180_tx_desc *entry;
@@ -284,6 +286,14 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
}
spin_lock_irqsave(&priv->lock, flags);
+
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+ priv->seqno += 0x10;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(priv->seqno);
+ }
+
idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
entry = &ring->desc[idx];
@@ -297,7 +307,8 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
entry->flags = cpu_to_le32(tx_flags);
__skb_queue_tail(&ring->queue, skb);
if (ring->entries - skb_queue_len(&ring->queue) < 2)
- ieee80211_stop_queue(dev, skb_get_queue_mapping(skb));
+ ieee80211_stop_queue(dev, prio);
+
spin_unlock_irqrestore(&priv->lock, flags);
rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
@@ -652,10 +663,59 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
rtl8180_free_tx_ring(dev, i);
}
+static u64 rtl8180_get_tsf(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+
+ return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
+ (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
+}
+
+void rtl8180_beacon_work(struct work_struct *work)
+{
+ struct rtl8180_vif *vif_priv =
+ container_of(work, struct rtl8180_vif, beacon_work.work);
+ struct ieee80211_vif *vif =
+ container_of((void *)vif_priv, struct ieee80211_vif, drv_priv);
+ struct ieee80211_hw *dev = vif_priv->dev;
+ struct ieee80211_mgmt *mgmt;
+ struct sk_buff *skb;
+ int err = 0;
+
+ /* don't overflow the tx ring */
+ if (ieee80211_queue_stopped(dev, 0))
+ goto resched;
+
+ /* grab a fresh beacon */
+ skb = ieee80211_beacon_get(dev, vif);
+ if (!skb)
+ goto resched;
+
+ /*
+ * update beacon timestamp w/ TSF value
+ * TODO: make hardware update beacon timestamp
+ */
+ mgmt = (struct ieee80211_mgmt *)skb->data;
+ mgmt->u.beacon.timestamp = cpu_to_le64(rtl8180_get_tsf(dev));
+
+ /* TODO: use actual beacon queue */
+ skb_set_queue_mapping(skb, 0);
+
+ err = rtl8180_tx(dev, skb);
+ WARN_ON(err);
+
+resched:
+ /*
+ * schedule next beacon
+ * TODO: use hardware support for beacon timing
+ */
+ schedule_delayed_work(&vif_priv->beacon_work,
+ usecs_to_jiffies(1024 * vif->bss_conf.beacon_int));
+}
+
static int rtl8180_add_interface(struct ieee80211_hw *dev,
struct ieee80211_vif *vif)
{
struct rtl8180_priv *priv = dev->priv;
+ struct rtl8180_vif *vif_priv;
/*
* We only support one active interface at a time.
@@ -665,6 +725,7 @@ static int rtl8180_add_interface(struct ieee80211_hw *dev,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
break;
default:
return -EOPNOTSUPP;
@@ -672,6 +733,12 @@ static int rtl8180_add_interface(struct ieee80211_hw *dev,
priv->vif = vif;
+ /* Initialize driver private area */
+ vif_priv = (struct rtl8180_vif *)&vif->drv_priv;
+ vif_priv->dev = dev;
+ INIT_DELAYED_WORK(&vif_priv->beacon_work, rtl8180_beacon_work);
+ vif_priv->enable_beacon = false;
+
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0],
le32_to_cpu(*(__le32 *)vif->addr));
@@ -705,8 +772,11 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
u32 changed)
{
struct rtl8180_priv *priv = dev->priv;
+ struct rtl8180_vif *vif_priv;
int i;
+ vif_priv = (struct rtl8180_vif *)&vif->drv_priv;
+
if (changed & BSS_CHANGED_BSSID) {
for (i = 0; i < ETH_ALEN; i++)
rtl818x_iowrite8(priv, &priv->map->BSSID[i],
@@ -721,13 +791,22 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
}
if (changed & BSS_CHANGED_ERP_SLOT && priv->rf->conf_erp)
- priv->rf->conf_erp(dev, info);
+ priv->rf->conf_erp(dev, info);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ vif_priv->enable_beacon = info->enable_beacon;
+
+ if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON)) {
+ cancel_delayed_work_sync(&vif_priv->beacon_work);
+ if (vif_priv->enable_beacon)
+ schedule_work(&vif_priv->beacon_work.work);
+ }
}
-static u64 rtl8180_prepare_multicast(struct ieee80211_hw *dev, int mc_count,
- struct dev_addr_list *mc_list)
+static u64 rtl8180_prepare_multicast(struct ieee80211_hw *dev,
+ struct netdev_hw_addr_list *mc_list)
{
- return mc_count;
+ return netdev_hw_addr_list_count(mc_list);
}
static void rtl8180_configure_filter(struct ieee80211_hw *dev,
@@ -762,14 +841,6 @@ static void rtl8180_configure_filter(struct ieee80211_hw *dev,
rtl818x_iowrite32(priv, &priv->map->RX_CONF, priv->rx_conf);
}
-static u64 rtl8180_get_tsf(struct ieee80211_hw *dev)
-{
- struct rtl8180_priv *priv = dev->priv;
-
- return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
- (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
-}
-
static const struct ieee80211_ops rtl8180_ops = {
.tx = rtl8180_tx,
.start = rtl8180_start,
@@ -827,6 +898,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
const char *chip_name, *rf_name = NULL;
u32 reg;
u16 eeprom_val;
+ u8 mac_addr[ETH_ALEN];
err = pci_enable_device(pdev);
if (err) {
@@ -855,8 +927,8 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
goto err_free_reg;
}
- if ((err = pci_set_dma_mask(pdev, 0xFFFFFF00ULL)) ||
- (err = pci_set_consistent_dma_mask(pdev, 0xFFFFFF00ULL))) {
+ if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
+ (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
printk(KERN_ERR "%s (rtl8180): No suitable DMA available\n",
pci_name(pdev));
goto err_free_reg;
@@ -905,7 +977,9 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_SIGNAL_UNSPEC;
- dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ dev->vif_data_size = sizeof(struct rtl8180_vif);
+ dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
dev->queues = 1;
dev->max_signal = 65;
@@ -987,12 +1061,13 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam);
}
- eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)dev->wiphy->perm_addr, 3);
- if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
+ eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)mac_addr, 3);
+ if (!is_valid_ether_addr(mac_addr)) {
printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using"
" randomly generated MAC addr\n", pci_name(pdev));
- random_ether_addr(dev->wiphy->perm_addr);
+ random_ether_addr(mac_addr);
}
+ SET_IEEE80211_PERM_ADDR(dev, mac_addr);
/* CCK TX power */
for (i = 0; i < 14; i += 2) {
@@ -1024,7 +1099,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
}
printk(KERN_INFO "%s: hwaddr %pM, %s + %s\n",
- wiphy_name(dev->wiphy), dev->wiphy->perm_addr,
+ wiphy_name(dev->wiphy), mac_addr,
chip_name, priv->rf->name);
return 0;
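
Two details of the ad-hoc support above are easy to miss: the 802.11 Sequence Control field keeps the fragment number in bits 0-3 and the sequence number in bits 4-15, which is why priv->seqno advances in steps of 0x10, and the software beacon work reschedules itself once per beacon interval, which mac80211 expresses in TU of 1024 microseconds. A small sketch with hypothetical helper names:

	#include <linux/ieee80211.h>
	#include <linux/jiffies.h>

	/* bits 0-3: fragment number, bits 4-15: sequence number */
	static u16 example_seq_ctrl(u16 seqno, u16 frag)
	{
		return (seqno & IEEE80211_SCTL_SEQ) | (frag & IEEE80211_SCTL_FRAG);
	}

	/* one beacon interval (given in TU) expressed in jiffies */
	static unsigned long example_beacon_delay(u16 beacon_int_tu)
	{
		return usecs_to_jiffies(1024 * beacon_int_tu);
	}
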
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 1d30792..891b849 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -1194,9 +1194,9 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
}
static u64 rtl8187_prepare_multicast(struct ieee80211_hw *dev,
- int mc_count, struct dev_addr_list *mc_list)
+ struct netdev_hw_addr_list *mc_list)
{
- return mc_count;
+ return netdev_hw_addr_list_count(mc_list);
}
static void rtl8187_configure_filter(struct ieee80211_hw *dev,
@@ -1333,6 +1333,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
u16 txpwr, reg;
u16 product_id = le16_to_cpu(udev->descriptor.idProduct);
int err, i;
+ u8 mac_addr[ETH_ALEN];
dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops);
if (!dev) {
@@ -1390,12 +1391,13 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
udelay(10);
eeprom_93cx6_multiread(&eeprom, RTL8187_EEPROM_MAC_ADDR,
- (__le16 __force *)dev->wiphy->perm_addr, 3);
- if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
+ (__le16 __force *)mac_addr, 3);
+ if (!is_valid_ether_addr(mac_addr)) {
printk(KERN_WARNING "rtl8187: Invalid hwaddr! Using randomly "
"generated MAC address\n");
- random_ether_addr(dev->wiphy->perm_addr);
+ random_ether_addr(mac_addr);
}
+ SET_IEEE80211_PERM_ADDR(dev, mac_addr);
channel = priv->channels;
for (i = 0; i < 3; i++) {
@@ -1526,7 +1528,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
skb_queue_head_init(&priv->b_tx_status.queue);
printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
- wiphy_name(dev->wiphy), dev->wiphy->perm_addr,
+ wiphy_name(dev->wiphy), mac_addr,
chip_name, priv->asic_rev, priv->rf->name, priv->rfkill_mask);
#ifdef CONFIG_RTL8187_LEDS
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 785e024..337fc7b 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -51,3 +51,27 @@ config WL1271
If you choose to build a module, it'll be called wl1271. Say N if
unsure.
+
+config WL1271_SPI
+ tristate "TI wl1271 SPI support"
+ depends on WL1271 && SPI_MASTER
+ ---help---
+ This module adds support for the SPI interface of adapters using
+ TI wl1271 chipset. Select this if your platform is using
+ the SPI bus.
+
+ If you choose to build a module, it'll be called wl1271_spi.
+ Say N if unsure.
+
+config WL1271_SDIO
+ tristate "TI wl1271 SDIO support"
+ depends on WL1271 && MMC && ARM
+ ---help---
+ This module adds support for the SDIO interface of adapters using
+ TI wl1271 chipset. Select this if your platform is using
+ the SDIO bus.
+
+ If you choose to build a module, it'll be called
+ wl1271_sdio. Say N if unsure.
+
+
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index f47ec94..27ddd2b 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -7,10 +7,12 @@ obj-$(CONFIG_WL1251) += wl1251.o
obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o
obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
-wl1271-objs = wl1271_main.o wl1271_spi.o wl1271_cmd.o \
+wl1271-objs = wl1271_main.o wl1271_cmd.o wl1271_io.o \
wl1271_event.o wl1271_tx.o wl1271_rx.o \
wl1271_ps.o wl1271_acx.o wl1271_boot.o \
- wl1271_init.o wl1271_debugfs.o wl1271_io.o
+ wl1271_init.o wl1271_debugfs.o
wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o
obj-$(CONFIG_WL1271) += wl1271.o
+obj-$(CONFIG_WL1271_SPI) += wl1271_spi.o
+obj-$(CONFIG_WL1271_SDIO) += wl1271_sdio.o
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 37c61c1..4f5f02a 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -256,6 +256,8 @@ struct wl1251_debugfs {
struct wl1251_if_operations {
void (*read)(struct wl1251 *wl, int addr, void *buf, size_t len);
void (*write)(struct wl1251 *wl, int addr, void *buf, size_t len);
+ void (*read_elp)(struct wl1251 *wl, int addr, u32 *val);
+ void (*write_elp)(struct wl1251 *wl, int addr, u32 val);
void (*reset)(struct wl1251 *wl);
void (*enable_irq)(struct wl1251 *wl);
void (*disable_irq)(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c
index d5ac79a..2545123 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.c
@@ -497,7 +497,8 @@ int wl1251_boot(struct wl1251 *wl)
/* 2. start processing NVS file */
if (wl->use_eeprom) {
wl1251_reg_write32(wl, ACX_REG_EE_START, START_EEPROM_MGR);
- msleep(4000);
+ /* Wait for EEPROM NVS burst read to complete */
+ msleep(40);
wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, USE_EEPROM);
} else {
ret = wl1251_boot_upload_nvs(wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_io.h b/drivers/net/wireless/wl12xx/wl1251_io.h
index b89d2ac..c545e9d 100644
--- a/drivers/net/wireless/wl12xx/wl1251_io.h
+++ b/drivers/net/wireless/wl12xx/wl1251_io.h
@@ -48,6 +48,26 @@ static inline void wl1251_write32(struct wl1251 *wl, int addr, u32 val)
wl->if_ops->write(wl, addr, &val, sizeof(u32));
}
+static inline u32 wl1251_read_elp(struct wl1251 *wl, int addr)
+{
+ u32 response;
+
+ if (wl->if_ops->read_elp)
+ wl->if_ops->read_elp(wl, addr, &response);
+ else
+ wl->if_ops->read(wl, addr, &response, sizeof(u32));
+
+ return response;
+}
+
+static inline void wl1251_write_elp(struct wl1251 *wl, int addr, u32 val)
+{
+ if (wl->if_ops->write_elp)
+ wl->if_ops->write_elp(wl, addr, val);
+ else
+ wl->if_ops->write(wl, addr, &val, sizeof(u32));
+}
+
/* Memory target IO, address is translated to partition 0 */
void wl1251_mem_read(struct wl1251 *wl, int addr, void *buf, size_t len);
void wl1251_mem_write(struct wl1251 *wl, int addr, void *buf, size_t len);
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 1c8226e..00b2428 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -147,8 +147,8 @@ static void wl1251_fw_wakeup(struct wl1251 *wl)
u32 elp_reg;
elp_reg = ELPCTRL_WAKE_UP;
- wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
- elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
+ wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
+ elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
if (!(elp_reg & ELPCTRL_WLAN_READY))
wl1251_warning("WLAN not ready");
@@ -202,8 +202,8 @@ static int wl1251_chip_wakeup(struct wl1251 *wl)
goto out;
}
- /* No NVS from netlink, try to get it from the filesystem */
- if (wl->nvs == NULL) {
+ if (wl->nvs == NULL && !wl->use_eeprom) {
+ /* No NVS from netlink, try to get it from the filesystem */
ret = wl1251_fetch_nvs(wl);
if (ret < 0)
goto out;
@@ -857,6 +857,7 @@ out:
}
static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct wl1251 *wl = hw->priv;
@@ -1196,6 +1197,66 @@ static const struct ieee80211_ops wl1251_ops = {
.conf_tx = wl1251_op_conf_tx,
};
+static int wl1251_read_eeprom_byte(struct wl1251 *wl, off_t offset, u8 *data)
+{
+ unsigned long timeout;
+
+ wl1251_reg_write32(wl, EE_ADDR, offset);
+ wl1251_reg_write32(wl, EE_CTL, EE_CTL_READ);
+
+ /* EE_CTL_READ clears when data is ready */
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (1) {
+ if (!(wl1251_reg_read32(wl, EE_CTL) & EE_CTL_READ))
+ break;
+
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ msleep(1);
+ }
+
+ *data = wl1251_reg_read32(wl, EE_DATA);
+ return 0;
+}
+
+static int wl1251_read_eeprom(struct wl1251 *wl, off_t offset,
+ u8 *data, size_t len)
+{
+ size_t i;
+ int ret;
+
+ wl1251_reg_write32(wl, EE_START, 0);
+
+ for (i = 0; i < len; i++) {
+ ret = wl1251_read_eeprom_byte(wl, offset + i, &data[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wl1251_read_eeprom_mac(struct wl1251 *wl)
+{
+ u8 mac[ETH_ALEN];
+ int i, ret;
+
+ wl1251_set_partition(wl, 0, 0, REGISTERS_BASE, REGISTERS_DOWN_SIZE);
+
+ ret = wl1251_read_eeprom(wl, 0x1c, mac, sizeof(mac));
+ if (ret < 0) {
+ wl1251_warning("failed to read MAC address from EEPROM");
+ return ret;
+ }
+
+ /* MAC is stored in reverse order */
+ for (i = 0; i < ETH_ALEN; i++)
+ wl->mac_addr[i] = mac[ETH_ALEN - i - 1];
+
+ return 0;
+}
+
static int wl1251_register_hw(struct wl1251 *wl)
{
int ret;
@@ -1231,7 +1292,6 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->channel_change_time = 10000;
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_BEACON_FILTER |
IEEE80211_HW_SUPPORTS_UAPSD;
@@ -1242,6 +1302,9 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->queues = 4;
+ if (wl->use_eeprom)
+ wl1251_read_eeprom_mac(wl);
+
ret = wl1251_register_hw(wl);
if (ret)
goto out;
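
The new wl1251 EEPROM helpers poll EE_CTL until the READ bit clears and note that the MAC address at offset 0x1c is stored last octet first. A worked example of the reversal with hypothetical EEPROM contents:

	u8 eeprom[ETH_ALEN] = { 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 }; /* as read */
	u8 addr[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = eeprom[ETH_ALEN - i - 1];	/* 11:22:33:44:55:66 */
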
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c
index 851dfb6..b55cb2b 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.c
@@ -45,7 +45,7 @@ void wl1251_elp_work(struct work_struct *work)
goto out;
wl1251_debug(DEBUG_PSM, "chip to elp");
- wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
+ wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
wl->elp = true;
out:
@@ -79,9 +79,9 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
start = jiffies;
timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT);
- wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
+ wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
- elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
+ elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
/*
* FIXME: we should wait for irq from chip but, as a temporary
@@ -93,7 +93,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
return -ETIMEDOUT;
}
msleep(1);
- elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
+ elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
}
wl1251_debug(DEBUG_PSM, "wakeup time: %u ms",
diff --git a/drivers/net/wireless/wl12xx/wl1251_reg.h b/drivers/net/wireless/wl12xx/wl1251_reg.h
index 0ca3b43..d16edd9 100644
--- a/drivers/net/wireless/wl12xx/wl1251_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1251_reg.h
@@ -46,7 +46,14 @@
#define SOR_CFG (REGISTERS_BASE + 0x0800)
#define ECPU_CTRL (REGISTERS_BASE + 0x0804)
#define HI_CFG (REGISTERS_BASE + 0x0808)
+
+/* EEPROM registers */
#define EE_START (REGISTERS_BASE + 0x080C)
+#define EE_CTL (REGISTERS_BASE + 0x2000)
+#define EE_DATA (REGISTERS_BASE + 0x2004)
+#define EE_ADDR (REGISTERS_BASE + 0x2008)
+
+#define EE_CTL_READ 2
#define CHIP_ID_B (REGISTERS_BASE + 0x5674)
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index 6f229e0..8515158 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -74,12 +74,6 @@ static void wl1251_rx_status(struct wl1251 *wl,
status->signal = desc->rssi;
- /*
- * FIXME: guessing that snr needs to be divided by two, otherwise
- * the values don't make any sense
- */
- status->noise = desc->rssi - desc->snr / 2;
-
status->freq = ieee80211_channel_to_frequency(desc->channel);
status->flag |= RX_FLAG_TSFT;
@@ -189,6 +183,4 @@ void wl1251_rx(struct wl1251 *wl)
/* Finally, we need to ACK the RX */
wl1251_rx_ack(wl);
-
- return;
}
diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c
index 9423f22..d234285 100644
--- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c
@@ -20,20 +20,14 @@
* Copyright (C) 2009 Bob Copeland (me@bobcopeland.com)
*/
#include <linux/module.h>
-#include <linux/crc7.h>
#include <linux/mod_devicetable.h>
-#include <linux/irq.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/platform_device.h>
+#include <linux/spi/wl12xx.h>
+#include <linux/irq.h>
#include "wl1251.h"
-#include "wl12xx_80211.h"
-#include "wl1251_reg.h"
-#include "wl1251_ps.h"
-#include "wl1251_io.h"
-#include "wl1251_tx.h"
-#include "wl1251_debugfs.h"
#ifndef SDIO_VENDOR_ID_TI
#define SDIO_VENDOR_ID_TI 0x104c
@@ -43,6 +37,8 @@
#define SDIO_DEVICE_ID_TI_WL1251 0x9066
#endif
+static struct wl12xx_platform_data *wl12xx_board_data;
+
static struct sdio_func *wl_to_func(struct wl1251 *wl)
{
return wl->if_priv;
@@ -65,7 +61,8 @@ static const struct sdio_device_id wl1251_devices[] = {
MODULE_DEVICE_TABLE(sdio, wl1251_devices);
-void wl1251_sdio_read(struct wl1251 *wl, int addr, void *buf, size_t len)
+static void wl1251_sdio_read(struct wl1251 *wl, int addr,
+ void *buf, size_t len)
{
int ret;
struct sdio_func *func = wl_to_func(wl);
@@ -77,7 +74,8 @@ void wl1251_sdio_read(struct wl1251 *wl, int addr, void *buf, size_t len)
sdio_release_host(func);
}
-void wl1251_sdio_write(struct wl1251 *wl, int addr, void *buf, size_t len)
+static void wl1251_sdio_write(struct wl1251 *wl, int addr,
+ void *buf, size_t len)
{
int ret;
struct sdio_func *func = wl_to_func(wl);
@@ -89,7 +87,33 @@ void wl1251_sdio_write(struct wl1251 *wl, int addr, void *buf, size_t len)
sdio_release_host(func);
}
-void wl1251_sdio_reset(struct wl1251 *wl)
+static void wl1251_sdio_read_elp(struct wl1251 *wl, int addr, u32 *val)
+{
+ int ret = 0;
+ struct sdio_func *func = wl_to_func(wl);
+
+ sdio_claim_host(func);
+ *val = sdio_readb(func, addr, &ret);
+ sdio_release_host(func);
+
+ if (ret)
+ wl1251_error("sdio_readb failed (%d)", ret);
+}
+
+static void wl1251_sdio_write_elp(struct wl1251 *wl, int addr, u32 val)
+{
+ int ret = 0;
+ struct sdio_func *func = wl_to_func(wl);
+
+ sdio_claim_host(func);
+ sdio_writeb(func, val, addr, &ret);
+ sdio_release_host(func);
+
+ if (ret)
+ wl1251_error("sdio_writeb failed (%d)", ret);
+}
+
+static void wl1251_sdio_reset(struct wl1251 *wl)
{
}
@@ -111,19 +135,64 @@ static void wl1251_sdio_disable_irq(struct wl1251 *wl)
sdio_release_host(func);
}
-void wl1251_sdio_set_power(bool enable)
+/* Interrupts when using dedicated WLAN_IRQ pin */
+static irqreturn_t wl1251_line_irq(int irq, void *cookie)
+{
+ struct wl1251 *wl = cookie;
+
+ ieee80211_queue_work(wl->hw, &wl->irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static void wl1251_enable_line_irq(struct wl1251 *wl)
{
+ return enable_irq(wl->irq);
}
-struct wl1251_if_operations wl1251_sdio_ops = {
+static void wl1251_disable_line_irq(struct wl1251 *wl)
+{
+ return disable_irq(wl->irq);
+}
+
+static void wl1251_sdio_set_power(bool enable)
+{
+}
+
+static struct wl1251_if_operations wl1251_sdio_ops = {
.read = wl1251_sdio_read,
.write = wl1251_sdio_write,
+ .write_elp = wl1251_sdio_write_elp,
+ .read_elp = wl1251_sdio_read_elp,
.reset = wl1251_sdio_reset,
- .enable_irq = wl1251_sdio_enable_irq,
- .disable_irq = wl1251_sdio_disable_irq,
};
-int wl1251_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
+static int wl1251_platform_probe(struct platform_device *pdev)
+{
+ if (pdev->id != -1) {
+ wl1251_error("can only handle single device");
+ return -ENODEV;
+ }
+
+ wl12xx_board_data = pdev->dev.platform_data;
+ return 0;
+}
+
+/*
+ * Dummy platform_driver for passing platform_data to this driver,
+ * until we have a way to pass this through the SDIO subsystem or
+ * some other way.
+ */
+static struct platform_driver wl1251_platform_driver = {
+ .driver = {
+ .name = "wl1251_data",
+ .owner = THIS_MODULE,
+ },
+ .probe = wl1251_platform_probe,
+};
+
+static int wl1251_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
{
int ret;
struct wl1251 *wl;
@@ -141,20 +210,50 @@ int wl1251_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
goto release;
sdio_set_block_size(func, 512);
+ sdio_release_host(func);
SET_IEEE80211_DEV(hw, &func->dev);
wl->if_priv = func;
wl->if_ops = &wl1251_sdio_ops;
wl->set_power = wl1251_sdio_set_power;
- sdio_release_host(func);
+ if (wl12xx_board_data != NULL) {
+ wl->set_power = wl12xx_board_data->set_power;
+ wl->irq = wl12xx_board_data->irq;
+ wl->use_eeprom = wl12xx_board_data->use_eeprom;
+ }
+
+ if (wl->irq) {
+ ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
+ if (ret < 0) {
+ wl1251_error("request_irq() failed: %d", ret);
+ goto disable;
+ }
+
+ set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+ disable_irq(wl->irq);
+
+ wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
+ wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
+
+ wl1251_info("using dedicated interrupt line");
+ } else {
+ wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
+ wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
+
+ wl1251_info("using SDIO interrupt");
+ }
+
ret = wl1251_init_ieee80211(wl);
if (ret)
- goto disable;
+ goto out_free_irq;
sdio_set_drvdata(func, wl);
return ret;
+out_free_irq:
+ if (wl->irq)
+ free_irq(wl->irq, wl);
disable:
sdio_claim_host(func);
sdio_disable_func(func);
@@ -167,6 +266,8 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
{
struct wl1251 *wl = sdio_get_drvdata(func);
+ if (wl->irq)
+ free_irq(wl->irq, wl);
wl1251_free_hw(wl);
sdio_claim_host(func);
@@ -186,6 +287,12 @@ static int __init wl1251_sdio_init(void)
{
int err;
+ err = platform_driver_register(&wl1251_platform_driver);
+ if (err) {
+ wl1251_error("failed to register platform driver: %d", err);
+ return err;
+ }
+
err = sdio_register_driver(&wl1251_sdio_driver);
if (err)
wl1251_error("failed to register sdio driver: %d", err);
@@ -195,6 +302,7 @@ static int __init wl1251_sdio_init(void)
static void __exit wl1251_sdio_exit(void)
{
sdio_unregister_driver(&wl1251_sdio_driver);
+ platform_driver_unregister(&wl1251_platform_driver);
wl1251_notice("unloaded");
}
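
The dummy "wl1251_data" platform driver above exists only to receive platform_data from board code; a board file would register a matching platform device carrying the wl12xx_platform_data fields consumed in wl1251_sdio_probe(). A sketch under that assumption, with a hypothetical IRQ number and power callback:

	#include <linux/platform_device.h>
	#include <linux/spi/wl12xx.h>

	static void board_wl1251_set_power(bool enable)
	{
		/* toggle the WLAN power-enable GPIO here */
	}

	static struct wl12xx_platform_data board_wl1251_data = {
		.set_power	= board_wl1251_set_power,
		.irq		= 42,		/* hypothetical WLAN_IRQ line */
		.use_eeprom	= true,
	};

	static struct platform_device board_wl1251_device = {
		.name	= "wl1251_data",
		.id	= -1,	/* the probe above only accepts a single device */
		.dev	= {
			.platform_data = &board_wl1251_data,
		},
	};

	/* from the board init code: */
	platform_device_register(&board_wl1251_device);
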
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c
index 3bfb59b..e814742 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1251_spi.c
@@ -310,7 +310,7 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
static struct spi_driver wl1251_spi_driver = {
.driver = {
- .name = "wl1251",
+ .name = DRIVER_NAME,
.bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 97ea509..6f1b6b5 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -53,6 +53,9 @@ enum {
DEBUG_MAC80211 = BIT(11),
DEBUG_CMD = BIT(12),
DEBUG_ACX = BIT(13),
+ DEBUG_SDIO = BIT(14),
+ DEBUG_FILTERS = BIT(15),
+ DEBUG_ADHOC = BIT(16),
DEBUG_ALL = ~0,
};
@@ -110,6 +113,9 @@ enum {
#define WL1271_FW_NAME "wl1271-fw.bin"
#define WL1271_NVS_NAME "wl1271-nvs.bin"
+#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
+#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
+
/* NVS data structure */
#define WL1271_NVS_SECTION_SIZE 468
@@ -142,14 +148,7 @@ struct wl1271_nvs_file {
*/
#undef WL1271_80211A_ENABLED
-/*
- * FIXME: for the wl1271, a busy word count of 1 here will result in a more
- * optimal SPI interface. There is some SPI bug however, causing RXS time outs
- * with this mode occasionally on boot, so lets have three for now. A value of
- * three should make sure, that the chipset will always be ready, though this
- * will impact throughput and latencies slightly.
- */
-#define WL1271_BUSY_WORD_CNT 3
+#define WL1271_BUSY_WORD_CNT 1
#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32))
#define WL1271_ELP_HW_STATE_ASLEEP 0
@@ -334,11 +333,27 @@ struct wl1271_scan {
u8 probe_requests;
};
+struct wl1271_if_operations {
+ void (*read)(struct wl1271 *wl, int addr, void *buf, size_t len,
+ bool fixed);
+ void (*write)(struct wl1271 *wl, int addr, void *buf, size_t len,
+ bool fixed);
+ void (*reset)(struct wl1271 *wl);
+ void (*init)(struct wl1271 *wl);
+ void (*power)(struct wl1271 *wl, bool enable);
+ struct device* (*dev)(struct wl1271 *wl);
+ void (*enable_irq)(struct wl1271 *wl);
+ void (*disable_irq)(struct wl1271 *wl);
+};
+
struct wl1271 {
+ struct platform_device *plat_dev;
struct ieee80211_hw *hw;
bool mac80211_registered;
- struct spi_device *spi;
+ void *if_priv;
+
+ struct wl1271_if_operations *if_ops;
void (*set_power)(bool enable);
int irq;
@@ -357,6 +372,9 @@ struct wl1271 {
#define WL1271_FLAG_IN_ELP (6)
#define WL1271_FLAG_PSM (7)
#define WL1271_FLAG_PSM_REQUESTED (8)
+#define WL1271_FLAG_IRQ_PENDING (9)
+#define WL1271_FLAG_IRQ_RUNNING (10)
+#define WL1271_FLAG_IDLE (11)
unsigned long flags;
struct wl1271_partition_set part;
@@ -370,9 +388,12 @@ struct wl1271 {
size_t fw_len;
struct wl1271_nvs_file *nvs;
+ s8 hw_pg_ver;
+
u8 bssid[ETH_ALEN];
u8 mac_addr[ETH_ALEN];
u8 bss_type;
+ u8 set_bss_type;
u8 ssid[IW_ESSID_MAX_SIZE + 1];
u8 ssid_len;
int channel;
@@ -382,13 +403,13 @@ struct wl1271 {
/* Accounting for allocated / available TX blocks on HW */
u32 tx_blocks_freed[NUM_TX_QUEUES];
u32 tx_blocks_available;
- u8 tx_results_count;
+ u32 tx_results_count;
/* Transmitted TX packets counter for chipset interface */
- int tx_packets_count;
+ u32 tx_packets_count;
/* Time-offset between host and chipset clocks */
- int time_offset;
+ s64 time_offset;
/* Session counter for the chipset */
int session_counter;
@@ -403,8 +424,7 @@ struct wl1271 {
/* Security sequence number counters */
u8 tx_security_last_seq;
- u16 tx_security_seq_16;
- u32 tx_security_seq_32;
+ s64 tx_security_seq;
/* FW Rx counter */
u32 rx_counter;
@@ -430,14 +450,19 @@ struct wl1271 {
/* currently configured rate set */
u32 sta_rate_set;
u32 basic_rate_set;
+ u32 basic_rate;
u32 rate_set;
/* The current band */
enum ieee80211_band band;
+ /* Beaconing interval (needed for ad-hoc) */
+ u32 beacon_int;
+
/* Default key (for WEP) */
u32 default_key;
+ unsigned int filters;
unsigned int rx_config;
unsigned int rx_filter;
@@ -450,10 +475,13 @@ struct wl1271 {
/* in dBm */
int power_level;
+ int rssi_thold;
+ int last_rssi_event;
+
struct wl1271_stats stats;
struct wl1271_debugfs debugfs;
- u32 buffer_32;
+ __le32 buffer_32;
u32 buffer_cmd;
u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
@@ -465,6 +493,8 @@ struct wl1271 {
/* Current chipset configuration */
struct conf_drv_settings conf;
+ bool sg_enabled;
+
struct list_head list;
};
@@ -477,7 +507,8 @@ int wl1271_plt_stop(struct wl1271 *wl);
#define WL1271_DEFAULT_POWER_LEVEL 0
-#define WL1271_TX_QUEUE_MAX_LENGTH 20
+#define WL1271_TX_QUEUE_LOW_WATERMARK 10
+#define WL1271_TX_QUEUE_HIGH_WATERMARK 25
/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
on in case it has been shut down shortly before */
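
The WL1271_TX_SECURITY_LO16/HI32 macros split the host-side security sequence counter, now kept as a single s64 tx_security_seq instead of separate 16- and 32-bit fields, into the low 16 bits and the following 32 bits expected by the firmware interface. A minimal usage sketch:

	s64 seq = wl->tx_security_seq;
	u16 lo16 = WL1271_TX_SECURITY_LO16(seq);	/* bits  0..15 */
	u32 hi32 = WL1271_TX_SECURITY_HI32(seq);	/* bits 16..47 */
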
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index 3087824..e19e2f8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -32,7 +32,6 @@
#include "wl1271.h"
#include "wl12xx_80211.h"
#include "wl1271_reg.h"
-#include "wl1271_spi.h"
#include "wl1271_ps.h"
int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
@@ -137,12 +136,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
goto out;
}
- /*
- * FIXME: This is a workaround needed while we don't the correct
- * calibration, to avoid distortions
- */
- /* acx->current_tx_power = power * 10; */
- acx->current_tx_power = 120;
+ acx->current_tx_power = power * 10;
ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
if (ret < 0) {
@@ -511,12 +505,17 @@ out:
return ret;
}
-int wl1271_acx_conn_monit_params(struct wl1271 *wl)
+#define ACX_CONN_MONIT_DISABLE_VALUE 0xffffffff
+
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
{
struct acx_conn_monit_params *acx;
+ u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE;
+ u32 timeout = ACX_CONN_MONIT_DISABLE_VALUE;
int ret;
- wl1271_debug(DEBUG_ACX, "acx connection monitor parameters");
+ wl1271_debug(DEBUG_ACX, "acx connection monitor parameters: %s",
+ enable ? "enabled" : "disabled");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
@@ -524,8 +523,13 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl)
goto out;
}
- acx->synch_fail_thold = cpu_to_le32(wl->conf.conn.synch_fail_thold);
- acx->bss_lose_timeout = cpu_to_le32(wl->conf.conn.bss_lose_timeout);
+ if (enable) {
+ threshold = wl->conf.conn.synch_fail_thold;
+ timeout = wl->conf.conn.bss_lose_timeout;
+ }
+
+ acx->synch_fail_thold = cpu_to_le32(threshold);
+ acx->bss_lose_timeout = cpu_to_le32(timeout);
ret = wl1271_cmd_configure(wl, ACX_CONN_MONIT_PARAMS,
acx, sizeof(*acx));
@@ -541,7 +545,7 @@ out:
}
-int wl1271_acx_sg_enable(struct wl1271 *wl)
+int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable)
{
struct acx_bt_wlan_coex *pta;
int ret;
@@ -554,7 +558,10 @@ int wl1271_acx_sg_enable(struct wl1271 *wl)
goto out;
}
- pta->enable = SG_ENABLE;
+ if (enable)
+ pta->enable = wl->conf.sg.state;
+ else
+ pta->enable = CONF_SG_DISABLE;
ret = wl1271_cmd_configure(wl, ACX_SG_ENABLE, pta, sizeof(*pta));
if (ret < 0) {
@@ -571,7 +578,7 @@ int wl1271_acx_sg_cfg(struct wl1271 *wl)
{
struct acx_bt_wlan_coex_param *param;
struct conf_sg_settings *c = &wl->conf.sg;
- int ret;
+ int i, ret;
wl1271_debug(DEBUG_ACX, "acx sg cfg");
@@ -582,19 +589,9 @@ int wl1271_acx_sg_cfg(struct wl1271 *wl)
}
/* BT-WLAN coext parameters */
- param->per_threshold = cpu_to_le32(c->per_threshold);
- param->max_scan_compensation_time =
- cpu_to_le32(c->max_scan_compensation_time);
- param->nfs_sample_interval = cpu_to_le16(c->nfs_sample_interval);
- param->load_ratio = c->load_ratio;
- param->auto_ps_mode = c->auto_ps_mode;
- param->probe_req_compensation = c->probe_req_compensation;
- param->scan_window_compensation = c->scan_window_compensation;
- param->antenna_config = c->antenna_config;
- param->beacon_miss_threshold = c->beacon_miss_threshold;
- param->rate_adaptation_threshold =
- cpu_to_le32(c->rate_adaptation_threshold);
- param->rate_adaptation_snr = c->rate_adaptation_snr;
+ for (i = 0; i < CONF_SG_PARAMS_MAX; i++)
+ param->params[i] = cpu_to_le32(c->params[i]);
+ param->param_idx = CONF_SG_PARAMS_ALL;
ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
if (ret < 0) {
@@ -806,7 +803,7 @@ int wl1271_acx_rate_policies(struct wl1271 *wl)
/* configure one basic rate class */
idx = ACX_TX_BASIC_RATE;
- acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate_set);
+ acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate);
acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
acx->rate_class[idx].aflags = c->aflags;
@@ -1143,3 +1140,129 @@ out:
kfree(acx);
return ret;
}
+
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
+{
+ struct wl1271_acx_keep_alive_mode *acx = NULL;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx keep alive mode: %d", enable);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->enabled = enable;
+
+ ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx keep alive mode failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
+{
+ struct wl1271_acx_keep_alive_config *acx = NULL;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx keep alive config");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
+ acx->index = index;
+ acx->tpl_validation = tpl_valid;
+ acx->trigger = ACX_KEEP_ALIVE_NO_TX;
+
+ ret = wl1271_cmd_configure(wl, ACX_SET_KEEP_ALIVE_CONFIG,
+ acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx keep alive config failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
+ s16 thold, u8 hyst)
+{
+ struct wl1271_acx_rssi_snr_trigger *acx = NULL;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx rssi snr trigger");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ wl->last_rssi_event = -1;
+
+ acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
+ acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
+ acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
+ if (enable)
+ acx->enable = WL1271_ACX_TRIG_ENABLE;
+ else
+ acx->enable = WL1271_ACX_TRIG_DISABLE;
+
+ acx->index = WL1271_ACX_TRIG_IDX_RSSI;
+ acx->dir = WL1271_ACX_TRIG_DIR_BIDIR;
+ acx->threshold = cpu_to_le16(thold);
+ acx->hysteresis = hyst;
+
+ ret = wl1271_cmd_configure(wl, ACX_RSSI_SNR_TRIGGER, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx rssi snr trigger setting failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
+{
+ struct wl1271_acx_rssi_snr_avg_weights *acx = NULL;
+ struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx rssi snr avg weights");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->rssi_beacon = c->avg_weight_rssi_beacon;
+ acx->rssi_data = c->avg_weight_rssi_data;
+ acx->snr_beacon = c->avg_weight_snr_beacon;
+ acx->snr_data = c->avg_weight_snr_data;
+
+ ret = wl1271_cmd_configure(wl, ACX_RSSI_SNR_WEIGHTS, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx rssi snr trigger weights failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
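
The new ACX helpers all follow the driver's allocate / fill / wl1271_cmd_configure() / free pattern. A hedged sketch of how a caller might arm the RSSI trigger added above, using the rssi_thold field introduced in wl1271.h; the hysteresis value here is hypothetical:

	int ret;

	ret = wl1271_acx_rssi_snr_avg_weights(wl);
	if (ret < 0)
		return ret;

	/* edge trigger around the configured threshold, 4 dB hysteresis */
	ret = wl1271_acx_rssi_snr_trigger(wl, true, wl->rssi_thold, 4);
	if (ret < 0)
		return ret;
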
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index aeccc98..420e7e2 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -392,81 +392,27 @@ struct acx_conn_monit_params {
__le32 bss_lose_timeout; /* number of TU's from synch fail */
} __attribute__ ((packed));
-enum {
- SG_ENABLE = 0,
- SG_DISABLE,
- SG_SENSE_NO_ACTIVITY,
- SG_SENSE_ACTIVE
-};
-
struct acx_bt_wlan_coex {
struct acx_header header;
- /*
- * 0 -> PTA enabled
- * 1 -> PTA disabled
- * 2 -> sense no active mode, i.e.
- * an interrupt is sent upon
- * BT activity.
- * 3 -> PTA is switched on in response
- * to the interrupt sending.
- */
u8 enable;
u8 pad[3];
} __attribute__ ((packed));
-struct acx_dco_itrim_params {
+struct acx_bt_wlan_coex_param {
struct acx_header header;
- u8 enable;
+ __le32 params[CONF_SG_PARAMS_MAX];
+ u8 param_idx;
u8 padding[3];
- __le32 timeout;
} __attribute__ ((packed));
-#define PTA_ANTENNA_TYPE_DEF (0)
-#define PTA_BT_HP_MAXTIME_DEF (2000)
-#define PTA_WLAN_HP_MAX_TIME_DEF (5000)
-#define PTA_SENSE_DISABLE_TIMER_DEF (1350)
-#define PTA_PROTECTIVE_RX_TIME_DEF (1500)
-#define PTA_PROTECTIVE_TX_TIME_DEF (1500)
-#define PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF (3000)
-#define PTA_SIGNALING_TYPE_DEF (1)
-#define PTA_AFH_LEVERAGE_ON_DEF (0)
-#define PTA_NUMBER_QUIET_CYCLE_DEF (0)
-#define PTA_MAX_NUM_CTS_DEF (3)
-#define PTA_NUMBER_OF_WLAN_PACKETS_DEF (2)
-#define PTA_NUMBER_OF_BT_PACKETS_DEF (2)
-#define PTA_PROTECTIVE_RX_TIME_FAST_DEF (1500)
-#define PTA_PROTECTIVE_TX_TIME_FAST_DEF (3000)
-#define PTA_CYCLE_TIME_FAST_DEF (8700)
-#define PTA_RX_FOR_AVALANCHE_DEF (5)
-#define PTA_ELP_HP_DEF (0)
-#define PTA_ANTI_STARVE_PERIOD_DEF (500)
-#define PTA_ANTI_STARVE_NUM_CYCLE_DEF (4)
-#define PTA_ALLOW_PA_SD_DEF (1)
-#define PTA_TIME_BEFORE_BEACON_DEF (6300)
-#define PTA_HPDM_MAX_TIME_DEF (1600)
-#define PTA_TIME_OUT_NEXT_WLAN_DEF (2550)
-#define PTA_AUTO_MODE_NO_CTS_DEF (0)
-#define PTA_BT_HP_RESPECTED_DEF (3)
-#define PTA_WLAN_RX_MIN_RATE_DEF (24)
-#define PTA_ACK_MODE_DEF (1)
-
-struct acx_bt_wlan_coex_param {
+struct acx_dco_itrim_params {
struct acx_header header;
- __le32 per_threshold;
- __le32 max_scan_compensation_time;
- __le16 nfs_sample_interval;
- u8 load_ratio;
- u8 auto_ps_mode;
- u8 probe_req_compensation;
- u8 scan_window_compensation;
- u8 antenna_config;
- u8 beacon_miss_threshold;
- __le32 rate_adaptation_threshold;
- s8 rate_adaptation_snr;
+ u8 enable;
u8 padding[3];
+ __le32 timeout;
} __attribute__ ((packed));
struct acx_energy_detection {
@@ -969,6 +915,84 @@ struct wl1271_acx_pm_config {
u8 padding[3];
} __attribute__ ((packed));
+struct wl1271_acx_keep_alive_mode {
+ struct acx_header header;
+
+ u8 enabled;
+ u8 padding[3];
+} __attribute__ ((packed));
+
+enum {
+ ACX_KEEP_ALIVE_NO_TX = 0,
+ ACX_KEEP_ALIVE_PERIOD_ONLY
+};
+
+enum {
+ ACX_KEEP_ALIVE_TPL_INVALID = 0,
+ ACX_KEEP_ALIVE_TPL_VALID
+};
+
+struct wl1271_acx_keep_alive_config {
+ struct acx_header header;
+
+ __le32 period;
+ u8 index;
+ u8 tpl_validation;
+ u8 trigger;
+ u8 padding;
+} __attribute__ ((packed));
+
+enum {
+ WL1271_ACX_TRIG_TYPE_LEVEL = 0,
+ WL1271_ACX_TRIG_TYPE_EDGE,
+};
+
+enum {
+ WL1271_ACX_TRIG_DIR_LOW = 0,
+ WL1271_ACX_TRIG_DIR_HIGH,
+ WL1271_ACX_TRIG_DIR_BIDIR,
+};
+
+enum {
+ WL1271_ACX_TRIG_ENABLE = 1,
+ WL1271_ACX_TRIG_DISABLE,
+};
+
+enum {
+ WL1271_ACX_TRIG_METRIC_RSSI_BEACON = 0,
+ WL1271_ACX_TRIG_METRIC_RSSI_DATA,
+ WL1271_ACX_TRIG_METRIC_SNR_BEACON,
+ WL1271_ACX_TRIG_METRIC_SNR_DATA,
+};
+
+enum {
+ WL1271_ACX_TRIG_IDX_RSSI = 0,
+ WL1271_ACX_TRIG_COUNT = 8,
+};
+
+struct wl1271_acx_rssi_snr_trigger {
+ struct acx_header header;
+
+ __le16 threshold;
+ __le16 pacing; /* 0 - 60000 ms */
+ u8 metric;
+ u8 type;
+ u8 dir;
+ u8 hysteresis;
+ u8 index;
+ u8 enable;
+ u8 padding[2];
+};
+
+struct wl1271_acx_rssi_snr_avg_weights {
+ struct acx_header header;
+
+ u8 rssi_beacon;
+ u8 rssi_data;
+ u8 snr_beacon;
+ u8 snr_data;
+};
+
enum {
ACX_WAKE_UP_CONDITIONS = 0x0002,
ACX_MEM_CFG = 0x0003,
@@ -1017,8 +1041,8 @@ enum {
ACX_FRAG_CFG = 0x004F,
ACX_BET_ENABLE = 0x0050,
ACX_RSSI_SNR_TRIGGER = 0x0051,
- ACX_RSSI_SNR_WEIGHTS = 0x0051,
- ACX_KEEP_ALIVE_MODE = 0x0052,
+ ACX_RSSI_SNR_WEIGHTS = 0x0052,
+ ACX_KEEP_ALIVE_MODE = 0x0053,
ACX_SET_KEEP_ALIVE_CONFIG = 0x0054,
ACX_BA_SESSION_RESPONDER_POLICY = 0x0055,
ACX_BA_SESSION_INITIATOR_POLICY = 0x0056,
@@ -1058,8 +1082,8 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
-int wl1271_acx_conn_monit_params(struct wl1271 *wl);
-int wl1271_acx_sg_enable(struct wl1271 *wl);
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
+int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
int wl1271_acx_sg_cfg(struct wl1271 *wl);
int wl1271_acx_cca_threshold(struct wl1271 *wl);
int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
@@ -1085,5 +1109,10 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
u8 version);
int wl1271_acx_pm_config(struct wl1271 *wl);
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable);
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
+ s16 thold, u8 hyst);
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index 0243562..1a36d8a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -1,7 +1,7 @@
/*
* This file is part of wl1271
*
- * Copyright (C) 2008-2009 Nokia Corporation
+ * Copyright (C) 2008-2010 Nokia Corporation
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
*
@@ -27,7 +27,6 @@
#include "wl1271_acx.h"
#include "wl1271_reg.h"
#include "wl1271_boot.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#include "wl1271_event.h"
@@ -230,6 +229,14 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
nvs_len = sizeof(wl->nvs->nvs);
nvs_ptr = (u8 *)wl->nvs->nvs;
+ /* update current MAC address to NVS */
+ nvs_ptr[11] = wl->mac_addr[0];
+ nvs_ptr[10] = wl->mac_addr[1];
+ nvs_ptr[6] = wl->mac_addr[2];
+ nvs_ptr[5] = wl->mac_addr[3];
+ nvs_ptr[4] = wl->mac_addr[4];
+ nvs_ptr[3] = wl->mac_addr[5];
+
/*
* Layout before the actual NVS tables:
* 1 byte : burst length.
@@ -300,7 +307,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
{
- enable_irq(wl->irq);
+ wl1271_enable_interrupts(wl);
wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
wl1271_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
@@ -344,7 +351,7 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
static int wl1271_boot_run_firmware(struct wl1271 *wl)
{
int loop, ret;
- u32 chip_id, interrupt;
+ u32 chip_id, intr;
wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
@@ -361,15 +368,15 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
loop = 0;
while (loop++ < INIT_LOOP) {
udelay(INIT_LOOP_DELAY);
- interrupt = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+ intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
- if (interrupt == 0xffffffff) {
+ if (intr == 0xffffffff) {
wl1271_error("error reading hardware complete "
"init indication");
return -EIO;
}
/* check that ACX_INTR_INIT_COMPLETE is enabled */
- else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) {
+ else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) {
wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
WL1271_ACX_INTR_INIT_COMPLETE);
break;
@@ -404,7 +411,10 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
/* unmask required mbox events */
wl->event_mask = BSS_LOSE_EVENT_ID |
SCAN_COMPLETE_EVENT_ID |
- PS_REPORT_EVENT_ID;
+ PS_REPORT_EVENT_ID |
+ JOIN_EVENT_COMPLETE_ID |
+ DISCONNECT_EVENT_COMPLETE_ID |
+ RSSI_SNR_TRIGGER_0_EVENT_ID;
ret = wl1271_event_unmask(wl);
if (ret < 0) {
@@ -431,11 +441,23 @@ static int wl1271_boot_write_irq_polarity(struct wl1271 *wl)
return 0;
}
+static void wl1271_boot_hw_version(struct wl1271 *wl)
+{
+ u32 fuse;
+
+ fuse = wl1271_top_reg_read(wl, REG_FUSE_DATA_2_1);
+ fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET;
+
+ wl->hw_pg_ver = (s8)fuse;
+}
+
int wl1271_boot(struct wl1271 *wl)
{
int ret = 0;
u32 tmp, clk, pause;
+ wl1271_boot_hw_version(wl);
+
if (REF_CLOCK == 0 || REF_CLOCK == 2 || REF_CLOCK == 4)
/* ref clk: 19.2/38.4/38.4-XTAL */
clk = 0x3;
@@ -445,11 +467,15 @@ int wl1271_boot(struct wl1271 *wl)
if (REF_CLOCK != 0) {
u16 val;
- /* Set clock type */
+ /* Set clock type (open drain) */
val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
val &= FREF_CLK_TYPE_BITS;
- val |= CLK_REQ_PRCM;
wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
+
+ /* Set clock pull mode (no pull) */
+ val = wl1271_top_reg_read(wl, OCP_REG_CLK_PULL);
+ val |= NO_PULL;
+ wl1271_top_reg_write(wl, OCP_REG_CLK_PULL, val);
} else {
u16 val;
/* Set clock polarity */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.h b/drivers/net/wireless/wl12xx/wl1271_boot.h
index 412443e..f829699 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.h
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.h
@@ -53,10 +53,16 @@ struct wl1271_static_data {
#define OCP_REG_POLARITY 0x0064
#define OCP_REG_CLK_TYPE 0x0448
#define OCP_REG_CLK_POLARITY 0x0cb2
+#define OCP_REG_CLK_PULL 0x0cb4
-#define CMD_MBOX_ADDRESS 0x407B4
+#define REG_FUSE_DATA_2_1 0x050a
+#define PG_VER_MASK 0x3c
+#define PG_VER_OFFSET 2
-#define POLARITY_LOW BIT(1)
+#define CMD_MBOX_ADDRESS 0x407B4
+
+#define POLARITY_LOW BIT(1)
+#define NO_PULL (BIT(14) | BIT(15))
#define FREF_CLK_TYPE_BITS 0xfffffe7f
#define CLK_REQ_PRCM 0x100
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index e7832f3..19393e2 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -1,7 +1,7 @@
/*
* This file is part of wl1271
*
- * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2009-2010 Nokia Corporation
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
*
@@ -26,15 +26,18 @@
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/etherdevice.h>
+#include <linux/ieee80211.h>
#include <linux/slab.h>
#include "wl1271.h"
#include "wl1271_reg.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#include "wl1271_acx.h"
#include "wl12xx_80211.h"
#include "wl1271_cmd.h"
+#include "wl1271_event.h"
+
+#define WL1271_CMD_FAST_POLL_COUNT 50
/*
* send command to firmware
@@ -52,6 +55,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
u32 intr;
int ret = 0;
u16 status;
+ u16 poll_count = 0;
cmd = buf;
cmd->id = cpu_to_le16(id);
@@ -73,7 +77,11 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
goto out;
}
- msleep(1);
+ poll_count++;
+ if (poll_count < WL1271_CMD_FAST_POLL_COUNT)
+ udelay(10);
+ else
+ msleep(1);
intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
}
@@ -249,7 +257,36 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
return ret;
}
-int wl1271_cmd_join(struct wl1271 *wl)
+/*
+ * Poll the mailbox event field until any of the bits in the mask is set or a
+ * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
+ */
+static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
+{
+ u32 events_vector, event;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
+
+ do {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ msleep(1);
+
+ /* read from both event fields */
+ wl1271_read(wl, wl->mbox_ptr[0], &events_vector,
+ sizeof(events_vector), false);
+ event = events_vector & mask;
+ wl1271_read(wl, wl->mbox_ptr[1], &events_vector,
+ sizeof(events_vector), false);
+ event |= events_vector & mask;
+ } while (!event);
+
+ return 0;
+}
+
+int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
{
static bool do_cal = true;
struct wl1271_cmd_join *join;
@@ -280,30 +317,13 @@ int wl1271_cmd_join(struct wl1271 *wl)
join->rx_config_options = cpu_to_le32(wl->rx_config);
join->rx_filter_options = cpu_to_le32(wl->rx_filter);
- join->bss_type = wl->bss_type;
+ join->bss_type = bss_type;
+ join->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- /*
- * FIXME: disable temporarily all filters because after commit
- * 9cef8737 "mac80211: fix managed mode BSSID handling" broke
- * association. The filter logic needs to be implemented properly
- * and once that is done, this hack can be removed.
- */
- join->rx_config_options = cpu_to_le32(0);
- join->rx_filter_options = cpu_to_le32(WL1271_DEFAULT_RX_FILTER);
-
- if (wl->band == IEEE80211_BAND_2GHZ)
- join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_1MBPS |
- CONF_HW_BIT_RATE_2MBPS |
- CONF_HW_BIT_RATE_5_5MBPS |
- CONF_HW_BIT_RATE_11MBPS);
- else {
+ if (wl->band == IEEE80211_BAND_5GHZ)
join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
- join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_6MBPS |
- CONF_HW_BIT_RATE_12MBPS |
- CONF_HW_BIT_RATE_24MBPS);
- }
- join->beacon_interval = cpu_to_le16(WL1271_DEFAULT_BEACON_INT);
+ join->beacon_interval = cpu_to_le16(wl->beacon_int);
join->dtim_interval = WL1271_DEFAULT_DTIM_PERIOD;
join->channel = wl->channel;
@@ -320,8 +340,7 @@ int wl1271_cmd_join(struct wl1271 *wl)
/* reset TX security counters */
wl->tx_security_last_seq = 0;
- wl->tx_security_seq_16 = 0;
- wl->tx_security_seq_32 = 0;
+ wl->tx_security_seq = 0;
ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
if (ret < 0) {
@@ -329,11 +348,9 @@ int wl1271_cmd_join(struct wl1271 *wl)
goto out_free;
}
- /*
- * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to
- * simplify locking we just sleep instead, for now
- */
- msleep(10);
+ ret = wl1271_cmd_wait_for_event(wl, JOIN_EVENT_COMPLETE_ID);
+ if (ret < 0)
+ wl1271_error("cmd join event completion error");
out_free:
kfree(join);
@@ -465,7 +482,7 @@ int wl1271_cmd_data_path(struct wl1271 *wl, bool enable)
if (ret < 0) {
wl1271_error("tx %s cmd for channel %d failed",
enable ? "start" : "stop", cmd->channel);
- return ret;
+ goto out;
}
wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d",
@@ -499,7 +516,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send)
ps_params->ps_mode = ps_mode;
ps_params->send_null_data = send;
ps_params->retries = 5;
- ps_params->hang_over_period = 128;
+ ps_params->hang_over_period = 1;
ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */
ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
@@ -549,25 +566,29 @@ out:
return ret;
}
-int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
- u8 active_scan, u8 high_prio, u8 band,
- u8 probe_requests)
+int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len, u8 active_scan,
+ u8 high_prio, u8 band, u8 probe_requests)
{
struct wl1271_cmd_trigger_scan_to *trigger = NULL;
struct wl1271_cmd_scan *params = NULL;
struct ieee80211_channel *channels;
+ u32 rate;
int i, j, n_ch, ret;
u16 scan_options = 0;
u8 ieee_band;
- if (band == WL1271_SCAN_BAND_2_4_GHZ)
+ if (band == WL1271_SCAN_BAND_2_4_GHZ) {
ieee_band = IEEE80211_BAND_2GHZ;
- else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled())
+ rate = wl->conf.tx.basic_rate;
+ } else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled()) {
ieee_band = IEEE80211_BAND_2GHZ;
- else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled())
+ rate = wl->conf.tx.basic_rate;
+ } else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled()) {
ieee_band = IEEE80211_BAND_5GHZ;
- else
+ rate = wl->conf.tx.basic_rate_5;
+ } else
return -EINVAL;
if (wl->hw->wiphy->bands[ieee_band]->channels == NULL)
@@ -594,8 +615,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
params->params.scan_options = cpu_to_le16(scan_options);
params->params.num_probe_requests = probe_requests;
- /* Let the fw autodetect suitable tx_rate for probes */
- params->params.tx_rate = 0;
+ params->params.tx_rate = cpu_to_le32(rate);
params->params.tid_trigger = 0;
params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
@@ -622,12 +642,13 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
params->params.num_channels = j;
- if (len && ssid) {
- params->params.ssid_len = len;
- memcpy(params->params.ssid, ssid, len);
+ if (ssid_len && ssid) {
+ params->params.ssid_len = ssid_len;
+ memcpy(params->params.ssid, ssid, ssid_len);
}
- ret = wl1271_cmd_build_probe_req(wl, ssid, len, ieee_band);
+ ret = wl1271_cmd_build_probe_req(wl, ssid, ssid_len,
+ ie, ie_len, ieee_band);
if (ret < 0) {
wl1271_error("PROBE request template failed");
goto out;
@@ -658,9 +679,9 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
wl->scan.active = active_scan;
wl->scan.high_prio = high_prio;
wl->scan.probe_requests = probe_requests;
- if (len && ssid) {
- wl->scan.ssid_len = len;
- memcpy(wl->scan.ssid, ssid, len);
+ if (ssid_len && ssid) {
+ wl->scan.ssid_len = ssid_len;
+ memcpy(wl->scan.ssid, ssid, ssid_len);
} else
wl->scan.ssid_len = 0;
}
@@ -675,11 +696,12 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
out:
kfree(params);
+ kfree(trigger);
return ret;
}
int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
- void *buf, size_t buf_len)
+ void *buf, size_t buf_len, int index, u32 rates)
{
struct wl1271_cmd_template_set *cmd;
int ret = 0;
@@ -697,9 +719,10 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
cmd->len = cpu_to_le16(buf_len);
cmd->template_type = template_id;
- cmd->enabled_rates = cpu_to_le32(wl->conf.tx.rc_conf.enabled_rates);
+ cmd->enabled_rates = cpu_to_le32(rates);
cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit;
cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit;
+ cmd->index = index;
if (buf)
memcpy(cmd->template_data, buf, buf_len);
@@ -717,155 +740,129 @@ out:
return ret;
}
-static int wl1271_build_basic_rates(u8 *rates, u8 band)
+int wl1271_cmd_build_null_data(struct wl1271 *wl)
{
- u8 index = 0;
-
- if (band == IEEE80211_BAND_2GHZ) {
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
- } else if (band == IEEE80211_BAND_5GHZ) {
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
- } else {
- wl1271_error("build_basic_rates invalid band: %d", band);
- }
+ struct sk_buff *skb = NULL;
+ int size;
+ void *ptr;
+ int ret = -ENOMEM;
- return index;
-}
-static int wl1271_build_extended_rates(u8 *rates, u8 band)
-{
- u8 index = 0;
-
- if (band == IEEE80211_BAND_2GHZ) {
- rates[index++] = IEEE80211_OFDM_RATE_6MB;
- rates[index++] = IEEE80211_OFDM_RATE_9MB;
- rates[index++] = IEEE80211_OFDM_RATE_12MB;
- rates[index++] = IEEE80211_OFDM_RATE_18MB;
- rates[index++] = IEEE80211_OFDM_RATE_24MB;
- rates[index++] = IEEE80211_OFDM_RATE_36MB;
- rates[index++] = IEEE80211_OFDM_RATE_48MB;
- rates[index++] = IEEE80211_OFDM_RATE_54MB;
- } else if (band == IEEE80211_BAND_5GHZ) {
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
+ if (wl->bss_type == BSS_TYPE_IBSS) {
+ size = sizeof(struct wl12xx_null_data_template);
+ ptr = NULL;
} else {
- wl1271_error("build_basic_rates invalid band: %d", band);
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out;
+ size = skb->len;
+ ptr = skb->data;
}
- return index;
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0,
+ WL1271_RATE_AUTOMATIC);
+
+out:
+ dev_kfree_skb(skb);
+ if (ret)
+		wl1271_warning("cmd build null data failed %d", ret);
+
+ return ret;
}
-int wl1271_cmd_build_null_data(struct wl1271 *wl)
+int wl1271_cmd_build_klv_null_data(struct wl1271 *wl)
{
- struct wl12xx_null_data_template template;
+ struct sk_buff *skb = NULL;
+ int ret = -ENOMEM;
- if (!is_zero_ether_addr(wl->bssid)) {
- memcpy(template.header.da, wl->bssid, ETH_ALEN);
- memcpy(template.header.bssid, wl->bssid, ETH_ALEN);
- } else {
- memset(template.header.da, 0xff, ETH_ALEN);
- memset(template.header.bssid, 0xff, ETH_ALEN);
- }
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV,
+ skb->data, skb->len,
+ CMD_TEMPL_KLV_IDX_NULL_DATA,
+ WL1271_RATE_AUTOMATIC);
- memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
- template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_NULLFUNC |
- IEEE80211_FCTL_TODS);
+out:
+ dev_kfree_skb(skb);
+ if (ret)
+ wl1271_warning("cmd build klv null data failed %d", ret);
- return wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, &template,
- sizeof(template));
+ return ret;
}
int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
{
- struct wl12xx_ps_poll_template template;
-
- memcpy(template.bssid, wl->bssid, ETH_ALEN);
- memcpy(template.ta, wl->mac_addr, ETH_ALEN);
-
- /* aid in PS-Poll has its two MSBs each set to 1 */
- template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
+ struct sk_buff *skb;
+ int ret = 0;
- template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
+ skb = ieee80211_pspoll_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out;
- return wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, &template,
- sizeof(template));
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data,
+ skb->len, 0, wl->basic_rate);
+out:
+ dev_kfree_skb(skb);
+ return ret;
}
-int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len,
- u8 band)
+int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+ const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len, u8 band)
{
- struct wl12xx_probe_req_template template;
- struct wl12xx_ie_rates *rates;
- char *ptr;
- u16 size;
+ struct sk_buff *skb;
int ret;
- ptr = (char *)&template;
- size = sizeof(struct ieee80211_header);
-
- memset(template.header.da, 0xff, ETH_ALEN);
- memset(template.header.bssid, 0xff, ETH_ALEN);
- memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
- template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-
- /* IEs */
- /* SSID */
- template.ssid.header.id = WLAN_EID_SSID;
- template.ssid.header.len = ssid_len;
- if (ssid_len && ssid)
- memcpy(template.ssid.ssid, ssid, ssid_len);
- size += sizeof(struct wl12xx_ie_header) + ssid_len;
- ptr += size;
-
- /* Basic Rates */
- rates = (struct wl12xx_ie_rates *)ptr;
- rates->header.id = WLAN_EID_SUPP_RATES;
- rates->header.len = wl1271_build_basic_rates(rates->rates, band);
- size += sizeof(struct wl12xx_ie_header) + rates->header.len;
- ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
-
- /* Extended rates */
- rates = (struct wl12xx_ie_rates *)ptr;
- rates->header.id = WLAN_EID_EXT_SUPP_RATES;
- rates->header.len = wl1271_build_extended_rates(rates->rates, band);
- size += sizeof(struct wl12xx_ie_header) + rates->header.len;
-
- wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
+ skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+ ie, ie_len);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
if (band == IEEE80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
- &template, size);
+ skb->data, skb->len, 0,
+ wl->conf.tx.basic_rate);
else
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
- &template, size);
+ skb->data, skb->len, 0,
+ wl->conf.tx.basic_rate_5);
+
+out:
+ dev_kfree_skb(skb);
return ret;
}
+int wl1271_build_qos_null_data(struct wl1271 *wl)
+{
+ struct ieee80211_qos_hdr template;
+
+ memset(&template, 0, sizeof(template));
+
+ memcpy(template.addr1, wl->bssid, ETH_ALEN);
+ memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
+ memcpy(template.addr3, wl->bssid, ETH_ALEN);
+
+ template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_TODS);
+
+ /* FIXME: not sure what priority to use here */
+ template.qos_ctrl = cpu_to_le16(0);
+
+ return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template,
+ sizeof(template), 0,
+ WL1271_RATE_AUTOMATIC);
+}
+
int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
{
struct wl1271_cmd_set_keys *cmd;
@@ -976,6 +973,10 @@ int wl1271_cmd_disconnect(struct wl1271 *wl)
goto out_free;
}
+ ret = wl1271_cmd_wait_for_event(wl, DISCONNECT_EVENT_COMPLETE_ID);
+ if (ret < 0)
+ wl1271_error("cmd disconnect event completion error");
+
out_free:
kfree(cmd);
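
For context, a hedged sketch of how the widened wl1271_cmd_scan() prototype could be fed from a cfg80211 scan request. The glue function name is hypothetical and the actual hw_scan handler lives in wl1271_main.c, outside this hunk; only the wl1271_cmd_scan() signature and WL1271_SCAN_BAND_DUAL come from this patch.

static int wl1271_example_hw_scan(struct wl1271 *wl,
				  struct cfg80211_scan_request *req)
{
	const u8 *ssid = NULL;
	size_t ssid_len = 0;

	if (req->n_ssids) {
		ssid = req->ssids[0].ssid;
		ssid_len = req->ssids[0].ssid_len;
	}

	/* active scan, normal priority, both bands, 3 probe requests */
	return wl1271_cmd_scan(wl, ssid, ssid_len, req->ie, req->ie_len,
			       1, 0, WL1271_SCAN_BAND_DUAL, 3);
}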
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index 2dc06c7..f2820b4 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -33,7 +33,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
size_t res_len);
int wl1271_cmd_general_parms(struct wl1271 *wl);
int wl1271_cmd_radio_parms(struct wl1271 *wl);
-int wl1271_cmd_join(struct wl1271 *wl);
+int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
@@ -41,15 +41,18 @@ int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send);
int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
size_t len);
-int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
- u8 active_scan, u8 high_prio, u8 band,
- u8 probe_requests);
+int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len, u8 active_scan,
+ u8 high_prio, u8 band, u8 probe_requests);
int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
- void *buf, size_t buf_len);
+ void *buf, size_t buf_len, int index, u32 rates);
int wl1271_cmd_build_null_data(struct wl1271 *wl);
int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
-int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len,
- u8 band);
+int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+ const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len, u8 band);
+int wl1271_build_qos_null_data(struct wl1271 *wl);
+int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
@@ -99,6 +102,11 @@ enum wl1271_commands {
#define MAX_CMD_PARAMS 572
+enum {
+ CMD_TEMPL_KLV_IDX_NULL_DATA = 0,
+ CMD_TEMPL_KLV_IDX_MAX = 4
+};
+
enum cmd_templ {
CMD_TEMPL_NULL_DATA = 0,
CMD_TEMPL_BEACON,
@@ -121,6 +129,7 @@ enum cmd_templ {
/* unit ms */
#define WL1271_COMMAND_TIMEOUT 2000
#define WL1271_CMD_TEMPL_MAX_SIZE 252
+#define WL1271_EVENT_TIMEOUT 750
struct wl1271_cmd_header {
__le16 id;
@@ -243,6 +252,8 @@ struct cmd_enabledisable_path {
u8 padding[3];
} __attribute__ ((packed));
+#define WL1271_RATE_AUTOMATIC 0
+
struct wl1271_cmd_template_set {
struct wl1271_cmd_header header;
@@ -509,6 +520,8 @@ enum wl1271_disconnect_type {
};
struct wl1271_cmd_disconnect {
+ struct wl1271_cmd_header header;
+
__le32 rx_config_options;
__le32 rx_filter_options;
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
index 6f9e75c..d046d04 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -65,110 +65,344 @@ enum {
CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS,
};
-struct conf_sg_settings {
+enum {
+ CONF_HW_RXTX_RATE_MCS7 = 0,
+ CONF_HW_RXTX_RATE_MCS6,
+ CONF_HW_RXTX_RATE_MCS5,
+ CONF_HW_RXTX_RATE_MCS4,
+ CONF_HW_RXTX_RATE_MCS3,
+ CONF_HW_RXTX_RATE_MCS2,
+ CONF_HW_RXTX_RATE_MCS1,
+ CONF_HW_RXTX_RATE_MCS0,
+ CONF_HW_RXTX_RATE_54,
+ CONF_HW_RXTX_RATE_48,
+ CONF_HW_RXTX_RATE_36,
+ CONF_HW_RXTX_RATE_24,
+ CONF_HW_RXTX_RATE_22,
+ CONF_HW_RXTX_RATE_18,
+ CONF_HW_RXTX_RATE_12,
+ CONF_HW_RXTX_RATE_11,
+ CONF_HW_RXTX_RATE_9,
+ CONF_HW_RXTX_RATE_6,
+ CONF_HW_RXTX_RATE_5_5,
+ CONF_HW_RXTX_RATE_2,
+ CONF_HW_RXTX_RATE_1,
+ CONF_HW_RXTX_RATE_MAX,
+ CONF_HW_RXTX_RATE_UNSUPPORTED = 0xff
+};
+
+enum {
+ CONF_SG_DISABLE = 0,
+ CONF_SG_PROTECTIVE,
+ CONF_SG_OPPORTUNISTIC
+};
+
+enum {
/*
- * Defines the PER threshold in PPM of the BT voice of which reaching
- * this value will trigger raising the priority of the BT voice by
- * the BT IP until next NFS sample interval time as defined in
- * nfs_sample_interval.
+ * PER threshold in PPM of the BT voice
*
- * Unit: PER value in PPM (parts per million)
- * #Error_packets / #Total_packets
+ * Range: 0 - 10000000
+ */
+ CONF_SG_BT_PER_THRESHOLD = 0,
- * Range: u32
+ /*
+	 * Number of consecutive RX_ACTIVE activities to override BT voice
+ * frames to ensure WLAN connection
+ *
+ * Range: 0 - 100
+ */
+ CONF_SG_HV3_MAX_OVERRIDE,
+
+ /*
+	 * NFS sample interval for the BT voice PER measurement
+	 *
+	 * Range: 0 - 65000 (ms)
+ */
+ CONF_SG_BT_NFS_SAMPLE_INTERVAL,
+
+ /*
+ * Defines the load ratio of BT
+ *
+ * Range: 0 - 100 (%)
+ */
+ CONF_SG_BT_LOAD_RATIO,
+
+ /*
+ * Defines whether the SG will force WLAN host to enter/exit PSM
+ *
+ * Range: 1 - SG can force, 0 - host handles PSM
+ */
+ CONF_SG_AUTO_PS_MODE,
+
+ /*
+ * Compensation percentage of probe requests when scan initiated
+ * during BT voice/ACL link.
+ *
+ * Range: 0 - 255 (%)
+ */
+ CONF_SG_AUTO_SCAN_PROBE_REQ,
+
+ /*
+ * Compensation percentage of probe requests when active scan initiated
+ * during BT voice
+ *
+ * Range: 0 - 255 (%)
+ */
+ CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3,
+
+ /*
+ * Defines antenna configuration (single/dual antenna)
+ *
+ * Range: 0 - single antenna, 1 - dual antenna
+ */
+ CONF_SG_ANTENNA_CONFIGURATION,
+
+ /*
+	 * The threshold (percent) of max consecutive beacon misses before
+ * increasing priority of beacon reception.
+ *
+ * Range: 0 - 100 (%)
+ */
+ CONF_SG_BEACON_MISS_PERCENT,
+
+ /*
+ * The rate threshold below which receiving a data frame from the AP
+ * will increase the priority of the data frame above BT traffic.
+ *
+	 * Range: 0, 2, 5 (=5.5), 6, 9, 11, 12, 18, 24, 36, 48, 54
+ */
+ CONF_SG_RATE_ADAPT_THRESH,
+
+ /*
+ * Not used currently.
+ *
+ * Range: 0
+ */
+ CONF_SG_RATE_ADAPT_SNR,
+
+ /*
+ * Configure the min and max time BT gains the antenna
+ * in WLAN PSM / BT master basic rate
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR,
+ CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR,
+
+ /*
+	 * The time after which no new WLAN trigger frame is transmitted
+ * in WLAN PSM / BT master basic rate
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR,
+
+ /*
+ * Configure the min and max time BT gains the antenna
+ * in WLAN PSM / BT slave basic rate
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR,
+ CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR,
+
+ /*
+	 * The time after which no new WLAN trigger frame is transmitted
+ * in WLAN PSM / BT slave basic rate
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR,
+
+ /*
+ * Configure the min and max time BT gains the antenna
+ * in WLAN PSM / BT master EDR
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR,
+ CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR,
+
+ /*
+	 * The time after which no new WLAN trigger frame is transmitted
+ * in WLAN PSM / BT master EDR
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR,
+
+ /*
+ * Configure the min and max time BT gains the antenna
+ * in WLAN PSM / BT slave EDR
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR,
+ CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR,
+
+ /*
+	 * The time after which no new WLAN trigger frame is transmitted
+ * in WLAN PSM / BT slave EDR
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR,
+
+ /*
+ * RX guard time before the beginning of a new BT voice frame during
+ * which no new WLAN trigger frame is transmitted.
+ *
+ * Range: 0 - 100000 (us)
+ */
+ CONF_SG_RXT,
+
+ /*
+ * TX guard time before the beginning of a new BT voice frame during
+ * which no new WLAN frame is transmitted.
+ *
+ * Range: 0 - 100000 (us)
+ */
+ CONF_SG_TXT,
+
+ /*
+ * Enable adaptive RXT/TXT algorithm. If disabled, the host values
+ * will be utilized.
+ *
+ * Range: 0 - disable, 1 - enable
+ */
+ CONF_SG_ADAPTIVE_RXT_TXT,
+
+ /*
+ * The used WLAN legacy service period during active BT ACL link
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_PS_POLL_TIMEOUT,
+
+ /*
+ * The used WLAN UPSD service period during active BT ACL link
+ *
+ * Range: 0 - 255 (ms)
*/
- u32 per_threshold;
+ CONF_SG_UPSD_TIMEOUT,
/*
- * This value is an absolute time in micro-seconds to limit the
- * maximum scan duration compensation while in SG
+ * Configure the min and max time BT gains the antenna
+ * in WLAN Active / BT master EDR
+ *
+ * Range: 0 - 255 (ms)
*/
- u32 max_scan_compensation_time;
+ CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR,
+ CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR,
- /* Defines the PER threshold of the BT voice of which reaching this
- * value will trigger raising the priority of the BT voice until next
- * NFS sample interval time as defined in sample_interval.
+ /*
+	 * The maximum time for which WLAN can gain the antenna
+ * in WLAN Active / BT master EDR
*
- * Unit: msec
- * Range: 1-65000
+ * Range: 0 - 255 (ms)
*/
- u16 nfs_sample_interval;
+ CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR,
/*
- * Defines the load ratio for the BT.
- * The WLAN ratio is: 100 - load_ratio
+ * Configure the min and max time BT gains the antenna
+ * in WLAN Active / BT slave EDR
*
- * Unit: Percent
- * Range: 0-100
+ * Range: 0 - 255 (ms)
*/
- u8 load_ratio;
+ CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR,
+ CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR,
/*
- * true - Co-ex is allowed to enter/exit P.S automatically and
- * transparently to the host
+	 * The maximum time for which WLAN can gain the antenna
+ * in WLAN Active / BT slave EDR
*
- * false - Co-ex is disallowed to enter/exit P.S and will trigger an
- * event to the host to notify for the need to enter/exit P.S
- * due to BT change state
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR,
+
+ /*
+ * Configure the min and max time BT gains the antenna
+ * in WLAN Active / BT basic rate
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR,
+ CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR,
+
+ /*
+	 * The maximum time for which WLAN can gain the antenna
+ * in WLAN Active / BT basic rate
*
+ * Range: 0 - 255 (ms)
*/
- u8 auto_ps_mode;
+ CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR,
/*
- * This parameter defines the compensation percentage of num of probe
- * requests in case scan is initiated during BT voice/BT ACL
- * guaranteed link.
+ * Compensation percentage of WLAN passive scan window if initiated
+ * during BT voice
*
- * Unit: Percent
- * Range: 0-255 (0 - No compensation)
+ * Range: 0 - 1000 (%)
*/
- u8 probe_req_compensation;
+ CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3,
/*
- * This parameter defines the compensation percentage of scan window
- * size in case scan is initiated during BT voice/BT ACL Guaranteed
- * link.
+ * Compensation percentage of WLAN passive scan window if initiated
+ * during BT A2DP
*
- * Unit: Percent
- * Range: 0-255 (0 - No compensation)
+ * Range: 0 - 1000 (%)
*/
- u8 scan_window_compensation;
+ CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP,
/*
- * Defines the antenna configuration.
+ * Fixed time ensured for BT traffic to gain the antenna during WLAN
+ * passive scan.
*
- * Range: 0 - Single Antenna; 1 - Dual Antenna
+ * Range: 0 - 1000 ms
*/
- u8 antenna_config;
+ CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME,
/*
- * The percent out of the Max consecutive beacon miss roaming trigger
- * which is the threshold for raising the priority of beacon
- * reception.
+ * Fixed time ensured for WLAN traffic to gain the antenna during WLAN
+ * passive scan.
*
- * Range: 1-100
- * N = MaxConsecutiveBeaconMiss
- * P = coexMaxConsecutiveBeaconMissPrecent
- * Threshold = MIN( N-1, round(N * P / 100))
+ * Range: 0 - 1000 ms
*/
- u8 beacon_miss_threshold;
+ CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME,
/*
- * The RX rate threshold below which rate adaptation is assumed to be
- * occurring at the AP which will raise priority for ACTIVE_RX and RX
- * SP.
+	 * Number of consecutive BT voice frames not interrupted by WLAN
*
- * Range: HW_BIT_RATE_*
+ * Range: 0 - 100
*/
- u32 rate_adaptation_threshold;
+ CONF_SG_HV3_MAX_SERVED,
/*
- * The SNR above which the RX rate threshold indicating AP rate
- * adaptation is valid
+ * Protection time of the DHCP procedure.
*
- * Range: -128 - 127
+ * Range: 0 - 100000 (ms)
*/
- s8 rate_adaptation_snr;
+ CONF_SG_DHCP_TIME,
+
+ /*
+ * Compensation percentage of WLAN active scan window if initiated
+ * during BT A2DP
+ *
+ * Range: 0 - 1000 (%)
+ */
+ CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP,
+ CONF_SG_TEMP_PARAM_1,
+ CONF_SG_TEMP_PARAM_2,
+ CONF_SG_TEMP_PARAM_3,
+ CONF_SG_TEMP_PARAM_4,
+ CONF_SG_TEMP_PARAM_5,
+ CONF_SG_PARAMS_MAX,
+ CONF_SG_PARAMS_ALL = 0xff
+};
+
+struct conf_sg_settings {
+ u32 params[CONF_SG_PARAMS_MAX];
+ u8 state;
};
enum conf_rx_queue_type {
@@ -440,6 +674,19 @@ struct conf_tx_settings {
*/
u16 tx_compl_threshold;
+ /*
+ * The rate used for control messages and scanning on the 2.4GHz band
+ *
+ * Range: CONF_HW_BIT_RATE_* bit mask
+ */
+ u32 basic_rate;
+
+ /*
+ * The rate used for control messages and scanning on the 5GHz band
+ *
+ * Range: CONF_HW_BIT_RATE_* bit mask
+ */
+ u32 basic_rate_5;
};
enum {
@@ -509,65 +756,6 @@ enum {
CONF_TRIG_EVENT_DIR_BIDIR
};
-
-struct conf_sig_trigger {
- /*
- * The RSSI / SNR threshold value.
- *
- * FIXME: what is the range?
- */
- s16 threshold;
-
- /*
- * Minimum delay between two trigger events for this trigger in ms.
- *
- * Range: 0 - 60000
- */
- u16 pacing;
-
- /*
- * The measurement data source for this trigger.
- *
- * Range: CONF_TRIG_METRIC_*
- */
- u8 metric;
-
- /*
- * The trigger type of this trigger.
- *
- * Range: CONF_TRIG_EVENT_TYPE_*
- */
- u8 type;
-
- /*
- * The direction of the trigger.
- *
- * Range: CONF_TRIG_EVENT_DIR_*
- */
- u8 direction;
-
- /*
- * Hysteresis range of the trigger around the threshold (in dB)
- *
- * Range: u8
- */
- u8 hysteresis;
-
- /*
- * Index of the trigger rule.
- *
- * Range: 0 - CONF_MAX_RSSI_SNR_TRIGGERS-1
- */
- u8 index;
-
- /*
- * Enable / disable this rule (to use for clearing rules.)
- *
- * Range: 1 - Enabled, 2 - Not enabled
- */
- u8 enable;
-};
-
struct conf_sig_weights {
/*
@@ -686,12 +874,6 @@ struct conf_conn_settings {
u8 ps_poll_threshold;
/*
- * Configuration of signal (rssi/snr) triggers.
- */
- u8 sig_trigger_count;
- struct conf_sig_trigger sig_trigger[CONF_MAX_RSSI_SNR_TRIGGERS];
-
- /*
* Configuration of signal average weights.
*/
struct conf_sig_weights sig_weights;
@@ -721,6 +903,22 @@ struct conf_conn_settings {
* Range 0 - 255
*/
u8 psm_entry_retries;
+
+ /*
+ *
+ * frame in ms.
+ *
+ * Range: 1000 - 3600000
+ */
+ u32 keep_alive_interval;
+
+ /*
+ * Maximum listen interval supported by the driver in units of beacons.
+ *
+	 * Range: u8
+ */
+ u8 max_listen_interval;
};
enum {
@@ -782,6 +980,43 @@ struct conf_pm_config_settings {
bool host_fast_wakeup_support;
};
+struct conf_roam_trigger_settings {
+ /*
+ * The minimum interval between two trigger events.
+ *
+ * Range: 0 - 60000 ms
+ */
+ u16 trigger_pacing;
+
+ /*
+ * The weight for rssi/beacon average calculation
+ *
+ * Range: 0 - 255
+ */
+ u8 avg_weight_rssi_beacon;
+
+ /*
+ * The weight for rssi/data frame average calculation
+ *
+ * Range: 0 - 255
+ */
+ u8 avg_weight_rssi_data;
+
+ /*
+ * The weight for snr/beacon average calculation
+ *
+ * Range: 0 - 255
+ */
+ u8 avg_weight_snr_beacon;
+
+ /*
+ * The weight for snr/data frame average calculation
+ *
+ * Range: 0 - 255
+ */
+ u8 avg_weight_snr_data;
+};
+
struct conf_drv_settings {
struct conf_sg_settings sg;
struct conf_rx_settings rx;
@@ -790,6 +1025,7 @@ struct conf_drv_settings {
struct conf_init_settings init;
struct conf_itrim_settings itrim;
struct conf_pm_config_settings pm_config;
+ struct conf_roam_trigger_settings roam_trigger;
};
#endif
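
The conf_sg_settings rework above replaces named fields with an indexed table. A minimal sketch of how such a table could be pushed to the firmware through the new acx_bt_wlan_coex_param layout follows; the function name is hypothetical, ACX_SG_CFG is assumed to be the existing soft-gemini configuration ID, and the real wl1271_acx_sg_cfg() body is not part of this excerpt.

static int example_sg_cfg(struct wl1271 *wl)
{
	struct acx_bt_wlan_coex_param *param;
	struct conf_sg_settings *c = &wl->conf.sg;
	int i, ret;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	/* copy the whole host-side table and write all entries at once */
	for (i = 0; i < CONF_SG_PARAMS_MAX; i++)
		param->params[i] = cpu_to_le32(c->params[i]);
	param->param_idx = CONF_SG_PARAMS_ALL;

	ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
	kfree(param);
	return ret;
}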
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
index 3f7ff8d..c239ef4 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
@@ -29,6 +29,7 @@
#include "wl1271.h"
#include "wl1271_acx.h"
#include "wl1271_ps.h"
+#include "wl1271_io.h"
/* ms */
#define WL1271_DEBUGFS_STATS_LIFETIME 1000
@@ -277,13 +278,10 @@ static ssize_t gpio_power_write(struct file *file,
goto out;
}
- if (value) {
- wl->set_power(true);
- set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
- } else {
- wl->set_power(false);
- clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
- }
+ if (value)
+ wl1271_power_on(wl);
+ else
+ wl1271_power_off(wl);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index 7468ef1..cf37aa6 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -23,7 +23,6 @@
#include "wl1271.h"
#include "wl1271_reg.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#include "wl1271_event.h"
#include "wl1271_ps.h"
@@ -32,34 +31,24 @@
static int wl1271_event_scan_complete(struct wl1271 *wl,
struct event_mailbox *mbox)
{
- int size = sizeof(struct wl12xx_probe_req_template);
wl1271_debug(DEBUG_EVENT, "status: 0x%x",
mbox->scheduled_scan_status);
if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
- wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
- NULL, size);
/* 2.4 GHz band scanned, scan 5 GHz band, pretend
* to the wl1271_cmd_scan function that we are not
* scanning as it checks that.
*/
clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
+ /* FIXME: ie missing! */
wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
+ NULL, 0,
wl->scan.active,
wl->scan.high_prio,
WL1271_SCAN_BAND_5_GHZ,
wl->scan.probe_requests);
} else {
- if (wl->scan.state == WL1271_SCAN_BAND_2_4_GHZ)
- wl1271_cmd_template_set(wl,
- CMD_TEMPL_CFG_PROBE_REQ_2_4,
- NULL, size);
- else
- wl1271_cmd_template_set(wl,
- CMD_TEMPL_CFG_PROBE_REQ_5,
- NULL, size);
-
mutex_unlock(&wl->mutex);
ieee80211_scan_completed(wl->hw, false);
mutex_lock(&wl->mutex);
@@ -92,16 +81,9 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
true);
} else {
- wl1271_error("PSM entry failed, giving up.\n");
- /* FIXME: this may need to be reconsidered. for now it
- is not possible to indicate to the mac80211
- afterwards that PSM entry failed. To maximize
- functionality (receiving data and remaining
- associated) make sure that we are in sync with the
- AP in regard of PSM mode. */
- ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- false);
+ wl1271_info("No ack to nullfunc from AP.");
wl->psm_entry_retry = 0;
+ *beacon_loss = true;
}
break;
case EVENT_ENTER_POWER_SAVE_SUCCESS:
@@ -143,6 +125,24 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
return ret;
}
+static void wl1271_event_rssi_trigger(struct wl1271 *wl,
+ struct event_mailbox *mbox)
+{
+ enum nl80211_cqm_rssi_threshold_event event;
+ s8 metric = mbox->rssi_snr_trigger_metric[0];
+
+ wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
+
+ if (metric <= wl->rssi_thold)
+ event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+ else
+ event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+
+ if (event != wl->last_rssi_event)
+ ieee80211_cqm_rssi_notify(wl->vif, event, GFP_KERNEL);
+ wl->last_rssi_event = event;
+}
+
static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
{
wl1271_debug(DEBUG_EVENT, "MBOX DUMP:");
@@ -172,10 +172,13 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* The BSS_LOSE_EVENT_ID is only needed while psm (and hence beacon
* filtering) is enabled. Without PSM, the stack will receive all
* beacons and can detect beacon loss by itself.
+ *
+	 * As there is a possibility that the driver disables PSM before
+	 * receiving BSS_LOSE_EVENT, beacon loss has to be reported to the
+	 * stack.
+ *
*/
- if (vector & BSS_LOSE_EVENT_ID &&
- test_bit(WL1271_FLAG_PSM, &wl->flags)) {
- wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
+ if (vector & BSS_LOSE_EVENT_ID) {
+ wl1271_info("Beacon loss detected.");
/* indicate to the stack, that beacons have been lost */
beacon_loss = true;
@@ -188,17 +191,15 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
return ret;
}
- if (wl->vif && beacon_loss) {
- /* Obviously, it's dangerous to release the mutex while
- we are holding many of the variables in the wl struct.
- That's why it's done last in the function, and care must
- be taken that nothing more is done after this function
- returns. */
- mutex_unlock(&wl->mutex);
- ieee80211_beacon_loss(wl->vif);
- mutex_lock(&wl->mutex);
+ if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
+ wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
+ if (wl->vif)
+ wl1271_event_rssi_trigger(wl, mbox);
}
+ if (wl->vif && beacon_loss)
+ ieee80211_connection_loss(wl->vif);
+
return 0;
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 278f920..5837100 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -38,6 +38,14 @@
*/
enum {
+ RSSI_SNR_TRIGGER_0_EVENT_ID = BIT(0),
+ RSSI_SNR_TRIGGER_1_EVENT_ID = BIT(1),
+ RSSI_SNR_TRIGGER_2_EVENT_ID = BIT(2),
+ RSSI_SNR_TRIGGER_3_EVENT_ID = BIT(3),
+ RSSI_SNR_TRIGGER_4_EVENT_ID = BIT(4),
+ RSSI_SNR_TRIGGER_5_EVENT_ID = BIT(5),
+ RSSI_SNR_TRIGGER_6_EVENT_ID = BIT(6),
+ RSSI_SNR_TRIGGER_7_EVENT_ID = BIT(7),
MEASUREMENT_START_EVENT_ID = BIT(8),
MEASUREMENT_COMPLETE_EVENT_ID = BIT(9),
SCAN_COMPLETE_EVENT_ID = BIT(10),
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index d189e8f..4447af1 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -52,50 +52,65 @@ static int wl1271_init_hwenc_config(struct wl1271 *wl)
int wl1271_init_templates_config(struct wl1271 *wl)
{
- int ret;
+ int ret, i;
/* send empty templates for fw memory reservation */
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL,
- sizeof(struct wl12xx_probe_req_template));
+ sizeof(struct wl12xx_probe_req_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
if (wl1271_11a_enabled()) {
+ size_t size = sizeof(struct wl12xx_probe_req_template);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
- NULL,
- sizeof(struct wl12xx_probe_req_template));
+ NULL, size, 0,
+ WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
}
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
- sizeof(struct wl12xx_null_data_template));
+ sizeof(struct wl12xx_null_data_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, NULL,
- sizeof(struct wl12xx_ps_poll_template));
+ sizeof(struct wl12xx_ps_poll_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
sizeof
- (struct wl12xx_qos_null_data_template));
+ (struct wl12xx_qos_null_data_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, NULL,
sizeof
- (struct wl12xx_probe_resp_template));
+ (struct wl12xx_probe_resp_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, NULL,
sizeof
- (struct wl12xx_beacon_template));
+ (struct wl12xx_beacon_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
+ for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
+ WL1271_CMD_TEMPL_MAX_SIZE, i,
+ WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -161,11 +176,11 @@ int wl1271_init_pta(struct wl1271 *wl)
{
int ret;
- ret = wl1271_acx_sg_enable(wl);
+ ret = wl1271_acx_sg_cfg(wl);
if (ret < 0)
return ret;
- ret = wl1271_acx_sg_cfg(wl);
+ ret = wl1271_acx_sg_enable(wl, wl->sg_enabled);
if (ret < 0)
return ret;
@@ -237,7 +252,7 @@ int wl1271_hw_init(struct wl1271 *wl)
goto out_free_memmap;
/* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl);
+ ret = wl1271_acx_conn_monit_params(wl, false);
if (ret < 0)
goto out_free_memmap;
@@ -325,6 +340,24 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
+ /* disable all keep-alive templates */
+ for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
+ ret = wl1271_acx_keep_alive_config(wl, i,
+ ACX_KEEP_ALIVE_TPL_INVALID);
+ if (ret < 0)
+ goto out_free_memmap;
+ }
+
+ /* disable the keep-alive feature */
+ ret = wl1271_acx_keep_alive_mode(wl, false);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Configure rssi/snr averaging weights */
+ ret = wl1271_acx_rssi_snr_avg_weights(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
return 0;
out_free_memmap:
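
The hw_init changes above only park the keep-alive machinery. As a rough illustration of the association-time counterpart (the real call site is in wl1271_main.c, outside this hunk, and the function name here is made up), the sequence would be: build the null-func KLV template, validate slot 0, then switch the feature on. All identifiers used below are declared elsewhere in this patch.

static int example_enable_keep_alive(struct wl1271 *wl)
{
	int ret;

	ret = wl1271_cmd_build_klv_null_data(wl);
	if (ret < 0)
		return ret;

	ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
					   ACX_KEEP_ALIVE_TPL_VALID);
	if (ret < 0)
		return ret;

	return wl1271_acx_keep_alive_mode(wl, true);
}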
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.c b/drivers/net/wireless/wl12xx/wl1271_io.c
index 5cd94d5..c8759ac 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.c
+++ b/drivers/net/wireless/wl12xx/wl1271_io.c
@@ -28,30 +28,29 @@
#include "wl1271.h"
#include "wl12xx_80211.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
-static int wl1271_translate_addr(struct wl1271 *wl, int addr)
+#define OCP_CMD_LOOP 32
+
+#define OCP_CMD_WRITE 0x1
+#define OCP_CMD_READ 0x2
+
+#define OCP_READY_MASK BIT(18)
+#define OCP_STATUS_MASK (BIT(16) | BIT(17))
+
+#define OCP_STATUS_NO_RESP 0x00000
+#define OCP_STATUS_OK 0x10000
+#define OCP_STATUS_REQ_FAILED 0x20000
+#define OCP_STATUS_RESP_ERROR 0x30000
+
+void wl1271_disable_interrupts(struct wl1271 *wl)
{
- /*
- * To translate, first check to which window of addresses the
- * particular address belongs. Then subtract the starting address
- * of that window from the address. Then, add offset of the
- * translated region.
- *
- * The translated regions occur next to each other in physical device
- * memory, so just add the sizes of the preceeding address regions to
- * get the offset to the new region.
- *
- * Currently, only the two first regions are addressed, and the
- * assumption is that all addresses will fall into either of those
- * two.
- */
- if ((addr >= wl->part.reg.start) &&
- (addr < wl->part.reg.start + wl->part.reg.size))
- return addr - wl->part.reg.start + wl->part.mem.size;
- else
- return addr - wl->part.mem.start;
+ wl->if_ops->disable_irq(wl);
+}
+
+void wl1271_enable_interrupts(struct wl1271 *wl)
+{
+ wl->if_ops->enable_irq(wl);
}
/* Set the SPI partitions to access the chip addresses
@@ -117,54 +116,12 @@ int wl1271_set_partition(struct wl1271 *wl,
void wl1271_io_reset(struct wl1271 *wl)
{
- wl1271_spi_reset(wl);
+ wl->if_ops->reset(wl);
}
void wl1271_io_init(struct wl1271 *wl)
{
- wl1271_spi_init(wl);
-}
-
-void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
-{
- wl1271_spi_raw_write(wl, addr, buf, len, fixed);
-}
-
-void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
-{
- wl1271_spi_raw_read(wl, addr, buf, len, fixed);
-}
-
-void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed)
-{
- int physical;
-
- physical = wl1271_translate_addr(wl, addr);
-
- wl1271_spi_raw_read(wl, physical, buf, len, fixed);
-}
-
-void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed)
-{
- int physical;
-
- physical = wl1271_translate_addr(wl, addr);
-
- wl1271_spi_raw_write(wl, physical, buf, len, fixed);
-}
-
-u32 wl1271_read32(struct wl1271 *wl, int addr)
-{
- return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
-}
-
-void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
-{
- wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
+ wl->if_ops->init(wl);
}
void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.h b/drivers/net/wireless/wl12xx/wl1271_io.h
index fa9a0b3..bc806c7 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.h
+++ b/drivers/net/wireless/wl12xx/wl1271_io.h
@@ -25,44 +25,145 @@
#ifndef __WL1271_IO_H__
#define __WL1271_IO_H__
+#include "wl1271_reg.h"
+
+#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
+
+#define HW_PARTITION_REGISTERS_ADDR 0x1FFC0
+#define HW_PART0_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR)
+#define HW_PART0_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 4)
+#define HW_PART1_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 8)
+#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
+#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
+#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
+#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
+
+#define HW_ACCESS_REGISTER_SIZE 4
+
+#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
+
struct wl1271;
+void wl1271_disable_interrupts(struct wl1271 *wl);
+void wl1271_enable_interrupts(struct wl1271 *wl);
+
void wl1271_io_reset(struct wl1271 *wl);
void wl1271_io_init(struct wl1271 *wl);
-/* Raw target IO, address is not translated */
-void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed);
-void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed);
+static inline struct device *wl1271_wl_to_dev(struct wl1271 *wl)
+{
+ return wl->if_ops->dev(wl);
+}
-/* Translated target IO */
-void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed);
-void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed);
-u32 wl1271_read32(struct wl1271 *wl, int addr);
-void wl1271_write32(struct wl1271 *wl, int addr, u32 val);
-/* Top Register IO */
-void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
-u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
+/* Raw target IO, address is not translated */
+static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ wl->if_ops->write(wl, addr, buf, len, fixed);
+}
-int wl1271_set_partition(struct wl1271 *wl,
- struct wl1271_partition_set *p);
+static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ wl->if_ops->read(wl, addr, buf, len, fixed);
+}
static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
{
wl1271_raw_read(wl, addr, &wl->buffer_32,
sizeof(wl->buffer_32), false);
- return wl->buffer_32;
+ return le32_to_cpu(wl->buffer_32);
}
static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
{
- wl->buffer_32 = val;
+ wl->buffer_32 = cpu_to_le32(val);
wl1271_raw_write(wl, addr, &wl->buffer_32,
sizeof(wl->buffer_32), false);
}
+
+/* Translated target IO */
+static inline int wl1271_translate_addr(struct wl1271 *wl, int addr)
+{
+ /*
+ * To translate, first check to which window of addresses the
+ * particular address belongs. Then subtract the starting address
+ * of that window from the address. Then, add offset of the
+ * translated region.
+ *
+ * The translated regions occur next to each other in physical device
+	 * memory, so just add the sizes of the preceding address regions to
+	 * get the offset to the new region.
+	 *
+	 * Currently, only the first two regions are addressed, and the
+ * assumption is that all addresses will fall into either of those
+ * two.
+ */
+ if ((addr >= wl->part.reg.start) &&
+ (addr < wl->part.reg.start + wl->part.reg.size))
+ return addr - wl->part.reg.start + wl->part.mem.size;
+ else
+ return addr - wl->part.mem.start;
+}
+
+static inline void wl1271_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ int physical;
+
+ physical = wl1271_translate_addr(wl, addr);
+
+ wl1271_raw_read(wl, physical, buf, len, fixed);
+}
+
+static inline void wl1271_write(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ int physical;
+
+ physical = wl1271_translate_addr(wl, addr);
+
+ wl1271_raw_write(wl, physical, buf, len, fixed);
+}
+
+static inline u32 wl1271_read32(struct wl1271 *wl, int addr)
+{
+ return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
+}
+
+static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
+{
+ wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
+}
+
+static inline void wl1271_power_off(struct wl1271 *wl)
+{
+ wl->if_ops->power(wl, false);
+ clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+}
+
+static inline void wl1271_power_on(struct wl1271 *wl)
+{
+ wl->if_ops->power(wl, true);
+ set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+}
+
+
+/* Top Register IO */
+void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
+u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
+
+int wl1271_set_partition(struct wl1271 *wl,
+ struct wl1271_partition_set *p);
+
+/* Functions from wl1271_main.c */
+
+int wl1271_register_hw(struct wl1271 *wl);
+void wl1271_unregister_hw(struct wl1271 *wl);
+int wl1271_init_ieee80211(struct wl1271 *wl);
+struct ieee80211_hw *wl1271_alloc_hw(void);
+int wl1271_free_hw(struct wl1271 *wl);
+
#endif
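
All of the inline helpers above dereference wl->if_ops, which this hunk does not declare. As a reading aid, the operations they rely on are collected below, reconstructed only from the calls visible in this file; the authoritative struct wl1271_if_operations lives in wl1271.h and may differ in ordering or contain more members.

struct wl1271_if_operations_sketch {
	void (*read)(struct wl1271 *wl, int addr, void *buf,
		     size_t len, bool fixed);
	void (*write)(struct wl1271 *wl, int addr, void *buf,
		      size_t len, bool fixed);
	void (*reset)(struct wl1271 *wl);
	void (*init)(struct wl1271 *wl);
	void (*power)(struct wl1271 *wl, bool enable);
	struct device *(*dev)(struct wl1271 *wl);
	void (*enable_irq)(struct wl1271 *wl);
	void (*disable_irq)(struct wl1271 *wl);
};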
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index 65a1aeb..b7d9137 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -22,23 +22,19 @@
*/
#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/delay.h>
-#include <linux/irq.h>
#include <linux/spi/spi.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
-#include <linux/spi/wl12xx.h>
#include <linux/inetdevice.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "wl1271.h"
#include "wl12xx_80211.h"
#include "wl1271_reg.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#include "wl1271_event.h"
#include "wl1271_tx.h"
@@ -54,17 +50,57 @@
static struct conf_drv_settings default_conf = {
.sg = {
- .per_threshold = 7500,
- .max_scan_compensation_time = 120000,
- .nfs_sample_interval = 400,
- .load_ratio = 50,
- .auto_ps_mode = 0,
- .probe_req_compensation = 170,
- .scan_window_compensation = 50,
- .antenna_config = 0,
- .beacon_miss_threshold = 60,
- .rate_adaptation_threshold = CONF_HW_BIT_RATE_12MBPS,
- .rate_adaptation_snr = 0
+ .params = {
+ [CONF_SG_BT_PER_THRESHOLD] = 7500,
+ [CONF_SG_HV3_MAX_OVERRIDE] = 0,
+ [CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400,
+ [CONF_SG_BT_LOAD_RATIO] = 50,
+ [CONF_SG_AUTO_PS_MODE] = 0,
+ [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
+ [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
+ [CONF_SG_ANTENNA_CONFIGURATION] = 0,
+ [CONF_SG_BEACON_MISS_PERCENT] = 60,
+ [CONF_SG_RATE_ADAPT_THRESH] = 12,
+ [CONF_SG_RATE_ADAPT_SNR] = 0,
+ [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR] = 10,
+ [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR] = 30,
+ [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR] = 8,
+ [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR] = 20,
+ [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR] = 50,
+ /* Note: with UPSD, this should be 4 */
+ [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR] = 8,
+ [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR] = 7,
+ [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR] = 25,
+ [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR] = 20,
+ /* Note: with UPSD, this should be 15 */
+ [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR] = 8,
+ /* Note: with UPSD, this should be 50 */
+ [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR] = 40,
+ /* Note: with UPSD, this should be 10 */
+ [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR] = 20,
+ [CONF_SG_RXT] = 1200,
+ [CONF_SG_TXT] = 1000,
+ [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
+ [CONF_SG_PS_POLL_TIMEOUT] = 10,
+ [CONF_SG_UPSD_TIMEOUT] = 10,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR] = 7,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR] = 15,
+ [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR] = 15,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR] = 8,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR] = 20,
+ [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR] = 15,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR] = 20,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR] = 50,
+ [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR] = 10,
+ [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
+ [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP] = 800,
+ [CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME] = 75,
+ [CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME] = 15,
+ [CONF_SG_HV3_MAX_SERVED] = 6,
+ [CONF_SG_DHCP_TIME] = 5000,
+ [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
+ },
+ .state = CONF_SG_PROTECTIVE,
},
.rx = {
.rx_msdu_life_time = 512000,
@@ -81,8 +117,7 @@ static struct conf_drv_settings default_conf = {
.tx = {
.tx_energy_detection = 0,
.rc_conf = {
- .enabled_rates = CONF_HW_BIT_RATE_1MBPS |
- CONF_HW_BIT_RATE_2MBPS,
+ .enabled_rates = 0,
.short_retry_limit = 10,
.long_retry_limit = 10,
.aflags = 0
@@ -179,11 +214,13 @@ static struct conf_drv_settings default_conf = {
},
.frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
.tx_compl_timeout = 700,
- .tx_compl_threshold = 4
+ .tx_compl_threshold = 4,
+ .basic_rate = CONF_HW_BIT_RATE_1MBPS,
+ .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
- .listen_interval = 0,
+ .listen_interval = 1,
.bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
.bcn_filt_ie_count = 1,
.bcn_filt_ie = {
@@ -198,38 +235,11 @@ static struct conf_drv_settings default_conf = {
.broadcast_timeout = 20000,
.rx_broadcast_in_ps = 1,
.ps_poll_threshold = 20,
- .sig_trigger_count = 2,
- .sig_trigger = {
- [0] = {
- .threshold = -75,
- .pacing = 500,
- .metric = CONF_TRIG_METRIC_RSSI_BEACON,
- .type = CONF_TRIG_EVENT_TYPE_EDGE,
- .direction = CONF_TRIG_EVENT_DIR_LOW,
- .hysteresis = 2,
- .index = 0,
- .enable = 1
- },
- [1] = {
- .threshold = -75,
- .pacing = 500,
- .metric = CONF_TRIG_METRIC_RSSI_BEACON,
- .type = CONF_TRIG_EVENT_TYPE_EDGE,
- .direction = CONF_TRIG_EVENT_DIR_HIGH,
- .hysteresis = 2,
- .index = 1,
- .enable = 1
- }
- },
- .sig_weights = {
- .rssi_bcn_avg_weight = 10,
- .rssi_pkt_avg_weight = 10,
- .snr_bcn_avg_weight = 10,
- .snr_pkt_avg_weight = 10
- },
.bet_enable = CONF_BET_MODE_ENABLE,
.bet_max_consecutive = 10,
- .psm_entry_retries = 3
+ .psm_entry_retries = 3,
+ .keep_alive_interval = 55000,
+ .max_listen_interval = 20,
},
.init = {
.radioparam = {
@@ -243,9 +253,32 @@ static struct conf_drv_settings default_conf = {
.pm_config = {
.host_clk_settling_time = 5000,
.host_fast_wakeup_support = false
+ },
+ .roam_trigger = {
+ /* FIXME: due to firmware bug, must use value 1 for now */
+ .trigger_pacing = 1,
+ .avg_weight_rssi_beacon = 20,
+ .avg_weight_rssi_data = 10,
+ .avg_weight_snr_beacon = 20,
+ .avg_weight_snr_data = 10
}
};
+static void wl1271_device_release(struct device *dev)
+{
+
+}
+
+static struct platform_device wl1271_device = {
+ .name = "wl1271",
+ .id = -1,
+
+ /* the device model insists on having a release function */
+ .dev = {
+ .release = wl1271_device_release,
+ },
+};
+
static LIST_HEAD(wl_list);
static void wl1271_conf_init(struct wl1271 *wl)
@@ -298,7 +331,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
goto out_free_memmap;
/* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl);
+ ret = wl1271_acx_conn_monit_params(wl, false);
if (ret < 0)
goto out_free_memmap;
@@ -365,30 +398,14 @@ static int wl1271_plt_init(struct wl1271 *wl)
return ret;
}
-static void wl1271_disable_interrupts(struct wl1271 *wl)
-{
- disable_irq(wl->irq);
-}
-
-static void wl1271_power_off(struct wl1271 *wl)
-{
- wl->set_power(false);
- clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
-}
-
-static void wl1271_power_on(struct wl1271 *wl)
-{
- wl->set_power(true);
- set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
-}
-
static void wl1271_fw_status(struct wl1271 *wl,
struct wl1271_fw_status *status)
{
+ struct timespec ts;
u32 total = 0;
int i;
- wl1271_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
+ wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
@@ -413,14 +430,19 @@ static void wl1271_fw_status(struct wl1271 *wl,
ieee80211_queue_work(wl->hw, &wl->tx_work);
/* update the host-chipset time offset */
- wl->time_offset = jiffies_to_usecs(jiffies) -
- le32_to_cpu(status->fw_localtime);
+ getnstimeofday(&ts);
+ wl->time_offset = (timespec_to_ns(&ts) >> 10) -
+ (s64)le32_to_cpu(status->fw_localtime);
}
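For context, a minimal sketch (not part of the patch) of the approximation used above: shifting nanoseconds right by 10 divides by 1024 instead of 1000, close enough to microseconds while avoiding a 64-bit division in this path.

/* sketch of the same approximation */
struct timespec ts;
s64 host_time;

getnstimeofday(&ts);
host_time = timespec_to_ns(&ts) >> 10;  /* ns / 1024, roughly microseconds */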
+#define WL1271_IRQ_MAX_LOOPS 10
+
static void wl1271_irq_work(struct work_struct *work)
{
int ret;
u32 intr;
+ int loopcount = WL1271_IRQ_MAX_LOOPS;
+ unsigned long flags;
struct wl1271 *wl =
container_of(work, struct wl1271, irq_work);
@@ -428,91 +450,78 @@ static void wl1271_irq_work(struct work_struct *work)
wl1271_debug(DEBUG_IRQ, "IRQ work");
- if (wl->state == WL1271_STATE_OFF)
+ if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
ret = wl1271_ps_elp_wakeup(wl, true);
if (ret < 0)
goto out;
- wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
-
- wl1271_fw_status(wl, wl->fw_status);
- intr = le32_to_cpu(wl->fw_status->intr);
- if (!intr) {
- wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
- goto out_sleep;
- }
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) {
+ clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ loopcount--;
+
+ wl1271_fw_status(wl, wl->fw_status);
+ intr = le32_to_cpu(wl->fw_status->intr);
+ if (!intr) {
+ wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ continue;
+ }
- intr &= WL1271_INTR_MASK;
+ intr &= WL1271_INTR_MASK;
- if (intr & WL1271_ACX_INTR_EVENT_A) {
- wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
- wl1271_event_handle(wl, 0);
- }
+ if (intr & WL1271_ACX_INTR_DATA) {
+ wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
- if (intr & WL1271_ACX_INTR_EVENT_B) {
- wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
- wl1271_event_handle(wl, 1);
- }
+ /* check for tx results */
+ if (wl->fw_status->tx_results_counter !=
+ (wl->tx_results_count & 0xff))
+ wl1271_tx_complete(wl);
- if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
- wl1271_debug(DEBUG_IRQ,
- "WL1271_ACX_INTR_INIT_COMPLETE");
+ wl1271_rx(wl, wl->fw_status);
+ }
- if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
- wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
+ if (intr & WL1271_ACX_INTR_EVENT_A) {
+ wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
+ wl1271_event_handle(wl, 0);
+ }
- if (intr & WL1271_ACX_INTR_DATA) {
- u8 tx_res_cnt = wl->fw_status->tx_results_counter -
- wl->tx_results_count;
+ if (intr & WL1271_ACX_INTR_EVENT_B) {
+ wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
+ wl1271_event_handle(wl, 1);
+ }
- wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
+ if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
+ wl1271_debug(DEBUG_IRQ,
+ "WL1271_ACX_INTR_INIT_COMPLETE");
- /* check for tx results */
- if (tx_res_cnt)
- wl1271_tx_complete(wl, tx_res_cnt);
+ if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
+ wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
- wl1271_rx(wl, wl->fw_status);
+ spin_lock_irqsave(&wl->wl_lock, flags);
}
-out_sleep:
- wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
- WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
+ if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
+ ieee80211_queue_work(wl->hw, &wl->irq_work);
+ else
+ clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
wl1271_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
}
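As a reading aid (editorial, not part of the patch), the flag handshake between this work function and the hard IRQ handler added later in wl1271_sdio.c can be sketched as:

/*
 * hard IRQ (under wl_lock):           irq work above (under wl_lock):
 *   if IRQ_RUNNING was not set,         while IRQ_PENDING and loops left:
 *     set it and queue irq_work           clear IRQ_PENDING, drop the lock,
 *   set IRQ_PENDING                       service fw status, retake the lock
 *                                       if IRQ_PENDING was set again,
 *                                         requeue the work
 *                                       else clear IRQ_RUNNING
 *
 * Both sides update the flags under wl->wl_lock, so an interrupt firing
 * while the loop drains either extends the loop or requeues the work;
 * no interrupt is lost.
 */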
-static irqreturn_t wl1271_irq(int irq, void *cookie)
-{
- struct wl1271 *wl;
- unsigned long flags;
-
- wl1271_debug(DEBUG_IRQ, "IRQ");
-
- wl = cookie;
-
- /* complete the ELP completion */
- spin_lock_irqsave(&wl->wl_lock, flags);
- if (wl->elp_compl) {
- complete(wl->elp_compl);
- wl->elp_compl = NULL;
- }
-
- ieee80211_queue_work(wl->hw, &wl->irq_work);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
- return IRQ_HANDLED;
-}
-
static int wl1271_fetch_firmware(struct wl1271 *wl)
{
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, WL1271_FW_NAME, &wl->spi->dev);
+ ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl));
if (ret < 0) {
wl1271_error("could not get firmware: %d", ret);
@@ -545,46 +554,12 @@ out:
return ret;
}
-static int wl1271_update_mac_addr(struct wl1271 *wl)
-{
- int ret = 0;
- u8 *nvs_ptr = (u8 *)wl->nvs->nvs;
-
- /* get mac address from the NVS */
- wl->mac_addr[0] = nvs_ptr[11];
- wl->mac_addr[1] = nvs_ptr[10];
- wl->mac_addr[2] = nvs_ptr[6];
- wl->mac_addr[3] = nvs_ptr[5];
- wl->mac_addr[4] = nvs_ptr[4];
- wl->mac_addr[5] = nvs_ptr[3];
-
- /* FIXME: if it is a zero-address, we should bail out. Now, instead,
- we randomize an address */
- if (is_zero_ether_addr(wl->mac_addr)) {
- static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
- memcpy(wl->mac_addr, nokia_oui, 3);
- get_random_bytes(wl->mac_addr + 3, 3);
-
- /* update this address to the NVS */
- nvs_ptr[11] = wl->mac_addr[0];
- nvs_ptr[10] = wl->mac_addr[1];
- nvs_ptr[6] = wl->mac_addr[2];
- nvs_ptr[5] = wl->mac_addr[3];
- nvs_ptr[4] = wl->mac_addr[4];
- nvs_ptr[3] = wl->mac_addr[5];
- }
-
- SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
-
- return ret;
-}
-
static int wl1271_fetch_nvs(struct wl1271 *wl)
{
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, WL1271_NVS_NAME, &wl->spi->dev);
+ ret = request_firmware(&fw, WL1271_NVS_NAME, wl1271_wl_to_dev(wl));
if (ret < 0) {
wl1271_error("could not get nvs file: %d", ret);
@@ -608,8 +583,6 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
memcpy(wl->nvs, fw->data, sizeof(struct wl1271_nvs_file));
- ret = wl1271_update_mac_addr(wl);
-
out:
release_firmware(fw);
@@ -826,15 +799,13 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* The workqueue is slow to process the tx_queue and we need to stop
* the queue here, otherwise the queue will get too long.
*/
- if (skb_queue_len(&wl->tx_queue) >= WL1271_TX_QUEUE_MAX_LENGTH) {
- ieee80211_stop_queues(wl->hw);
+ if (skb_queue_len(&wl->tx_queue) >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
+ wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
- /*
- * FIXME: this is racy, the variable is not properly
- * protected. Maybe fix this by removing the stupid
- * variable altogether and checking the real queue state?
- */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ ieee80211_stop_queues(wl->hw);
set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
}
return NETDEV_TX_OK;
@@ -882,7 +853,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
if (wl == wl_temp)
break;
}
- if (wl == NULL)
+ if (wl != wl_temp)
return NOTIFY_DONE;
/* Get the interface IP address for the device. "ifa" will become
@@ -929,13 +900,60 @@ static struct notifier_block wl1271_dev_notifier = {
static int wl1271_op_start(struct ieee80211_hw *hw)
{
+ wl1271_debug(DEBUG_MAC80211, "mac80211 start");
+
+ /*
+ * We have to delay the booting of the hardware because
+ * we need to know the local MAC address before downloading and
+ * initializing the firmware. The MAC address cannot be changed
+ * after boot, and without the proper MAC address, the firmware
+ * will not function properly.
+ *
+ * The MAC address is first known when the corresponding interface
+ * is added. That is where we will initialize the hardware.
+ */
+
+ return 0;
+}
+
+static void wl1271_op_stop(struct ieee80211_hw *hw)
+{
+ wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
+}
+
+static int wl1271_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
struct wl1271 *wl = hw->priv;
int retries = WL1271_BOOT_RETRIES;
int ret = 0;
- wl1271_debug(DEBUG_MAC80211, "mac80211 start");
+ wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
+ vif->type, vif->addr);
mutex_lock(&wl->mutex);
+ if (wl->vif) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ wl->vif = vif;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ wl->bss_type = BSS_TYPE_STA_BSS;
+ wl->set_bss_type = BSS_TYPE_STA_BSS;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ wl->bss_type = BSS_TYPE_IBSS;
+ wl->set_bss_type = BSS_TYPE_STA_BSS;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
if (wl->state != WL1271_STATE_OFF) {
wl1271_error("cannot start because not in off state: %d",
@@ -991,19 +1009,20 @@ out:
return ret;
}
-static void wl1271_op_stop(struct ieee80211_hw *hw)
+static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
int i;
- wl1271_info("down");
-
- wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
-
unregister_inetaddr_notifier(&wl1271_dev_notifier);
- list_del(&wl->list);
mutex_lock(&wl->mutex);
+ wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
+
+ wl1271_info("down");
+
+ list_del(&wl->list);
WARN_ON(wl->state != WL1271_STATE_ON);
@@ -1032,6 +1051,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1);
wl->ssid_len = 0;
wl->bss_type = MAX_BSS_TYPE;
+ wl->set_bss_type = MAX_BSS_TYPE;
wl->band = IEEE80211_BAND_2GHZ;
wl->rx_counter = 0;
@@ -1041,163 +1061,142 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl->tx_results_count = 0;
wl->tx_packets_count = 0;
wl->tx_security_last_seq = 0;
- wl->tx_security_seq_16 = 0;
- wl->tx_security_seq_32 = 0;
+ wl->tx_security_seq = 0;
wl->time_offset = 0;
wl->session_counter = 0;
wl->rate_set = CONF_TX_RATE_MASK_BASIC;
wl->sta_rate_set = 0;
wl->flags = 0;
+ wl->vif = NULL;
+ wl->filters = 0;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_blocks_freed[i] = 0;
wl1271_debugfs_reset(wl);
+
+ kfree(wl->fw_status);
+ wl->fw_status = NULL;
+ kfree(wl->tx_res_if);
+ wl->tx_res_if = NULL;
+ kfree(wl->target_mem_map);
+ wl->target_mem_map = NULL;
+
mutex_unlock(&wl->mutex);
}
-static int wl1271_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
{
- struct wl1271 *wl = hw->priv;
- int ret = 0;
+ wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
- wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
- vif->type, vif->addr);
+ /* combine requested filters with current filter config */
+ filters = wl->filters | filters;
- mutex_lock(&wl->mutex);
- if (wl->vif) {
- ret = -EBUSY;
- goto out;
+ wl1271_debug(DEBUG_FILTERS, "RX filters set: ");
+
+ if (filters & FIF_PROMISC_IN_BSS) {
+ wl1271_debug(DEBUG_FILTERS, " - FIF_PROMISC_IN_BSS");
+ wl->rx_config &= ~CFG_UNI_FILTER_EN;
+ wl->rx_config |= CFG_BSSID_FILTER_EN;
+ }
+ if (filters & FIF_BCN_PRBRESP_PROMISC) {
+ wl1271_debug(DEBUG_FILTERS, " - FIF_BCN_PRBRESP_PROMISC");
+ wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+ wl->rx_config &= ~CFG_SSID_FILTER_EN;
}
+ if (filters & FIF_OTHER_BSS) {
+ wl1271_debug(DEBUG_FILTERS, " - FIF_OTHER_BSS");
+ wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+ }
+ if (filters & FIF_CONTROL) {
+ wl1271_debug(DEBUG_FILTERS, " - FIF_CONTROL");
+ wl->rx_filter |= CFG_RX_CTL_EN;
+ }
+ if (filters & FIF_FCSFAIL) {
+ wl1271_debug(DEBUG_FILTERS, " - FIF_FCSFAIL");
+ wl->rx_filter |= CFG_RX_FCS_ERROR;
+ }
+}
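A short usage sketch (editorial; both calls appear elsewhere in this patch): the requested flags are OR-ed with the sticky wl->filters value, so a temporary request never clears filters configured earlier.

wl1271_configure_filters(wl, FIF_OTHER_BSS);  /* dummy join: hear all BSSes */
wl1271_configure_filters(wl, 0);              /* back to the configured set */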
- wl->vif = vif;
+static int wl1271_dummy_join(struct wl1271 *wl)
+{
+ int ret = 0;
+ /* we need to use a dummy BSSID for now */
+ static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
+ 0xad, 0xbe, 0xef };
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- wl->bss_type = BSS_TYPE_STA_BSS;
- break;
- case NL80211_IFTYPE_ADHOC:
- wl->bss_type = BSS_TYPE_IBSS;
- break;
- default:
- ret = -EOPNOTSUPP;
+ memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
+
+ /* pass through frames from all BSS */
+ wl1271_configure_filters(wl, FIF_OTHER_BSS);
+
+ ret = wl1271_cmd_join(wl, wl->set_bss_type);
+ if (ret < 0)
goto out;
- }
- /* FIXME: what if conf->mac_addr changes? */
+ set_bit(WL1271_FLAG_JOINED, &wl->flags);
out:
- mutex_unlock(&wl->mutex);
return ret;
}
-static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct wl1271 *wl = hw->priv;
-
- mutex_lock(&wl->mutex);
- wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
- wl->vif = NULL;
- mutex_unlock(&wl->mutex);
-}
-
-#if 0
-static int wl1271_op_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
+static int wl1271_join(struct wl1271 *wl, bool set_assoc)
{
- struct wl1271 *wl = hw->priv;
- struct sk_buff *beacon;
int ret;
- wl1271_debug(DEBUG_MAC80211, "mac80211 config_interface bssid %pM",
- conf->bssid);
- wl1271_dump_ascii(DEBUG_MAC80211, "ssid: ", conf->ssid,
- conf->ssid_len);
+ /*
+ * One of the side effects of the JOIN command is that it clears
+ * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
+ * to a WPA/WPA2 access point will therefore kill the data-path.
+ * Currently there is no supported scenario for JOIN during
+ * association - if it becomes a supported scenario, the WPA/WPA2 keys
+ * must be handled somehow.
+ *
+ */
+ if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ wl1271_info("JOIN while associated.");
- mutex_lock(&wl->mutex);
+ if (set_assoc)
+ set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_cmd_join(wl, wl->set_bss_type);
if (ret < 0)
goto out;
- if (memcmp(wl->bssid, conf->bssid, ETH_ALEN)) {
- wl1271_debug(DEBUG_MAC80211, "bssid changed");
-
- memcpy(wl->bssid, conf->bssid, ETH_ALEN);
-
- ret = wl1271_cmd_join(wl);
- if (ret < 0)
- goto out_sleep;
-
- ret = wl1271_cmd_build_null_data(wl);
- if (ret < 0)
- goto out_sleep;
- }
-
- wl->ssid_len = conf->ssid_len;
- if (wl->ssid_len)
- memcpy(wl->ssid, conf->ssid, wl->ssid_len);
-
- if (conf->changed & IEEE80211_IFCC_BEACON) {
- beacon = ieee80211_beacon_get(hw, vif);
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
- beacon->data, beacon->len);
-
- if (ret < 0) {
- dev_kfree_skb(beacon);
- goto out_sleep;
- }
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE,
- beacon->data, beacon->len);
-
- dev_kfree_skb(beacon);
-
- if (ret < 0)
- goto out_sleep;
- }
-
-out_sleep:
- wl1271_ps_elp_sleep(wl);
-
-out:
- mutex_unlock(&wl->mutex);
-
- return ret;
-}
-#endif
-
-static int wl1271_join_channel(struct wl1271 *wl, int channel)
-{
- int ret = 0;
- /* we need to use a dummy BSSID for now */
- static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
- 0xad, 0xbe, 0xef };
+ set_bit(WL1271_FLAG_JOINED, &wl->flags);
- /* the dummy join is not required for ad-hoc */
- if (wl->bss_type == BSS_TYPE_IBSS)
+ if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
goto out;
- /* disable mac filter, so we hear everything */
- wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+ /*
+ * The join command disables the keep-alive mode, shuts down its process,
+ * and also clears the template config, so we need to reset it all after
+ * the join. The acx_aid starts the keep-alive process, and the order
+ * of the commands below is relevant.
+ */
+ ret = wl1271_acx_keep_alive_mode(wl, true);
+ if (ret < 0)
+ goto out;
- wl->channel = channel;
- memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
+ ret = wl1271_acx_aid(wl, wl->aid);
+ if (ret < 0)
+ goto out;
- ret = wl1271_cmd_join(wl);
+ ret = wl1271_cmd_build_klv_null_data(wl);
if (ret < 0)
goto out;
- set_bit(WL1271_FLAG_JOINED, &wl->flags);
+ ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+ ACX_KEEP_ALIVE_TPL_VALID);
+ if (ret < 0)
+ goto out;
out:
return ret;
}
-static int wl1271_unjoin_channel(struct wl1271 *wl)
+static int wl1271_unjoin(struct wl1271 *wl)
{
int ret;
@@ -1207,14 +1206,41 @@ static int wl1271_unjoin_channel(struct wl1271 *wl)
goto out;
clear_bit(WL1271_FLAG_JOINED, &wl->flags);
- wl->channel = 0;
memset(wl->bssid, 0, ETH_ALEN);
- wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
+
+ /* stop filtering packets based on bssid */
+ wl1271_configure_filters(wl, FIF_OTHER_BSS);
out:
return ret;
}
+static void wl1271_set_band_rate(struct wl1271 *wl)
+{
+ if (wl->band == IEEE80211_BAND_2GHZ)
+ wl->basic_rate_set = wl->conf.tx.basic_rate;
+ else
+ wl->basic_rate_set = wl->conf.tx.basic_rate_5;
+}
+
+static u32 wl1271_min_rate_get(struct wl1271 *wl)
+{
+ int i;
+ u32 rate = 0;
+
+ if (!wl->basic_rate_set) {
+ WARN_ON(1);
+ wl->basic_rate_set = wl->conf.tx.basic_rate;
+ }
+
+ for (i = 0; !rate; i++) {
+ if ((wl->basic_rate_set >> i) & 0x1)
+ rate = 1 << i;
+ }
+
+ return rate;
+}
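For reference (a sketch, not part of the patch): the loop above isolates the lowest set bit of basic_rate_set, which for a non-zero rate set is equivalent to:

u32 rate = wl->basic_rate_set & -wl->basic_rate_set;  /* lowest set bit */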
+
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct wl1271 *wl = hw->priv;
@@ -1231,38 +1257,62 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&wl->mutex);
- wl->band = conf->channel->band;
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0)
goto out;
+ /* if the channel changes while joined, join again */
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
+ ((wl->band != conf->channel->band) ||
+ (wl->channel != channel))) {
+ wl->band = conf->channel->band;
+ wl->channel = channel;
+
+ /*
+ * FIXME: the mac80211 should really provide a fixed rate
+ * to use here. for now, just use the smallest possible rate
+ * for the band as a fixed rate for association frames and
+ * other control messages.
+ */
+ if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ wl1271_set_band_rate(wl);
+
+ wl->basic_rate = wl1271_min_rate_get(wl);
+ ret = wl1271_acx_rate_policies(wl);
+ if (ret < 0)
+ wl1271_warning("rate policy for update channel "
+ "failed %d", ret);
+
+ if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
+ ret = wl1271_join(wl, false);
+ if (ret < 0)
+ wl1271_warning("cmd join to update channel "
+ "failed %d", ret);
+ }
+ }
+
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
if (conf->flags & IEEE80211_CONF_IDLE &&
test_bit(WL1271_FLAG_JOINED, &wl->flags))
- wl1271_unjoin_channel(wl);
+ wl1271_unjoin(wl);
else if (!(conf->flags & IEEE80211_CONF_IDLE))
- wl1271_join_channel(wl, channel);
+ wl1271_dummy_join(wl);
if (conf->flags & IEEE80211_CONF_IDLE) {
- wl->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->rate_set = wl1271_min_rate_get(wl);
wl->sta_rate_set = 0;
wl1271_acx_rate_policies(wl);
- }
+ wl1271_acx_keep_alive_config(
+ wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+ ACX_KEEP_ALIVE_TPL_INVALID);
+ set_bit(WL1271_FLAG_IDLE, &wl->flags);
+ } else
+ clear_bit(WL1271_FLAG_IDLE, &wl->flags);
}
- /* if the channel changes while joined, join again */
- if (channel != wl->channel &&
- test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
- wl->channel = channel;
- /* FIXME: maybe use CMD_CHANNEL_SWITCH for this? */
- ret = wl1271_cmd_join(wl);
- if (ret < 0)
- wl1271_warning("cmd join to update channel failed %d",
- ret);
- } else
- wl->channel = channel;
-
if (conf->flags & IEEE80211_CONF_PS &&
!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
@@ -1273,13 +1323,13 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
* through the bss_info_changed() hook.
*/
if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
- wl1271_info("psm enabled");
+ wl1271_debug(DEBUG_PSM, "psm enabled");
ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
true);
}
} else if (!(conf->flags & IEEE80211_CONF_PS) &&
test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
- wl1271_info("psm disabled");
+ wl1271_debug(DEBUG_PSM, "psm disabled");
clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
@@ -1311,11 +1361,15 @@ struct wl1271_filter_params {
u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};
-static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
- struct dev_addr_list *mc_list)
+static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
{
struct wl1271_filter_params *fp;
- int i;
+ struct netdev_hw_addr *ha;
+ struct wl1271 *wl = hw->priv;
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ return 0;
fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
if (!fp) {
@@ -1324,21 +1378,16 @@ static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
}
/* update multicast filtering parameters */
- fp->enabled = true;
- if (mc_count > ACX_MC_ADDRESS_GROUP_MAX) {
- mc_count = 0;
- fp->enabled = false;
- }
-
fp->mc_list_length = 0;
- for (i = 0; i < mc_count; i++) {
- if (mc_list->da_addrlen == ETH_ALEN) {
+ if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
+ fp->enabled = false;
+ } else {
+ fp->enabled = true;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
memcpy(fp->mc_list[fp->mc_list_length],
- mc_list->da_addr, ETH_ALEN);
+ ha->addr, ETH_ALEN);
fp->mc_list_length++;
- } else
- wl1271_warning("Unknown mc address length.");
- mc_list = mc_list->next;
+ }
}
return (u64)(unsigned long)fp;
@@ -1363,15 +1412,16 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- if (wl->state == WL1271_STATE_OFF)
+ *total &= WL1271_SUPPORTED_FILTERS;
+ changed &= WL1271_SUPPORTED_FILTERS;
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0)
goto out;
- *total &= WL1271_SUPPORTED_FILTERS;
- changed &= WL1271_SUPPORTED_FILTERS;
if (*total & FIF_ALLMULTI)
ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
@@ -1382,14 +1432,14 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
- kfree(fp);
-
- /* FIXME: We still need to set our filters properly */
-
/* determine, whether supported filter values have changed */
if (changed == 0)
goto out_sleep;
+ /* configure filters */
+ wl->filters = *total;
+ wl1271_configure_filters(wl, 0);
+
/* apply configured filters */
ret = wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
if (ret < 0)
@@ -1400,6 +1450,7 @@ out_sleep:
out:
mutex_unlock(&wl->mutex);
+ kfree(fp);
}
static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -1450,15 +1501,15 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_type = KEY_TKIP;
key_conf->hw_key_idx = key_conf->keyidx;
- tx_seq_32 = wl->tx_security_seq_32;
- tx_seq_16 = wl->tx_security_seq_16;
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
break;
case ALG_CCMP:
key_type = KEY_AES;
key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- tx_seq_32 = wl->tx_security_seq_32;
- tx_seq_16 = wl->tx_security_seq_16;
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
break;
default:
wl1271_error("Unknown key algo 0x%x", key_conf->alg);
@@ -1508,8 +1559,6 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
default:
wl1271_error("Unsupported key cmd 0x%x", cmd);
ret = -EOPNOTSUPP;
- goto out_sleep;
-
break;
}
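The WL1271_TX_SECURITY_HI32/LO16 helpers used above split the single tx_security_seq counter that replaces the old 16/32-bit pair; they are presumably defined along these lines in wl1271.h (sketch, not quoted from this patch):

#define WL1271_TX_SECURITY_LO16(s)  ((u16)((s) & 0xffff))
#define WL1271_TX_SECURITY_HI32(s)  ((u32)(((s) >> 16) & 0xffffffff))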
@@ -1524,6 +1573,7 @@ out:
}
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct wl1271 *wl = hw->priv;
@@ -1545,10 +1595,12 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
goto out;
if (wl1271_11a_enabled())
- ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0,
+ ret = wl1271_cmd_scan(hw->priv, ssid, len,
+ req->ie, req->ie_len, 1, 0,
WL1271_SCAN_BAND_DUAL, 3);
else
- ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0,
+ ret = wl1271_cmd_scan(hw->priv, ssid, len,
+ req->ie, req->ie_len, 1, 0,
WL1271_SCAN_BAND_2_4_GHZ, 3);
wl1271_ps_elp_sleep(wl);
@@ -1562,10 +1614,13 @@ out:
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct wl1271 *wl = hw->priv;
- int ret;
+ int ret = 0;
mutex_lock(&wl->mutex);
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0)
goto out;
@@ -1607,6 +1662,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
enum wl1271_cmd_ps_mode mode;
struct wl1271 *wl = hw->priv;
bool do_join = false;
+ bool set_assoc = false;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1617,20 +1673,29 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- if (wl->bss_type == BSS_TYPE_IBSS) {
- /* FIXME: This implements rudimentary ad-hoc support -
- proper templates are on the wish list and notification
- on when they change. This patch will update the templates
- on every call to this function. */
+ if ((changed & BSS_CHANGED_BEACON_INT) &&
+ (wl->bss_type == BSS_TYPE_IBSS)) {
+ wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d",
+ bss_conf->beacon_int);
+
+ wl->beacon_int = bss_conf->beacon_int;
+ do_join = true;
+ }
+
+ if ((changed & BSS_CHANGED_BEACON) &&
+ (wl->bss_type == BSS_TYPE_IBSS)) {
struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+ wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon updated");
+
if (beacon) {
struct ieee80211_hdr *hdr;
wl1271_ssid_set(wl, beacon);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
beacon->data,
- beacon->len);
+ beacon->len, 0,
+ wl1271_min_rate_get(wl));
if (ret < 0) {
dev_kfree_skb(beacon);
@@ -1645,7 +1710,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
ret = wl1271_cmd_template_set(wl,
CMD_TEMPL_PROBE_RESPONSE,
beacon->data,
- beacon->len);
+ beacon->len, 0,
+ wl1271_min_rate_get(wl));
dev_kfree_skb(beacon);
if (ret < 0)
goto out_sleep;
@@ -1655,20 +1721,48 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
+ (wl->bss_type == BSS_TYPE_IBSS)) {
+ wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
+ bss_conf->enable_beacon ? "enabled" : "disabled");
+
+ if (bss_conf->enable_beacon)
+ wl->set_bss_type = BSS_TYPE_IBSS;
+ else
+ wl->set_bss_type = BSS_TYPE_STA_BSS;
+ do_join = true;
+ }
+
+ if (changed & BSS_CHANGED_CQM) {
+ bool enable = false;
+ if (bss_conf->cqm_rssi_thold)
+ enable = true;
+ ret = wl1271_acx_rssi_snr_trigger(wl, enable,
+ bss_conf->cqm_rssi_thold,
+ bss_conf->cqm_rssi_hyst);
+ if (ret < 0)
+ goto out;
+ wl->rssi_thold = bss_conf->cqm_rssi_thold;
+ }
+
if ((changed & BSS_CHANGED_BSSID) &&
/*
* Now we know the correct bssid, so we send a new join command
* and enable the BSSID filter
*/
memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
- wl->rx_config |= CFG_BSSID_FILTER_EN;
memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+
ret = wl1271_cmd_build_null_data(wl);
- if (ret < 0) {
- wl1271_warning("cmd buld null data failed %d",
- ret);
+ if (ret < 0)
+ goto out_sleep;
+
+ ret = wl1271_build_qos_null_data(wl);
+ if (ret < 0)
goto out_sleep;
- }
+
+ /* filter out all packets not from this BSSID */
+ wl1271_configure_filters(wl, 0);
/* Need to update the BSSID (for filtering etc) */
do_join = true;
@@ -1676,8 +1770,21 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
+ u32 rates;
wl->aid = bss_conf->aid;
- set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
+ set_assoc = true;
+
+ /*
+ * use basic rates from AP, and determine lowest rate
+ * to use with control frames.
+ */
+ rates = bss_conf->basic_rates;
+ wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
+ rates);
+ wl->basic_rate = wl1271_min_rate_get(wl);
+ ret = wl1271_acx_rate_policies(wl);
+ if (ret < 0)
+ goto out_sleep;
/*
* with wl1271, we don't need to update the
@@ -1689,7 +1796,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
- ret = wl1271_acx_aid(wl, wl->aid);
+ /*
+ * The SSID is intentionally set to NULL here - the
+ * firmware will set the probe request with a
+ * broadcast SSID regardless of what we set in the
+ * template.
+ */
+ ret = wl1271_cmd_build_probe_req(wl, NULL, 0,
+ NULL, 0, wl->band);
+
+ /* enable the connection monitoring feature */
+ ret = wl1271_acx_conn_monit_params(wl, true);
if (ret < 0)
goto out_sleep;
@@ -1705,6 +1822,22 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
/* use defaults when not associated */
clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
wl->aid = 0;
+
+ /* revert back to minimum rates for the current band */
+ wl1271_set_band_rate(wl);
+ wl->basic_rate = wl1271_min_rate_get(wl);
+ ret = wl1271_acx_rate_policies(wl);
+ if (ret < 0)
+ goto out_sleep;
+
+ /* disable connection monitor features */
+ ret = wl1271_acx_conn_monit_params(wl, false);
+
+ /* Disable the keep-alive feature */
+ ret = wl1271_acx_keep_alive_mode(wl, false);
+
+ if (ret < 0)
+ goto out_sleep;
}
}
@@ -1739,12 +1872,11 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (do_join) {
- ret = wl1271_cmd_join(wl);
+ ret = wl1271_join(wl, set_assoc);
if (ret < 0) {
wl1271_warning("cmd join failed %d", ret);
goto out_sleep;
}
- set_bit(WL1271_FLAG_JOINED, &wl->flags);
}
out_sleep:
@@ -1758,6 +1890,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct wl1271 *wl = hw->priv;
+ u8 ps_scheme;
int ret;
mutex_lock(&wl->mutex);
@@ -1768,17 +1901,22 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
if (ret < 0)
goto out;
+ /* mac80211 configures the txop in units of 32us, but we need microseconds */
ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
params->cw_min, params->cw_max,
- params->aifs, params->txop);
+ params->aifs, params->txop << 5);
if (ret < 0)
goto out_sleep;
+ if (params->uapsd)
+ ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
+ else
+ ps_scheme = CONF_PS_SCHEME_LEGACY;
+
ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
CONF_CHANNEL_TYPE_EDCF,
wl1271_tx_get_queue(queue),
- CONF_PS_SCHEME_LEGACY_PSPOLL,
- CONF_ACK_POLICY_LEGACY, 0, 0);
+ ps_scheme, CONF_ACK_POLICY_LEGACY, 0, 0);
if (ret < 0)
goto out_sleep;
@@ -1852,6 +1990,36 @@ static struct ieee80211_channel wl1271_channels[] = {
{ .hw_value = 13, .center_freq = 2472, .max_power = 25 },
};
+/* mapping to indexes for wl1271_rates */
+static const u8 wl1271_rate_to_idx_2ghz[] = {
+ /* MCS rates are used only with 11n */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */
+
+ 11, /* CONF_HW_RXTX_RATE_54 */
+ 10, /* CONF_HW_RXTX_RATE_48 */
+ 9, /* CONF_HW_RXTX_RATE_36 */
+ 8, /* CONF_HW_RXTX_RATE_24 */
+
+ /* TI-specific rate */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */
+
+ 7, /* CONF_HW_RXTX_RATE_18 */
+ 6, /* CONF_HW_RXTX_RATE_12 */
+ 3, /* CONF_HW_RXTX_RATE_11 */
+ 5, /* CONF_HW_RXTX_RATE_9 */
+ 4, /* CONF_HW_RXTX_RATE_6 */
+ 2, /* CONF_HW_RXTX_RATE_5_5 */
+ 1, /* CONF_HW_RXTX_RATE_2 */
+ 0 /* CONF_HW_RXTX_RATE_1 */
+};
+
/* can't be const, mac80211 writes to this */
static struct ieee80211_supported_band wl1271_band_2ghz = {
.channels = wl1271_channels,
@@ -1934,6 +2102,35 @@ static struct ieee80211_channel wl1271_channels_5ghz[] = {
{ .hw_value = 165, .center_freq = 5825},
};
+/* mapping to indexes for wl1271_rates_5ghz */
+static const u8 wl1271_rate_to_idx_5ghz[] = {
+ /* MCS rates are used only with 11n */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */
+
+ 7, /* CONF_HW_RXTX_RATE_54 */
+ 6, /* CONF_HW_RXTX_RATE_48 */
+ 5, /* CONF_HW_RXTX_RATE_36 */
+ 4, /* CONF_HW_RXTX_RATE_24 */
+
+ /* TI-specific rate */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */
+
+ 3, /* CONF_HW_RXTX_RATE_18 */
+ 2, /* CONF_HW_RXTX_RATE_12 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_11 */
+ 1, /* CONF_HW_RXTX_RATE_9 */
+ 0, /* CONF_HW_RXTX_RATE_6 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_5_5 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_2 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED /* CONF_HW_RXTX_RATE_1 */
+};
static struct ieee80211_supported_band wl1271_band_5ghz = {
.channels = wl1271_channels_5ghz,
@@ -1942,13 +2139,17 @@ static struct ieee80211_supported_band wl1271_band_5ghz = {
.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};
+static const u8 *wl1271_band_rate_to_idx[] = {
+ [IEEE80211_BAND_2GHZ] = wl1271_rate_to_idx_2ghz,
+ [IEEE80211_BAND_5GHZ] = wl1271_rate_to_idx_5ghz
+};
+
static const struct ieee80211_ops wl1271_ops = {
.start = wl1271_op_start,
.stop = wl1271_op_stop,
.add_interface = wl1271_op_add_interface,
.remove_interface = wl1271_op_remove_interface,
.config = wl1271_op_config,
-/* .config_interface = wl1271_op_config_interface, */
.prepare_multicast = wl1271_op_prepare_multicast,
.configure_filter = wl1271_op_configure_filter,
.tx = wl1271_op_tx,
@@ -1960,7 +2161,113 @@ static const struct ieee80211_ops wl1271_ops = {
CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
-static int wl1271_register_hw(struct wl1271 *wl)
+
+u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate)
+{
+ u8 idx;
+
+ BUG_ON(wl->band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *));
+
+ if (unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) {
+ wl1271_error("Illegal RX rate from HW: %d", rate);
+ return 0;
+ }
+
+ idx = wl1271_band_rate_to_idx[wl->band][rate];
+ if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
+ wl1271_error("Unsupported RX rate from HW: %d", rate);
+ return 0;
+ }
+
+ return idx;
+}
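Usage sketch (editorial): these per-band tables replace the two per-file tables removed from wl1271_rx.c further below, so the RX status path reduces to:

/* from the wl1271_rx.c hunk later in this patch */
status->band = wl->band;
status->rate_idx = wl1271_rate_to_idx(wl, desc->rate);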
+
+static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ ssize_t len;
+
+ /* FIXME: what's the maximum length of buf? page size? */
+ len = 500;
+
+ mutex_lock(&wl->mutex);
+ len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
+ wl->sg_enabled);
+ mutex_unlock(&wl->mutex);
+
+ return len;
+
+}
+
+static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ unsigned long res;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &res);
+
+ if (ret < 0) {
+ wl1271_warning("incorrect value written to bt_coex_mode");
+ return count;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ res = !!res;
+
+ if (res == wl->sg_enabled)
+ goto out;
+
+ wl->sg_enabled = res;
+
+ if (wl->state == WL1271_STATE_OFF)
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl, false);
+ if (ret < 0)
+ goto out;
+
+ wl1271_acx_sg_enable(wl, wl->sg_enabled);
+ wl1271_ps_elp_sleep(wl);
+
+ out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
+ wl1271_sysfs_show_bt_coex_state,
+ wl1271_sysfs_store_bt_coex_state);
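Usage note (editorial): with the platform device registered as "wl1271" the attribute typically appears as /sys/devices/platform/wl1271/bt_coex_state; a sketch of the expected interaction:

/*
 *   # cat /sys/devices/platform/wl1271/bt_coex_state
 *   1
 *
 *   0 - off
 *   1 - on
 *   # echo 0 > /sys/devices/platform/wl1271/bt_coex_state
 */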
+
+static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ ssize_t len;
+
+ /* FIXME: what's the maximum length of buf? page size? */
+ len = 500;
+
+ mutex_lock(&wl->mutex);
+ if (wl->hw_pg_ver >= 0)
+ len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
+ else
+ len = snprintf(buf, len, "n/a\n");
+ mutex_unlock(&wl->mutex);
+
+ return len;
+}
+
+static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
+ wl1271_sysfs_show_hw_pg_ver, NULL);
+
+int wl1271_register_hw(struct wl1271 *wl)
{
int ret;
@@ -1981,8 +2288,17 @@ static int wl1271_register_hw(struct wl1271 *wl)
return 0;
}
+EXPORT_SYMBOL_GPL(wl1271_register_hw);
+
+void wl1271_unregister_hw(struct wl1271 *wl)
+{
+ ieee80211_unregister_hw(wl->hw);
+ wl->mac80211_registered = false;
+
+}
+EXPORT_SYMBOL_GPL(wl1271_unregister_hw);
-static int wl1271_init_ieee80211(struct wl1271 *wl)
+int wl1271_init_ieee80211(struct wl1271 *wl)
{
/* The tx descriptor buffer and the TKIP space. */
wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE +
@@ -1991,11 +2307,15 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
/* unit us */
/* FIXME: find a proper value */
wl->hw->channel_change_time = 10000;
+ wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_BEACON_FILTER |
- IEEE80211_HW_SUPPORTS_PS;
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_UAPSD |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_CONNECTION_MONITOR |
+ IEEE80211_HW_SUPPORTS_CQM_RSSI;
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
@@ -2005,51 +2325,53 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
if (wl1271_11a_enabled())
wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
- SET_IEEE80211_DEV(wl->hw, &wl->spi->dev);
+ wl->hw->queues = 4;
+ wl->hw->max_rates = 1;
- return 0;
-}
-
-static void wl1271_device_release(struct device *dev)
-{
+ SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
+ return 0;
}
-
-static struct platform_device wl1271_device = {
- .name = "wl1271",
- .id = -1,
-
- /* device model insists to have a release function */
- .dev = {
- .release = wl1271_device_release,
- },
-};
+EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
#define WL1271_DEFAULT_CHANNEL 0
-static struct ieee80211_hw *wl1271_alloc_hw(void)
+struct ieee80211_hw *wl1271_alloc_hw(void)
{
struct ieee80211_hw *hw;
+ struct platform_device *plat_dev = NULL;
struct wl1271 *wl;
- int i;
+ int i, ret;
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
if (!hw) {
wl1271_error("could not alloc ieee80211_hw");
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto err_hw_alloc;
+ }
+
+ plat_dev = kmalloc(sizeof(wl1271_device), GFP_KERNEL);
+ if (!plat_dev) {
+ wl1271_error("could not allocate platform_device");
+ ret = -ENOMEM;
+ goto err_plat_alloc;
}
+ memcpy(plat_dev, &wl1271_device, sizeof(wl1271_device));
+
wl = hw->priv;
memset(wl, 0, sizeof(*wl));
INIT_LIST_HEAD(&wl->list);
wl->hw = hw;
+ wl->plat_dev = plat_dev;
skb_queue_head_init(&wl->tx_queue);
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
wl->channel = WL1271_DEFAULT_CHANNEL;
+ wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
wl->default_key = 0;
wl->rx_counter = 0;
wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
@@ -2057,11 +2379,14 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
wl->psm_entry_retry = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
wl->rate_set = CONF_TX_RATE_MASK_BASIC;
wl->sta_rate_set = 0;
wl->band = IEEE80211_BAND_2GHZ;
wl->vif = NULL;
wl->flags = 0;
+ wl->sg_enabled = true;
+ wl->hw_pg_ver = -1;
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
wl->tx_frames[i] = NULL;
@@ -2074,167 +2399,72 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
/* Apply default driver configuration. */
wl1271_conf_init(wl);
- return hw;
-}
-
-int wl1271_free_hw(struct wl1271 *wl)
-{
- ieee80211_unregister_hw(wl->hw);
-
- wl1271_debugfs_exit(wl);
-
- kfree(wl->target_mem_map);
- vfree(wl->fw);
- wl->fw = NULL;
- kfree(wl->nvs);
- wl->nvs = NULL;
-
- kfree(wl->fw_status);
- kfree(wl->tx_res_if);
-
- ieee80211_free_hw(wl->hw);
-
- return 0;
-}
-
-static int __devinit wl1271_probe(struct spi_device *spi)
-{
- struct wl12xx_platform_data *pdata;
- struct ieee80211_hw *hw;
- struct wl1271 *wl;
- int ret;
+ wl1271_debugfs_init(wl);
- pdata = spi->dev.platform_data;
- if (!pdata) {
- wl1271_error("no platform data");
- return -ENODEV;
+ /* Register platform device */
+ ret = platform_device_register(wl->plat_dev);
+ if (ret) {
+ wl1271_error("couldn't register platform device");
+ goto err_hw;
}
+ dev_set_drvdata(&wl->plat_dev->dev, wl);
- hw = wl1271_alloc_hw();
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- wl = hw->priv;
-
- dev_set_drvdata(&spi->dev, wl);
- wl->spi = spi;
-
- /* This is the only SPI value that we need to set here, the rest
- * comes from the board-peripherals file */
- spi->bits_per_word = 32;
-
- ret = spi_setup(spi);
+ /* Create sysfs file to control bt coex state */
+ ret = device_create_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
if (ret < 0) {
- wl1271_error("spi_setup failed");
- goto out_free;
- }
-
- wl->set_power = pdata->set_power;
- if (!wl->set_power) {
- wl1271_error("set power function missing in platform data");
- ret = -ENODEV;
- goto out_free;
+ wl1271_error("failed to create sysfs file bt_coex_state");
+ goto err_platform;
}
- wl->irq = spi->irq;
- if (wl->irq < 0) {
- wl1271_error("irq missing in platform data");
- ret = -ENODEV;
- goto out_free;
- }
-
- ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+ /* Create sysfs file to get HW PG version */
+ ret = device_create_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
if (ret < 0) {
- wl1271_error("request_irq() failed: %d", ret);
- goto out_free;
- }
-
- set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-
- disable_irq(wl->irq);
-
- ret = platform_device_register(&wl1271_device);
- if (ret) {
- wl1271_error("couldn't register platform device");
- goto out_irq;
+ wl1271_error("failed to create sysfs file hw_pg_ver");
+ goto err_bt_coex_state;
}
- dev_set_drvdata(&wl1271_device.dev, wl);
-
- ret = wl1271_init_ieee80211(wl);
- if (ret)
- goto out_platform;
-
- ret = wl1271_register_hw(wl);
- if (ret)
- goto out_platform;
-
- wl1271_debugfs_init(wl);
- wl1271_notice("initialized");
+ return hw;
- return 0;
+err_bt_coex_state:
+ device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
- out_platform:
- platform_device_unregister(&wl1271_device);
+err_platform:
+ platform_device_unregister(wl->plat_dev);
- out_irq:
- free_irq(wl->irq, wl);
+err_hw:
+ wl1271_debugfs_exit(wl);
+ kfree(plat_dev);
- out_free:
+err_plat_alloc:
ieee80211_free_hw(hw);
- return ret;
-}
-
-static int __devexit wl1271_remove(struct spi_device *spi)
-{
- struct wl1271 *wl = dev_get_drvdata(&spi->dev);
+err_hw_alloc:
- platform_device_unregister(&wl1271_device);
- free_irq(wl->irq, wl);
-
- wl1271_free_hw(wl);
-
- return 0;
+ return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
-
-static struct spi_driver wl1271_spi_driver = {
- .driver = {
- .name = "wl1271",
- .bus = &spi_bus_type,
- .owner = THIS_MODULE,
- },
-
- .probe = wl1271_probe,
- .remove = __devexit_p(wl1271_remove),
-};
-
-static int __init wl1271_init(void)
+int wl1271_free_hw(struct wl1271 *wl)
{
- int ret;
+ platform_device_unregister(wl->plat_dev);
+ kfree(wl->plat_dev);
- ret = spi_register_driver(&wl1271_spi_driver);
- if (ret < 0) {
- wl1271_error("failed to register spi driver: %d", ret);
- goto out;
- }
+ wl1271_debugfs_exit(wl);
-out:
- return ret;
-}
+ vfree(wl->fw);
+ wl->fw = NULL;
+ kfree(wl->nvs);
+ wl->nvs = NULL;
-static void __exit wl1271_exit(void)
-{
- spi_unregister_driver(&wl1271_spi_driver);
+ kfree(wl->fw_status);
+ kfree(wl->tx_res_if);
- wl1271_notice("unloaded");
-}
+ ieee80211_free_hw(wl->hw);
-module_init(wl1271_init);
-module_exit(wl1271_exit);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wl1271_free_hw);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
-MODULE_FIRMWARE(WL1271_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index e2b1ebf..a5e60e0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -23,7 +23,6 @@
#include "wl1271_reg.h"
#include "wl1271_ps.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#define WL1271_WAKEUP_TIMEOUT 500
@@ -41,7 +40,8 @@ void wl1271_elp_work(struct work_struct *work)
mutex_lock(&wl->mutex);
if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
- !test_bit(WL1271_FLAG_PSM, &wl->flags))
+ (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
+ !test_bit(WL1271_FLAG_IDLE, &wl->flags)))
goto out;
wl1271_debug(DEBUG_PSM, "chip to elp");
@@ -57,7 +57,8 @@ out:
/* Routines to toggle sleep mode while in ELP */
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
- if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ if (test_bit(WL1271_FLAG_PSM, &wl->flags) ||
+ test_bit(WL1271_FLAG_IDLE, &wl->flags)) {
cancel_delayed_work(&wl->elp_work);
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
msecs_to_jiffies(ELP_ENTRY_DELAY));
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index c723d9c..57f4bfd 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -27,7 +27,6 @@
#include "wl1271_acx.h"
#include "wl1271_reg.h"
#include "wl1271_rx.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
@@ -44,66 +43,6 @@ static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status,
RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
}
-/* The values of this table must match the wl1271_rates[] array */
-static u8 wl1271_rx_rate_to_idx[] = {
- /* MCS rates are used only with 11n */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */
-
- 11, /* WL1271_RATE_54 */
- 10, /* WL1271_RATE_48 */
- 9, /* WL1271_RATE_36 */
- 8, /* WL1271_RATE_24 */
-
- /* TI-specific rate */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22 */
-
- 7, /* WL1271_RATE_18 */
- 6, /* WL1271_RATE_12 */
- 3, /* WL1271_RATE_11 */
- 5, /* WL1271_RATE_9 */
- 4, /* WL1271_RATE_6 */
- 2, /* WL1271_RATE_5_5 */
- 1, /* WL1271_RATE_2 */
- 0 /* WL1271_RATE_1 */
-};
-
-/* The values of this table must match the wl1271_rates[] array */
-static u8 wl1271_5_ghz_rx_rate_to_idx[] = {
- /* MCS rates are used only with 11n */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */
-
- 7, /* WL1271_RATE_54 */
- 6, /* WL1271_RATE_48 */
- 5, /* WL1271_RATE_36 */
- 4, /* WL1271_RATE_24 */
-
- /* TI-specific rate */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22 */
-
- 3, /* WL1271_RATE_18 */
- 2, /* WL1271_RATE_12 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_11 */
- 1, /* WL1271_RATE_9 */
- 0, /* WL1271_RATE_6 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_5_5 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_2 */
- WL1271_RX_RATE_UNSUPPORTED /* WL1271_RATE_1 */
-};
-
static void wl1271_rx_status(struct wl1271 *wl,
struct wl1271_rx_descriptor *desc,
struct ieee80211_rx_status *status,
@@ -111,20 +50,8 @@ static void wl1271_rx_status(struct wl1271 *wl,
{
memset(status, 0, sizeof(struct ieee80211_rx_status));
- if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
- WL1271_RX_DESC_BAND_BG) {
- status->band = IEEE80211_BAND_2GHZ;
- status->rate_idx = wl1271_rx_rate_to_idx[desc->rate];
- } else if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
- WL1271_RX_DESC_BAND_A) {
- status->band = IEEE80211_BAND_5GHZ;
- status->rate_idx = wl1271_5_ghz_rx_rate_to_idx[desc->rate];
- } else
- wl1271_warning("unsupported band 0x%x",
- desc->flags & WL1271_RX_DESC_BAND_MASK);
-
- if (unlikely(status->rate_idx == WL1271_RX_RATE_UNSUPPORTED))
- wl1271_warning("unsupported rate");
+ status->band = wl->band;
+ status->rate_idx = wl1271_rate_to_idx(wl, desc->rate);
/*
* FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the
@@ -134,13 +61,6 @@ static void wl1271_rx_status(struct wl1271 *wl,
*/
status->signal = desc->rssi;
- /*
- * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we
- * need to divide by two for now, but TI has been discussing about
- * changing it. This needs to be rechecked.
- */
- status->noise = desc->rssi - (desc->snr >> 1);
-
status->freq = ieee80211_channel_to_frequency(desc->channel);
if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
@@ -162,6 +82,13 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
u8 *buf;
u8 beacon = 0;
+ /*
+ * In PLT mode we seem to get frames and mac80211 warns about them,
+ * work around this by not retrieving them at all.
+ */
+ if (unlikely(wl->state == WL1271_STATE_PLT))
+ return;
+
skb = __dev_alloc_skb(length, GFP_KERNEL);
if (!skb) {
wl1271_error("Couldn't allocate RX frame");
@@ -220,6 +147,7 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
wl->rx_counter++;
drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
- wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
}
+
+ wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h
index 1ae6d17..b89be47 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.h
@@ -43,7 +43,6 @@
#define RX_MAX_PACKET_ID 3
#define NUM_RX_PKT_DESC_MOD_MASK 7
-#define WL1271_RX_RATE_UNSUPPORTED 0xFF
#define RX_DESC_VALID_FCS 0x0001
#define RX_DESC_MATCH_RXADDR1 0x0002
@@ -117,5 +116,6 @@ struct wl1271_rx_descriptor {
} __attribute__ ((packed));
void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
+u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_sdio.c b/drivers/net/wireless/wl12xx/wl1271_sdio.c
new file mode 100644
index 0000000..d3d6f30
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_sdio.c
@@ -0,0 +1,291 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/crc7.h>
+#include <linux/vmalloc.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/card.h>
+#include <plat/gpio.h>
+
+#include "wl1271.h"
+#include "wl12xx_80211.h"
+#include "wl1271_io.h"
+
+
+#define RX71_WL1271_IRQ_GPIO 42
+
+#ifndef SDIO_VENDOR_ID_TI
+#define SDIO_VENDOR_ID_TI 0x0097
+#endif
+
+#ifndef SDIO_DEVICE_ID_TI_WL1271
+#define SDIO_DEVICE_ID_TI_WL1271 0x4076
+#endif
+
+static const struct sdio_device_id wl1271_devices[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
+ {}
+};
+MODULE_DEVICE_TABLE(sdio, wl1271_devices);
+
+static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
+{
+ return wl->if_priv;
+}
+
+static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
+{
+ return &(wl_to_func(wl)->dev);
+}
+
+static irqreturn_t wl1271_irq(int irq, void *cookie)
+{
+ struct wl1271 *wl = cookie;
+ unsigned long flags;
+
+ wl1271_debug(DEBUG_IRQ, "IRQ");
+
+ /* complete the ELP completion */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ if (wl->elp_compl) {
+ complete(wl->elp_compl);
+ wl->elp_compl = NULL;
+ }
+
+ if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
+ ieee80211_queue_work(wl->hw, &wl->irq_work);
+ set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
+{
+ disable_irq(wl->irq);
+}
+
+static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
+{
+ enable_irq(wl->irq);
+}
+
+static void wl1271_sdio_reset(struct wl1271 *wl)
+{
+}
+
+static void wl1271_sdio_init(struct wl1271 *wl)
+{
+}
+
+static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ int ret;
+ struct sdio_func *func = wl_to_func(wl);
+
+ if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
+ ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
+ wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
+ addr, ((u8 *)buf)[0]);
+ } else {
+ if (fixed)
+ ret = sdio_readsb(func, buf, addr, len);
+ else
+ ret = sdio_memcpy_fromio(func, buf, addr, len);
+
+ wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
+ addr, len);
+ wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+ }
+
+ if (ret)
+ wl1271_error("sdio read failed (%d)", ret);
+
+}
+
+static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ int ret;
+ struct sdio_func *func = wl_to_func(wl);
+
+ if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
+ sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
+ wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
+ addr, ((u8 *)buf)[0]);
+ } else {
+ wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
+ addr, len);
+ wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+
+ if (fixed)
+ ret = sdio_writesb(func, addr, buf, len);
+ else
+ ret = sdio_memcpy_toio(func, addr, buf, len);
+ }
+ if (ret)
+ wl1271_error("sdio write failed (%d)", ret);
+
+}
+
+static void wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
+{
+ struct sdio_func *func = wl_to_func(wl);
+
+ /* Let the SDIO stack handle wlan_enable control, so we
+ * keep host claimed while wlan is in use to keep wl1271
+ * alive.
+ */
+ if (enable) {
+ sdio_claim_host(func);
+ sdio_enable_func(func);
+ } else {
+ sdio_disable_func(func);
+ sdio_release_host(func);
+ }
+}
+
+static struct wl1271_if_operations sdio_ops = {
+ .read = wl1271_sdio_raw_read,
+ .write = wl1271_sdio_raw_write,
+ .reset = wl1271_sdio_reset,
+ .init = wl1271_sdio_init,
+ .power = wl1271_sdio_set_power,
+ .dev = wl1271_sdio_wl_to_dev,
+ .enable_irq = wl1271_sdio_enable_interrupts,
+ .disable_irq = wl1271_sdio_disable_interrupts
+};
+
+static int __devinit wl1271_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct ieee80211_hw *hw;
+ struct wl1271 *wl;
+ int ret;
+
+ /* We are only able to handle the wlan function */
+ if (func->num != 0x02)
+ return -ENODEV;
+
+ hw = wl1271_alloc_hw();
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ wl = hw->priv;
+
+ wl->if_priv = func;
+ wl->if_ops = &sdio_ops;
+
+ /* Grab access to FN0 for ELP reg. */
+ func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+
+ wl->irq = gpio_to_irq(RX71_WL1271_IRQ_GPIO);
+ if (wl->irq < 0) {
+ ret = wl->irq;
+ wl1271_error("could not get irq!");
+ goto out_free;
+ }
+
+ ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+ if (ret < 0) {
+ wl1271_error("request_irq() failed: %d", ret);
+ goto out_free;
+ }
+
+ set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+
+ disable_irq(wl->irq);
+
+ ret = wl1271_init_ieee80211(wl);
+ if (ret)
+ goto out_irq;
+
+ ret = wl1271_register_hw(wl);
+ if (ret)
+ goto out_irq;
+
+ sdio_set_drvdata(func, wl);
+
+ wl1271_notice("initialized");
+
+ return 0;
+
+ out_irq:
+ free_irq(wl->irq, wl);
+
+ out_free:
+ wl1271_free_hw(wl);
+
+ return ret;
+}
+
+static void __devexit wl1271_remove(struct sdio_func *func)
+{
+ struct wl1271 *wl = sdio_get_drvdata(func);
+
+ free_irq(wl->irq, wl);
+
+ wl1271_unregister_hw(wl);
+ wl1271_free_hw(wl);
+}
+
+static struct sdio_driver wl1271_sdio_driver = {
+ .name = "wl1271_sdio",
+ .id_table = wl1271_devices,
+ .probe = wl1271_probe,
+ .remove = __devexit_p(wl1271_remove),
+};
+
+static int __init wl1271_init(void)
+{
+ int ret;
+
+ ret = sdio_register_driver(&wl1271_sdio_driver);
+ if (ret < 0) {
+ wl1271_error("failed to register sdio driver: %d", ret);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static void __exit wl1271_exit(void)
+{
+ sdio_unregister_driver(&wl1271_sdio_driver);
+
+ wl1271_notice("unloaded");
+}
+
+module_init(wl1271_init);
+module_exit(wl1271_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
+MODULE_FIRMWARE(WL1271_FW_NAME);
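For context on the SDIO helpers used by wl1271_sdio_raw_read()/wl1271_sdio_raw_write() above: the ELP control register is touched with a single-byte function-0 transfer, while bulk transfers use CMD53 and choose between a fixed (FIFO-style) and an incrementing address. A minimal sketch of that choice follows; example_sdio_read() is an illustrative name, not part of the patch, and error handling is omitted.

static int example_sdio_read(struct sdio_func *func, unsigned int addr,
			     void *buf, int len, bool fixed)
{
	/* fixed: the card exposes a FIFO, so keep reading the same address */
	if (fixed)
		return sdio_readsb(func, buf, addr, len);

	/* otherwise let the address auto-increment across the transfer */
	return sdio_memcpy_fromio(func, buf, addr, len);
}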
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 053c84a..5189b81 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -21,18 +21,69 @@
*
*/
+#include <linux/irq.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/crc7.h>
#include <linux/spi/spi.h>
+#include <linux/spi/wl12xx.h>
#include <linux/slab.h>
#include "wl1271.h"
#include "wl12xx_80211.h"
-#include "wl1271_spi.h"
+#include "wl1271_io.h"
+
+#include "wl1271_reg.h"
+
+#define WSPI_CMD_READ 0x40000000
+#define WSPI_CMD_WRITE 0x00000000
+#define WSPI_CMD_FIXED 0x20000000
+#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000
+#define WSPI_CMD_BYTE_LENGTH_OFFSET 17
+#define WSPI_CMD_BYTE_ADDR 0x0001FFFF
+
+#define WSPI_INIT_CMD_CRC_LEN 5
+
+#define WSPI_INIT_CMD_START 0x00
+#define WSPI_INIT_CMD_TX 0x40
+/* the extra bypass bit is sampled by the TNET as '1' */
+#define WSPI_INIT_CMD_BYPASS_BIT 0x80
+#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07
+#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80
+#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00
+#define WSPI_INIT_CMD_IOD 0x40
+#define WSPI_INIT_CMD_IP 0x20
+#define WSPI_INIT_CMD_CS 0x10
+#define WSPI_INIT_CMD_WS 0x08
+#define WSPI_INIT_CMD_WSPI 0x01
+#define WSPI_INIT_CMD_END 0x01
+
+#define WSPI_INIT_CMD_LEN 8
+
+#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \
+ ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
+#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
+
+static inline struct spi_device *wl_to_spi(struct wl1271 *wl)
+{
+ return wl->if_priv;
+}
+static struct device *wl1271_spi_wl_to_dev(struct wl1271 *wl)
+{
+ return &(wl_to_spi(wl)->dev);
+}
-void wl1271_spi_reset(struct wl1271 *wl)
+static void wl1271_spi_disable_interrupts(struct wl1271 *wl)
+{
+ disable_irq(wl->irq);
+}
+
+static void wl1271_spi_enable_interrupts(struct wl1271 *wl)
+{
+ enable_irq(wl->irq);
+}
+
+static void wl1271_spi_reset(struct wl1271 *wl)
{
u8 *cmd;
struct spi_transfer t;
@@ -53,12 +104,13 @@ void wl1271_spi_reset(struct wl1271 *wl)
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
- spi_sync(wl->spi, &m);
+ spi_sync(wl_to_spi(wl), &m);
	wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
+	kfree(cmd);
}
-void wl1271_spi_init(struct wl1271 *wl)
+static void wl1271_spi_init(struct wl1271 *wl)
{
u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
struct spi_transfer t;
@@ -107,48 +159,25 @@ void wl1271_spi_init(struct wl1271 *wl)
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
- spi_sync(wl->spi, &m);
+ spi_sync(wl_to_spi(wl), &m);
	wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+	kfree(cmd);
}
#define WL1271_BUSY_WORD_TIMEOUT 1000
-/* FIXME: Check busy words, removed due to SPI bug */
-#if 0
-static void wl1271_spi_read_busy(struct wl1271 *wl, void *buf, size_t len)
+static int wl1271_spi_read_busy(struct wl1271 *wl)
{
struct spi_transfer t[1];
struct spi_message m;
u32 *busy_buf;
int num_busy_bytes = 0;
- wl1271_info("spi read BUSY!");
-
- /*
- * Look for the non-busy word in the read buffer, and if found,
- * read in the remaining data into the buffer.
- */
- busy_buf = (u32 *)buf;
- for (; (u32)busy_buf < (u32)buf + len; busy_buf++) {
- num_busy_bytes += sizeof(u32);
- if (*busy_buf & 0x1) {
- spi_message_init(&m);
- memset(t, 0, sizeof(t));
- memmove(buf, busy_buf, len - num_busy_bytes);
- t[0].rx_buf = buf + (len - num_busy_bytes);
- t[0].len = num_busy_bytes;
- spi_message_add_tail(&t[0], &m);
- spi_sync(wl->spi, &m);
- return;
- }
- }
-
/*
* Read further busy words from SPI until a non-busy word is
* encountered, then read the data itself into the buffer.
*/
- wl1271_info("spi read BUSY-polling needed!");
num_busy_bytes = WL1271_BUSY_WORD_TIMEOUT;
busy_buf = wl->buffer_busyword;
@@ -158,28 +187,21 @@ static void wl1271_spi_read_busy(struct wl1271 *wl, void *buf, size_t len)
memset(t, 0, sizeof(t));
t[0].rx_buf = busy_buf;
t[0].len = sizeof(u32);
+ t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
- spi_sync(wl->spi, &m);
-
- if (*busy_buf & 0x1) {
- spi_message_init(&m);
- memset(t, 0, sizeof(t));
- t[0].rx_buf = buf;
- t[0].len = len;
- spi_message_add_tail(&t[0], &m);
- spi_sync(wl->spi, &m);
- return;
- }
+ spi_sync(wl_to_spi(wl), &m);
+
+ if (*busy_buf & 0x1)
+ return 0;
}
/* The SPI bus is unresponsive, the read failed. */
- memset(buf, 0, len);
wl1271_error("SPI read busy-word timeout!\n");
+ return -ETIMEDOUT;
}
-#endif
-void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
+static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
{
struct spi_transfer t[3];
struct spi_message m;
@@ -202,28 +224,38 @@ void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
t[0].tx_buf = cmd;
t[0].len = 4;
+ t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
/* Busy and non busy words read */
t[1].rx_buf = busy_buf;
t[1].len = WL1271_BUSY_WORD_LEN;
+ t[1].cs_change = true;
spi_message_add_tail(&t[1], &m);
- t[2].rx_buf = buf;
- t[2].len = len;
- spi_message_add_tail(&t[2], &m);
+ spi_sync(wl_to_spi(wl), &m);
- spi_sync(wl->spi, &m);
+ if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
+ wl1271_spi_read_busy(wl)) {
+ memset(buf, 0, len);
+ return;
+ }
- /* FIXME: Check busy words, removed due to SPI bug */
- /* if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1))
- wl1271_spi_read_busy(wl, buf, len); */
+ spi_message_init(&m);
+ memset(t, 0, sizeof(t));
+
+ t[0].rx_buf = buf;
+ t[0].len = len;
+ t[0].cs_change = true;
+ spi_message_add_tail(&t[0], &m);
+
+ spi_sync(wl_to_spi(wl), &m);
wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len);
}
-void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
+static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
size_t len, bool fixed)
{
struct spi_transfer t[2];
@@ -251,8 +283,181 @@ void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
t[1].len = len;
spi_message_add_tail(&t[1], &m);
- spi_sync(wl->spi, &m);
+ spi_sync(wl_to_spi(wl), &m);
wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
}
+
+static irqreturn_t wl1271_irq(int irq, void *cookie)
+{
+ struct wl1271 *wl;
+ unsigned long flags;
+
+ wl1271_debug(DEBUG_IRQ, "IRQ");
+
+ wl = cookie;
+
+ /* complete the ELP completion */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ if (wl->elp_compl) {
+ complete(wl->elp_compl);
+ wl->elp_compl = NULL;
+ }
+
+ if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
+ ieee80211_queue_work(wl->hw, &wl->irq_work);
+ set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void wl1271_spi_set_power(struct wl1271 *wl, bool enable)
+{
+ if (wl->set_power)
+ wl->set_power(enable);
+}
+
+static struct wl1271_if_operations spi_ops = {
+ .read = wl1271_spi_raw_read,
+ .write = wl1271_spi_raw_write,
+ .reset = wl1271_spi_reset,
+ .init = wl1271_spi_init,
+ .power = wl1271_spi_set_power,
+ .dev = wl1271_spi_wl_to_dev,
+ .enable_irq = wl1271_spi_enable_interrupts,
+ .disable_irq = wl1271_spi_disable_interrupts
+};
+
+static int __devinit wl1271_probe(struct spi_device *spi)
+{
+ struct wl12xx_platform_data *pdata;
+ struct ieee80211_hw *hw;
+ struct wl1271 *wl;
+ int ret;
+
+ pdata = spi->dev.platform_data;
+ if (!pdata) {
+ wl1271_error("no platform data");
+ return -ENODEV;
+ }
+
+ hw = wl1271_alloc_hw();
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ wl = hw->priv;
+
+ dev_set_drvdata(&spi->dev, wl);
+ wl->if_priv = spi;
+
+ wl->if_ops = &spi_ops;
+
+ /* This is the only SPI value that we need to set here, the rest
+ * comes from the board-peripherals file */
+ spi->bits_per_word = 32;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ wl1271_error("spi_setup failed");
+ goto out_free;
+ }
+
+ wl->set_power = pdata->set_power;
+ if (!wl->set_power) {
+ wl1271_error("set power function missing in platform data");
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ wl->irq = spi->irq;
+ if (wl->irq < 0) {
+ wl1271_error("irq missing in platform data");
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+ if (ret < 0) {
+ wl1271_error("request_irq() failed: %d", ret);
+ goto out_free;
+ }
+
+ set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+
+ disable_irq(wl->irq);
+
+ ret = wl1271_init_ieee80211(wl);
+ if (ret)
+ goto out_irq;
+
+ ret = wl1271_register_hw(wl);
+ if (ret)
+ goto out_irq;
+
+ wl1271_notice("initialized");
+
+ return 0;
+
+ out_irq:
+ free_irq(wl->irq, wl);
+
+ out_free:
+ wl1271_free_hw(wl);
+
+ return ret;
+}
+
+static int __devexit wl1271_remove(struct spi_device *spi)
+{
+ struct wl1271 *wl = dev_get_drvdata(&spi->dev);
+
+ free_irq(wl->irq, wl);
+
+ wl1271_unregister_hw(wl);
+ wl1271_free_hw(wl);
+
+ return 0;
+}
+
+static struct spi_driver wl1271_spi_driver = {
+ .driver = {
+ .name = "wl1271_spi",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+
+ .probe = wl1271_probe,
+ .remove = __devexit_p(wl1271_remove),
+};
+
+static int __init wl1271_init(void)
+{
+ int ret;
+
+ ret = spi_register_driver(&wl1271_spi_driver);
+ if (ret < 0) {
+ wl1271_error("failed to register spi driver: %d", ret);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static void __exit wl1271_exit(void)
+{
+ spi_unregister_driver(&wl1271_spi_driver);
+
+ wl1271_notice("unloaded");
+}
+
+module_init(wl1271_init);
+module_exit(wl1271_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
+MODULE_FIRMWARE(WL1271_FW_NAME);
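The masks moved into wl1271_spi.c above describe the 32-bit WSPI command word that precedes every raw transfer: a read/write bit, a fixed-address bit, the byte length in bits 17..28 and the target address in the low 17 bits. A hedged sketch of how such a word could be assembled from those masks; the helper name and the example address are illustrative only.

static u32 example_wspi_read_cmd(u32 addr, size_t len, bool fixed)
{
	u32 cmd = WSPI_CMD_READ;

	if (fixed)
		cmd |= WSPI_CMD_FIXED;

	cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH;
	cmd |= addr & WSPI_CMD_BYTE_ADDR;

	/* e.g. addr = 0x1ce, len = 8, !fixed  ->  0x401001ce */
	return cmd;
}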
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.h b/drivers/net/wireless/wl12xx/wl1271_spi.h
deleted file mode 100644
index a803596..0000000
--- a/drivers/net/wireless/wl12xx/wl1271_spi.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * This file is part of wl1271
- *
- * Copyright (C) 1998-2009 Texas Instruments. All rights reserved.
- * Copyright (C) 2008-2009 Nokia Corporation
- *
- * Contact: Luciano Coelho <luciano.coelho@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#ifndef __WL1271_SPI_H__
-#define __WL1271_SPI_H__
-
-#include "wl1271_reg.h"
-
-#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
-
-#define HW_PARTITION_REGISTERS_ADDR 0x1ffc0
-#define HW_PART0_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR)
-#define HW_PART0_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 4)
-#define HW_PART1_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 8)
-#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
-#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
-#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
-#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
-
-#define HW_ACCESS_REGISTER_SIZE 4
-
-#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
-
-#define WSPI_CMD_READ 0x40000000
-#define WSPI_CMD_WRITE 0x00000000
-#define WSPI_CMD_FIXED 0x20000000
-#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000
-#define WSPI_CMD_BYTE_LENGTH_OFFSET 17
-#define WSPI_CMD_BYTE_ADDR 0x0001FFFF
-
-#define WSPI_INIT_CMD_CRC_LEN 5
-
-#define WSPI_INIT_CMD_START 0x00
-#define WSPI_INIT_CMD_TX 0x40
-/* the extra bypass bit is sampled by the TNET as '1' */
-#define WSPI_INIT_CMD_BYPASS_BIT 0x80
-#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07
-#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80
-#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00
-#define WSPI_INIT_CMD_IOD 0x40
-#define WSPI_INIT_CMD_IP 0x20
-#define WSPI_INIT_CMD_CS 0x10
-#define WSPI_INIT_CMD_WS 0x08
-#define WSPI_INIT_CMD_WSPI 0x01
-#define WSPI_INIT_CMD_END 0x01
-
-#define WSPI_INIT_CMD_LEN 8
-
-#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \
- ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
-#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
-
-#define OCP_CMD_LOOP 32
-
-#define OCP_CMD_WRITE 0x1
-#define OCP_CMD_READ 0x2
-
-#define OCP_READY_MASK BIT(18)
-#define OCP_STATUS_MASK (BIT(16) | BIT(17))
-
-#define OCP_STATUS_NO_RESP 0x00000
-#define OCP_STATUS_OK 0x10000
-#define OCP_STATUS_REQ_FAILED 0x20000
-#define OCP_STATUS_RESP_ERROR 0x30000
-
-/* Raw target IO, address is not translated */
-void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed);
-void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed);
-
-/* INIT and RESET words */
-void wl1271_spi_reset(struct wl1271 *wl);
-void wl1271_spi_init(struct wl1271 *wl);
-#endif /* __WL1271_SPI_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/wl1271_testmode.c
index 5c1c4f5..554deb4 100644
--- a/drivers/net/wireless/wl12xx/wl1271_testmode.c
+++ b/drivers/net/wireless/wl12xx/wl1271_testmode.c
@@ -26,7 +26,6 @@
#include <net/genetlink.h>
#include "wl1271.h"
-#include "wl1271_spi.h"
#include "wl1271_acx.h"
#define WL1271_TM_MAX_DATA_LENGTH 1024
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index 811e739..62db795 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -25,7 +25,6 @@
#include <linux/module.h>
#include "wl1271.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#include "wl1271_reg.h"
#include "wl1271_ps.h"
@@ -47,7 +46,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
{
struct wl1271_tx_hw_descr *desc;
u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
- u32 total_blocks, excluded;
+ u32 total_blocks;
int id, ret = -EBUSY;
/* allocate free identifier for the packet */
@@ -57,12 +56,8 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
/* approximate the number of blocks required for this packet
in the firmware */
- /* FIXME: try to figure out what is done here and make it cleaner */
- total_blocks = (total_len + 20) >> TX_HW_BLOCK_SHIFT_DIV;
- excluded = (total_blocks << 2) + ((total_len + 20) & 0xff) + 34;
- total_blocks += (excluded > 252) ? 2 : 1;
- total_blocks += TX_HW_BLOCK_SPARE;
-
+ total_blocks = total_len + TX_HW_BLOCK_SIZE - 1;
+ total_blocks = total_blocks / TX_HW_BLOCK_SIZE + TX_HW_BLOCK_SPARE;
if (total_blocks <= wl->tx_blocks_available) {
desc = (struct wl1271_tx_hw_descr *)skb_push(
skb, total_len - skb->len);
@@ -87,8 +82,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
u32 extra, struct ieee80211_tx_info *control)
{
+ struct timespec ts;
struct wl1271_tx_hw_descr *desc;
int pad, ac;
+ s64 hosttime;
u16 tx_attr;
desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -102,8 +99,9 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
}
/* configure packet life time */
- desc->start_time = cpu_to_le32(jiffies_to_usecs(jiffies) -
- wl->time_offset);
+ getnstimeofday(&ts);
+ hosttime = (timespec_to_ns(&ts) >> 10);
+ desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
/* configure the tx attributes */
@@ -170,7 +168,6 @@ static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb,
/* write packet new counter into the write access register */
wl->tx_packets_count++;
- wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
desc = (struct wl1271_tx_hw_descr *) skb->data;
wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)",
@@ -223,7 +220,7 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
return ret;
}
-static u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
+u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
{
struct ieee80211_supported_band *band;
u32 enabled_rates = 0;
@@ -245,6 +242,7 @@ void wl1271_tx_work(struct work_struct *work)
struct sk_buff *skb;
bool woken_up = false;
u32 sta_rates = 0;
+ u32 prev_tx_packets_count;
int ret;
/* check if the rates supported by the AP have changed */
@@ -261,6 +259,8 @@ void wl1271_tx_work(struct work_struct *work)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
+ prev_tx_packets_count = wl->tx_packets_count;
+
/* if rates have changed, re-configure the rate policy */
if (unlikely(sta_rates)) {
wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
@@ -271,31 +271,26 @@ void wl1271_tx_work(struct work_struct *work)
if (!woken_up) {
ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0)
- goto out;
+ goto out_ack;
woken_up = true;
}
ret = wl1271_tx_frame(wl, skb);
if (ret == -EBUSY) {
- /* firmware buffer is full, stop queues */
- wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, "
- "stop queues");
- ieee80211_stop_queues(wl->hw);
- set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
+ /* firmware buffer is full, let's stop transmitting. */
skb_queue_head(&wl->tx_queue, skb);
- goto out;
+ goto out_ack;
} else if (ret < 0) {
dev_kfree_skb(skb);
- goto out;
- } else if (test_and_clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED,
- &wl->flags)) {
- /* firmware buffer has space, restart queues */
- wl1271_debug(DEBUG_TX,
- "complete_packet: waking queues");
- ieee80211_wake_queues(wl->hw);
+ goto out_ack;
}
}
+out_ack:
+ /* interrupt the firmware with the new packets */
+ if (prev_tx_packets_count != wl->tx_packets_count)
+ wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
+
out:
if (woken_up)
wl1271_ps_elp_sleep(wl);
@@ -308,11 +303,12 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
{
struct ieee80211_tx_info *info;
struct sk_buff *skb;
- u16 seq;
int id = result->id;
+ int rate = -1;
+ u8 retries = 0;
/* check for id legality */
- if (id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL) {
+ if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) {
wl1271_warning("TX result illegal id: %d", id);
return;
}
@@ -320,31 +316,29 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
skb = wl->tx_frames[id];
info = IEEE80211_SKB_CB(skb);
- /* update packet status */
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- if (result->status == TX_SUCCESS)
+ /* update the TX status info */
+ if (result->status == TX_SUCCESS) {
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
- if (result->status & TX_RETRY_EXCEEDED) {
- /* FIXME */
- /* info->status.excessive_retries = 1; */
- wl->stats.excessive_retries++;
- }
+ rate = wl1271_rate_to_idx(wl, result->rate_class_index);
+ retries = result->ack_failures;
+ } else if (result->status == TX_RETRY_EXCEEDED) {
+ wl->stats.excessive_retries++;
+ retries = result->ack_failures;
}
- /* FIXME */
- /* info->status.retry_count = result->ack_failures; */
+ info->status.rates[0].idx = rate;
+ info->status.rates[0].count = retries;
+ info->status.rates[0].flags = 0;
+ info->status.ack_signal = -1;
+
wl->stats.retry_count += result->ack_failures;
/* update security sequence number */
- seq = wl->tx_security_seq_16 +
- (result->lsb_security_sequence_number -
- wl->tx_security_last_seq);
+ wl->tx_security_seq += (result->lsb_security_sequence_number -
+ wl->tx_security_last_seq);
wl->tx_security_last_seq = result->lsb_security_sequence_number;
- if (seq < wl->tx_security_seq_16)
- wl->tx_security_seq_32++;
- wl->tx_security_seq_16 = seq;
-
/* remove private header from packet */
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
@@ -367,23 +361,29 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
}
/* Called upon reception of a TX complete interrupt */
-void wl1271_tx_complete(struct wl1271 *wl, u32 count)
+void wl1271_tx_complete(struct wl1271 *wl)
{
struct wl1271_acx_mem_map *memmap =
(struct wl1271_acx_mem_map *)wl->target_mem_map;
+ u32 count, fw_counter;
u32 i;
- wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
-
/* read the tx results from the chipset */
wl1271_read(wl, le32_to_cpu(memmap->tx_result),
wl->tx_res_if, sizeof(*wl->tx_res_if), false);
+ fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
+
+ /* write host counter to chipset (to ack) */
+ wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
+ offsetof(struct wl1271_tx_hw_res_if,
+ tx_result_host_counter), fw_counter);
+
+ count = fw_counter - wl->tx_results_count;
+ wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
/* verify that the result buffer is not getting overrun */
- if (count > TX_HW_RESULT_QUEUE_LEN) {
+ if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
wl1271_warning("TX result overflow from chipset: %d", count);
- count = TX_HW_RESULT_QUEUE_LEN;
- }
/* process the results */
for (i = 0; i < count; i++) {
@@ -397,11 +397,18 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
wl->tx_results_count++;
}
- /* write host counter to chipset (to ack) */
- wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
- offsetof(struct wl1271_tx_hw_res_if,
- tx_result_host_counter),
- le32_to_cpu(wl->tx_res_if->tx_result_fw_counter));
+ if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
+ skb_queue_len(&wl->tx_queue) <= WL1271_TX_QUEUE_LOW_WATERMARK) {
+ unsigned long flags;
+
+ /* firmware buffer has space, restart queues */
+ wl1271_debug(DEBUG_TX, "tx_complete: waking queues");
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ ieee80211_wake_queues(wl->hw);
+ clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+ }
}
/* caller must hold wl->mutex */
@@ -409,31 +416,19 @@ void wl1271_tx_flush(struct wl1271 *wl)
{
int i;
struct sk_buff *skb;
- struct ieee80211_tx_info *info;
/* TX failure */
/* control->flags = 0; FIXME */
while ((skb = skb_dequeue(&wl->tx_queue))) {
- info = IEEE80211_SKB_CB(skb);
-
wl1271_debug(DEBUG_TX, "flushing skb 0x%p", skb);
-
- if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
- continue;
-
ieee80211_tx_status(wl->hw, skb);
}
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
if (wl->tx_frames[i] != NULL) {
skb = wl->tx_frames[i];
- info = IEEE80211_SKB_CB(skb);
-
- if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
- continue;
-
- ieee80211_tx_status(wl->hw, skb);
wl->tx_frames[i] = NULL;
+ ieee80211_tx_status(wl->hw, skb);
}
}
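The rewritten accounting in wl1271_tx_allocate() is a plain ceiling division by TX_HW_BLOCK_SIZE plus TX_HW_BLOCK_SPARE, and the new desc->start_time is host time in units of nanoseconds shifted right by 10 (roughly microseconds). A small worked sketch, with the 400-byte length and the helper names chosen arbitrarily:

static u32 example_tx_blocks(u32 total_len)
{
	/* total_len = 400: (400 + 251) / 252 = 2 blocks, plus 2 spare = 4 */
	return (total_len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
	       TX_HW_BLOCK_SPARE;
}

static u32 example_start_time(void)
{
	struct timespec ts;

	getnstimeofday(&ts);
	/* ns >> 10 is ns / 1024, about 2% away from true microseconds */
	return (u32)(timespec_to_ns(&ts) >> 10);
}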
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 17e405a..3b8b7ac 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -26,7 +26,7 @@
#define __WL1271_TX_H__
#define TX_HW_BLOCK_SPARE 2
-#define TX_HW_BLOCK_SHIFT_DIV 8
+#define TX_HW_BLOCK_SIZE 252
#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
/* The chipset reference driver states, that the "aid" value 1
@@ -125,9 +125,6 @@ struct wl1271_tx_hw_res_if {
static inline int wl1271_tx_get_queue(int queue)
{
- /* FIXME: use best effort until WMM is enabled */
- return CONF_TX_AC_BE;
-
switch (queue) {
case 0:
return CONF_TX_AC_VO;
@@ -160,7 +157,9 @@ static inline int wl1271_tx_ac_to_tid(int ac)
}
void wl1271_tx_work(struct work_struct *work);
-void wl1271_tx_complete(struct wl1271 *wl, u32 count);
+void wl1271_tx_complete(struct wl1271 *wl);
void wl1271_tx_flush(struct wl1271 *wl);
+u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
+u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
#endif
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 5e5d24c..376c6b9 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1307,7 +1307,7 @@ static void wl3501_tx_timeout(struct net_device *dev)
printk(KERN_ERR "%s: Error %d resetting card on Tx timeout!\n",
dev->name, rc);
else {
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
}
@@ -1326,7 +1326,6 @@ static netdev_tx_t wl3501_hard_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&this->lock, flags);
enabled = wl3501_block_interrupt(this);
- dev->trans_start = jiffies;
rc = wl3501_send_pkt(this, skb->data, skb->len);
if (enabled)
wl3501_unblock_interrupt(this);
@@ -1455,8 +1454,6 @@ static void wl3501_detach(struct pcmcia_device *link)
if (link->priv)
free_netdev(link->priv);
-
- return;
}
static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info,
@@ -1836,32 +1833,32 @@ out:
}
static const iw_handler wl3501_handler[] = {
- [SIOCGIWNAME - SIOCIWFIRST] = wl3501_get_name,
- [SIOCSIWFREQ - SIOCIWFIRST] = wl3501_set_freq,
- [SIOCGIWFREQ - SIOCIWFIRST] = wl3501_get_freq,
- [SIOCSIWMODE - SIOCIWFIRST] = wl3501_set_mode,
- [SIOCGIWMODE - SIOCIWFIRST] = wl3501_get_mode,
- [SIOCGIWSENS - SIOCIWFIRST] = wl3501_get_sens,
- [SIOCGIWRANGE - SIOCIWFIRST] = wl3501_get_range,
- [SIOCSIWSPY - SIOCIWFIRST] = iw_handler_set_spy,
- [SIOCGIWSPY - SIOCIWFIRST] = iw_handler_get_spy,
- [SIOCSIWTHRSPY - SIOCIWFIRST] = iw_handler_set_thrspy,
- [SIOCGIWTHRSPY - SIOCIWFIRST] = iw_handler_get_thrspy,
- [SIOCSIWAP - SIOCIWFIRST] = wl3501_set_wap,
- [SIOCGIWAP - SIOCIWFIRST] = wl3501_get_wap,
- [SIOCSIWSCAN - SIOCIWFIRST] = wl3501_set_scan,
- [SIOCGIWSCAN - SIOCIWFIRST] = wl3501_get_scan,
- [SIOCSIWESSID - SIOCIWFIRST] = wl3501_set_essid,
- [SIOCGIWESSID - SIOCIWFIRST] = wl3501_get_essid,
- [SIOCSIWNICKN - SIOCIWFIRST] = wl3501_set_nick,
- [SIOCGIWNICKN - SIOCIWFIRST] = wl3501_get_nick,
- [SIOCGIWRATE - SIOCIWFIRST] = wl3501_get_rate,
- [SIOCGIWRTS - SIOCIWFIRST] = wl3501_get_rts_threshold,
- [SIOCGIWFRAG - SIOCIWFIRST] = wl3501_get_frag_threshold,
- [SIOCGIWTXPOW - SIOCIWFIRST] = wl3501_get_txpow,
- [SIOCGIWRETRY - SIOCIWFIRST] = wl3501_get_retry,
- [SIOCGIWENCODE - SIOCIWFIRST] = wl3501_get_encode,
- [SIOCGIWPOWER - SIOCIWFIRST] = wl3501_get_power,
+ IW_HANDLER(SIOCGIWNAME, wl3501_get_name),
+ IW_HANDLER(SIOCSIWFREQ, wl3501_set_freq),
+ IW_HANDLER(SIOCGIWFREQ, wl3501_get_freq),
+ IW_HANDLER(SIOCSIWMODE, wl3501_set_mode),
+ IW_HANDLER(SIOCGIWMODE, wl3501_get_mode),
+ IW_HANDLER(SIOCGIWSENS, wl3501_get_sens),
+ IW_HANDLER(SIOCGIWRANGE, wl3501_get_range),
+ IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+ IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
+ IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
+ IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
+ IW_HANDLER(SIOCSIWAP, wl3501_set_wap),
+ IW_HANDLER(SIOCGIWAP, wl3501_get_wap),
+ IW_HANDLER(SIOCSIWSCAN, wl3501_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, wl3501_get_scan),
+ IW_HANDLER(SIOCSIWESSID, wl3501_set_essid),
+ IW_HANDLER(SIOCGIWESSID, wl3501_get_essid),
+ IW_HANDLER(SIOCSIWNICKN, wl3501_set_nick),
+ IW_HANDLER(SIOCGIWNICKN, wl3501_get_nick),
+ IW_HANDLER(SIOCGIWRATE, wl3501_get_rate),
+ IW_HANDLER(SIOCGIWRTS, wl3501_get_rts_threshold),
+ IW_HANDLER(SIOCGIWFRAG, wl3501_get_frag_threshold),
+ IW_HANDLER(SIOCGIWTXPOW, wl3501_get_txpow),
+ IW_HANDLER(SIOCGIWRETRY, wl3501_get_retry),
+ IW_HANDLER(SIOCGIWENCODE, wl3501_get_encode),
+ IW_HANDLER(SIOCGIWPOWER, wl3501_get_power),
};
static const struct iw_handler_def wl3501_handler_def = {
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 9d12778..390d77f 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -134,7 +134,6 @@ static void zd1201_usbfree(struct urb *urb)
kfree(urb->transfer_buffer);
usb_free_urb(urb);
- return;
}
/* cmdreq message:
@@ -185,7 +184,6 @@ static void zd1201_usbtx(struct urb *urb)
{
struct zd1201 *zd = urb->context;
netif_wake_queue(zd->dev);
- return;
}
/* Incoming data */
@@ -407,7 +405,6 @@ exit:
wake_up(&zd->rxdataq);
kfree(urb->transfer_buffer);
}
- return;
}
static int zd1201_getconfig(struct zd1201 *zd, int rid, void *riddata,
@@ -827,7 +824,6 @@ static netdev_tx_t zd1201_hard_start_xmit(struct sk_buff *skb,
} else {
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
}
kfree_skb(skb);
@@ -845,7 +841,7 @@ static void zd1201_tx_timeout(struct net_device *dev)
usb_unlink_urb(zd->tx_urb);
dev->stats.tx_errors++;
/* Restart the timeout to quiet the watchdog: */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
static int zd1201_set_mac_address(struct net_device *dev, void *p)
@@ -876,7 +872,7 @@ static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev)
static void zd1201_set_multicast(struct net_device *dev)
{
struct zd1201 *zd = netdev_priv(dev);
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI];
int i;
@@ -884,8 +880,8 @@ static void zd1201_set_multicast(struct net_device *dev)
return;
i = 0;
- netdev_for_each_mc_addr(mc, dev)
- memcpy(reqbuf + i++ * ETH_ALEN, mc->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(reqbuf + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf,
netdev_mc_count(dev) * ETH_ALEN, 0);
}
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 16fa289..b0b6660 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -948,20 +948,17 @@ static void set_rx_filter_handler(struct work_struct *work)
}
static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
struct zd_mac *mac = zd_hw_mac(hw);
struct zd_mc_hash hash;
- int i;
+ struct netdev_hw_addr *ha;
zd_mc_clear(&hash);
- for (i = 0; i < mc_count; i++) {
- if (!mclist)
- break;
- dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", mclist->dmi_addr);
- zd_mc_add_addr(&hash, mclist->dmi_addr);
- mclist = mclist->next;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", ha->addr);
+ zd_mc_add_addr(&hash, ha->addr);
}
return hash.low | ((u64)hash.high << 32);
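The same conversion pattern recurs in zd1201 above and in yellowfin later in this patch: the old dev_mc_list walk over mclist->dmi_addr becomes an iteration over struct netdev_hw_addr entries whose address lives in ha->addr. A condensed sketch of the new-style loop; the helper name is illustrative.

static void example_collect_mc(struct net_device *dev, u8 *dst)
{
	struct netdev_hw_addr *ha;
	int i = 0;

	/* each entry carries one multicast address in ha->addr */
	netdev_for_each_mc_addr(ha, dev)
		memcpy(dst + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
}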
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index d91ad1a..c257940 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -664,15 +664,15 @@ static struct urb *alloc_rx_urb(struct zd_usb *usb)
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return NULL;
- buffer = usb_buffer_alloc(udev, USB_MAX_RX_SIZE, GFP_KERNEL,
- &urb->transfer_dma);
+ buffer = usb_alloc_coherent(udev, USB_MAX_RX_SIZE, GFP_KERNEL,
+ &urb->transfer_dma);
if (!buffer) {
usb_free_urb(urb);
return NULL;
}
usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, EP_DATA_IN),
- buffer, USB_MAX_RX_SIZE,
+ buffer, USB_MAX_RX_SIZE,
rx_urb_complete, usb);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -683,8 +683,8 @@ static void free_rx_urb(struct urb *urb)
{
if (!urb)
return;
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
usb_free_urb(urb);
}
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 1e783cc..a7db68d 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -558,7 +558,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
}
/* To exclude tx timeout */
- dev->trans_start = 0xffffffff - TX_TIMEOUT - TX_TIMEOUT;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* We're all ready to go. Start the queue */
netif_wake_queue(dev);
@@ -590,7 +590,7 @@ static void xemaclite_tx_handler(struct net_device *dev)
dev->stats.tx_bytes += lp->deferred_skb->len;
dev_kfree_skb_irq(lp->deferred_skb);
lp->deferred_skb = NULL;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
}
@@ -639,7 +639,6 @@ static void xemaclite_rx_handler(struct net_device *dev)
}
skb_put(skb, len); /* Tell the skb how much data we got */
- skb->dev = dev; /* Fill out required meta-data */
skb->protocol = eth_type_trans(skb, dev);
skb->ip_summed = CHECKSUM_NONE;
@@ -1055,7 +1054,6 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
dev->stats.tx_bytes += len;
dev_kfree_skb(new_skb);
- dev->trans_start = jiffies;
return 0;
}
@@ -1172,7 +1170,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
}
/* Get the virtual base address for the device */
- lp->base_addr = ioremap(r_mem.start, r_mem.end - r_mem.start + 1);
+ lp->base_addr = ioremap(r_mem.start, resource_size(&r_mem));
if (NULL == lp->base_addr) {
dev_err(dev, "EmacLite: Could not allocate iomem\n");
rc = -EIO;
@@ -1225,7 +1223,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
return 0;
error1:
- release_mem_region(ndev->mem_start, r_mem.end - r_mem.start + 1);
+ release_mem_region(ndev->mem_start, resource_size(&r_mem));
error2:
xemaclite_remove_ndev(ndev);
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index ede5b24..4eb67ae 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -564,7 +564,6 @@ static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value
for (i = 10000; i >= 0; i--)
if ((ioread16(ioaddr + MII_Status) & 1) == 0)
break;
- return;
}
@@ -1299,25 +1298,25 @@ static void set_rx_mode(struct net_device *dev)
/* Too many to filter well, or accept all multicasts. */
iowrite16(0x000B, ioaddr + AddrMode);
} else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u16 hash_table[4];
int i;
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
unsigned int bit;
/* Due to a bug in the early chip versions, multiple filter
slots must be set for each address. */
if (yp->drv_flags & HasMulticastBug) {
- bit = (ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f;
+ bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
hash_table[bit >> 4] |= (1 << bit);
- bit = (ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f;
+ bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
hash_table[bit >> 4] |= (1 << bit);
- bit = (ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f;
+ bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
hash_table[bit >> 4] |= (1 << bit);
}
- bit = (ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f;
+ bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
hash_table[bit >> 4] |= (1 << bit);
}
/* Copy the hash table to the chip. */
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index dbfef8d..c3a3292 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -587,7 +587,6 @@ static netdev_tx_t znet_send_packet(struct sk_buff *skb, struct net_device *dev)
}
spin_unlock_irqrestore (&znet->lock, flags);
- dev->trans_start = jiffies;
netif_start_queue (dev);
if (znet_debug > 4)
@@ -802,7 +801,6 @@ static void znet_rx(struct net_device *dev)
/* If any worth-while packets have been received, dev_rint()
has done a mark_bh(INET_BH) for us and will work on them
when we get to the bottom-half routine. */
- return;
}
/* The inverse routine to znet_open(). */
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c
index 9548cbb..b78a38d 100644
--- a/drivers/net/zorro8390.c
+++ b/drivers/net/zorro8390.c
@@ -431,7 +431,6 @@ static void zorro8390_block_output(struct net_device *dev, int count,
z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
static void __devexit zorro8390_remove_one(struct zorro_dev *z)
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 6ecbfb2..e525263 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -108,7 +108,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status);
static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status);
static void ibm_handle_events(acpi_handle handle, u32 event, void *context);
static int ibm_get_table_from_acpi(char **bufp);
-static ssize_t ibm_read_apci_table(struct kobject *kobj,
+static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t size);
static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
@@ -351,6 +351,7 @@ read_table_done:
/**
* ibm_read_apci_table - callback for the sysfs apci_table file
+ * @filp: the open sysfs file
* @kobj: the kobject this binary attribute is a part of
* @bin_attr: struct bin_attribute for this file
* @buffer: the kernel space buffer to fill
@@ -364,7 +365,7 @@ read_table_done:
* things get really tricky here...
* our solution is to only allow reading the table in all at once.
*/
-static ssize_t ibm_read_apci_table(struct kobject *kobj,
+static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t size)
{
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index fad9398..6309c5a 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -21,6 +21,7 @@
#include <linux/stat.h>
#include <linux/topology.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
@@ -357,7 +358,8 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
struct device_attribute vga_attr = __ATTR_RO(boot_vga);
static ssize_t
-pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_read_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
@@ -366,7 +368,7 @@ pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
u8 *data = (u8*) buf;
/* Several chips lock up trying to read undefined config space */
- if (capable(CAP_SYS_ADMIN)) {
+ if (cap_raised(filp->f_cred->cap_effective, CAP_SYS_ADMIN)) {
size = dev->cfg_size;
} else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
size = 128;
@@ -430,7 +432,8 @@ pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
}
static ssize_t
-pci_write_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_write_config(struct file* filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
@@ -487,7 +490,8 @@ pci_write_config(struct kobject *kobj, struct bin_attribute *bin_attr,
}
static ssize_t
-read_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
+read_vpd_attr(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *dev =
@@ -502,7 +506,8 @@ read_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
}
static ssize_t
-write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
+write_vpd_attr(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *dev =
@@ -519,6 +524,7 @@ write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
#ifdef HAVE_PCI_LEGACY
/**
* pci_read_legacy_io - read byte(s) from legacy I/O port space
+ * @filp: open sysfs file
* @kobj: kobject corresponding to file to read from
* @bin_attr: struct bin_attribute for this file
* @buf: buffer to store results
@@ -529,7 +535,8 @@ write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
* callback routine (pci_legacy_read).
*/
static ssize_t
-pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_read_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(container_of(kobj,
@@ -545,6 +552,7 @@ pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
/**
* pci_write_legacy_io - write byte(s) to legacy I/O port space
+ * @filp: open sysfs file
* @kobj: kobject corresponding to file to read from
* @bin_attr: struct bin_attribute for this file
* @buf: buffer containing value to be written
@@ -555,7 +563,8 @@ pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
* callback routine (pci_legacy_write).
*/
static ssize_t
-pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_write_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(container_of(kobj,
@@ -570,6 +579,7 @@ pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
/**
* pci_mmap_legacy_mem - map legacy PCI memory into user memory space
+ * @filp: open sysfs file
* @kobj: kobject corresponding to device to be mapped
* @attr: struct bin_attribute for this file
* @vma: struct vm_area_struct passed to mmap
@@ -579,7 +589,8 @@ pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
* memory space.
*/
static int
-pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
+pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct pci_bus *bus = to_pci_bus(container_of(kobj,
@@ -591,6 +602,7 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
/**
* pci_mmap_legacy_io - map legacy PCI IO into user memory space
+ * @filp: open sysfs file
* @kobj: kobject corresponding to device to be mapped
* @attr: struct bin_attribute for this file
* @vma: struct vm_area_struct passed to mmap
@@ -600,7 +612,8 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
* memory space. Returns -ENOSYS if the operation isn't supported
*/
static int
-pci_mmap_legacy_io(struct kobject *kobj, struct bin_attribute *attr,
+pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct pci_bus *bus = to_pci_bus(container_of(kobj,
@@ -750,14 +763,16 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
}
static int
-pci_mmap_resource_uc(struct kobject *kobj, struct bin_attribute *attr,
+pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 0);
}
static int
-pci_mmap_resource_wc(struct kobject *kobj, struct bin_attribute *attr,
+pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 1);
@@ -861,6 +876,7 @@ void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
/**
* pci_write_rom - used to enable access to the PCI ROM display
+ * @filp: sysfs file
* @kobj: kernel object handle
* @bin_attr: struct bin_attribute for this file
* @buf: user input
@@ -870,7 +886,8 @@ void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
* writing anything except 0 enables it
*/
static ssize_t
-pci_write_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_write_rom(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
@@ -885,6 +902,7 @@ pci_write_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
/**
* pci_read_rom - read a PCI ROM
+ * @filp: sysfs file
* @kobj: kernel object handle
* @bin_attr: struct bin_attribute for this file
* @buf: where to put the data we read from the ROM
@@ -895,7 +913,8 @@ pci_write_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
* device corresponding to @kobj.
*/
static ssize_t
-pci_read_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_read_rom(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
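The sysfs changes from acpiphp_ibm onwards all follow one mechanical rule: binary-attribute read/write/mmap callbacks gain the open struct file as a new first parameter, which pci_read_config() above uses to check the opener's credentials via filp->f_cred. A sketch of the updated read prototype; the function name is illustrative.

static ssize_t example_bin_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr,
				char *buf, loff_t off, size_t count)
{
	/* filp->f_cred identifies the task that opened the sysfs file */
	return 0;
}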
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 60d428b..8844bc3e 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -1531,7 +1531,7 @@ static ssize_t pccard_extract_cis(struct pcmcia_socket *s, char *buf,
}
-static ssize_t pccard_show_cis(struct kobject *kobj,
+static ssize_t pccard_show_cis(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -1562,7 +1562,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj,
}
-static ssize_t pccard_store_cis(struct kobject *kobj,
+static ssize_t pccard_store_cis(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 8fefe5a..baefcf1 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -354,7 +354,7 @@ static enum power_supply_property olpc_bat_props[] = {
#define EEPROM_END 0x80
#define EEPROM_SIZE (EEPROM_END - EEPROM_START)
-static ssize_t olpc_bat_eeprom_read(struct kobject *kobj,
+static ssize_t olpc_bat_eeprom_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t off, size_t count)
{
uint8_t ec_byte;
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index ba742e8..00b4756 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -68,7 +68,8 @@ struct device_attribute rio_dev_attrs[] = {
};
static ssize_t
-rio_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+rio_read_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct rio_dev *dev =
@@ -139,7 +140,8 @@ rio_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
}
static ssize_t
-rio_write_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+rio_write_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct rio_dev *dev =
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index e9aa814..ece4dbd 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -519,7 +519,8 @@ static const struct rtc_class_ops cmos_rtc_ops = {
#define NVRAM_OFFSET (RTC_REG_D + 1)
static ssize_t
-cmos_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
+cmos_nvram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
int retval;
@@ -547,7 +548,8 @@ cmos_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
}
static ssize_t
-cmos_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
+cmos_nvram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct cmos_rtc *cmos;
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 7836c9c..48da85e 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -542,7 +542,8 @@ static void msg_init(struct spi_message *m, struct spi_transfer *x,
}
static ssize_t
-ds1305_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
+ds1305_nvram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct spi_device *spi;
@@ -572,7 +573,8 @@ ds1305_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
}
static ssize_t
-ds1305_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
+ds1305_nvram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct spi_device *spi;
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index c4ec5c1..de033b7 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -556,7 +556,8 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
#define NVRAM_SIZE 56
static ssize_t
-ds1307_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
+ds1307_nvram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client;
@@ -580,7 +581,8 @@ ds1307_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
}
static ssize_t
-ds1307_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
+ds1307_nvram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client;
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 06b8566..37268e9 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -423,8 +423,9 @@ static const struct rtc_class_ops ds1511_rtc_ops = {
};
static ssize_t
-ds1511_nvram_read(struct kobject *kobj, struct bin_attribute *ba,
- char *buf, loff_t pos, size_t size)
+ds1511_nvram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *ba,
+ char *buf, loff_t pos, size_t size)
{
ssize_t count;
@@ -452,8 +453,9 @@ ds1511_nvram_read(struct kobject *kobj, struct bin_attribute *ba,
}
static ssize_t
-ds1511_nvram_write(struct kobject *kobj, struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t size)
+ds1511_nvram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
{
ssize_t count;
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 244f999..ff432e2 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -252,7 +252,7 @@ static const struct rtc_class_ops ds1553_rtc_ops = {
.update_irq_enable = ds1553_rtc_update_irq_enable,
};
-static ssize_t ds1553_nvram_read(struct kobject *kobj,
+static ssize_t ds1553_nvram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
@@ -267,7 +267,7 @@ static ssize_t ds1553_nvram_read(struct kobject *kobj,
return count;
}
-static ssize_t ds1553_nvram_write(struct kobject *kobj,
+static ssize_t ds1553_nvram_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 2b4b0bc..042630c 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -128,7 +128,7 @@ static const struct rtc_class_ops ds1742_rtc_ops = {
.set_time = ds1742_rtc_set_time,
};
-static ssize_t ds1742_nvram_read(struct kobject *kobj,
+static ssize_t ds1742_nvram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
@@ -143,7 +143,7 @@ static ssize_t ds1742_nvram_read(struct kobject *kobj,
return count;
}
-static ssize_t ds1742_nvram_write(struct kobject *kobj,
+static ssize_t ds1742_nvram_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 365ff3a..be8359f 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -343,7 +343,7 @@ static const struct rtc_class_ops m48t02_rtc_ops = {
.set_time = m48t59_rtc_set_time,
};
-static ssize_t m48t59_nvram_read(struct kobject *kobj,
+static ssize_t m48t59_nvram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
@@ -363,7 +363,7 @@ static ssize_t m48t59_nvram_read(struct kobject *kobj,
return cnt;
}
-static ssize_t m48t59_nvram_write(struct kobject *kobj,
+static ssize_t m48t59_nvram_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index b53a001..3b94367 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -244,7 +244,7 @@ static const struct rtc_class_ops stk17ta8_rtc_ops = {
.alarm_irq_enable = stk17ta8_rtc_alarm_irq_enable,
};
-static ssize_t stk17ta8_nvram_read(struct kobject *kobj,
+static ssize_t stk17ta8_nvram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t size)
{
@@ -259,7 +259,7 @@ static ssize_t stk17ta8_nvram_read(struct kobject *kobj,
return count;
}
-static ssize_t stk17ta8_nvram_write(struct kobject *kobj,
+static ssize_t stk17ta8_nvram_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t size)
{
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 20bfc64..ec6313d 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -188,7 +188,7 @@ static const struct rtc_class_ops tx4939_rtc_ops = {
.alarm_irq_enable = tx4939_rtc_alarm_irq_enable,
};
-static ssize_t tx4939_rtc_nvram_read(struct kobject *kobj,
+static ssize_t tx4939_rtc_nvram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
@@ -207,7 +207,7 @@ static ssize_t tx4939_rtc_nvram_read(struct kobject *kobj,
return count;
}
-static ssize_t tx4939_rtc_nvram_write(struct kobject *kobj,
+static ssize_t tx4939_rtc_nvram_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 1d16189..6c9fa15 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -135,7 +135,8 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
/*
* Channel measurement related functions
*/
-static ssize_t chp_measurement_chars_read(struct kobject *kobj,
+static ssize_t chp_measurement_chars_read(struct file *filp,
+ struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -182,7 +183,7 @@ static void chp_measurement_copy_block(struct cmg_entry *buf,
} while (reference_buf.values[0] != buf->values[0]);
}
-static ssize_t chp_measurement_read(struct kobject *kobj,
+static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index e35713d..4ecafbf 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1364,8 +1364,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
ch->protocol = priv->protocol;
if (IS_MPC(priv)) {
- ch->discontact_th = (struct th_header *)
- kzalloc(TH_HEADER_LENGTH, gfp_type());
+ ch->discontact_th = kzalloc(TH_HEADER_LENGTH, gfp_type());
if (ch->discontact_th == NULL)
goto nomem_return;
@@ -1379,8 +1378,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
} else
ccw_num = 8;
- ch->ccw = (struct ccw1 *)
- kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+ ch->ccw = kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
if (ch->ccw == NULL)
goto nomem_return;
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 5978b39..87c24d2 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -669,8 +669,7 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
goto done;
}
- header = (struct th_sweep *)
- kmalloc(sizeof(struct th_sweep), gfp_type());
+ header = kmalloc(sizeof(struct th_sweep), gfp_type());
if (!header) {
dev_kfree_skb_any(sweep_skb);
@@ -1191,8 +1190,7 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
skb_pull(pskb, new_len); /* point to next PDU */
}
} else {
- mpcginfo = (struct mpcg_info *)
- kmalloc(sizeof(struct mpcg_info), gfp_type());
+ mpcginfo = kmalloc(sizeof(struct mpcg_info), gfp_type());
if (mpcginfo == NULL)
goto done;
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 9b19ea1..0f19d54 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1238,8 +1238,7 @@ lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
ipm = lcs_check_addr_entry(card, im4, buf);
if (ipm != NULL)
continue; /* Address already in list. */
- ipm = (struct lcs_ipm_list *)
- kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
+ ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
if (ipm == NULL) {
pr_info("Not enough memory to add"
" new multicast entry!\n");
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index fcd005a..7a44c38 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -179,25 +179,23 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
((prot == QETH_PROT_IPV6) ? \
qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
-#define QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT 0x0101
-#define QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT 0x0101
+#define QETH_IDX_FUNC_LEVEL_OSD 0x0101
#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108
#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108
#define QETH_MODELLIST_ARRAY \
- {{0x1731, 0x01, 0x1732, 0x01, QETH_CARD_TYPE_OSAE, 1, \
- QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
- QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
- QETH_MAX_QUEUES, 0}, \
- {0x1731, 0x05, 0x1732, 0x05, QETH_CARD_TYPE_IQD, 0, \
- QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \
- QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \
- QETH_MAX_QUEUES, 0x103}, \
- {0x1731, 0x06, 0x1732, 0x06, QETH_CARD_TYPE_OSN, 0, \
- QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
- QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
- QETH_MAX_QUEUES, 0}, \
- {0, 0, 0, 0, 0, 0, 0, 0, 0} }
+ {{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \
+ {0x1731, 0x05, 0x1732, QETH_CARD_TYPE_IQD, QETH_MAX_QUEUES, 0x103}, \
+ {0x1731, 0x06, 0x1732, QETH_CARD_TYPE_OSN, QETH_MAX_QUEUES, 0}, \
+ {0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSM, QETH_MAX_QUEUES, 0}, \
+ {0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSX, QETH_MAX_QUEUES, 0}, \
+ {0, 0, 0, 0, 0, 0} }
+#define QETH_CU_TYPE_IND 0
+#define QETH_CU_MODEL_IND 1
+#define QETH_DEV_TYPE_IND 2
+#define QETH_DEV_MODEL_IND 3
+#define QETH_QUEUE_NO_IND 4
+#define QETH_MULTICAST_IND 5
#define QETH_REAL_CARD 1
#define QETH_VLAN_CARD 2
@@ -351,7 +349,7 @@ enum qeth_header_ids {
#define QETH_HDR_EXT_SRC_MAC_ADDR 0x08
#define QETH_HDR_EXT_CSUM_HDR_REQ 0x10
#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
-#define QETH_HDR_EXT_UDP_TSO 0x40 /*bit off for TCP*/
+#define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/
static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
@@ -630,6 +628,7 @@ struct qeth_card_info {
int unique_id;
struct qeth_card_blkt blkt;
__u32 csum_mask;
+ __u32 tx_csum_mask;
enum qeth_ipa_promisc_modes promisc_mode;
};
@@ -739,6 +738,7 @@ struct qeth_card {
atomic_t force_alloc_skb;
struct service_level qeth_service_level;
struct qdio_ssqd_desc ssqd;
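+ /* serializes configuration changes (sysfs attribute stores, set_online/set_offline) */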
+ struct mutex conf_mutex;
};
struct qeth_card_list_struct {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 28f7134..13ef46b 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -53,7 +53,7 @@ struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct device *qeth_core_root_dev;
-static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
+static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY;
static struct lock_class_key qdio_out_skb_queue_key;
static void qeth_send_control_data_cb(struct qeth_channel *,
@@ -111,21 +111,29 @@ static inline const char *qeth_get_cardname(struct qeth_card *card)
{
if (card->info.guestlan) {
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
return " Guest LAN QDIO";
case QETH_CARD_TYPE_IQD:
return " Guest LAN Hiper";
+ case QETH_CARD_TYPE_OSM:
+ return " Guest LAN QDIO - OSM";
+ case QETH_CARD_TYPE_OSX:
+ return " Guest LAN QDIO - OSX";
default:
return " unknown";
}
} else {
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
return " OSD Express";
case QETH_CARD_TYPE_IQD:
return " HiperSockets";
case QETH_CARD_TYPE_OSN:
return " OSN QDIO";
+ case QETH_CARD_TYPE_OSM:
+ return " OSM QDIO";
+ case QETH_CARD_TYPE_OSX:
+ return " OSX QDIO";
default:
return " unknown";
}
@@ -138,16 +146,20 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
{
if (card->info.guestlan) {
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
return "GuestLAN QDIO";
case QETH_CARD_TYPE_IQD:
return "GuestLAN Hiper";
+ case QETH_CARD_TYPE_OSM:
+ return "GuestLAN OSM";
+ case QETH_CARD_TYPE_OSX:
+ return "GuestLAN OSX";
default:
return "unknown";
}
} else {
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
switch (card->info.link_type) {
case QETH_LINK_TYPE_FAST_ETH:
return "OSD_100";
@@ -172,6 +184,10 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
return "HiperSockets";
case QETH_CARD_TYPE_OSN:
return "OSN";
+ case QETH_CARD_TYPE_OSM:
+ return "OSM_1000";
+ case QETH_CARD_TYPE_OSX:
+ return "OSX_10GIG";
default:
return "unknown";
}
@@ -419,7 +435,8 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
-static int qeth_check_idx_response(unsigned char *buffer)
+static int qeth_check_idx_response(struct qeth_card *card,
+ unsigned char *buffer)
{
if (!buffer)
return 0;
@@ -434,6 +451,12 @@ static int qeth_check_idx_response(unsigned char *buffer)
QETH_DBF_TEXT(TRACE, 2, "ckidxres");
QETH_DBF_TEXT(TRACE, 2, " idxterm");
QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
+ if (buffer[4] == 0xf6) {
+ dev_err(&card->gdev->dev,
+ "The qeth device is not configured "
+ "for the OSI layer required by z/VM\n");
+ return -EPERM;
+ }
return -EIO;
}
return 0;
@@ -528,18 +551,19 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel,
struct qeth_ipa_cmd *cmd;
unsigned long flags;
int keep_reply;
+ int rc = 0;
QETH_DBF_TEXT(TRACE, 4, "sndctlcb");
card = CARD_FROM_CDEV(channel->ccwdev);
- if (qeth_check_idx_response(iob->data)) {
+ rc = qeth_check_idx_response(card, iob->data);
+ switch (rc) {
+ case 0:
+ break;
+ case -EIO:
qeth_clear_ipacmd_list(card);
- if (((iob->data[2] & 0xc0) == 0xc0) && iob->data[4] == 0xf6)
- dev_err(&card->gdev->dev,
- "The qeth device is not configured "
- "for the OSI layer required by z/VM\n");
- else
- qeth_schedule_recovery(card);
+ qeth_schedule_recovery(card);
+ default:
goto out;
}
@@ -606,7 +630,7 @@ static int qeth_setup_channel(struct qeth_channel *channel)
QETH_DBF_TEXT(SETUP, 2, "setupch");
for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
- channel->iob[cnt].data = (char *)
+ channel->iob[cnt].data =
kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
if (channel->iob[cnt].data == NULL)
break;
@@ -719,7 +743,7 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
QETH_DBF_TEXT(TRACE, 2, "CGENCHK");
dev_warn(&cdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x ",
+ QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
dev_name(&cdev->dev), dstat, cstat);
print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
16, 1, irb, 64, 1);
@@ -998,9 +1022,8 @@ static void qeth_clean_channel(struct qeth_channel *channel)
kfree(channel->iob[cnt].data);
}
-static int qeth_is_1920_device(struct qeth_card *card)
+static void qeth_get_channel_path_desc(struct qeth_card *card)
{
- int single_queue = 0;
struct ccw_device *ccwdev;
struct channelPath_dsc {
u8 flags;
@@ -1013,17 +1036,25 @@ static int qeth_is_1920_device(struct qeth_card *card)
u8 chpp;
} *chp_dsc;
- QETH_DBF_TEXT(SETUP, 2, "chk_1920");
+ QETH_DBF_TEXT(SETUP, 2, "chp_desc");
ccwdev = card->data.ccwdev;
chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
if (chp_dsc != NULL) {
/* CHPP field bit 6 == 1 -> single queue */
- single_queue = ((chp_dsc->chpp & 0x02) == 0x02);
+ if ((chp_dsc->chpp & 0x02) == 0x02)
+ card->qdio.no_out_queues = 1;
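+ /* derive the initial function level from the channel-path descriptor */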
+ card->info.func_level = 0x4100 + chp_dsc->desc;
kfree(chp_dsc);
}
- QETH_DBF_TEXT_(SETUP, 2, "rc:%x", single_queue);
- return single_queue;
+ if (card->qdio.no_out_queues == 1) {
+ card->qdio.default_out_queue = 0;
+ dev_info(&card->gdev->dev,
+ "Priority Queueing not supported\n");
+ }
+ QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
+ QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
+ return;
}
static void qeth_init_qdio_info(struct qeth_card *card)
@@ -1100,6 +1131,7 @@ static int qeth_setup_card(struct qeth_card *card)
spin_lock_init(&card->lock);
spin_lock_init(&card->ip_lock);
spin_lock_init(&card->thread_mask_lock);
+ mutex_init(&card->conf_mutex);
card->thread_start_mask = 0;
card->thread_allowed_mask = 0;
card->thread_running_mask = 0;
@@ -1170,18 +1202,17 @@ static int qeth_determine_card_type(struct qeth_card *card)
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
- while (known_devices[i][4]) {
- if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
- (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
- card->info.type = known_devices[i][4];
- card->qdio.no_out_queues = known_devices[i][8];
- card->info.is_multicast_different = known_devices[i][9];
- if (qeth_is_1920_device(card)) {
- dev_info(&card->gdev->dev,
- "Priority Queueing not supported\n");
- card->qdio.no_out_queues = 1;
- card->qdio.default_out_queue = 0;
- }
+ while (known_devices[i][QETH_DEV_MODEL_IND]) {
+ if ((CARD_RDEV(card)->id.dev_type ==
+ known_devices[i][QETH_DEV_TYPE_IND]) &&
+ (CARD_RDEV(card)->id.dev_model ==
+ known_devices[i][QETH_DEV_MODEL_IND])) {
+ card->info.type = known_devices[i][QETH_DEV_MODEL_IND];
+ card->qdio.no_out_queues =
+ known_devices[i][QETH_QUEUE_NO_IND];
+ card->info.is_multicast_different =
+ known_devices[i][QETH_MULTICAST_IND];
+ qeth_get_channel_path_desc(card);
return 0;
}
i++;
@@ -1399,22 +1430,20 @@ static void qeth_init_tokens(struct qeth_card *card)
static void qeth_init_func_level(struct qeth_card *card)
{
- if (card->ipato.enabled) {
- if (card->info.type == QETH_CARD_TYPE_IQD)
- card->info.func_level =
- QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
- else
- card->info.func_level =
- QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
- } else {
- if (card->info.type == QETH_CARD_TYPE_IQD)
- /*FIXME:why do we have same values for dis and ena for
- osae??? */
+ switch (card->info.type) {
+ case QETH_CARD_TYPE_IQD:
+ if (card->ipato.enabled)
card->info.func_level =
- QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
+ QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
else
card->info.func_level =
- QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
+ QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
+ break;
+ case QETH_CARD_TYPE_OSD:
+ card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
+ break;
+ default:
+ break;
}
}
@@ -1561,7 +1590,7 @@ static void qeth_idx_write_cb(struct qeth_channel *channel,
card = CARD_FROM_CDEV(channel->ccwdev);
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
+ if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
dev_err(&card->write.ccwdev->dev,
"The adapter is used exclusively by another "
"host\n");
@@ -1597,27 +1626,35 @@ static void qeth_idx_read_cb(struct qeth_channel *channel,
}
card = CARD_FROM_CDEV(channel->ccwdev);
- if (qeth_check_idx_response(iob->data))
+ if (qeth_check_idx_response(card, iob->data))
goto out;
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
+ switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
+ case QETH_IDX_ACT_ERR_EXCL:
dev_err(&card->write.ccwdev->dev,
"The adapter is used exclusively by another "
"host\n");
- else
+ break;
+ case QETH_IDX_ACT_ERR_AUTH:
+ dev_err(&card->read.ccwdev->dev,
+ "Setting the device online failed because of "
+ "insufficient LPAR authorization\n");
+ break;
+ default:
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
" negative reply\n",
dev_name(&card->read.ccwdev->dev));
+ }
goto out;
}
/**
- * temporary fix for microcode bug
- * to revert it,replace OR by AND
- */
+ * temporary fix for microcode bug
+ * to revert it, replace OR by AND
+ */
if ((!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
- (card->info.type == QETH_CARD_TYPE_OSAE))
+ (card->info.type == QETH_CARD_TYPE_OSD))
card->info.portname_required = 1;
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
@@ -1826,7 +1863,7 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
return 1500;
case QETH_CARD_TYPE_IQD:
return card->info.max_mtu;
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
switch (card->info.link_type) {
case QETH_LINK_TYPE_HSTR:
case QETH_LINK_TYPE_LANE_TR:
@@ -1834,6 +1871,9 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
default:
return 1492;
}
+ case QETH_CARD_TYPE_OSM:
+ case QETH_CARD_TYPE_OSX:
+ return 1492;
default:
return 1500;
}
@@ -1844,8 +1884,10 @@ static inline int qeth_get_max_mtu_for_card(int cardtype)
switch (cardtype) {
case QETH_CARD_TYPE_UNKNOWN:
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
case QETH_CARD_TYPE_OSN:
+ case QETH_CARD_TYPE_OSM:
+ case QETH_CARD_TYPE_OSX:
return 61440;
case QETH_CARD_TYPE_IQD:
return 57344;
@@ -1883,7 +1925,9 @@ static inline int qeth_get_mtu_outof_framesize(int framesize)
static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
{
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
+ case QETH_CARD_TYPE_OSM:
+ case QETH_CARD_TYPE_OSX:
return ((mtu >= 576) && (mtu <= 61440));
case QETH_CARD_TYPE_IQD:
return ((mtu >= 576) &&
@@ -1934,6 +1978,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
card->info.link_type = link_type;
} else
card->info.link_type = 0;
+ QETH_DBF_TEXT_(SETUP, 2, "link%d", link_type);
QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
return 0;
}
@@ -1977,6 +2022,7 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
struct qeth_cmd_buffer *iob;
+ int rc = 0;
QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
@@ -1984,8 +2030,15 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
memcpy(&card->token.ulp_connection_r,
QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
+ if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
+ 3)) {
+ QETH_DBF_TEXT(SETUP, 2, "olmlimit");
+ dev_err(&card->gdev->dev, "A connection could not be "
+ "established because of an OLM limit\n");
+ rc = -EMLINK;
+ }
QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
- return 0;
+ return rc;
}
static int qeth_ulp_setup(struct qeth_card *card)
@@ -2238,7 +2291,9 @@ static void qeth_print_status_no_portname(struct qeth_card *card)
void qeth_print_status_message(struct qeth_card *card)
{
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
+ case QETH_CARD_TYPE_OSM:
+ case QETH_CARD_TYPE_OSX:
/* VM will use a non-zero first character
* to indicate a HiperSockets like reporting
* of the level OSA sets the first character to zero
@@ -2545,9 +2600,11 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
QETH_DBF_TEXT(TRACE, 3, "quyadpcb");
cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
+ if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
card->info.link_type =
cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
+ QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type);
+ }
card->options.adp.supported_funcs =
cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
@@ -2937,7 +2994,8 @@ EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
int ipv, int cast_type)
{
- if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
+ if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD ||
+ card->info.type == QETH_CARD_TYPE_OSX))
return card->qdio.default_out_queue;
switch (card->qdio.no_out_queues) {
case 4:
@@ -3499,13 +3557,14 @@ int qeth_set_access_ctrl_online(struct qeth_card *card)
QETH_DBF_TEXT(TRACE, 4, "setactlo");
- if (card->info.type == QETH_CARD_TYPE_OSAE &&
- qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
+ if ((card->info.type == QETH_CARD_TYPE_OSD ||
+ card->info.type == QETH_CARD_TYPE_OSX) &&
+ qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
rc = qeth_setadpparms_set_access_ctrl(card,
card->options.isolation);
if (rc) {
QETH_DBF_MESSAGE(3,
- "IPA(SET_ACCESS_CTRL,%s,%d) sent failed",
+ "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n",
card->gdev->dev.kobj.name,
rc);
}
@@ -3845,9 +3904,16 @@ static void qeth_core_free_card(struct qeth_card *card)
}
static struct ccw_device_id qeth_ids[] = {
- {CCW_DEVICE(0x1731, 0x01), .driver_info = QETH_CARD_TYPE_OSAE},
- {CCW_DEVICE(0x1731, 0x05), .driver_info = QETH_CARD_TYPE_IQD},
- {CCW_DEVICE(0x1731, 0x06), .driver_info = QETH_CARD_TYPE_OSN},
+ {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
+ .driver_info = QETH_CARD_TYPE_OSD},
+ {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
+ .driver_info = QETH_CARD_TYPE_IQD},
+ {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
+ .driver_info = QETH_CARD_TYPE_OSN},
+ {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
+ .driver_info = QETH_CARD_TYPE_OSM},
+ {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
+ .driver_info = QETH_CARD_TYPE_OSX},
{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);
@@ -4251,25 +4317,25 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
goto err_card;
}
- if (card->info.type == QETH_CARD_TYPE_OSN) {
+ if (card->info.type == QETH_CARD_TYPE_OSN)
rc = qeth_core_create_osn_attributes(dev);
- if (rc)
- goto err_card;
+ else
+ rc = qeth_core_create_device_attributes(dev);
+ if (rc)
+ goto err_card;
+ switch (card->info.type) {
+ case QETH_CARD_TYPE_OSN:
+ case QETH_CARD_TYPE_OSM:
rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
- if (rc) {
- qeth_core_remove_osn_attributes(dev);
- goto err_card;
- }
+ if (rc)
+ goto err_attr;
rc = card->discipline.ccwgdriver->probe(card->gdev);
- if (rc) {
- qeth_core_free_discipline(card);
- qeth_core_remove_osn_attributes(dev);
- goto err_card;
- }
- } else {
- rc = qeth_core_create_device_attributes(dev);
if (rc)
- goto err_card;
+ goto err_disc;
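+ /* fall through */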
+ case QETH_CARD_TYPE_OSD:
+ case QETH_CARD_TYPE_OSX:
+ default:
+ break;
}
write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
@@ -4279,6 +4345,13 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
qeth_determine_capabilities(card);
return 0;
+err_disc:
+ qeth_core_free_discipline(card);
+err_attr:
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ qeth_core_remove_osn_attributes(dev);
+ else
+ qeth_core_remove_device_attributes(dev);
err_card:
qeth_core_free_card(card);
err_dev:
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 104a335..f9ed24d 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -48,9 +48,11 @@ extern unsigned char IPA_PDU_HEADER[];
enum qeth_card_types {
QETH_CARD_TYPE_UNKNOWN = 0,
- QETH_CARD_TYPE_OSAE = 10,
- QETH_CARD_TYPE_IQD = 1234,
- QETH_CARD_TYPE_OSN = 11,
+ QETH_CARD_TYPE_OSD = 1,
+ QETH_CARD_TYPE_IQD = 5,
+ QETH_CARD_TYPE_OSN = 6,
+ QETH_CARD_TYPE_OSM = 3,
+ QETH_CARD_TYPE_OSX = 2,
};
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
@@ -614,6 +616,8 @@ extern unsigned char IDX_ACTIVATE_WRITE[];
#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2)
#define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12)
#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09]
+#define QETH_IDX_ACT_ERR_EXCL 0x19
+#define QETH_IDX_ACT_ERR_AUTH 0x1E
#define PDU_ENCAPSULATION(buffer) \
(buffer + *(buffer + (*(buffer + 0x0b)) + \
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 25dfd5a..2eb022f 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -122,23 +122,32 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
unsigned int portno, limit;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
portno = simple_strtoul(buf, &tmp, 16);
- if (portno > QETH_MAX_PORTNO)
- return -EINVAL;
+ if (portno > QETH_MAX_PORTNO) {
+ rc = -EINVAL;
+ goto out;
+ }
limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
- if (portno > limit)
- return -EINVAL;
-
+ if (portno > limit) {
+ rc = -EINVAL;
+ goto out;
+ }
card->info.portno = portno;
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
@@ -165,18 +174,23 @@ static ssize_t qeth_dev_portname_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- int i;
+ int i, rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
tmp = strsep((char **) &buf, "\n");
- if ((strlen(tmp) > 8) || (strlen(tmp) == 0))
- return -EINVAL;
+ if ((strlen(tmp) > 8) || (strlen(tmp) == 0)) {
+ rc = -EINVAL;
+ goto out;
+ }
card->info.portname[0] = strlen(tmp);
/* for beauty reasons */
@@ -184,8 +198,9 @@ static ssize_t qeth_dev_portname_store(struct device *dev,
card->info.portname[i] = ' ';
strcpy(card->info.portname + 1, tmp);
ASCEBC(card->info.portname + 1, 8);
-
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
@@ -215,20 +230,25 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
/* check if 1920 devices are supported,
* if though we have to permit priority queueing
*/
if (card->qdio.no_out_queues == 1) {
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
- return -EPERM;
+ rc = -EPERM;
+ goto out;
}
tmp = strsep((char **) &buf, "\n");
@@ -251,10 +271,11 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
} else if (!strcmp(tmp, "no_prio_queueing")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
- } else {
- return -EINVAL;
- }
- return count;
+ } else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show,
@@ -277,14 +298,17 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
int cnt, old_cnt;
- int rc;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
old_cnt = card->qdio.in_buf_pool.buf_count;
cnt = simple_strtoul(buf, &tmp, 10);
@@ -293,7 +317,9 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
if (old_cnt != cnt) {
rc = qeth_realloc_buffer_pool(card, cnt);
}
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
@@ -337,25 +363,27 @@ static ssize_t qeth_dev_performance_stats_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- int i;
+ int i, rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
i = simple_strtoul(buf, &tmp, 16);
if ((i == 0) || (i == 1)) {
if (i == card->options.performance_stats)
- return count;
+ goto out;
card->options.performance_stats = i;
if (i == 0)
memset(&card->perf_stats, 0,
sizeof(struct qeth_perf_stats));
card->perf_stats.initial_rx_packets = card->stats.rx_packets;
card->perf_stats.initial_tx_packets = card->stats.tx_packets;
- } else {
- return -EINVAL;
- }
- return count;
+ } else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
@@ -377,15 +405,17 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- int i, rc;
+ int i, rc = 0;
enum qeth_discipline_id newdis;
if (!card)
return -EINVAL;
- if (((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)))
- return -EPERM;
+ mutex_lock(&card->conf_mutex);
+ if (card->state != CARD_STATE_DOWN) {
+ rc = -EPERM;
+ goto out;
+ }
i = simple_strtoul(buf, &tmp, 16);
switch (i) {
@@ -396,12 +426,13 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
newdis = QETH_DISCIPLINE_LAYER2;
break;
default:
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
- if (card->options.layer2 == newdis) {
- return count;
- } else {
+ if (card->options.layer2 == newdis)
+ goto out;
+ else {
if (card->discipline.ccwgdriver) {
card->discipline.ccwgdriver->remove(card->gdev);
qeth_core_free_discipline(card);
@@ -410,12 +441,12 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
rc = qeth_core_load_discipline(card, newdis);
if (rc)
- return rc;
+ goto out;
rc = card->discipline.ccwgdriver->probe(card->gdev);
- if (rc)
- return rc;
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
@@ -454,13 +485,13 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
char *tmp, *curtoken;
curtoken = (char *) buf;
- if (!card) {
- rc = -EINVAL;
- goto out;
- }
+ if (!card)
+ return -EINVAL;
+ mutex_lock(&card->conf_mutex);
/* check for unknown, too, in case we do not yet know who we are */
- if (card->info.type != QETH_CARD_TYPE_OSAE &&
+ if (card->info.type != QETH_CARD_TYPE_OSD &&
+ card->info.type != QETH_CARD_TYPE_OSX &&
card->info.type != QETH_CARD_TYPE_UNKNOWN) {
rc = -EOPNOTSUPP;
dev_err(&card->gdev->dev, "Adapter does not "
@@ -491,6 +522,7 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
rc = ipa_rc;
}
out:
+ mutex_unlock(&card->conf_mutex);
return rc;
}
@@ -510,22 +542,25 @@ static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
const char *buf, size_t count, int *value, int max_value)
{
char *tmp;
- int i;
+ int i, rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
-
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
i = simple_strtoul(buf, &tmp, 10);
- if (i <= max_value) {
+ if (i <= max_value)
*value = i;
- } else {
- return -EINVAL;
- }
- return count;
+ else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_dev_blkt_total_show(struct device *dev,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 6a801dc..d43f57a 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -56,7 +56,9 @@ static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
break;
case SIOC_QETH_GET_CARD_TYPE:
- if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
+ if ((card->info.type == QETH_CARD_TYPE_OSD ||
+ card->info.type == QETH_CARD_TYPE_OSM ||
+ card->info.type == QETH_CARD_TYPE_OSX) &&
!card->info.guestlan)
return 1;
return 0;
@@ -309,6 +311,10 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
struct qeth_vlan_vid *id;
QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid);
+ if (card->info.type == QETH_CARD_TYPE_OSM) {
+ QETH_DBF_TEXT(TRACE, 3, "aidOSM");
+ return;
+ }
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_DBF_TEXT(TRACE, 3, "aidREC");
return;
@@ -329,6 +335,10 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct qeth_card *card = dev->ml_priv;
QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
+ if (card->info.type == QETH_CARD_TYPE_OSM) {
+ QETH_DBF_TEXT(TRACE, 3, "kidOSM");
+ return;
+ }
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_DBF_TEXT(TRACE, 3, "kidREC");
return;
@@ -559,8 +569,10 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
"device %s: x%x\n", CARD_BUS_ID(card), rc);
}
- if ((card->info.type == QETH_CARD_TYPE_IQD) ||
- (card->info.guestlan)) {
+ if (card->info.type == QETH_CARD_TYPE_IQD ||
+ card->info.type == QETH_CARD_TYPE_OSM ||
+ card->info.type == QETH_CARD_TYPE_OSX ||
+ card->info.guestlan) {
rc = qeth_setadpparms_change_macaddr(card);
if (rc) {
QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
@@ -589,8 +601,10 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
return -EOPNOTSUPP;
}
- if (card->info.type == QETH_CARD_TYPE_OSN) {
- QETH_DBF_TEXT(TRACE, 3, "setmcOSN");
+ if (card->info.type == QETH_CARD_TYPE_OSN ||
+ card->info.type == QETH_CARD_TYPE_OSM ||
+ card->info.type == QETH_CARD_TYPE_OSX) {
+ QETH_DBF_TEXT(TRACE, 3, "setmcTYP");
return -EOPNOTSUPP;
}
QETH_DBF_TEXT_(TRACE, 3, "%s", CARD_BUS_ID(card));
@@ -608,7 +622,6 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
static void qeth_l2_set_multicast_list(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- struct dev_addr_list *dm;
struct netdev_hw_addr *ha;
if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -620,8 +633,8 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
return;
qeth_l2_del_all_mc(card);
spin_lock_bh(&card->mclock);
- for (dm = dev->mc_list; dm; dm = dm->next)
- qeth_l2_add_mc(card, dm->da_addr, 0);
+ netdev_for_each_mc_addr(ha, dev)
+ qeth_l2_add_mc(card, ha->addr, 0);
netdev_for_each_uc_addr(ha, dev)
qeth_l2_add_mc(card, ha->addr, 1);
@@ -886,9 +899,6 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
- card->dev = alloc_etherdev(0);
- break;
case QETH_CARD_TYPE_IQD:
card->dev = alloc_netdev(0, "hsi%d", ether_setup);
break;
@@ -925,6 +935,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
enum qeth_card_states recover_flag;
BUG_ON(!card);
+ mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
@@ -957,18 +968,21 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
dev_warn(&card->gdev->dev,
"The LAN is offline\n");
card->lan_online = 0;
- return 0;
+ goto out;
}
rc = -ENODEV;
goto out_remove;
} else
card->lan_online = 1;
- if (card->info.type != QETH_CARD_TYPE_OSN) {
+ if ((card->info.type == QETH_CARD_TYPE_OSD) ||
+ (card->info.type == QETH_CARD_TYPE_OSX))
/* configure isolation level */
qeth_set_access_ctrl_online(card);
+
+ if (card->info.type != QETH_CARD_TYPE_OSN &&
+ card->info.type != QETH_CARD_TYPE_OSM)
qeth_l2_process_vlans(card, 0);
- }
netif_tx_disable(card->dev);
@@ -996,6 +1010,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
+out:
+ mutex_unlock(&card->conf_mutex);
return 0;
out_remove:
@@ -1008,6 +1024,7 @@ out_remove:
card->state = CARD_STATE_RECOVER;
else
card->state = CARD_STATE_DOWN;
+ mutex_unlock(&card->conf_mutex);
return rc;
}
@@ -1023,6 +1040,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
int rc = 0, rc2 = 0, rc3 = 0;
enum qeth_card_states recover_flag;
+ mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
@@ -1041,6 +1059,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
card->state = CARD_STATE_RECOVER;
/* let user_space know that device is offline */
kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
+ mutex_unlock(&card->conf_mutex);
return 0;
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fc6ca1d..61adae2 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -54,16 +54,16 @@ int qeth_l3_set_large_send(struct qeth_card *card,
if (card->options.large_send == QETH_LARGE_SEND_TSO) {
if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
- NETIF_F_HW_CSUM;
+ NETIF_F_IP_CSUM;
} else {
card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
- NETIF_F_HW_CSUM);
+ NETIF_F_IP_CSUM);
card->options.large_send = QETH_LARGE_SEND_NO;
rc = -EOPNOTSUPP;
}
} else {
card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
- NETIF_F_HW_CSUM);
+ NETIF_F_IP_CSUM);
card->options.large_send = QETH_LARGE_SEND_NO;
}
return rc;
@@ -1108,6 +1108,13 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
QETH_DBF_TEXT_(TRACE, 3, "csum:%d", card->info.csum_mask);
}
+ if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM &&
+ cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
+ card->info.tx_csum_mask =
+ cmd->data.setassparms.data.flags_32bit;
+ QETH_DBF_TEXT_(TRACE, 3, "tcsu:%d", card->info.tx_csum_mask);
+ }
+
return 0;
}
@@ -1536,6 +1543,28 @@ static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
return rc;
}
+static int qeth_l3_start_ipa_tx_checksum(struct qeth_card *card)
+{
+ int rc = 0;
+
+ if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
+ return rc;
+ rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
+ IPA_CMD_ASS_START, 0);
+ if (rc)
+ goto err_out;
+ rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
+ IPA_CMD_ASS_ENABLE, card->info.tx_csum_mask);
+ if (rc)
+ goto err_out;
+ dev_info(&card->gdev->dev, "HW TX Checksumming enabled\n");
+ return rc;
+err_out:
+ dev_warn(&card->gdev->dev, "Enabling HW TX checksumming for %s "
+ "failed, using SW TX checksumming\n", QETH_CARD_IFNAME(card));
+ return rc;
+}
+
static int qeth_l3_start_ipa_tso(struct qeth_card *card)
{
int rc;
@@ -1578,6 +1607,7 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
qeth_l3_start_ipa_ipv6(card); /* go on*/
qeth_l3_start_ipa_broadcast(card); /* go on*/
qeth_l3_start_ipa_checksum(card); /* go on*/
+ qeth_l3_start_ipa_tx_checksum(card);
qeth_l3_start_ipa_tso(card); /* go on*/
return 0;
}
@@ -1929,7 +1959,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
if (!in6_dev)
return;
- for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next) {
+ list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
if (addr) {
memcpy(&addr->u.a6.addr, &ifa->addr,
@@ -2681,7 +2711,8 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
break;
case SIOC_QETH_GET_CARD_TYPE:
- if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
+ if ((card->info.type == QETH_CARD_TYPE_OSD ||
+ card->info.type == QETH_CARD_TYPE_OSX) &&
!card->info.guestlan)
return 1;
return 0;
@@ -2817,6 +2848,21 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
}
}
+static inline void qeth_l3_hdr_csum(struct qeth_card *card,
+ struct qeth_hdr *hdr, struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+
+ /* tcph->check contains already the pseudo hdr checksum
+ * so just set the header flags
+ */
+ if (iph->protocol == IPPROTO_UDP)
+ hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP;
+ hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
+ if (card->options.performance_stats)
+ card->perf_stats.tx_csum++;
+}
+
static void qeth_tso_fill_header(struct qeth_card *card,
struct qeth_hdr *qhdr, struct sk_buff *skb)
{
@@ -2852,21 +2898,6 @@ static void qeth_tso_fill_header(struct qeth_card *card,
}
}
-static void qeth_tx_csum(struct sk_buff *skb)
-{
- __wsum csum;
- int offset;
-
- skb_set_transport_header(skb, skb->csum_start - skb_headroom(skb));
- offset = skb->csum_start - skb_headroom(skb);
- BUG_ON(offset >= skb_headlen(skb));
- csum = skb_checksum(skb, offset, skb->len - offset, 0);
-
- offset += skb->csum_offset;
- BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
- *(__sum16 *)(skb->data + offset) = csum_fold(csum);
-}
-
static inline int qeth_l3_tso_elements(struct sk_buff *skb)
{
unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
@@ -2923,12 +2954,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb_is_gso(skb))
large_send = card->options.large_send;
- else
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- qeth_tx_csum(skb);
- if (card->options.performance_stats)
- card->perf_stats.tx_csum++;
- }
if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
(skb_shinfo(skb)->nr_frags == 0)) {
@@ -3007,6 +3032,9 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
cast_type);
hdr->hdr.l3.length = new_skb->len - data_offset;
}
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ qeth_l3_hdr_csum(card, hdr, new_skb);
}
elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
@@ -3132,10 +3160,25 @@ static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
return rc;
}
+static int qeth_l3_ethtool_set_tx_csum(struct net_device *dev, u32 data)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ if (data) {
+ if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
+ dev->features |= NETIF_F_IP_CSUM;
+ else
+ return -EPERM;
+ } else
+ dev->features &= ~NETIF_F_IP_CSUM;
+
+ return 0;
+}
+
static const struct ethtool_ops qeth_l3_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_hw_csum,
+ .set_tx_csum = qeth_l3_ethtool_set_tx_csum,
.get_rx_csum = qeth_l3_ethtool_get_rx_csum,
.set_rx_csum = qeth_l3_ethtool_set_rx_csum,
.get_sg = ethtool_op_get_sg,
@@ -3206,7 +3249,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
static int qeth_l3_setup_netdev(struct qeth_card *card)
{
- if (card->info.type == QETH_CARD_TYPE_OSAE) {
+ if (card->info.type == QETH_CARD_TYPE_OSD ||
+ card->info.type == QETH_CARD_TYPE_OSX) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
(card->info.link_type == QETH_LINK_TYPE_HSTR)) {
#ifdef CONFIG_TR
@@ -3336,6 +3380,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
enum qeth_card_states recover_flag;
BUG_ON(!card);
+ mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
@@ -3367,7 +3412,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
dev_warn(&card->gdev->dev,
"The LAN is offline\n");
card->lan_online = 0;
- return 0;
+ goto out;
}
rc = -ENODEV;
goto out_remove;
@@ -3414,6 +3459,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
+out:
+ mutex_unlock(&card->conf_mutex);
return 0;
out_remove:
card->use_hard_stop = 1;
@@ -3425,6 +3472,7 @@ out_remove:
card->state = CARD_STATE_RECOVER;
else
card->state = CARD_STATE_DOWN;
+ mutex_unlock(&card->conf_mutex);
return rc;
}
@@ -3440,6 +3488,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
int rc = 0, rc2 = 0, rc3 = 0;
enum qeth_card_states recover_flag;
+ mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
@@ -3458,6 +3507,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
card->state = CARD_STATE_RECOVER;
/* let user_space know that device is offline */
kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
+ mutex_unlock(&card->conf_mutex);
return 0;
}
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 25b3e7a..fb5318b 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -70,10 +70,10 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
{
enum qeth_routing_types old_route_type = route->type;
char *tmp;
- int rc;
+ int rc = 0;
tmp = strsep((char **) &buf, "\n");
-
+ mutex_lock(&card->conf_mutex);
if (!strcmp(tmp, "no_router")) {
route->type = NO_ROUTER;
} else if (!strcmp(tmp, "primary_connector")) {
@@ -87,7 +87,8 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
} else if (!strcmp(tmp, "multicast_router")) {
route->type = MULTICAST_ROUTER;
} else {
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
if (((card->state == CARD_STATE_SOFTSETUP) ||
(card->state == CARD_STATE_UP)) &&
@@ -97,7 +98,9 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
else if (prot == QETH_PROT_IPV6)
rc = qeth_l3_setrouting_v6(card);
}
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_route4_store(struct device *dev,
@@ -157,22 +160,26 @@ static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- int i;
+ int i, rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
i = simple_strtoul(buf, &tmp, 16);
if ((i == 0) || (i == 1))
card->options.fake_broadcast = i;
- else {
- return -EINVAL;
- }
- return count;
+ else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show,
@@ -200,31 +207,35 @@ static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
(card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
tmp = strsep((char **) &buf, "\n");
- if (!strcmp(tmp, "local")) {
+ if (!strcmp(tmp, "local"))
card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL;
- return count;
- } else if (!strcmp(tmp, "all_rings")) {
+ else if (!strcmp(tmp, "all_rings"))
card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
- return count;
- } else {
- return -EINVAL;
- }
- return count;
+ else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(broadcast_mode, 0644, qeth_l3_dev_broadcast_mode_show,
@@ -251,18 +262,22 @@ static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- int i;
+ int i, rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
(card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
i = simple_strtoul(buf, &tmp, 16);
@@ -270,10 +285,11 @@ static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev,
card->options.macaddr_mode = i?
QETH_TR_MACADDR_CANONICAL :
QETH_TR_MACADDR_NONCANONICAL;
- else {
- return -EINVAL;
- }
- return count;
+ else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(canonical_macaddr, 0644, qeth_l3_dev_canonical_macaddr_show,
@@ -297,11 +313,12 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
enum qeth_checksum_types csum_type;
char *tmp;
- int rc;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
tmp = strsep((char **) &buf, "\n");
if (!strcmp(tmp, "sw_checksumming"))
csum_type = SW_CHECKSUMMING;
@@ -309,13 +326,15 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
csum_type = HW_CHECKSUMMING;
else if (!strcmp(tmp, "no_checksumming"))
csum_type = NO_CHECKSUMMING;
- else
- return -EINVAL;
+ else {
+ rc = -EINVAL;
+ goto out;
+ }
rc = qeth_l3_set_rx_csum(card, csum_type);
- if (rc)
- return rc;
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
@@ -336,7 +355,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- int ret;
+ int rc = 0;
unsigned long i;
if (!card)
@@ -345,19 +364,24 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
if (card->info.type != QETH_CARD_TYPE_IQD)
return -EPERM;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
- ret = strict_strtoul(buf, 16, &i);
- if (ret)
- return -EINVAL;
+ rc = strict_strtoul(buf, 16, &i);
+ if (rc) {
+ rc = -EINVAL;
+ goto out;
+ }
switch (i) {
case 0:
card->options.sniffer = i;
break;
case 1:
- ret = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
+ qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
card->options.sniffer = i;
if (card->qdio.init_pool.buf_count !=
@@ -366,11 +390,13 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
QETH_IN_BUF_COUNT_MAX);
break;
} else
- return -EPERM;
+ rc = -EPERM;
default: /* fall through */
- return -EINVAL;
+ rc = -EINVAL;
}
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
@@ -412,12 +438,11 @@ static ssize_t qeth_l3_dev_large_send_store(struct device *dev,
else
return -EINVAL;
- if (card->options.large_send == type)
- return count;
- rc = qeth_l3_set_large_send(card, type);
- if (rc)
- return rc;
- return count;
+ mutex_lock(&card->conf_mutex);
+ if (card->options.large_send != type)
+ rc = qeth_l3_set_large_send(card, type);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(large_send, 0644, qeth_l3_dev_large_send_show,
@@ -455,13 +480,17 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
tmp = strsep((char **) &buf, "\n");
if (!strcmp(tmp, "toggle")) {
@@ -470,10 +499,11 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
card->ipato.enabled = 1;
} else if (!strcmp(tmp, "0")) {
card->ipato.enabled = 0;
- } else {
- return -EINVAL;
- }
- return count;
+ } else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static QETH_DEVICE_ATTR(ipato_enable, enable, 0644,
@@ -497,10 +527,12 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
tmp = strsep((char **) &buf, "\n");
if (!strcmp(tmp, "toggle")) {
card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
@@ -508,10 +540,10 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
card->ipato.invert4 = 1;
} else if (!strcmp(tmp, "0")) {
card->ipato.invert4 = 0;
- } else {
- return -EINVAL;
- }
- return count;
+ } else
+ rc = -EINVAL;
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
@@ -593,27 +625,28 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
struct qeth_ipato_entry *ipatoe;
u8 addr[16];
int mask_bits;
- int rc;
+ int rc = 0;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
if (rc)
- return rc;
+ goto out;
ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL);
if (!ipatoe) {
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
}
ipatoe->proto = proto;
memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
ipatoe->mask_bits = mask_bits;
rc = qeth_l3_add_ipato_entry(card, ipatoe);
- if (rc) {
+ if (rc)
kfree(ipatoe);
- return rc;
- }
-
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev,
@@ -636,15 +669,14 @@ static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
{
u8 addr[16];
int mask_bits;
- int rc;
+ int rc = 0;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
- if (rc)
- return rc;
-
- qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
-
- return count;
+ if (!rc)
+ qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev,
@@ -677,10 +709,12 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
tmp = strsep((char **) &buf, "\n");
if (!strcmp(tmp, "toggle")) {
card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
@@ -688,10 +722,10 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
card->ipato.invert6 = 1;
} else if (!strcmp(tmp, "0")) {
card->ipato.invert6 = 0;
- } else {
- return -EINVAL;
- }
- return count;
+ } else
+ rc = -EINVAL;
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644,
@@ -813,15 +847,12 @@ static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count,
u8 addr[16] = {0, };
int rc;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_vipae(buf, proto, addr);
- if (rc)
- return rc;
-
- rc = qeth_l3_add_vipa(card, proto, addr);
- if (rc)
- return rc;
-
- return count;
+ if (!rc)
+ rc = qeth_l3_add_vipa(card, proto, addr);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
@@ -845,13 +876,12 @@ static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
u8 addr[16];
int rc;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_vipae(buf, proto, addr);
- if (rc)
- return rc;
-
- qeth_l3_del_vipa(card, proto, addr);
-
- return count;
+ if (!rc)
+ qeth_l3_del_vipa(card, proto, addr);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
@@ -979,15 +1009,12 @@ static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count,
u8 addr[16] = {0, };
int rc;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_rxipe(buf, proto, addr);
- if (rc)
- return rc;
-
- rc = qeth_l3_add_rxip(card, proto, addr);
- if (rc)
- return rc;
-
- return count;
+ if (!rc)
+ rc = qeth_l3_add_rxip(card, proto, addr);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
@@ -1011,13 +1038,12 @@ static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
u8 addr[16];
int rc;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_rxipe(buf, proto, addr);
- if (rc)
- return rc;
-
- qeth_l3_del_rxip(card, proto, addr);
-
- return count;
+ if (!rc)
+ qeth_l3_del_rxip(card, proto, addr);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 1e6183a8..e331df2 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -425,7 +425,8 @@ int zfcp_status_read_refill(struct zfcp_adapter *adapter)
{
while (atomic_read(&adapter->stat_miss) > 0)
if (zfcp_fsf_status_read(adapter->qdio)) {
- if (atomic_read(&adapter->stat_miss) >= 16) {
+ if (atomic_read(&adapter->stat_miss) >=
+ adapter->stat_read_buf_num) {
zfcp_erp_adapter_reopen(adapter, 0, "axsref1",
NULL);
return 1;
@@ -545,6 +546,10 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
&zfcp_sysfs_adapter_attrs))
goto failed;
+ /* report size limit per scatter-gather segment */
+ adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
+ adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
+
if (!zfcp_adapter_scsi_register(adapter))
return adapter;
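The two added lines above publish the per-segment size limit through the generic struct device_dma_parameters attached to the ccw device. As a sketch only, and assuming the adapter and its ccw_device are already initialized, the same limit could also be set through the generic helper, which simply writes into the dma_parms attached one line earlier:

struct device *dev = &adapter->ccw_device->dev;

dev->dma_parms = &adapter->dma_parms;		/* storage for the DMA limits */
if (dma_set_max_seg_size(dev, ZFCP_QDIO_SBALE_LEN))
	dev_warn(dev, "setting max segment size failed\n");	/* only fails if dma_parms is missing */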
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 7131c7d..9fa1b06 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -44,23 +44,6 @@ struct zfcp_reqlist;
/********************* SCSI SPECIFIC DEFINES *********************************/
#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
-/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
-
-/* DMQ bug workaround: don't use last SBALE */
-#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
-
-/* index of last SBALE (with respect to DMQ bug workaround) */
-#define ZFCP_LAST_SBALE_PER_SBAL (ZFCP_MAX_SBALES_PER_SBAL - 1)
-
-/* max. number of (data buffer) SBALEs in largest SBAL chain */
-#define ZFCP_MAX_SBALES_PER_REQ \
- (FSF_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2)
- /* request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
-
-#define ZFCP_MAX_SECTORS (ZFCP_MAX_SBALES_PER_REQ * 8)
- /* max. number of (data buffer) SBALEs in largest SBAL chain
- multiplied with number of sectors per 4k block */
-
/********************* FSF SPECIFIC DEFINES *********************************/
/* ATTENTION: value must not be used by hardware */
@@ -181,6 +164,7 @@ struct zfcp_adapter {
stack abort/command
completion races */
atomic_t stat_miss; /* # missing status reads*/
+ unsigned int stat_read_buf_num;
struct work_struct stat_work;
atomic_t status; /* status of this adapter */
struct list_head erp_ready_head; /* error recovery for this
@@ -205,6 +189,7 @@ struct zfcp_adapter {
struct work_struct scan_work;
struct service_level service_level;
struct workqueue_struct *work_queue;
+ struct device_dma_parameters dma_parms;
};
struct zfcp_port {
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 0be5e7e..e3dbeda 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -714,7 +714,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
- atomic_set(&act->adapter->stat_miss, 16);
+ atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
if (zfcp_status_read_refill(act->adapter))
return ZFCP_ERP_FAILED;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 8786a79..48a8f93 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
*
* External function declarations.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#ifndef ZFCP_EXT_H
@@ -143,9 +143,9 @@ extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
/* zfcp_qdio.c */
extern int zfcp_qdio_setup(struct zfcp_adapter *);
extern void zfcp_qdio_destroy(struct zfcp_qdio *);
+extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
-extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *,
- struct zfcp_qdio_req *, unsigned long,
+extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
struct scatterlist *, int);
extern int zfcp_qdio_open(struct zfcp_qdio *);
extern void zfcp_qdio_close(struct zfcp_qdio *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 2a1cbb7..6f8ab43 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -400,7 +400,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
struct zfcp_adapter *adapter = port->adapter;
int ret;
- adisc = kmem_cache_alloc(zfcp_data.adisc_cache, GFP_ATOMIC);
+ adisc = kmem_cache_zalloc(zfcp_data.adisc_cache, GFP_ATOMIC);
if (!adisc)
return -ENOMEM;
@@ -493,7 +493,7 @@ static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
if (!gpn_ft)
return NULL;
- req = kmem_cache_alloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
+ req = kmem_cache_zalloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
if (!req) {
kfree(gpn_ft);
gpn_ft = NULL;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index b3b1d2f..9ac6a6e 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -496,6 +496,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
adapter->hydra_version = bottom->adapter_type;
adapter->timer_ticks = bottom->timer_interval;
+ adapter->stat_read_buf_num = max(bottom->status_read_buf_num, (u16)16);
if (fc_host_permanent_port_name(shost) == -1)
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
@@ -640,37 +641,6 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
}
}
-static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio)
-{
- struct zfcp_qdio_queue *req_q = &qdio->req_q;
-
- spin_lock_bh(&qdio->req_q_lock);
- if (atomic_read(&req_q->count))
- return 1;
- spin_unlock_bh(&qdio->req_q_lock);
- return 0;
-}
-
-static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio)
-{
- struct zfcp_adapter *adapter = qdio->adapter;
- long ret;
-
- spin_unlock_bh(&qdio->req_q_lock);
- ret = wait_event_interruptible_timeout(qdio->req_q_wq,
- zfcp_fsf_sbal_check(qdio), 5 * HZ);
- if (ret > 0)
- return 0;
- if (!ret) {
- atomic_inc(&qdio->req_q_full);
- /* assume hanging outbound queue, try queue recovery */
- zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
- }
-
- spin_lock_bh(&qdio->req_q_lock);
- return -EIO;
-}
-
static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
{
struct zfcp_fsf_req *req;
@@ -705,10 +675,9 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
}
static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
- u32 fsf_cmd, mempool_t *pool)
+ u32 fsf_cmd, u32 sbtype,
+ mempool_t *pool)
{
- struct qdio_buffer_element *sbale;
- struct zfcp_qdio_queue *req_q = &qdio->req_q;
struct zfcp_adapter *adapter = qdio->adapter;
struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
@@ -725,14 +694,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
req->adapter = adapter;
req->fsf_command = fsf_cmd;
req->req_id = adapter->req_no;
- req->qdio_req.sbal_number = 1;
- req->qdio_req.sbal_first = req_q->first;
- req->qdio_req.sbal_last = req_q->first;
- req->qdio_req.sbale_curr = 1;
-
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].addr = (void *) req->req_id;
- sbale[0].flags |= SBAL_FLAGS0_COMMAND;
if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
if (likely(pool))
@@ -753,10 +714,11 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
req->qtcb->header.req_handle = req->req_id;
req->qtcb->header.fsf_command = req->fsf_command;
- sbale[1].addr = (void *) req->qtcb;
- sbale[1].length = sizeof(struct fsf_qtcb);
}
+ zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
+ req->qtcb, sizeof(struct fsf_qtcb));
+
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
zfcp_fsf_req_free(req);
return ERR_PTR(-EIO);
@@ -803,24 +765,19 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
struct zfcp_adapter *adapter = qdio->adapter;
struct zfcp_fsf_req *req;
struct fsf_status_read_buffer *sr_buf;
- struct qdio_buffer_element *sbale;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 0,
adapter->pool.status_read_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
- req->qdio_req.sbale_curr = 2;
-
sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
if (!sr_buf) {
retval = -ENOMEM;
@@ -828,9 +785,9 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
}
memset(sr_buf, 0, sizeof(*sr_buf));
req->data = sr_buf;
- sbale = zfcp_qdio_sbale_curr(qdio, &req->qdio_req);
- sbale->addr = (void *) sr_buf;
- sbale->length = sizeof(*sr_buf);
+
+ zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
retval = zfcp_fsf_req_send(req);
if (retval)
@@ -907,14 +864,14 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
struct zfcp_unit *unit)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
struct zfcp_qdio *qdio = unit->port->adapter->qdio;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.scsi_abort);
if (IS_ERR(req)) {
req = NULL;
@@ -925,9 +882,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
ZFCP_STATUS_COMMON_UNBLOCKED)))
goto out_error_free;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->data = unit;
req->handler = zfcp_fsf_abort_fcp_command_handler;
@@ -996,21 +951,14 @@ skip_fsfstatus:
ct->handler(ct->handler_data);
}
-static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
+static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
+ struct zfcp_qdio_req *q_req,
struct scatterlist *sg_req,
struct scatterlist *sg_resp)
{
- sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
- sbale[2].addr = sg_virt(sg_req);
- sbale[2].length = sg_req->length;
- sbale[3].addr = sg_virt(sg_resp);
- sbale[3].length = sg_resp->length;
- sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
-}
-
-static int zfcp_fsf_one_sbal(struct scatterlist *sg)
-{
- return sg_is_last(sg) && sg->length <= PAGE_SIZE;
+ zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
+ zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
+ zfcp_qdio_set_sbale_last(qdio, q_req);
}
static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
@@ -1019,35 +967,34 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
int max_sbals)
{
struct zfcp_adapter *adapter = req->adapter;
- struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
- &req->qdio_req);
u32 feat = adapter->adapter_features;
int bytes;
if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
- if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp))
+ if (!zfcp_qdio_sg_one_sbale(sg_req) ||
+ !zfcp_qdio_sg_one_sbale(sg_resp))
return -EOPNOTSUPP;
- zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
+ zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
+ sg_req, sg_resp);
return 0;
}
/* use single, unchained SBAL if it can hold the request */
- if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) {
- zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
+ if (zfcp_qdio_sg_one_sbale(sg_req) || zfcp_qdio_sg_one_sbale(sg_resp)) {
+ zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
+ sg_req, sg_resp);
return 0;
}
bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
- SBAL_FLAGS0_TYPE_WRITE_READ,
sg_req, max_sbals);
if (bytes <= 0)
return -EIO;
req->qtcb->bottom.support.req_buf_length = bytes;
- req->qdio_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+ zfcp_qdio_skip_to_last_sbale(&req->qdio_req);
bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
- SBAL_FLAGS0_TYPE_WRITE_READ,
sg_resp, max_sbals);
req->qtcb->bottom.support.resp_buf_length = bytes;
if (bytes <= 0)
@@ -1091,10 +1038,11 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
int ret = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
+ SBAL_FLAGS0_TYPE_WRITE_READ, pool);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
@@ -1103,7 +1051,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
- FSF_MAX_SBALS_PER_REQ, timeout);
+ ZFCP_FSF_MAX_SBALS_PER_REQ, timeout);
if (ret)
goto failed_send;
@@ -1187,10 +1135,11 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
int ret = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
+ SBAL_FLAGS0_TYPE_WRITE_READ, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
@@ -1224,16 +1173,16 @@ out:
int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1242,9 +1191,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_CFDC |
@@ -1269,24 +1216,22 @@ out:
int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_config *data)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out_unlock;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+ SBAL_FLAGS0_TYPE_READ, NULL);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out_unlock;
}
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_config_data_handler;
req->qtcb->bottom.config.feature_selection =
@@ -1320,7 +1265,6 @@ out_unlock:
int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
{
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req;
int retval = -EIO;
@@ -1328,10 +1272,11 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
return -EOPNOTSUPP;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1340,9 +1285,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_port_data_handler;
req->erp_action = erp_action;
@@ -1368,7 +1311,6 @@ out:
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_port *data)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
@@ -1376,10 +1318,11 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
return -EOPNOTSUPP;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out_unlock;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+ SBAL_FLAGS0_TYPE_READ, NULL);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
@@ -1389,9 +1332,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
if (data)
req->data = data;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_port_data_handler;
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
@@ -1485,17 +1426,17 @@ out:
*/
int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_port *port = erp_action->port;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1504,9 +1445,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_open_port_handler;
hton24(req->qtcb->bottom.support.d_id, port->d_id);
@@ -1556,16 +1495,16 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1574,9 +1513,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_close_port_handler;
req->data = erp_action->port;
@@ -1633,16 +1570,16 @@ out:
*/
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (unlikely(IS_ERR(req))) {
@@ -1651,9 +1588,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_open_wka_port_handler;
hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
@@ -1688,16 +1623,16 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (unlikely(IS_ERR(req))) {
@@ -1706,9 +1641,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_close_wka_port_handler;
req->data = wka_port;
@@ -1782,16 +1715,16 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1800,9 +1733,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->data = erp_action->port;
req->qtcb->header.port_handle = erp_action->port->handle;
@@ -1954,17 +1885,17 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
+ SBAL_FLAGS0_TYPE_READ,
adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1973,9 +1904,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->header.port_handle = erp_action->port->handle;
req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
@@ -2041,16 +1970,16 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -2059,9 +1988,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->header.port_handle = erp_action->port->handle;
req->qtcb->header.lun_handle = erp_action->unit->handle;
@@ -2289,8 +2216,11 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
goto out;
}
+ if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
+ sbtype = SBAL_FLAGS0_TYPE_WRITE;
+
req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
- adapter->pool.scsi_req);
+ sbtype, adapter->pool.scsi_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
@@ -2298,7 +2228,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- get_device(&unit->dev);
req->unit = unit;
req->data = scsi_cmnd;
req->handler = zfcp_fsf_send_fcp_command_handler;
@@ -2323,20 +2252,21 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
break;
case DMA_TO_DEVICE:
req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
- sbtype = SBAL_FLAGS0_TYPE_WRITE;
break;
case DMA_BIDIRECTIONAL:
goto failed_scsi_cmnd;
}
+ get_device(&unit->dev);
+
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
- real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sbtype,
+ real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
scsi_sglist(scsi_cmnd),
- FSF_MAX_SBALS_PER_REQ);
+ ZFCP_FSF_MAX_SBALS_PER_REQ);
if (unlikely(real_bytes < 0)) {
- if (req->qdio_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
+ if (req->qdio_req.sbal_number >= ZFCP_FSF_MAX_SBALS_PER_REQ) {
dev_err(&adapter->ccw_device->dev,
"Oversize data package, unit 0x%016Lx "
"on port 0x%016Lx closed\n",
@@ -2371,7 +2301,6 @@ out:
*/
struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
struct fcp_cmnd *fcp_cmnd;
struct zfcp_qdio *qdio = unit->port->adapter->qdio;
@@ -2381,10 +2310,11 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
return NULL;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
+ SBAL_FLAGS0_TYPE_WRITE,
qdio->adapter->pool.scsi_req);
if (IS_ERR(req)) {
@@ -2401,9 +2331,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
req->qtcb->bottom.io.service_class = FSF_CLASS_3;
req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags);
@@ -2432,7 +2360,6 @@ static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
struct zfcp_fsf_cfdc *fsf_cfdc)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = adapter->qdio;
struct zfcp_fsf_req *req = NULL;
struct fsf_qtcb_bottom_support *bottom;
@@ -2453,10 +2380,10 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
}
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL);
+ req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, direction, NULL);
if (IS_ERR(req)) {
retval = -EPERM;
goto out;
@@ -2464,16 +2391,13 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
req->handler = zfcp_fsf_control_file_handler;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= direction;
-
bottom = &req->qtcb->bottom.support;
bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
bottom->option = fsf_cfdc->option;
bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
- direction, fsf_cfdc->sg,
- FSF_MAX_SBALS_PER_REQ);
+ fsf_cfdc->sg,
+ ZFCP_FSF_MAX_SBALS_PER_REQ);
if (bytes != ZFCP_CFDC_MAX_SIZE) {
zfcp_fsf_req_free(req);
goto out;
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index b3de682..519083f 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#ifndef FSF_H
@@ -152,7 +152,12 @@
#define FSF_CLASS_3 0x00000003
/* SBAL chaining */
-#define FSF_MAX_SBALS_PER_REQ 36
+#define ZFCP_FSF_MAX_SBALS_PER_REQ 36
+
+/* max. number of (data buffer) SBALEs in the largest SBAL chain,
+ * minus 2 for the request ID and QTCB in SBALEs 0 and 1 of the first SBAL */
+#define ZFCP_FSF_MAX_SBALES_PER_REQ \
+ (ZFCP_FSF_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)
/* logging space behind QTCB */
#define FSF_QTCB_LOG_SIZE 1024
@@ -361,7 +366,7 @@ struct fsf_qtcb_bottom_config {
u32 adapter_type;
u8 res0;
u8 peer_d_id[3];
- u8 res1[2];
+ u16 status_read_buf_num;
u16 timer_interval;
u8 res2[9];
u8 s_id[3];
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index dbfa312..28117e1 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -3,7 +3,7 @@
*
* Setup and helper functions to access QDIO.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
@@ -151,8 +151,7 @@ static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
}
static struct qdio_buffer_element *
-zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
- unsigned long sbtype)
+zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
struct qdio_buffer_element *sbale;
@@ -180,17 +179,16 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
/* set storage-block type for new SBAL */
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
- sbale->flags |= sbtype;
+ sbale->flags |= q_req->sbtype;
return sbale;
}
static struct qdio_buffer_element *
-zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
- unsigned int sbtype)
+zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
- if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
- return zfcp_qdio_sbal_chain(qdio, q_req, sbtype);
+ if (q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL)
+ return zfcp_qdio_sbal_chain(qdio, q_req);
q_req->sbale_curr++;
return zfcp_qdio_sbale_curr(qdio, q_req);
}
@@ -206,62 +204,38 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
zfcp_qdio_zero_sbals(sbal, first, count);
}
-static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
- struct zfcp_qdio_req *q_req,
- unsigned int sbtype, void *start_addr,
- unsigned int total_length)
-{
- struct qdio_buffer_element *sbale;
- unsigned long remaining, length;
- void *addr;
-
- /* split segment up */
- for (addr = start_addr, remaining = total_length; remaining > 0;
- addr += length, remaining -= length) {
- sbale = zfcp_qdio_sbale_next(qdio, q_req, sbtype);
- if (!sbale) {
- atomic_inc(&qdio->req_q_full);
- zfcp_qdio_undo_sbals(qdio, q_req);
- return -EINVAL;
- }
-
- /* new piece must not exceed next page boundary */
- length = min(remaining,
- (PAGE_SIZE - ((unsigned long)addr &
- (PAGE_SIZE - 1))));
- sbale->addr = addr;
- sbale->length = length;
- }
- return 0;
-}
-
/**
* zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
- * @fsf_req: request to be processed
- * @sbtype: SBALE flags
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
* @sg: scatter-gather list
* @max_sbals: upper bound for number of SBALs to be used
 * Returns: number of bytes, or error (negative)
*/
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
- unsigned long sbtype, struct scatterlist *sg,
- int max_sbals)
+ struct scatterlist *sg, int max_sbals)
{
struct qdio_buffer_element *sbale;
- int retval, bytes = 0;
+ int bytes = 0;
/* figure out last allowed SBAL */
zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);
/* set storage-block type for this request */
sbale = zfcp_qdio_sbale_req(qdio, q_req);
- sbale->flags |= sbtype;
+ sbale->flags |= q_req->sbtype;
for (; sg; sg = sg_next(sg)) {
- retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype,
- sg_virt(sg), sg->length);
- if (retval < 0)
- return retval;
+ sbale = zfcp_qdio_sbale_next(qdio, q_req);
+ if (!sbale) {
+ atomic_inc(&qdio->req_q_full);
+ zfcp_qdio_undo_sbals(qdio, q_req);
+ return -EINVAL;
+ }
+
+ sbale->addr = sg_virt(sg);
+ sbale->length = sg->length;
+
bytes += sg->length;
}
@@ -272,6 +246,46 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
return bytes;
}
+static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
+{
+ struct zfcp_qdio_queue *req_q = &qdio->req_q;
+
+ spin_lock_bh(&qdio->req_q_lock);
+ if (atomic_read(&req_q->count))
+ return 1;
+ spin_unlock_bh(&qdio->req_q_lock);
+ return 0;
+}
+
+/**
+ * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
+ * @qdio: pointer to struct zfcp_qdio
+ *
+ * The req_q_lock must be held by the caller of this function, and
+ * this function may only be called from process context; it will
+ * sleep when waiting for a free sbal.
+ *
+ * Returns: 0 on success, -EIO if there is no free sbal after waiting.
+ */
+int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+{
+ long ret;
+
+ spin_unlock_bh(&qdio->req_q_lock);
+ ret = wait_event_interruptible_timeout(qdio->req_q_wq,
+ zfcp_qdio_sbal_check(qdio), 5 * HZ);
+ if (ret > 0)
+ return 0;
+ if (!ret) {
+ atomic_inc(&qdio->req_q_full);
+ /* assume hanging outbound queue, try queue recovery */
+ zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL);
+ }
+
+ spin_lock_bh(&qdio->req_q_lock);
+ return -EIO;
+}
+
/**
* zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
* @qdio: pointer to struct zfcp_qdio
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 8cca546..138fba5 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -11,6 +11,14 @@
#include <asm/qdio.h>
+#define ZFCP_QDIO_SBALE_LEN PAGE_SIZE
+
+/* DMQ bug workaround: don't use last SBALE */
+#define ZFCP_QDIO_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+
+/* index of last SBALE (with respect to DMQ bug workaround) */
+#define ZFCP_QDIO_LAST_SBALE_PER_SBAL (ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1)
+
/**
* struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count
* @sbal: qdio buffers
@@ -49,6 +57,7 @@ struct zfcp_qdio {
/**
* struct zfcp_qdio_req - qdio queue related values for a request
+ * @sbtype: sbal type flags for sbale 0
* @sbal_number: number of free sbals
* @sbal_first: first sbal for this request
* @sbal_last: last sbal for this request
@@ -59,6 +68,7 @@ struct zfcp_qdio {
* @qdio_inb_usage: usage of inbound queue
*/
struct zfcp_qdio_req {
+ u32 sbtype;
u8 sbal_number;
u8 sbal_first;
u8 sbal_last;
@@ -106,4 +116,98 @@ zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
q_req->sbale_curr);
}
+/**
+ * zfcp_qdio_req_init - initialize qdio request
+ * @qdio: request queue where to start putting the request
+ * @q_req: the qdio request to start
+ * @req_id: The request id
+ * @sbtype: type flags to set for all sbals
+ * @data: First data block
+ * @len: Length of first data block
+ *
+ * This is the start of putting the request into the queue; the last
+ * step is passing the request to zfcp_qdio_send. The request queue
+ * lock must be held during the whole process from init to send.
+ */
+static inline
+void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+ unsigned long req_id, u32 sbtype, void *data, u32 len)
+{
+ struct qdio_buffer_element *sbale;
+
+ q_req->sbal_first = q_req->sbal_last = qdio->req_q.first;
+ q_req->sbal_number = 1;
+ q_req->sbtype = sbtype;
+
+ sbale = zfcp_qdio_sbale_req(qdio, q_req);
+ sbale->addr = (void *) req_id;
+ sbale->flags |= SBAL_FLAGS0_COMMAND;
+ sbale->flags |= sbtype;
+
+ q_req->sbale_curr = 1;
+ sbale++;
+ sbale->addr = data;
+ if (likely(data))
+ sbale->length = len;
+}
+
+/**
+ * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * @data: pointer to the data block to put into the next sbale
+ * @len: length of the data block
+ *
+ * This is only required for single sbal requests; calling it when
+ * wrapping around to the next sbal is a bug.
+ */
+static inline
+void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+ void *data, u32 len)
+{
+ struct qdio_buffer_element *sbale;
+
+ BUG_ON(q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL);
+ q_req->sbale_curr++;
+ sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+ sbale->addr = data;
+ sbale->length = len;
+}
+
+/**
+ * zfcp_qdio_set_sbale_last - set last entry flag in current sbale
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
+ struct zfcp_qdio_req *q_req)
+{
+ struct qdio_buffer_element *sbale;
+
+ sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+ sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
+}
+
+/**
+ * zfcp_qdio_sg_one_sbal - check if one sbale is enough for sg data
+ * @sg: The scatterlist where to check the data size
+ *
+ * Returns: 1 when one sbale is enough for the data in the scatterlist,
+ * 0 if not.
+ */
+static inline
+int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
+{
+ return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
+}
+
+/**
+ * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
+ * @q_req: The current zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio_req *q_req)
+{
+ q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL;
+}
+
#endif /* ZFCP_QDIO_H */
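With these helpers in place, building a request follows a fixed sequence; the reworked status-read path in zfcp_fsf.c above is a good example. The following is a trimmed, illustrative sketch only, with error paths and the mempool handling for sr_buf omitted:

spin_lock_bh(&qdio->req_q_lock);		/* lock held from init to send */
if (zfcp_qdio_sbal_get(qdio))			/* wait up to 5s for a free SBAL */
	goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 0, pool);
/* req_create() calls zfcp_qdio_req_init(): request ID into SBALE 0,
 * QTCB (if any) into SBALE 1, sbtype remembered in req->qdio_req */
zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);	/* mark the final entry */
retval = zfcp_fsf_req_send(req);
out:
	spin_unlock_bh(&qdio->req_q_lock);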
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 174b6d5..be5d2c6 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -175,7 +175,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
struct zfcp_fsf_req *old_req, *abrt_req;
unsigned long flags;
unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
- int retval = SUCCESS;
+ int retval = SUCCESS, ret;
int retry = 3;
char *dbf_tag;
@@ -200,7 +200,9 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
break;
zfcp_erp_wait(adapter);
- fc_block_scsi_eh(scpnt);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret)
+ return ret;
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
@@ -231,7 +233,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
struct zfcp_unit *unit = scpnt->device->hostdata;
struct zfcp_adapter *adapter = unit->port->adapter;
struct zfcp_fsf_req *fsf_req = NULL;
- int retval = SUCCESS;
+ int retval = SUCCESS, ret;
int retry = 3;
while (retry--) {
@@ -240,7 +242,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
break;
zfcp_erp_wait(adapter);
- fc_block_scsi_eh(scpnt);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret)
+ return ret;
+
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt);
@@ -276,10 +281,13 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
struct zfcp_unit *unit = scpnt->device->hostdata;
struct zfcp_adapter *adapter = unit->port->adapter;
+ int ret;
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
zfcp_erp_wait(adapter);
- fc_block_scsi_eh(scpnt);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret)
+ return ret;
return SUCCESS;
}
@@ -669,11 +677,12 @@ struct zfcp_data zfcp_data = {
.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
.can_queue = 4096,
.this_id = -1,
- .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ,
+ .sg_tablesize = ZFCP_FSF_MAX_SBALES_PER_REQ,
.cmd_per_lun = 1,
.use_clustering = 1,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
- .max_sectors = (ZFCP_MAX_SBALES_PER_REQ * 8),
+ .max_sectors = (ZFCP_FSF_MAX_SBALES_PER_REQ * 8),
+ .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
.shost_attrs = zfcp_sysfs_shost_attrs,
},
};
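Since fc_block_scsi_eh() now returns a verdict instead of void, the zfcp error handlers above propagate that value rather than ignoring it. A hedged sketch of the resulting pattern; the handler name and surrounding steps are illustrative only:

static int example_eh_abort_handler(struct scsi_cmnd *scpnt)
{
	int ret;

	/* ... issue the abort, wait for error recovery ... */
	ret = fc_block_scsi_eh(scpnt);	/* non-zero (e.g. FAST_IO_FAIL) means give up now */
	if (ret)
		return ret;		/* hand the verdict straight back to the SCSI EH */
	/* ... otherwise evaluate the abort result or retry ... */
	return SUCCESS;
}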
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index e9788f5..1bb774b 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1,10 +1,11 @@
/*
3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@amcc.com>
- Modifications By: Tom Couch <linuxraid@amcc.com>
+ Written By: Adam Radford <linuxraid@lsi.com>
+ Modifications By: Tom Couch <linuxraid@lsi.com>
Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
+ Copyright (C) 2010 LSI Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -40,10 +41,10 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@amcc.com
+ linuxraid@lsi.com
For more information, goto:
- http://www.amcc.com
+ http://www.lsi.com
Note: This version of the driver does not contain a bundled firmware
image.
@@ -77,6 +78,7 @@
Use pci_resource_len() for ioremap().
2.26.02.012 - Add power management support.
2.26.02.013 - Fix bug in twa_load_sgl().
+ 2.26.02.014 - Force 60 second timeout default.
*/
#include <linux/module.h>
@@ -102,14 +104,14 @@
#include "3w-9xxx.h"
/* Globals */
-#define TW_DRIVER_VERSION "2.26.02.013"
+#define TW_DRIVER_VERSION "2.26.02.014"
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
extern struct timezone sys_tz;
/* Module parameters */
-MODULE_AUTHOR ("AMCC");
+MODULE_AUTHOR ("LSI");
MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);
@@ -1990,6 +1992,15 @@ static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
scsi_dma_unmap(cmd);
} /* End twa_unmap_scsi_data() */
+/* This function gets called when a disk is coming on-line */
+static int twa_slave_configure(struct scsi_device *sdev)
+{
+ /* Force 60 second timeout */
+ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
+
+ return 0;
+} /* End twa_slave_configure() */
+
/* scsi_host_template initializer */
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
@@ -1999,6 +2010,7 @@ static struct scsi_host_template driver_template = {
.bios_param = twa_scsi_biosparam,
.change_queue_depth = twa_change_queue_depth,
.can_queue = TW_Q_LENGTH-2,
+ .slave_configure = twa_slave_configure,
.this_id = -1,
.sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 2893eec..3343824 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -1,10 +1,11 @@
/*
3w-9xxx.h -- 3ware 9000 Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@amcc.com>
- Modifications By: Tom Couch <linuxraid@amcc.com>
+ Written By: Adam Radford <linuxraid@lsi.com>
+ Modifications By: Tom Couch <linuxraid@lsi.com>
Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
+ Copyright (C) 2010 LSI Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -40,10 +41,10 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@amcc.com
+ linuxraid@lsi.com
For more information, goto:
- http://www.amcc.com
+ http://www.lsi.com
*/
#ifndef _3W_9XXX_H
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 54c5ffb..d38000d 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -98,7 +98,7 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
/* Functions */
/* This function returns AENs through sysfs */
-static ssize_t twl_sysfs_aen_read(struct kobject *kobj,
+static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *outbuf, loff_t offset, size_t count)
{
@@ -129,7 +129,7 @@ static struct bin_attribute twl_sysfs_aen_read_attr = {
};
/* This function returns driver compatibility info through sysfs */
-static ssize_t twl_sysfs_compat_info(struct kobject *kobj,
+static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *outbuf, loff_t offset, size_t count)
{
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 5faf903..d119a61 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1,12 +1,12 @@
/*
3w-xxxx.c -- 3ware Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@amcc.com>
+ Written By: Adam Radford <linuxraid@lsi.com>
Modifications By: Joel Jacobson <linux@3ware.com>
Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
- Copyright (C) 1999-2009 3ware Inc.
+ Copyright (C) 1999-2010 3ware Inc.
Kernel compatibility By: Andre Hedrick <andre@suse.com>
Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
@@ -47,10 +47,10 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@amcc.com
+ linuxraid@lsi.com
For more information, goto:
- http://www.amcc.com
+ http://www.lsi.com
History
-------
@@ -194,6 +194,7 @@
1.26.02.002 - Free irq handler in __tw_shutdown().
Turn on RCD bit for caching mode page.
Serialize reset code.
+ 1.26.02.003 - Force 60 second timeout default.
*/
#include <linux/module.h>
@@ -219,13 +220,13 @@
#include "3w-xxxx.h"
/* Globals */
-#define TW_DRIVER_VERSION "1.26.02.002"
+#define TW_DRIVER_VERSION "1.26.02.003"
static TW_Device_Extension *tw_device_extension_list[TW_MAX_SLOT];
static int tw_device_extension_count = 0;
static int twe_major = -1;
/* Module parameters */
-MODULE_AUTHOR("AMCC");
+MODULE_AUTHOR("LSI");
MODULE_DESCRIPTION("3ware Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);
@@ -2245,6 +2246,15 @@ static void tw_shutdown(struct pci_dev *pdev)
__tw_shutdown(tw_dev);
} /* End tw_shutdown() */
+/* This function gets called when a disk is coming online */
+static int tw_slave_configure(struct scsi_device *sdev)
+{
+ /* Force 60 second timeout */
+ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
+
+ return 0;
+} /* End tw_slave_configure() */
+
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "3ware Storage Controller",
@@ -2253,6 +2263,7 @@ static struct scsi_host_template driver_template = {
.bios_param = tw_scsi_biosparam,
.change_queue_depth = tw_change_queue_depth,
.can_queue = TW_Q_LENGTH-2,
+ .slave_configure = tw_slave_configure,
.this_id = -1,
.sg_tablesize = TW_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index a5a2ba2..8b9f9d1 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -1,12 +1,12 @@
/*
3w-xxxx.h -- 3ware Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@amcc.com>
+ Written By: Adam Radford <linuxraid@lsi.com>
Modifications By: Joel Jacobson <linux@3ware.com>
Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
- Copyright (C) 1999-2009 3ware Inc.
+ Copyright (C) 1999-2010 3ware Inc.
   Kernel compatibility By: 	Andre Hedrick <andre@suse.com>
Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
@@ -45,10 +45,10 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@amcc.com
+ linuxraid@lsi.com
For more information, goto:
- http://www.amcc.com
+ http://www.lsi.com
*/
#ifndef _3W_XXXX_H
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 92a8c50..1c7ac49 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -162,6 +162,7 @@ scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o
scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
+scsi_mod-y += scsi_trace.o
scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index d8fe5b7..308541f 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -19,186 +19,190 @@
#include "wd33c93.h"
#include "a2091.h"
-#include<linux/stat.h>
+#include <linux/stat.h>
-#define DMA(ptr) ((a2091_scsiregs *)((ptr)->base))
-#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
static int a2091_release(struct Scsi_Host *instance);
-static irqreturn_t a2091_intr (int irq, void *_instance)
+static irqreturn_t a2091_intr(int irq, void *data)
{
- unsigned long flags;
- unsigned int status;
- struct Scsi_Host *instance = (struct Scsi_Host *)_instance;
-
- status = DMA(instance)->ISTR;
- if (!(status & (ISTR_INT_F|ISTR_INT_P)) || !(status & ISTR_INTS))
- return IRQ_NONE;
-
- spin_lock_irqsave(instance->host_lock, flags);
- wd33c93_intr(instance);
- spin_unlock_irqrestore(instance->host_lock, flags);
- return IRQ_HANDLED;
+ struct Scsi_Host *instance = data;
+ a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base);
+ unsigned int status = regs->ISTR;
+ unsigned long flags;
+
+ if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS))
+ return IRQ_NONE;
+
+ spin_lock_irqsave(instance->host_lock, flags);
+ wd33c93_intr(instance);
+ spin_unlock_irqrestore(instance->host_lock, flags);
+ return IRQ_HANDLED;
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
- unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
- struct Scsi_Host *instance = cmd->device->host;
-
- /* don't allow DMA if the physical address is bad */
- if (addr & A2091_XFER_MASK)
- {
- HDATA(instance)->dma_bounce_len = (cmd->SCp.this_residual + 511)
- & ~0x1ff;
- HDATA(instance)->dma_bounce_buffer =
- kmalloc (HDATA(instance)->dma_bounce_len, GFP_KERNEL);
-
- /* can't allocate memory; use PIO */
- if (!HDATA(instance)->dma_bounce_buffer) {
- HDATA(instance)->dma_bounce_len = 0;
- return 1;
- }
-
- /* get the physical address of the bounce buffer */
- addr = virt_to_bus(HDATA(instance)->dma_bounce_buffer);
+ struct Scsi_Host *instance = cmd->device->host;
+ struct WD33C93_hostdata *hdata = shost_priv(instance);
+ a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base);
+ unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
+ unsigned long addr = virt_to_bus(cmd->SCp.ptr);
- /* the bounce buffer may not be in the first 16M of physmem */
+ /* don't allow DMA if the physical address is bad */
if (addr & A2091_XFER_MASK) {
- /* we could use chipmem... maybe later */
- kfree (HDATA(instance)->dma_bounce_buffer);
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
- return 1;
+ hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len,
+ GFP_KERNEL);
+
+ /* can't allocate memory; use PIO */
+ if (!hdata->dma_bounce_buffer) {
+ hdata->dma_bounce_len = 0;
+ return 1;
+ }
+
+ /* get the physical address of the bounce buffer */
+ addr = virt_to_bus(hdata->dma_bounce_buffer);
+
+ /* the bounce buffer may not be in the first 16M of physmem */
+ if (addr & A2091_XFER_MASK) {
+ /* we could use chipmem... maybe later */
+ kfree(hdata->dma_bounce_buffer);
+ hdata->dma_bounce_buffer = NULL;
+ hdata->dma_bounce_len = 0;
+ return 1;
+ }
+
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr,
+ cmd->SCp.this_residual);
+ }
}
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy (HDATA(instance)->dma_bounce_buffer,
- cmd->SCp.ptr, cmd->SCp.this_residual);
- }
- }
+ /* setup dma direction */
+ if (!dir_in)
+ cntr |= CNTR_DDIR;
- /* setup dma direction */
- if (!dir_in)
- cntr |= CNTR_DDIR;
+ /* remember direction */
+ hdata->dma_dir = dir_in;
- /* remember direction */
- HDATA(cmd->device->host)->dma_dir = dir_in;
+ regs->CNTR = cntr;
- DMA(cmd->device->host)->CNTR = cntr;
+ /* setup DMA *physical* address */
+ regs->ACR = addr;
- /* setup DMA *physical* address */
- DMA(cmd->device->host)->ACR = addr;
-
- if (dir_in){
- /* invalidate any cache */
- cache_clear (addr, cmd->SCp.this_residual);
- }else{
- /* push any dirty cache */
- cache_push (addr, cmd->SCp.this_residual);
- }
- /* start DMA */
- DMA(cmd->device->host)->ST_DMA = 1;
+ if (dir_in) {
+ /* invalidate any cache */
+ cache_clear(addr, cmd->SCp.this_residual);
+ } else {
+ /* push any dirty cache */
+ cache_push(addr, cmd->SCp.this_residual);
+ }
+ /* start DMA */
+ regs->ST_DMA = 1;
- /* return success */
- return 0;
+ /* return success */
+ return 0;
}
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
- int status)
+ int status)
{
- /* disable SCSI interrupts */
- unsigned short cntr = CNTR_PDMD;
-
- if (!HDATA(instance)->dma_dir)
- cntr |= CNTR_DDIR;
-
- /* disable SCSI interrupts */
- DMA(instance)->CNTR = cntr;
-
- /* flush if we were reading */
- if (HDATA(instance)->dma_dir) {
- DMA(instance)->FLUSH = 1;
- while (!(DMA(instance)->ISTR & ISTR_FE_FLG))
- ;
- }
-
- /* clear a possible interrupt */
- DMA(instance)->CINT = 1;
-
- /* stop DMA */
- DMA(instance)->SP_DMA = 1;
-
- /* restore the CONTROL bits (minus the direction flag) */
- DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
-
- /* copy from a bounce buffer, if necessary */
- if (status && HDATA(instance)->dma_bounce_buffer) {
- if( HDATA(instance)->dma_dir )
- memcpy (SCpnt->SCp.ptr,
- HDATA(instance)->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
- kfree (HDATA(instance)->dma_bounce_buffer);
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
- }
+ struct WD33C93_hostdata *hdata = shost_priv(instance);
+ a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base);
+
+ /* disable SCSI interrupts */
+ unsigned short cntr = CNTR_PDMD;
+
+ if (!hdata->dma_dir)
+ cntr |= CNTR_DDIR;
+
+ /* disable SCSI interrupts */
+ regs->CNTR = cntr;
+
+ /* flush if we were reading */
+ if (hdata->dma_dir) {
+ regs->FLUSH = 1;
+ while (!(regs->ISTR & ISTR_FE_FLG))
+ ;
+ }
+
+ /* clear a possible interrupt */
+ regs->CINT = 1;
+
+ /* stop DMA */
+ regs->SP_DMA = 1;
+
+ /* restore the CONTROL bits (minus the direction flag) */
+ regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+
+ /* copy from a bounce buffer, if necessary */
+ if (status && hdata->dma_bounce_buffer) {
+ if (hdata->dma_dir)
+ memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer,
+ SCpnt->SCp.this_residual);
+ kfree(hdata->dma_bounce_buffer);
+ hdata->dma_bounce_buffer = NULL;
+ hdata->dma_bounce_len = 0;
+ }
}
static int __init a2091_detect(struct scsi_host_template *tpnt)
{
- static unsigned char called = 0;
- struct Scsi_Host *instance;
- unsigned long address;
- struct zorro_dev *z = NULL;
- wd33c93_regs regs;
- int num_a2091 = 0;
-
- if (!MACH_IS_AMIGA || called)
- return 0;
- called = 1;
-
- tpnt->proc_name = "A2091";
- tpnt->proc_info = &wd33c93_proc_info;
-
- while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
- if (z->id != ZORRO_PROD_CBM_A590_A2091_1 &&
- z->id != ZORRO_PROD_CBM_A590_A2091_2)
- continue;
- address = z->resource.start;
- if (!request_mem_region(address, 256, "wd33c93"))
- continue;
-
- instance = scsi_register (tpnt, sizeof (struct WD33C93_hostdata));
- if (instance == NULL)
- goto release;
- instance->base = ZTWO_VADDR(address);
- instance->irq = IRQ_AMIGA_PORTS;
- instance->unique_id = z->slotaddr;
- DMA(instance)->DAWR = DAWR_A2091;
- regs.SASR = &(DMA(instance)->SASR);
- regs.SCMD = &(DMA(instance)->SCMD);
- HDATA(instance)->no_sync = 0xff;
- HDATA(instance)->fast = 0;
- HDATA(instance)->dma_mode = CTRL_DMA;
- wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
- if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI",
- instance))
- goto unregister;
- DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
- num_a2091++;
- continue;
+ static unsigned char called = 0;
+ struct Scsi_Host *instance;
+ unsigned long address;
+ struct zorro_dev *z = NULL;
+ wd33c93_regs wdregs;
+ a2091_scsiregs *regs;
+ struct WD33C93_hostdata *hdata;
+ int num_a2091 = 0;
+
+ if (!MACH_IS_AMIGA || called)
+ return 0;
+ called = 1;
+
+ tpnt->proc_name = "A2091";
+ tpnt->proc_info = &wd33c93_proc_info;
+
+ while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
+ if (z->id != ZORRO_PROD_CBM_A590_A2091_1 &&
+ z->id != ZORRO_PROD_CBM_A590_A2091_2)
+ continue;
+ address = z->resource.start;
+ if (!request_mem_region(address, 256, "wd33c93"))
+ continue;
+
+ instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
+ if (instance == NULL)
+ goto release;
+ instance->base = ZTWO_VADDR(address);
+ instance->irq = IRQ_AMIGA_PORTS;
+ instance->unique_id = z->slotaddr;
+ regs = (a2091_scsiregs *)(instance->base);
+ regs->DAWR = DAWR_A2091;
+ wdregs.SASR = &regs->SASR;
+ wdregs.SCMD = &regs->SCMD;
+ hdata = shost_priv(instance);
+ hdata->no_sync = 0xff;
+ hdata->fast = 0;
+ hdata->dma_mode = CTRL_DMA;
+ wd33c93_init(instance, wdregs, dma_setup, dma_stop,
+ WD33C93_FS_8_10);
+ if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED,
+ "A2091 SCSI", instance))
+ goto unregister;
+ regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+ num_a2091++;
+ continue;
unregister:
- scsi_unregister(instance);
- wd33c93_release();
+ scsi_unregister(instance);
release:
- release_mem_region(address, 256);
- }
+ release_mem_region(address, 256);
+ }
- return num_a2091;
+ return num_a2091;
}
static int a2091_bus_reset(struct scsi_cmnd *cmd)
@@ -239,10 +243,11 @@ static struct scsi_host_template driver_template = {
static int a2091_release(struct Scsi_Host *instance)
{
#ifdef MODULE
- DMA(instance)->CNTR = 0;
+ a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base);
+
+ regs->CNTR = 0;
release_mem_region(ZTWO_PADDR(instance->base), 256);
free_irq(IRQ_AMIGA_PORTS, instance);
- wd33c93_release();
#endif
return 1;
}
diff --git a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h
index 252528f..1c3daa1 100644
--- a/drivers/scsi/a2091.h
+++ b/drivers/scsi/a2091.h
@@ -12,38 +12,38 @@
#include <linux/types.h>
#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 2
+#define CMD_PER_LUN 2
#endif
#ifndef CAN_QUEUE
-#define CAN_QUEUE 16
+#define CAN_QUEUE 16
#endif
/*
* if the transfer address ANDed with this results in a non-zero
* result, then we can't use DMA.
*/
-#define A2091_XFER_MASK (0xff000001)
+#define A2091_XFER_MASK (0xff000001)
typedef struct {
- unsigned char pad1[64];
- volatile unsigned short ISTR;
- volatile unsigned short CNTR;
- unsigned char pad2[60];
- volatile unsigned int WTC;
- volatile unsigned long ACR;
- unsigned char pad3[6];
- volatile unsigned short DAWR;
- unsigned char pad4;
- volatile unsigned char SASR;
- unsigned char pad5;
- volatile unsigned char SCMD;
- unsigned char pad6[76];
- volatile unsigned short ST_DMA;
- volatile unsigned short SP_DMA;
- volatile unsigned short CINT;
- unsigned char pad7[2];
- volatile unsigned short FLUSH;
+ unsigned char pad1[64];
+ volatile unsigned short ISTR;
+ volatile unsigned short CNTR;
+ unsigned char pad2[60];
+ volatile unsigned int WTC;
+ volatile unsigned long ACR;
+ unsigned char pad3[6];
+ volatile unsigned short DAWR;
+ unsigned char pad4;
+ volatile unsigned char SASR;
+ unsigned char pad5;
+ volatile unsigned char SCMD;
+ unsigned char pad6[76];
+ volatile unsigned short ST_DMA;
+ volatile unsigned short SP_DMA;
+ volatile unsigned short CINT;
+ unsigned char pad7[2];
+ volatile unsigned short FLUSH;
} a2091_scsiregs;
#define DAWR_A2091 (3)
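The A2091_XFER_MASK test documented above decides whether a buffer can be handed to the WD33C93 DMA engine directly; when any masked bit is set, the driver's dma_setup() (the A3000 variant below shows the same pattern) falls back to a bounce buffer whose length is rounded up to a 512-byte multiple. A minimal standalone sketch of that arithmetic, illustrative only and not part of the patch:

#include <stdio.h>

/* Illustrative only -- mirrors the checks done by the driver's dma_setup(). */
#define A2091_XFER_MASK (0xff000001)

static unsigned long bounce_len(unsigned long residual)
{
	return (residual + 511) & ~0x1ff;	/* round up to a 512-byte multiple */
}

int main(void)
{
	unsigned long addr = 0x00200001;	/* odd address: fails the mask test */

	printf("needs bounce buffer: %d\n", (addr & A2091_XFER_MASK) != 0);
	printf("bounce length for 1 residual byte: %lu\n", bounce_len(1));	/* 512 */
	return 0;
}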
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index c35fc55..bc6eb69 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -19,26 +19,25 @@
#include "wd33c93.h"
#include "a3000.h"
-#include<linux/stat.h>
+#include <linux/stat.h>
-#define DMA(ptr) ((a3000_scsiregs *)((ptr)->base))
-#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
+
+#define DMA(ptr) ((a3000_scsiregs *)((ptr)->base))
static struct Scsi_Host *a3000_host = NULL;
static int a3000_release(struct Scsi_Host *instance);
-static irqreturn_t a3000_intr (int irq, void *dummy)
+static irqreturn_t a3000_intr(int irq, void *dummy)
{
unsigned long flags;
unsigned int status = DMA(a3000_host)->ISTR;
if (!(status & ISTR_INT_P))
return IRQ_NONE;
- if (status & ISTR_INTS)
- {
+ if (status & ISTR_INTS) {
spin_lock_irqsave(a3000_host->host_lock, flags);
- wd33c93_intr (a3000_host);
+ wd33c93_intr(a3000_host);
spin_unlock_irqrestore(a3000_host->host_lock, flags);
return IRQ_HANDLED;
}
@@ -48,162 +47,165 @@ static irqreturn_t a3000_intr (int irq, void *dummy)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
- unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
-
- /*
- * if the physical address has the wrong alignment, or if
- * physical address is bad, or if it is a write and at the
- * end of a physical memory chunk, then allocate a bounce
- * buffer
- */
- if (addr & A3000_XFER_MASK)
- {
- HDATA(a3000_host)->dma_bounce_len = (cmd->SCp.this_residual + 511)
- & ~0x1ff;
- HDATA(a3000_host)->dma_bounce_buffer =
- kmalloc (HDATA(a3000_host)->dma_bounce_len, GFP_KERNEL);
-
- /* can't allocate memory; use PIO */
- if (!HDATA(a3000_host)->dma_bounce_buffer) {
- HDATA(a3000_host)->dma_bounce_len = 0;
- return 1;
- }
-
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy (HDATA(a3000_host)->dma_bounce_buffer,
- cmd->SCp.ptr, cmd->SCp.this_residual);
+ struct WD33C93_hostdata *hdata = shost_priv(a3000_host);
+ unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
+ unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+
+ /*
+ * if the physical address has the wrong alignment, or if
+ * physical address is bad, or if it is a write and at the
+ * end of a physical memory chunk, then allocate a bounce
+ * buffer
+ */
+ if (addr & A3000_XFER_MASK) {
+ hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len,
+ GFP_KERNEL);
+
+ /* can't allocate memory; use PIO */
+ if (!hdata->dma_bounce_buffer) {
+ hdata->dma_bounce_len = 0;
+ return 1;
+ }
+
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr,
+ cmd->SCp.this_residual);
+ }
+
+ addr = virt_to_bus(hdata->dma_bounce_buffer);
}
- addr = virt_to_bus(HDATA(a3000_host)->dma_bounce_buffer);
- }
+ /* setup dma direction */
+ if (!dir_in)
+ cntr |= CNTR_DDIR;
- /* setup dma direction */
- if (!dir_in)
- cntr |= CNTR_DDIR;
+ /* remember direction */
+ hdata->dma_dir = dir_in;
- /* remember direction */
- HDATA(a3000_host)->dma_dir = dir_in;
+ DMA(a3000_host)->CNTR = cntr;
- DMA(a3000_host)->CNTR = cntr;
+ /* setup DMA *physical* address */
+ DMA(a3000_host)->ACR = addr;
- /* setup DMA *physical* address */
- DMA(a3000_host)->ACR = addr;
-
- if (dir_in)
- /* invalidate any cache */
- cache_clear (addr, cmd->SCp.this_residual);
- else
- /* push any dirty cache */
- cache_push (addr, cmd->SCp.this_residual);
+ if (dir_in) {
+ /* invalidate any cache */
+ cache_clear(addr, cmd->SCp.this_residual);
+ } else {
+ /* push any dirty cache */
+ cache_push(addr, cmd->SCp.this_residual);
+ }
- /* start DMA */
- mb(); /* make sure setup is completed */
- DMA(a3000_host)->ST_DMA = 1;
- mb(); /* make sure DMA has started before next IO */
+ /* start DMA */
+ mb(); /* make sure setup is completed */
+ DMA(a3000_host)->ST_DMA = 1;
+ mb(); /* make sure DMA has started before next IO */
- /* return success */
- return 0;
+ /* return success */
+ return 0;
}
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
- /* disable SCSI interrupts */
- unsigned short cntr = CNTR_PDMD;
-
- if (!HDATA(instance)->dma_dir)
- cntr |= CNTR_DDIR;
-
- DMA(instance)->CNTR = cntr;
- mb(); /* make sure CNTR is updated before next IO */
-
- /* flush if we were reading */
- if (HDATA(instance)->dma_dir) {
- DMA(instance)->FLUSH = 1;
- mb(); /* don't allow prefetch */
- while (!(DMA(instance)->ISTR & ISTR_FE_FLG))
- barrier();
- mb(); /* no IO until FLUSH is done */
- }
-
- /* clear a possible interrupt */
- /* I think that this CINT is only necessary if you are
- * using the terminal count features. HM 7 Mar 1994
- */
- DMA(instance)->CINT = 1;
-
- /* stop DMA */
- DMA(instance)->SP_DMA = 1;
- mb(); /* make sure DMA is stopped before next IO */
-
- /* restore the CONTROL bits (minus the direction flag) */
- DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
- mb(); /* make sure CNTR is updated before next IO */
-
- /* copy from a bounce buffer, if necessary */
- if (status && HDATA(instance)->dma_bounce_buffer) {
- if (SCpnt) {
- if (HDATA(instance)->dma_dir && SCpnt)
- memcpy (SCpnt->SCp.ptr,
- HDATA(instance)->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
- kfree (HDATA(instance)->dma_bounce_buffer);
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
- } else {
- kfree (HDATA(instance)->dma_bounce_buffer);
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
+ struct WD33C93_hostdata *hdata = shost_priv(instance);
+
+ /* disable SCSI interrupts */
+ unsigned short cntr = CNTR_PDMD;
+
+ if (!hdata->dma_dir)
+ cntr |= CNTR_DDIR;
+
+ DMA(instance)->CNTR = cntr;
+ mb(); /* make sure CNTR is updated before next IO */
+
+ /* flush if we were reading */
+ if (hdata->dma_dir) {
+ DMA(instance)->FLUSH = 1;
+ mb(); /* don't allow prefetch */
+ while (!(DMA(instance)->ISTR & ISTR_FE_FLG))
+ barrier();
+ mb(); /* no IO until FLUSH is done */
+ }
+
+ /* clear a possible interrupt */
+ /* I think that this CINT is only necessary if you are
+ * using the terminal count features. HM 7 Mar 1994
+ */
+ DMA(instance)->CINT = 1;
+
+ /* stop DMA */
+ DMA(instance)->SP_DMA = 1;
+ mb(); /* make sure DMA is stopped before next IO */
+
+ /* restore the CONTROL bits (minus the direction flag) */
+ DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
+ mb(); /* make sure CNTR is updated before next IO */
+
+ /* copy from a bounce buffer, if necessary */
+ if (status && hdata->dma_bounce_buffer) {
+ if (SCpnt) {
+ if (hdata->dma_dir && SCpnt)
+ memcpy(SCpnt->SCp.ptr,
+ hdata->dma_bounce_buffer,
+ SCpnt->SCp.this_residual);
+ kfree(hdata->dma_bounce_buffer);
+ hdata->dma_bounce_buffer = NULL;
+ hdata->dma_bounce_len = 0;
+ } else {
+ kfree(hdata->dma_bounce_buffer);
+ hdata->dma_bounce_buffer = NULL;
+ hdata->dma_bounce_len = 0;
+ }
}
- }
}
static int __init a3000_detect(struct scsi_host_template *tpnt)
{
- wd33c93_regs regs;
-
- if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(A3000_SCSI))
- return 0;
- if (!request_mem_region(0xDD0000, 256, "wd33c93"))
- return 0;
-
- tpnt->proc_name = "A3000";
- tpnt->proc_info = &wd33c93_proc_info;
-
- a3000_host = scsi_register (tpnt, sizeof(struct WD33C93_hostdata));
- if (a3000_host == NULL)
- goto fail_register;
-
- a3000_host->base = ZTWO_VADDR(0xDD0000);
- a3000_host->irq = IRQ_AMIGA_PORTS;
- DMA(a3000_host)->DAWR = DAWR_A3000;
- regs.SASR = &(DMA(a3000_host)->SASR);
- regs.SCMD = &(DMA(a3000_host)->SCMD);
- HDATA(a3000_host)->no_sync = 0xff;
- HDATA(a3000_host)->fast = 0;
- HDATA(a3000_host)->dma_mode = CTRL_DMA;
- wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15);
- if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI",
- a3000_intr))
- goto fail_irq;
- DMA(a3000_host)->CNTR = CNTR_PDMD | CNTR_INTEN;
-
- return 1;
+ wd33c93_regs regs;
+ struct WD33C93_hostdata *hdata;
+
+ if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(A3000_SCSI))
+ return 0;
+ if (!request_mem_region(0xDD0000, 256, "wd33c93"))
+ return 0;
+
+ tpnt->proc_name = "A3000";
+ tpnt->proc_info = &wd33c93_proc_info;
+
+ a3000_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
+ if (a3000_host == NULL)
+ goto fail_register;
+
+ a3000_host->base = ZTWO_VADDR(0xDD0000);
+ a3000_host->irq = IRQ_AMIGA_PORTS;
+ DMA(a3000_host)->DAWR = DAWR_A3000;
+ regs.SASR = &(DMA(a3000_host)->SASR);
+ regs.SCMD = &(DMA(a3000_host)->SCMD);
+ hdata = shost_priv(a3000_host);
+ hdata->no_sync = 0xff;
+ hdata->fast = 0;
+ hdata->dma_mode = CTRL_DMA;
+ wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15);
+ if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI",
+ a3000_intr))
+ goto fail_irq;
+ DMA(a3000_host)->CNTR = CNTR_PDMD | CNTR_INTEN;
+
+ return 1;
fail_irq:
- wd33c93_release();
- scsi_unregister(a3000_host);
+ scsi_unregister(a3000_host);
fail_register:
- release_mem_region(0xDD0000, 256);
- return 0;
+ release_mem_region(0xDD0000, 256);
+ return 0;
}
static int a3000_bus_reset(struct scsi_cmnd *cmd)
{
/* FIXME perform bus-specific reset */
-
+
/* FIXME 2: kill this entire function, which should
cause mid-layer to call wd33c93_host_reset anyway? */
@@ -237,11 +239,10 @@ static struct scsi_host_template driver_template = {
static int a3000_release(struct Scsi_Host *instance)
{
- wd33c93_release();
- DMA(instance)->CNTR = 0;
- release_mem_region(0xDD0000, 256);
- free_irq(IRQ_AMIGA_PORTS, a3000_intr);
- return 1;
+ DMA(instance)->CNTR = 0;
+ release_mem_region(0xDD0000, 256);
+ free_irq(IRQ_AMIGA_PORTS, a3000_intr);
+ return 1;
}
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/a3000.h b/drivers/scsi/a3000.h
index c7afe16..684813e 100644
--- a/drivers/scsi/a3000.h
+++ b/drivers/scsi/a3000.h
@@ -12,40 +12,40 @@
#include <linux/types.h>
#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 2
+#define CMD_PER_LUN 2
#endif
#ifndef CAN_QUEUE
-#define CAN_QUEUE 16
+#define CAN_QUEUE 16
#endif
/*
* if the transfer address ANDed with this results in a non-zero
* result, then we can't use DMA.
*/
-#define A3000_XFER_MASK (0x00000003)
+#define A3000_XFER_MASK (0x00000003)
typedef struct {
- unsigned char pad1[2];
- volatile unsigned short DAWR;
- volatile unsigned int WTC;
- unsigned char pad2[2];
- volatile unsigned short CNTR;
- volatile unsigned long ACR;
- unsigned char pad3[2];
- volatile unsigned short ST_DMA;
- unsigned char pad4[2];
- volatile unsigned short FLUSH;
- unsigned char pad5[2];
- volatile unsigned short CINT;
- unsigned char pad6[2];
- volatile unsigned short ISTR;
- unsigned char pad7[30];
- volatile unsigned short SP_DMA;
- unsigned char pad8;
- volatile unsigned char SASR;
- unsigned char pad9;
- volatile unsigned char SCMD;
+ unsigned char pad1[2];
+ volatile unsigned short DAWR;
+ volatile unsigned int WTC;
+ unsigned char pad2[2];
+ volatile unsigned short CNTR;
+ volatile unsigned long ACR;
+ unsigned char pad3[2];
+ volatile unsigned short ST_DMA;
+ unsigned char pad4[2];
+ volatile unsigned short FLUSH;
+ unsigned char pad5[2];
+ volatile unsigned short CINT;
+ unsigned char pad6[2];
+ volatile unsigned short ISTR;
+ unsigned char pad7[30];
+ volatile unsigned short SP_DMA;
+ unsigned char pad8;
+ volatile unsigned char SASR;
+ unsigned char pad9;
+ volatile unsigned char SCMD;
} a3000_scsiregs;
#define DAWR_A3000 (3)
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 7e26ebc..7df2dd1 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -328,6 +328,16 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
return status;
}
+static void aac_expose_phy_device(struct scsi_cmnd *scsicmd)
+{
+ char inq_data;
+ scsi_sg_copy_to_buffer(scsicmd, &inq_data, sizeof(inq_data));
+ if ((inq_data & 0x20) && (inq_data & 0x1f) == TYPE_DISK) {
+ inq_data &= 0xdf;
+ scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
+ }
+}
+
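A hedged reading of the bit masks in aac_expose_phy_device() above, assuming the standard SCSI INQUIRY byte-0 layout (bits 7-5 peripheral qualifier, bits 4-0 peripheral device type) and TYPE_DISK == 0x00; the sketch is illustrative only and not part of the patch:

#include <stdint.h>

#define TYPE_DISK 0x00	/* assumed value of the kernel's TYPE_DISK */

static uint8_t expose_phy_byte0(uint8_t b0)
{
	/* a disk whose peripheral qualifier has bit 5 set, i.e. the LUN is
	 * reported as not currently accessible? */
	if ((b0 & 0x20) && (b0 & 0x1f) == TYPE_DISK)
		b0 &= 0xdf;	/* clear bit 5 so the disk shows up as accessible */
	return b0;
}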
/**
* aac_get_containers - list containers
* @common: adapter to probe
@@ -1598,6 +1608,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
int status;
struct aac_dev *dev;
struct fib * cmd_fibcontext;
+ int cid;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
/*
@@ -1647,6 +1658,22 @@ static int aac_read(struct scsi_cmnd * scsicmd)
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
break;
}
+
+ if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
+ cid = scmd_id(scsicmd);
+ dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ scsicmd->scsi_done(scsicmd);
+ return 1;
+ }
+
dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies));
if (aac_adapter_bounds(dev,scsicmd,lba))
@@ -1688,6 +1715,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
int status;
struct aac_dev *dev;
struct fib * cmd_fibcontext;
+ int cid;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
/*
@@ -1727,6 +1755,22 @@ static int aac_write(struct scsi_cmnd * scsicmd)
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
fua = scsicmd->cmnd[1] & 0x8;
}
+
+ if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
+ cid = scmd_id(scsicmd);
+ dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ scsicmd->scsi_done(scsicmd);
+ return 1;
+ }
+
dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies));
if (aac_adapter_bounds(dev,scsicmd,lba))
@@ -2573,6 +2617,11 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
scsi_dma_unmap(scsicmd);
+	/* expose the physical device if the expose_physicals flag is set */

+ if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
+ && expose_physicals > 0)
+ aac_expose_phy_device(scsicmd);
+
/*
* First check the fib status
*/
@@ -2678,8 +2727,22 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
scsicmd->cmnd[0],
le32_to_cpu(srbreply->scsi_status));
#endif
- scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
- break;
+ if ((scsicmd->cmnd[0] == ATA_12)
+ || (scsicmd->cmnd[0] == ATA_16)) {
+ if (scsicmd->cmnd[2] & (0x01 << 5)) {
+ scsicmd->result = DID_OK << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+ } else {
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+ }
+ } else {
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+ }
}
if (le32_to_cpu(srbreply->scsi_status) == SAM_STAT_CHECK_CONDITION) {
int len;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 619c02d..4dbcc05 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 24702
+# define AAC_DRIVER_BUILD 26400
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -26,6 +26,8 @@
#define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff)
#define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)256)
+#define AAC_DEBUG_INSTRUMENT_AIF_DELETE
+
/*
* These macros convert from physical channels to virtual channels
*/
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 94d2954..7007914 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -966,6 +966,16 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
device_config_needed =
(((__le32 *)aifcmd->data)[0] ==
cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
+ if (device_config_needed == ADD) {
+ device = scsi_device_lookup(dev->scsi_host_ptr,
+ channel,
+ id,
+ lun);
+ if (device) {
+ scsi_remove_device(device);
+ scsi_device_put(device);
+ }
+ }
break;
case AifEnEnclosureManagement:
@@ -1123,6 +1133,9 @@ retry_next:
if (device) {
switch (device_config_needed) {
case DELETE:
+#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
+ scsi_remove_device(device);
+#else
if (scsi_device_online(device)) {
scsi_device_set_state(device, SDEV_OFFLINE);
sdev_printk(KERN_INFO, device,
@@ -1131,6 +1144,7 @@ retry_next:
"array deleted" :
"enclosure services event");
}
+#endif
break;
case ADD:
if (!scsi_device_online(device)) {
@@ -1145,12 +1159,16 @@ retry_next:
case CHANGE:
if ((channel == CONTAINER_CHANNEL)
&& (!dev->fsa_dev[container].valid)) {
+#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
+ scsi_remove_device(device);
+#else
if (!scsi_device_online(device))
break;
scsi_device_set_state(device, SDEV_OFFLINE);
sdev_printk(KERN_INFO, device,
"Device offlined - %s\n",
"array failed");
+#endif
break;
}
scsi_rescan_device(&device->sdev_gendev);
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 5877f29..a4e04c5 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -59,7 +59,8 @@
struct device_attribute *arcmsr_host_attrs[];
-static ssize_t arcmsr_sysfs_iop_message_read(struct kobject *kobj,
+static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
+ struct kobject *kobj,
struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
@@ -105,7 +106,8 @@ static ssize_t arcmsr_sysfs_iop_message_read(struct kobject *kobj,
return (allxfer_len);
}
-static ssize_t arcmsr_sysfs_iop_message_write(struct kobject *kobj,
+static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
+ struct kobject *kobj,
struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
@@ -153,7 +155,8 @@ static ssize_t arcmsr_sysfs_iop_message_write(struct kobject *kobj,
}
}
-static ssize_t arcmsr_sysfs_iop_message_clear(struct kobject *kobj,
+static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp,
+ struct kobject *kobj,
struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
diff --git a/drivers/scsi/bfa/bfa_cb_ioim_macros.h b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
index 961fe43..53a616f 100644
--- a/drivers/scsi/bfa/bfa_cb_ioim_macros.h
+++ b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
@@ -117,35 +117,6 @@ bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio)
}
/**
- * Get SG element for the I/O request given the SG element index
- */
-static inline union bfi_addr_u
-bfa_cb_ioim_get_sgaddr(struct bfad_ioim_s *dio, int sgeid)
-{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
- struct scatterlist *sge;
- u64 addr;
-
- sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid;
- addr = (u64) sg_dma_address(sge);
-
- return *((union bfi_addr_u *) &addr);
-}
-
-static inline u32
-bfa_cb_ioim_get_sglen(struct bfad_ioim_s *dio, int sgeid)
-{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
- struct scatterlist *sge;
- u32 len;
-
- sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid;
- len = sg_dma_len(sge);
-
- return len;
-}
-
-/**
* Get Command Reference Number for the I/O request. 0 if none.
*/
static inline u8
diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c
index 5b107ab..687f3d6 100644
--- a/drivers/scsi/bfa/bfa_ioim.c
+++ b/drivers/scsi/bfa/bfa_ioim.c
@@ -731,6 +731,9 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
static struct fcp_cmnd_s cmnd_z0 = { 0 };
struct bfi_sge_s *sge;
u32 pgdlen = 0;
+ u64 addr;
+ struct scatterlist *sg;
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
/**
* check for room in queue to send request now
@@ -754,8 +757,10 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
*/
sge = &m->sges[0];
if (ioim->nsges) {
- sge->sga = bfa_cb_ioim_get_sgaddr(ioim->dio, 0);
- pgdlen = bfa_cb_ioim_get_sglen(ioim->dio, 0);
+ sg = (struct scatterlist *)scsi_sglist(cmnd);
+ addr = bfa_os_sgaddr(sg_dma_address(sg));
+ sge->sga = *(union bfi_addr_u *) &addr;
+ pgdlen = sg_dma_len(sg);
sge->sg_len = pgdlen;
sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
@@ -868,10 +873,16 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
struct bfi_sge_s *sge;
struct bfa_sgpg_s *sgpg;
u32 pgcumsz;
+ u64 addr;
+ struct scatterlist *sg;
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
sgeid = BFI_SGE_INLINE;
ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);
+ sg = scsi_sglist(cmnd);
+ sg = sg_next(sg);
+
do {
sge = sgpg->sgpg->sges;
nsges = ioim->nsges - sgeid;
@@ -879,9 +890,10 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
nsges = BFI_SGPG_DATA_SGES;
pgcumsz = 0;
- for (i = 0; i < nsges; i++, sge++, sgeid++) {
- sge->sga = bfa_cb_ioim_get_sgaddr(ioim->dio, sgeid);
- sge->sg_len = bfa_cb_ioim_get_sglen(ioim->dio, sgeid);
+ for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
+ addr = bfa_os_sgaddr(sg_dma_address(sg));
+ sge->sga = *(union bfi_addr_u *) &addr;
+ sge->sg_len = sg_dma_len(sg);
pgcumsz += sge->sg_len;
/**
diff --git a/drivers/scsi/bfa/bfa_os_inc.h b/drivers/scsi/bfa/bfa_os_inc.h
index 10a89f7..bd1cd3e 100644
--- a/drivers/scsi/bfa/bfa_os_inc.h
+++ b/drivers/scsi/bfa/bfa_os_inc.h
@@ -50,6 +50,10 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_transport.h>
+#ifdef __BIG_ENDIAN
+#define __BIGENDIAN
+#endif
+
#define BFA_ERR KERN_ERR
#define BFA_WARNING KERN_WARNING
#define BFA_NOTICE KERN_NOTICE
@@ -123,6 +127,15 @@ int bfa_os_MWB(void *);
(((_x) & 0x00ff0000) >> 8) | \
(((_x) & 0xff000000) >> 24))
+#define bfa_os_swap_sgaddr(_x) ((u64)( \
+ (((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \
+ (((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \
+ (((u64)(_x) & (u64)0x0000000000ff0000ull) << 32) | \
+ (((u64)(_x) & (u64)0x00000000ff000000ull) << 32) | \
+ (((u64)(_x) & (u64)0x000000ff00000000ull) >> 32) | \
+ (((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32) | \
+ (((u64)(_x) & (u64)0x00ff000000000000ull) >> 32) | \
+ (((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))
#ifndef __BIGENDIAN
#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \
@@ -133,6 +146,7 @@ int bfa_os_MWB(void *);
#define bfa_os_hton3b(_x) bfa_swap_3b(_x)
#define bfa_os_wtole(_x) (_x)
+#define bfa_os_sgaddr(_x) (_x)
#else
@@ -141,6 +155,7 @@ int bfa_os_MWB(void *);
#define bfa_os_hton3b(_x) (_x)
#define bfa_os_htonll(_x) (_x)
#define bfa_os_wtole(_x) bfa_os_swap32(_x)
+#define bfa_os_sgaddr(_x) bfa_os_swap_sgaddr(_x)
#endif
@@ -161,12 +176,12 @@ int bfa_os_MWB(void *);
#define bfa_os_addr_t char __iomem *
#define bfa_os_panic()
-#define bfa_os_reg_read(_raddr) bfa_os_wtole(readl(_raddr))
-#define bfa_os_reg_write(_raddr, _val) writel(bfa_os_wtole((_val)), (_raddr))
+#define bfa_os_reg_read(_raddr) readl(_raddr)
+#define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr))
#define bfa_os_mem_read(_raddr, _off) \
- bfa_os_ntohl(readl(((_raddr) + (_off))))
+ bfa_os_swap32(readl(((_raddr) + (_off))))
#define bfa_os_mem_write(_raddr, _off, _val) \
- writel(bfa_os_htonl((_val)), ((_raddr) + (_off)))
+ writel(bfa_os_swap32((_val)), ((_raddr) + (_off)))
#define BFA_TRC_TS(_trcm) \
({ \
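Expanding the eight shift terms of the bfa_os_swap_sgaddr() macro added above shows that it simply exchanges the two 32-bit halves of a 64-bit scatter-gather address while preserving the byte order within each half, which is what the big-endian bfa_os_sgaddr() branch needs. A standalone check, illustrative only and not part of the patch:

#include <assert.h>
#include <stdint.h>

/* Equivalent of bfa_os_swap_sgaddr(): swap the 32-bit halves of a 64-bit value. */
static uint64_t sgaddr_half_swap(uint64_t x)
{
	return (x << 32) | (x >> 32);
}

int main(void)
{
	assert(sgaddr_half_swap(0x0123456789abcdefULL) == 0x89abcdef01234567ULL);
	return 0;
}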
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 13f5feb..d4fc428 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -33,7 +33,7 @@
#include <fcb/bfa_fcb.h>
BFA_TRC_FILE(LDRV, BFAD);
-static DEFINE_MUTEX(bfad_mutex);
+DEFINE_MUTEX(bfad_mutex);
LIST_HEAD(bfad_list);
static int bfad_inst;
int bfad_supported_fc4s;
@@ -299,8 +299,6 @@ bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv)
complete(vport_drv->comp_del);
return;
}
-
- kfree(vport_drv);
}
/**
@@ -483,7 +481,7 @@ ext:
*/
bfa_status_t
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
- struct bfa_port_cfg_s *port_cfg)
+ struct bfa_port_cfg_s *port_cfg, struct device *dev)
{
struct bfad_vport_s *vport;
int rc = BFA_STATUS_OK;
@@ -506,7 +504,8 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
goto ext_free_vport;
if (port_cfg->roles & BFA_PORT_ROLE_FCP_IM) {
- rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port);
+ rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
+ dev);
if (rc != BFA_STATUS_OK)
goto ext_free_fcs_vport;
}
@@ -591,7 +590,6 @@ bfad_init_timer(struct bfad_s *bfad)
int
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
{
- unsigned long bar0_len;
int rc = -ENODEV;
if (pci_enable_device(pdev)) {
@@ -611,9 +609,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
goto out_release_region;
}
- bfad->pci_bar0_map = pci_resource_start(pdev, 0);
- bar0_len = pci_resource_len(pdev, 0);
- bfad->pci_bar0_kva = ioremap(bfad->pci_bar0_map, bar0_len);
+ bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
if (bfad->pci_bar0_kva == NULL) {
BFA_PRINTF(BFA_ERR, "Fail to map bar0\n");
@@ -646,11 +642,7 @@ out:
void
bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
{
-#if defined(__ia64__)
pci_iounmap(pdev, bfad->pci_bar0_kva);
-#else
- iounmap(bfad->pci_bar0_kva);
-#endif
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
@@ -848,7 +840,8 @@ bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role)
goto out;
}
- rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port);
+ rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
+ &bfad->pcidev->dev);
if (rc != BFA_STATUS_OK)
goto out;
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 6a2efdd..e477bfb 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -364,6 +364,152 @@ bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
}
+static int
+bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ char *vname = fc_vport->symbolic_name;
+ struct Scsi_Host *shost = fc_vport->shost;
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfa_port_cfg_s port_cfg;
+ int status = 0, rc;
+ unsigned long flags;
+
+ memset(&port_cfg, 0, sizeof(port_cfg));
+
+ port_cfg.pwwn = wwn_to_u64((u8 *) &fc_vport->port_name);
+ port_cfg.nwwn = wwn_to_u64((u8 *) &fc_vport->node_name);
+
+ if (strlen(vname) > 0)
+ strcpy((char *)&port_cfg.sym_name, vname);
+
+ port_cfg.roles = BFA_PORT_ROLE_FCP_IM;
+ rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev);
+
+ if (rc == BFA_STATUS_OK) {
+ struct bfad_vport_s *vport;
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct Scsi_Host *vshost;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0,
+ port_cfg.pwwn);
+ if (fcs_vport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return VPCERR_BAD_WWN;
+ }
+
+ fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+ if (disable) {
+ bfa_fcs_vport_stop(fcs_vport);
+ fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ vport = fcs_vport->vport_drv;
+ vshost = vport->drv_port.im_port->shost;
+ fc_host_node_name(vshost) = wwn_to_u64((u8 *) &port_cfg.nwwn);
+ fc_host_port_name(vshost) = wwn_to_u64((u8 *) &port_cfg.pwwn);
+ fc_vport->dd_data = vport;
+ vport->drv_port.im_port->fc_vport = fc_vport;
+
+ } else if (rc == BFA_STATUS_INVALID_WWN)
+ return VPCERR_BAD_WWN;
+ else if (rc == BFA_STATUS_VPORT_EXISTS)
+ return VPCERR_BAD_WWN;
+ else if (rc == BFA_STATUS_VPORT_MAX)
+ return VPCERR_NO_FABRIC_SUPP;
+ else if (rc == BFA_STATUS_VPORT_WWN_BP)
+ return VPCERR_BAD_WWN;
+ else
+ return FC_VPORT_FAILED;
+
+ return status;
+}
+
+static int
+bfad_im_vport_delete(struct fc_vport *fc_vport)
+{
+ struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) vport->drv_port.im_port;
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfad_port_s *port;
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct Scsi_Host *vshost;
+ wwn_t pwwn;
+ int rc;
+ unsigned long flags;
+ struct completion fcomp;
+
+ if (im_port->flags & BFAD_PORT_DELETE)
+ goto free_scsi_host;
+
+ port = im_port->port;
+
+ vshost = vport->drv_port.im_port->shost;
+ pwwn = wwn_to_u64((u8 *) &fc_host_port_name(vshost));
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (fcs_vport == NULL)
+ return VPCERR_BAD_WWN;
+
+ vport->drv_port.flags |= BFAD_PORT_DELETE;
+
+ vport->comp_del = &fcomp;
+ init_completion(vport->comp_del);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ rc = bfa_fcs_vport_delete(&vport->fcs_vport);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ wait_for_completion(vport->comp_del);
+
+free_scsi_host:
+ bfad_os_scsi_host_free(bfad, im_port);
+
+ kfree(vport);
+
+ return 0;
+}
+
+static int
+bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ struct bfad_vport_s *vport;
+ struct bfad_s *bfad;
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct Scsi_Host *vshost;
+ wwn_t pwwn;
+ unsigned long flags;
+
+ vport = (struct bfad_vport_s *)fc_vport->dd_data;
+ bfad = vport->drv_port.bfad;
+ vshost = vport->drv_port.im_port->shost;
+ pwwn = wwn_to_u64((u8 *) &fc_vport->port_name);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (fcs_vport == NULL)
+ return VPCERR_BAD_WWN;
+
+ if (disable) {
+ bfa_fcs_vport_stop(fcs_vport);
+ fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+ } else {
+ bfa_fcs_vport_start(fcs_vport);
+ fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+ }
+
+ return 0;
+}
+
struct fc_function_template bfad_im_fc_function_template = {
/* Target dynamic attributes */
@@ -413,6 +559,61 @@ struct fc_function_template bfad_im_fc_function_template = {
.show_rport_dev_loss_tmo = 1,
.get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
+
+ .vport_create = bfad_im_vport_create,
+ .vport_delete = bfad_im_vport_delete,
+ .vport_disable = bfad_im_vport_disable,
+};
+
+struct fc_function_template bfad_im_vport_fc_function_template = {
+
+ /* Target dynamic attributes */
+ .get_starget_port_id = bfad_im_get_starget_port_id,
+ .show_starget_port_id = 1,
+ .get_starget_node_name = bfad_im_get_starget_node_name,
+ .show_starget_node_name = 1,
+ .get_starget_port_name = bfad_im_get_starget_port_name,
+ .show_starget_port_name = 1,
+
+ /* Host dynamic attribute */
+ .get_host_port_id = bfad_im_get_host_port_id,
+ .show_host_port_id = 1,
+
+ /* Host fixed attributes */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+
+ /* More host dynamic attributes */
+ .show_host_port_type = 1,
+ .get_host_port_type = bfad_im_get_host_port_type,
+ .show_host_port_state = 1,
+ .get_host_port_state = bfad_im_get_host_port_state,
+ .show_host_active_fc4s = 1,
+ .get_host_active_fc4s = bfad_im_get_host_active_fc4s,
+ .show_host_speed = 1,
+ .get_host_speed = bfad_im_get_host_speed,
+ .show_host_fabric_name = 1,
+ .get_host_fabric_name = bfad_im_get_host_fabric_name,
+
+ .show_host_symbolic_name = 1,
+
+ /* Statistics */
+ .get_fc_host_stats = bfad_im_get_stats,
+ .reset_fc_host_stats = bfad_im_reset_stats,
+
+ /* Allocation length for host specific data */
+ .dd_fcrport_size = sizeof(struct bfad_itnim_data_s *),
+
+ /* Remote port fixed attributes */
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_rport_dev_loss_tmo = 1,
+ .get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
+ .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
};
/**
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 107848c..6c920c1 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -162,7 +162,6 @@ struct bfad_s {
const char *pci_name;
struct bfa_pcidev_s hal_pcidev;
struct bfa_ioc_pci_attr_s pci_attr;
- unsigned long pci_bar0_map;
void __iomem *pci_bar0_kva;
struct completion comp;
struct completion suspend;
@@ -254,7 +253,7 @@ do { \
bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
- struct bfa_port_cfg_s *port_cfg);
+ struct bfa_port_cfg_s *port_cfg, struct device *dev);
bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
struct bfa_port_cfg_s *port_cfg);
bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role);
@@ -294,5 +293,6 @@ extern struct list_head bfad_list;
extern int bfa_lun_queue_depth;
extern int bfad_supported_fc4s;
extern int bfa_linkup_delay;
+extern struct mutex bfad_mutex;
#endif /* __BFAD_DRV_H__ */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 78f42aa..5b7cf53 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -30,6 +30,7 @@ BFA_TRC_FILE(LDRV, IM);
DEFINE_IDR(bfad_im_port_index);
struct scsi_transport_template *bfad_im_scsi_transport_template;
+struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
static void bfad_im_itnim_work_handler(struct work_struct *work);
static int bfad_im_queuecommand(struct scsi_cmnd *cmnd,
void (*done)(struct scsi_cmnd *));
@@ -252,7 +253,6 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
struct bfa_itnim_s *bfa_itnim;
bfa_status_t rc = BFA_STATUS_OK;
- bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
if (!tskim) {
BFA_DEV_PRINTF(bfad, BFA_ERR,
@@ -513,11 +513,14 @@ void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
* Allocate a Scsi_Host for a port.
*/
int
-bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
+bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
+ struct device *dev)
{
int error = 1;
+ mutex_lock(&bfad_mutex);
if (!idr_pre_get(&bfad_im_port_index, GFP_KERNEL)) {
+ mutex_unlock(&bfad_mutex);
printk(KERN_WARNING "idr_pre_get failure\n");
goto out;
}
@@ -525,10 +528,13 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
error = idr_get_new(&bfad_im_port_index, im_port,
&im_port->idr_id);
if (error) {
+ mutex_unlock(&bfad_mutex);
printk(KERN_WARNING "idr_get_new failure\n");
goto out;
}
+ mutex_unlock(&bfad_mutex);
+
im_port->shost = bfad_os_scsi_host_alloc(im_port, bfad);
if (!im_port->shost) {
error = 1;
@@ -542,12 +548,15 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
im_port->shost->max_lun = MAX_FCP_LUN;
im_port->shost->max_cmd_len = 16;
im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;
- im_port->shost->transportt = bfad_im_scsi_transport_template;
+ if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
+ im_port->shost->transportt = bfad_im_scsi_transport_template;
+ else
+ im_port->shost->transportt =
+ bfad_im_scsi_vport_transport_template;
- error = bfad_os_scsi_add_host(im_port->shost, im_port, bfad);
+ error = scsi_add_host(im_port->shost, dev);
if (error) {
- printk(KERN_WARNING "bfad_os_scsi_add_host failure %d\n",
- error);
+ printk(KERN_WARNING "scsi_add_host failure %d\n", error);
goto out_fc_rel;
}
@@ -559,7 +568,9 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
out_fc_rel:
scsi_host_put(im_port->shost);
out_free_idr:
+ mutex_lock(&bfad_mutex);
idr_remove(&bfad_im_port_index, im_port->idr_id);
+ mutex_unlock(&bfad_mutex);
out:
return error;
}
@@ -567,8 +578,6 @@ out:
void
bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
- unsigned long flags;
-
bfa_trc(bfad, bfad->inst_no);
bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_HOST_FREE,
im_port->shost->host_no);
@@ -578,9 +587,9 @@ bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
scsi_remove_host(im_port->shost);
scsi_host_put(im_port->shost);
- spin_lock_irqsave(&bfad->bfad_lock, flags);
+ mutex_lock(&bfad_mutex);
idr_remove(&bfad_im_port_index, im_port->idr_id);
- spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ mutex_unlock(&bfad_mutex);
}
static void
@@ -589,9 +598,11 @@ bfad_im_port_delete_handler(struct work_struct *work)
struct bfad_im_port_s *im_port =
container_of(work, struct bfad_im_port_s, port_delete_work);
- bfad_im_scsi_host_free(im_port->bfad, im_port);
- bfad_im_port_clean(im_port);
- kfree(im_port);
+ if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
+ im_port->flags |= BFAD_PORT_DELETE;
+ fc_vport_terminate(im_port->fc_vport);
+ }
+
}
bfa_status_t
@@ -690,23 +701,6 @@ bfad_im_probe_undo(struct bfad_s *bfad)
}
}
-
-
-
-int
-bfad_os_scsi_add_host(struct Scsi_Host *shost, struct bfad_im_port_s *im_port,
- struct bfad_s *bfad)
-{
- struct device *dev;
-
- if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
- dev = &bfad->pcidev->dev;
- else
- dev = &bfad->pport.im_port->shost->shost_gendev;
-
- return scsi_add_host(shost, dev);
-}
-
struct Scsi_Host *
bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
{
@@ -725,7 +719,8 @@ bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
void
bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
- flush_workqueue(bfad->im->drv_workq);
+ if (!(im_port->flags & BFAD_PORT_DELETE))
+ flush_workqueue(bfad->im->drv_workq);
bfad_im_scsi_host_free(im_port->bfad, im_port);
bfad_im_port_clean(im_port);
kfree(im_port);
@@ -830,6 +825,13 @@ bfad_im_module_init(void)
if (!bfad_im_scsi_transport_template)
return BFA_STATUS_ENOMEM;
+ bfad_im_scsi_vport_transport_template =
+ fc_attach_transport(&bfad_im_vport_fc_function_template);
+ if (!bfad_im_scsi_vport_transport_template) {
+ fc_release_transport(bfad_im_scsi_transport_template);
+ return BFA_STATUS_ENOMEM;
+ }
+
return BFA_STATUS_OK;
}
@@ -838,6 +840,8 @@ bfad_im_module_exit(void)
{
if (bfad_im_scsi_transport_template)
fc_release_transport(bfad_im_scsi_transport_template);
+ if (bfad_im_scsi_vport_transport_template)
+ fc_release_transport(bfad_im_scsi_vport_transport_template);
}
void
@@ -938,6 +942,7 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port)));
fc_host_port_name(host) =
bfa_os_htonll((bfa_fcs_port_get_pwwn(port->fcs_port)));
+ fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
fc_host_supported_classes(host) = FC_COS_CLASS3;
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 85ab2da..973cab4 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -34,7 +34,7 @@ void bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port);
void bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port);
void bfad_im_port_clean(struct bfad_im_port_s *im_port);
int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
- struct bfad_im_port_s *im_port);
+ struct bfad_im_port_s *im_port, struct device *dev);
void bfad_im_scsi_host_free(struct bfad_s *bfad,
struct bfad_im_port_s *im_port);
@@ -64,9 +64,11 @@ struct bfad_im_port_s {
struct work_struct port_delete_work;
int idr_id;
u16 cur_scsi_id;
+ u16 flags;
struct list_head binding_list;
struct Scsi_Host *shost;
struct list_head itnim_mapped_list;
+ struct fc_vport *fc_vport;
};
enum bfad_itnim_state {
@@ -140,6 +142,8 @@ void bfad_im_itnim_unmap(struct bfad_im_port_s *im_port,
extern struct scsi_host_template bfad_im_scsi_host_template;
extern struct scsi_host_template bfad_im_vport_template;
extern struct fc_function_template bfad_im_fc_function_template;
+extern struct fc_function_template bfad_im_vport_fc_function_template;
extern struct scsi_transport_template *bfad_im_scsi_transport_template;
+extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
#endif
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 18352ff..3a66ca2 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -347,6 +347,7 @@ int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
+ login_wqe->flags = ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN;
login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
login_wqe->resp_bd_list_addr_hi =
@@ -356,7 +357,6 @@ int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
(bnx2i_conn->gen_pdu.resp_buf_size <<
ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
login_wqe->resp_buffer = dword;
- login_wqe->flags = 0;
login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
login_wqe->bd_list_addr_hi =
(u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 5d9296c..af6a00a 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -17,8 +17,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;
#define DRV_MODULE_NAME "bnx2i"
-#define DRV_MODULE_VERSION "2.1.0"
-#define DRV_MODULE_RELDATE "Dec 06, 2009"
+#define DRV_MODULE_VERSION "2.1.1"
+#define DRV_MODULE_RELDATE "Mar 24, 2010"
static char version[] __devinitdata =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -26,7 +26,8 @@ static char version[] __devinitdata =
MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711"
+ " iSCSI Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -289,6 +290,7 @@ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
int rc;
mutex_lock(&bnx2i_dev_lock);
+ hba->cnic = cnic;
rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
if (!rc) {
hba->age++;
@@ -335,8 +337,7 @@ void bnx2i_ulp_init(struct cnic_dev *dev)
if (bnx2i_init_one(hba, dev)) {
printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
bnx2i_free_hba(hba);
- } else
- hba->cnic = dev;
+ }
}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
index d0ab23a..685af36 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_init.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
@@ -104,8 +104,10 @@ static int __init cxgb3i_init_module(void)
return err;
err = cxgb3i_pdu_init();
- if (err < 0)
+ if (err < 0) {
+ cxgb3i_iscsi_cleanup();
return err;
+ }
cxgb3_register_client(&t3c_client);
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index e8a0bc3..6faf472 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -285,13 +285,11 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
switch (cmd) {
case MODE_SELECT:
len = sizeof(short_trespass);
- rq->cmd_flags |= REQ_RW;
rq->cmd[1] = 0x10;
rq->cmd[4] = len;
break;
case MODE_SELECT_10:
len = sizeof(long_trespass);
- rq->cmd_flags |= REQ_RW;
rq->cmd[1] = 0x10;
rq->cmd[8] = len;
break;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index f01b9b4..9276121 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -74,6 +74,7 @@ static int fcoe_rcv(struct sk_buff *, struct net_device *,
static int fcoe_percpu_receive_thread(void *);
static void fcoe_clean_pending_queue(struct fc_lport *);
static void fcoe_percpu_clean(struct fc_lport *);
+static int fcoe_link_speed_update(struct fc_lport *);
static int fcoe_link_ok(struct fc_lport *);
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
@@ -146,6 +147,7 @@ static int fcoe_vport_destroy(struct fc_vport *);
static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
+static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
static struct libfc_function_template fcoe_libfc_fcn_templ = {
.frame_send = fcoe_xmit,
@@ -153,6 +155,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
.ddp_done = fcoe_ddp_done,
.elsct_send = fcoe_elsct_send,
.get_lesb = fcoe_get_lesb,
+ .lport_set_port_id = fcoe_set_port_id,
};
struct fc_function_template fcoe_transport_function = {
@@ -309,10 +312,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
* for multiple unicast MACs.
*/
memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
- dev_unicast_add(netdev, flogi_maddr);
+ dev_uc_add(netdev, flogi_maddr);
if (fip->spma)
- dev_unicast_add(netdev, fip->ctl_src_addr);
- dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
+ dev_uc_add(netdev, fip->ctl_src_addr);
+ dev_mc_add(netdev, FIP_ALL_ENODE_MACS);
/*
* setup the receive function from ethernet driver
@@ -395,10 +398,10 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
/* Delete secondary MAC addresses */
memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
- dev_unicast_delete(netdev, flogi_maddr);
+ dev_uc_del(netdev, flogi_maddr);
if (fip->spma)
- dev_unicast_delete(netdev, fip->ctl_src_addr);
- dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
+ dev_uc_del(netdev, fip->ctl_src_addr);
+ dev_mc_del(netdev, FIP_ALL_ENODE_MACS);
/* Tell the LLD we are done w/ FCoE */
ops = netdev->netdev_ops;
@@ -491,9 +494,9 @@ static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
rtnl_lock();
if (!is_zero_ether_addr(port->data_src_addr))
- dev_unicast_delete(fcoe->netdev, port->data_src_addr);
+ dev_uc_del(fcoe->netdev, port->data_src_addr);
if (!is_zero_ether_addr(addr))
- dev_unicast_add(fcoe->netdev, addr);
+ dev_uc_add(fcoe->netdev, addr);
memcpy(port->data_src_addr, addr, ETH_ALEN);
rtnl_unlock();
}
@@ -629,6 +632,8 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
port->fcoe_pending_queue_active = 0;
setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
+ fcoe_link_speed_update(lport);
+
if (!lport->vport) {
/*
* Use NAA 1&2 (FC-FS Rev. 2.0, Sec. 15) to generate WWNN/WWPN:
@@ -653,15 +658,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
/**
* fcoe_shost_config() - Set up the SCSI host associated with a local port
* @lport: The local port
- * @shost: The SCSI host to associate with the local port
* @dev: The device associated with the SCSI host
*
* Must be called after fcoe_lport_config() and fcoe_netdev_config()
*
* Returns: 0 for success
*/
-static int fcoe_shost_config(struct fc_lport *lport, struct Scsi_Host *shost,
- struct device *dev)
+static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
{
int rc = 0;
@@ -669,6 +672,8 @@ static int fcoe_shost_config(struct fc_lport *lport, struct Scsi_Host *shost,
lport->host->max_lun = FCOE_MAX_LUN;
lport->host->max_id = FCOE_MAX_FCP_TARGET;
lport->host->max_channel = 0;
+ lport->host->max_cmd_len = FCOE_MAX_CMD_LEN;
+
if (lport->vport)
lport->host->transportt = fcoe_vport_transport_template;
else
@@ -796,6 +801,12 @@ skip_oem:
/**
* fcoe_if_destroy() - Tear down a SW FCoE instance
* @lport: The local port to be destroyed
+ *
+ * Locking: must be called with the RTNL mutex held. This function drops
+ * the RTNL mutex itself, because keeping it held across the synchronous
+ * FIP worker cancellation done via fcoe_interface_put() would trigger a
+ * circular locking warning.
+ *
*/
static void fcoe_if_destroy(struct fc_lport *lport)
{
@@ -818,9 +829,8 @@ static void fcoe_if_destroy(struct fc_lport *lport)
/* Free existing transmit skbs */
fcoe_clean_pending_queue(lport);
- rtnl_lock();
if (!is_zero_ether_addr(port->data_src_addr))
- dev_unicast_delete(netdev, port->data_src_addr);
+ dev_uc_del(netdev, port->data_src_addr);
rtnl_unlock();
/* receives may not be stopped until after this */
@@ -841,6 +851,7 @@ static void fcoe_if_destroy(struct fc_lport *lport)
/* Release the Scsi_Host */
scsi_host_put(lport->host);
+ module_put(THIS_MODULE);
}
/**
@@ -897,7 +908,6 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
struct net_device *netdev = fcoe->netdev;
struct fc_lport *lport = NULL;
struct fcoe_port *port;
- struct Scsi_Host *shost;
int rc;
/*
* parent is only a vport if npiv is 1,
@@ -919,7 +929,6 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
rc = -ENOMEM;
goto out;
}
- shost = lport->host;
port = lport_priv(lport);
port->lport = lport;
port->fcoe = fcoe;
@@ -934,7 +943,8 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
}
if (npiv) {
- FCOE_NETDEV_DBG(netdev, "Setting vport names, 0x%llX 0x%llX\n",
+ FCOE_NETDEV_DBG(netdev, "Setting vport names, "
+ "%16.16llx %16.16llx\n",
vport->node_name, vport->port_name);
fc_set_wwnn(lport, vport->node_name);
fc_set_wwpn(lport, vport->port_name);
@@ -949,7 +959,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
}
/* configure lport scsi host properties */
- rc = fcoe_shost_config(lport, shost, parent);
+ rc = fcoe_shost_config(lport, parent);
if (rc) {
FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
"interface\n");
@@ -1073,7 +1083,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
struct sk_buff *skb;
#ifdef CONFIG_SMP
struct fcoe_percpu_s *p0;
- unsigned targ_cpu = smp_processor_id();
+ unsigned targ_cpu = get_cpu();
#endif /* CONFIG_SMP */
FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
@@ -1129,6 +1139,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
kfree_skb(skb);
spin_unlock_bh(&p->fcoe_rx_list.lock);
}
+ put_cpu();
#else
/*
* This a non-SMP scenario where the singular Rx thread is
@@ -1297,8 +1308,8 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
return 0;
err:
- fc_lport_get_stats(lport)->ErrorFrames++;
-
+ per_cpu_ptr(lport->dev_stats, get_cpu())->ErrorFrames++;
+ put_cpu();
err2:
kfree_skb(skb);
return -1;
@@ -1444,7 +1455,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
return 0;
}
- if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
+ if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
return 0;
@@ -1527,9 +1538,10 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_shinfo(skb)->gso_size = 0;
}
/* update tx stats: regardless if LLD fails */
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->TxFrames++;
stats->TxWords += wlen;
+ put_cpu();
/* send down to lld */
fr_dev(fp) = lport;
@@ -1563,7 +1575,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
struct fc_frame_header *fh;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
- u8 *mac = NULL;
struct fcoe_port *port;
struct fcoe_hdr *hp;
@@ -1583,13 +1594,9 @@ static void fcoe_recv_frame(struct sk_buff *skb)
skb_end_pointer(skb), skb->csum,
skb->dev ? skb->dev->name : "<NULL>");
- /*
- * Save source MAC address before discarding header.
- */
port = lport_priv(lport);
if (skb_is_nonlinear(skb))
skb_linearize(skb); /* not ideal */
- mac = eth_hdr(skb)->h_source;
/*
* Frame length checks and setting up the header pointers
@@ -1598,7 +1605,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
hp = (struct fcoe_hdr *) skb_network_header(skb);
fh = (struct fc_frame_header *) skb_transport_header(skb);
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
if (stats->ErrorFrames < 5)
printk(KERN_WARNING "fcoe: FCoE version "
@@ -1607,9 +1614,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
"initiator supports version "
"%x\n", FC_FCOE_DECAPS_VER(hp),
FC_FCOE_VER);
- stats->ErrorFrames++;
- kfree_skb(skb);
- return;
+ goto drop;
}
skb_pull(skb, sizeof(struct fcoe_hdr));
@@ -1624,16 +1629,12 @@ static void fcoe_recv_frame(struct sk_buff *skb)
fr_sof(fp) = hp->fcoe_sof;
/* Copy out the CRC and EOF trailer for access */
- if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
- kfree_skb(skb);
- return;
- }
+ if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof)))
+ goto drop;
fr_eof(fp) = crc_eof.fcoe_eof;
fr_crc(fp) = crc_eof.fcoe_crc32;
- if (pskb_trim(skb, fr_len)) {
- kfree_skb(skb);
- return;
- }
+ if (pskb_trim(skb, fr_len))
+ goto drop;
/*
* We only check CRC if no offload is available and if it is
@@ -1647,25 +1648,27 @@ static void fcoe_recv_frame(struct sk_buff *skb)
fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
fh = fc_frame_header_get(fp);
- if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
- fh->fh_type == FC_TYPE_FCP) {
- fc_exch_recv(lport, fp);
- return;
- }
- if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
+ if ((fh->fh_r_ctl != FC_RCTL_DD_SOL_DATA ||
+ fh->fh_type != FC_TYPE_FCP) &&
+ (fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
if (le32_to_cpu(fr_crc(fp)) !=
~crc32(~0, skb->data, fr_len)) {
if (stats->InvalidCRCCount < 5)
printk(KERN_WARNING "fcoe: dropping "
"frame with CRC error\n");
stats->InvalidCRCCount++;
- stats->ErrorFrames++;
- fc_frame_free(fp);
- return;
+ goto drop;
}
fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
}
+ put_cpu();
fc_exch_recv(lport, fp);
+ return;
+
+drop:
+ stats->ErrorFrames++;
+ put_cpu();
+ kfree_skb(skb);
}
/**
@@ -1835,11 +1838,15 @@ static int fcoe_device_notification(struct notifier_block *notifier,
FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
"from netdev netlink\n", event);
}
+
+ fcoe_link_speed_update(lport);
+
if (link_possible && !fcoe_link_ok(lport))
fcoe_ctlr_link_up(&fcoe->ctlr);
else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->LinkFailureCount++;
+ put_cpu();
fcoe_clean_pending_queue(lport);
}
out:
@@ -1901,13 +1908,19 @@ static int fcoe_disable(const char *buffer, struct kernel_param *kp)
goto out_nodev;
}
- rtnl_lock();
+ if (!rtnl_trylock()) {
+ dev_put(netdev);
+ mutex_unlock(&fcoe_config_mutex);
+ return restart_syscall();
+ }
+
fcoe = fcoe_hostlist_lookup_port(netdev);
rtnl_unlock();
- if (fcoe)
+ if (fcoe) {
fc_fabric_logoff(fcoe->ctlr.lp);
- else
+ fcoe_ctlr_link_down(&fcoe->ctlr);
+ } else
rc = -ENODEV;
dev_put(netdev);
@@ -1950,13 +1963,20 @@ static int fcoe_enable(const char *buffer, struct kernel_param *kp)
goto out_nodev;
}
- rtnl_lock();
+ if (!rtnl_trylock()) {
+ dev_put(netdev);
+ mutex_unlock(&fcoe_config_mutex);
+ return restart_syscall();
+ }
+
fcoe = fcoe_hostlist_lookup_port(netdev);
rtnl_unlock();
- if (fcoe)
+ if (fcoe) {
+ if (!fcoe_link_ok(fcoe->ctlr.lp))
+ fcoe_ctlr_link_up(&fcoe->ctlr);
rc = fc_fabric_login(fcoe->ctlr.lp);
- else
+ } else
rc = -ENODEV;
dev_put(netdev);
@@ -1999,7 +2019,12 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
goto out_nodev;
}
- rtnl_lock();
+ if (!rtnl_trylock()) {
+ dev_put(netdev);
+ mutex_unlock(&fcoe_config_mutex);
+ return restart_syscall();
+ }
+
fcoe = fcoe_hostlist_lookup_port(netdev);
if (!fcoe) {
rtnl_unlock();
@@ -2008,9 +2033,8 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
}
list_del(&fcoe->list);
fcoe_interface_cleanup(fcoe);
- rtnl_unlock();
+ /* RTNL mutex is dropped by fcoe_if_destroy */
fcoe_if_destroy(fcoe->ctlr.lp);
- module_put(THIS_MODULE);
out_putdev:
dev_put(netdev);
@@ -2029,6 +2053,8 @@ static void fcoe_destroy_work(struct work_struct *work)
port = container_of(work, struct fcoe_port, destroy_work);
mutex_lock(&fcoe_config_mutex);
+ rtnl_lock();
+ /* RTNL mutex is dropped by fcoe_if_destroy */
fcoe_if_destroy(port->lport);
mutex_unlock(&fcoe_config_mutex);
}
@@ -2050,6 +2076,12 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
struct net_device *netdev;
mutex_lock(&fcoe_config_mutex);
+
+ if (!rtnl_trylock()) {
+ mutex_unlock(&fcoe_config_mutex);
+ return restart_syscall();
+ }
+
#ifdef CONFIG_FCOE_MODULE
/*
* Make sure the module has been initialized, and is not about to be
@@ -2058,7 +2090,7 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
*/
if (THIS_MODULE->state != MODULE_STATE_LIVE) {
rc = -ENODEV;
- goto out_nodev;
+ goto out_nomod;
}
#endif
@@ -2067,7 +2099,6 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
goto out_nomod;
}
- rtnl_lock();
netdev = fcoe_if_to_netdev(buffer);
if (!netdev) {
rc = -ENODEV;
@@ -2122,35 +2153,27 @@ out_free:
out_putdev:
dev_put(netdev);
out_nodev:
- rtnl_unlock();
module_put(THIS_MODULE);
out_nomod:
+ rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
return rc;
}
/**
- * fcoe_link_ok() - Check if the link is OK for a local port
- * @lport: The local port to check link on
- *
- * Any permanently-disqualifying conditions have been previously checked.
- * This also updates the speed setting, which may change with link for 100/1000.
- *
- * This function should probably be checking for PAUSE support at some point
- * in the future. Currently Per-priority-pause is not determinable using
- * ethtool, so we shouldn't be restrictive until that problem is resolved.
- *
- * Returns: 0 if link is OK for use by FCoE.
+ * fcoe_link_speed_update() - Update the supported and actual link speeds
+ * @lport: The local port to update speeds for
*
+ * Returns: 0 if the ethtool query was successful
+ * -1 if the ethtool query failed
*/
-int fcoe_link_ok(struct fc_lport *lport)
+int fcoe_link_speed_update(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
struct net_device *netdev = port->fcoe->netdev;
struct ethtool_cmd ecmd = { ETHTOOL_GSET };
- if ((netdev->flags & IFF_UP) && netif_carrier_ok(netdev) &&
- (!dev_ethtool_get_settings(netdev, &ecmd))) {
+ if (!dev_ethtool_get_settings(netdev, &ecmd)) {
lport->link_supported_speeds &=
~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
if (ecmd.supported & (SUPPORTED_1000baseT_Half |
@@ -2170,6 +2193,23 @@ int fcoe_link_ok(struct fc_lport *lport)
}
/**
+ * fcoe_link_ok() - Check if the link is OK for a local port
+ * @lport: The local port to check link on
+ *
+ * Returns: 0 if link is UP and OK, -1 if not
+ *
+ */
+int fcoe_link_ok(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct net_device *netdev = port->fcoe->netdev;
+
+ if (netif_oper_up(netdev))
+ return 0;
+ return -1;
+}
+
+/**
* fcoe_percpu_clean() - Clear all pending skbs for an local port
* @lport: The local port whose skbs are to be cleared
*
@@ -2631,3 +2671,25 @@ static void fcoe_get_lesb(struct fc_lport *lport,
lesb->lesb_miss_fka = htonl(mdac);
lesb->lesb_fcs_error = htonl(dev_get_stats(netdev)->rx_crc_errors);
}
+
+/**
+ * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
+ * @lport: the local port
+ * @port_id: the port ID
+ * @fp: the received frame, if any, that caused the port_id to be set.
+ *
+ * This routine handles the case where we received a FLOGI and are
+ * entering point-to-point mode. We need to call fcoe_ctlr_recv_flogi()
+ * so it can set the non-mapped mode and gateway address.
+ *
+ * The FLOGI LS_ACC is handled by fcoe_flogi_resp().
+ */
+static void fcoe_set_port_id(struct fc_lport *lport,
+ u32 port_id, struct fc_frame *fp)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct fcoe_interface *fcoe = port->fcoe;
+
+ if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
+ fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
+}
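fcoe_set_port_id() only forwards to fcoe_ctlr_recv_flogi() when the frame that assigned the ID is a FLOGI request, i.e. the point-to-point case the comment describes. It would be registered through libfc's function template; the operation name below is an assumption, since this hunk does not show the registration itself:

/* Sketch (operation name assumed): hooking the callback into libfc. */
static struct libfc_function_template fcoe_libfc_fcn_templ = {
	.lport_set_port_id = fcoe_set_port_id,
	/* ... remaining libfc operations ... */
};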
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 3440da4..50aaa4b 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -51,7 +51,7 @@ MODULE_LICENSE("GPL v2");
#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */
static void fcoe_ctlr_timeout(unsigned long);
-static void fcoe_ctlr_link_work(struct work_struct *);
+static void fcoe_ctlr_timer_work(struct work_struct *);
static void fcoe_ctlr_recv_work(struct work_struct *);
static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
@@ -116,7 +116,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip)
spin_lock_init(&fip->lock);
fip->flogi_oxid = FC_XID_UNKNOWN;
setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
- INIT_WORK(&fip->link_work, fcoe_ctlr_link_work);
+ INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work);
INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work);
skb_queue_head_init(&fip->fip_recv_list);
}
@@ -164,7 +164,7 @@ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
fcoe_ctlr_reset_fcfs(fip);
spin_unlock_bh(&fip->lock);
del_timer_sync(&fip->timer);
- cancel_work_sync(&fip->link_work);
+ cancel_work_sync(&fip->timer_work);
}
EXPORT_SYMBOL(fcoe_ctlr_destroy);
@@ -257,14 +257,10 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
{
spin_lock_bh(&fip->lock);
if (fip->state == FIP_ST_NON_FIP || fip->state == FIP_ST_AUTO) {
- fip->last_link = 1;
- fip->link = 1;
spin_unlock_bh(&fip->lock);
fc_linkup(fip->lp);
} else if (fip->state == FIP_ST_LINK_WAIT) {
fip->state = fip->mode;
- fip->last_link = 1;
- fip->link = 1;
spin_unlock_bh(&fip->lock);
if (fip->state == FIP_ST_AUTO)
LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n");
@@ -306,9 +302,7 @@ int fcoe_ctlr_link_down(struct fcoe_ctlr *fip)
LIBFCOE_FIP_DBG(fip, "link down.\n");
spin_lock_bh(&fip->lock);
fcoe_ctlr_reset(fip);
- link_dropped = fip->link;
- fip->link = 0;
- fip->last_link = 0;
+ link_dropped = fip->state != FIP_ST_LINK_WAIT;
fip->state = FIP_ST_LINK_WAIT;
spin_unlock_bh(&fip->lock);
@@ -349,7 +343,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
fcf = fip->sel_fcf;
lp = fip->lp;
- if (!fcf || !fc_host_port_id(lp->host))
+ if (!fcf || !lp->port_id)
return;
len = sizeof(*kal) + ports * sizeof(*vn);
@@ -380,8 +374,8 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
vn->fd_desc.fip_dtype = FIP_DT_VN_ID;
vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW;
memcpy(vn->fd_mac, fip->get_src_addr(lport), ETH_ALEN);
- hton24(vn->fd_fc_id, fc_host_port_id(lp->host));
- put_unaligned_be64(lp->wwpn, &vn->fd_wwpn);
+ hton24(vn->fd_fc_id, lport->port_id);
+ put_unaligned_be64(lport->wwpn, &vn->fd_wwpn);
}
skb_put(skb, len);
skb->protocol = htons(ETH_P_FIP);
@@ -445,13 +439,18 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW;
mac = (struct fip_mac_desc *)skb_put(skb, sizeof(*mac));
- memset(mac, 0, sizeof(mac));
+ memset(mac, 0, sizeof(*mac));
mac->fd_desc.fip_dtype = FIP_DT_MAC;
mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW;
- if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC)
+ if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) {
memcpy(mac->fd_mac, fip->get_src_addr(lport), ETH_ALEN);
- else if (fip->spma)
+ } else if (fip_flags & FIP_FL_SPMA) {
+ LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with SPMA\n");
memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN);
+ } else {
+ LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with FPMA\n");
+ /* FPMA only FLOGI must leave the MAC desc set to all 0s */
+ }
skb->protocol = htons(ETH_P_FIP);
skb_reset_mac_header(skb);
@@ -556,7 +555,7 @@ EXPORT_SYMBOL(fcoe_ctlr_els_send);
* fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller
* @fip: The FCoE controller to free FCFs on
*
- * Called with lock held.
+ * Called with lock held and preemption disabled.
*
* An FCF is considered old if we have missed three advertisements.
* That is, there have been no valid advertisement from it for three
@@ -573,17 +572,20 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
struct fcoe_fcf *next;
unsigned long sel_time = 0;
unsigned long mda_time = 0;
+ struct fcoe_dev_stats *stats;
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
mda_time = fcf->fka_period + (fcf->fka_period >> 1);
if ((fip->sel_fcf == fcf) &&
(time_after(jiffies, fcf->time + mda_time))) {
mod_timer(&fip->timer, jiffies + mda_time);
- fc_lport_get_stats(fip->lp)->MissDiscAdvCount++;
+ stats = per_cpu_ptr(fip->lp->dev_stats,
+ smp_processor_id());
+ stats->MissDiscAdvCount++;
printk(KERN_INFO "libfcoe: host%d: Missing Discovery "
- "Advertisement for fab %llx count %lld\n",
+ "Advertisement for fab %16.16llx count %lld\n",
fip->lp->host->host_no, fcf->fabric_name,
- fc_lport_get_stats(fip->lp)->MissDiscAdvCount);
+ stats->MissDiscAdvCount);
}
if (time_after(jiffies, fcf->time + fcf->fka_period * 3 +
msecs_to_jiffies(FIP_FCF_FUZZ * 3))) {
@@ -593,7 +595,9 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
WARN_ON(!fip->fcf_count);
fip->fcf_count--;
kfree(fcf);
- fc_lport_get_stats(fip->lp)->VLinkFailureCount++;
+ stats = per_cpu_ptr(fip->lp->dev_stats,
+ smp_processor_id());
+ stats->VLinkFailureCount++;
} else if (fcoe_ctlr_mtu_valid(fcf) &&
(!sel_time || time_before(sel_time, fcf->time))) {
sel_time = fcf->time;
@@ -776,7 +780,8 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
mtu_valid = fcoe_ctlr_mtu_valid(fcf);
fcf->time = jiffies;
if (!found) {
- LIBFCOE_FIP_DBG(fip, "New FCF for fab %llx map %x val %d\n",
+ LIBFCOE_FIP_DBG(fip, "New FCF for fab %16.16llx "
+ "map %x val %d\n",
fcf->fabric_name, fcf->fc_map, mtu_valid);
}
@@ -906,9 +911,10 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
fr_eof(fp) = FC_EOF_T;
fr_dev(fp) = lport;
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->RxFrames++;
stats->RxWords += skb->len / FIP_BPW;
+ put_cpu();
fc_exch_recv(lport, fp);
return;
@@ -942,9 +948,8 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
u32 desc_mask;
LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
- if (!fcf)
- return;
- if (!fcf || !fc_host_port_id(lport->host))
+
+ if (!fcf || !lport->port_id)
return;
/*
@@ -982,8 +987,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
if (compare_ether_addr(vp->fd_mac,
fip->get_src_addr(lport)) == 0 &&
get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn &&
- ntoh24(vp->fd_fc_id) ==
- fc_host_port_id(lport->host))
+ ntoh24(vp->fd_fc_id) == lport->port_id)
desc_mask &= ~BIT(FIP_DT_VN_ID);
break;
default:
@@ -1006,7 +1010,8 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
spin_lock_bh(&fip->lock);
- fc_lport_get_stats(lport)->VLinkFailureCount++;
+ per_cpu_ptr(lport->dev_stats,
+ smp_processor_id())->VLinkFailureCount++;
fcoe_ctlr_reset(fip);
spin_unlock_bh(&fip->lock);
@@ -1102,15 +1107,17 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
struct fcoe_fcf *best = NULL;
list_for_each_entry(fcf, &fip->fcfs, list) {
- LIBFCOE_FIP_DBG(fip, "consider FCF for fab %llx VFID %d map %x "
- "val %d\n", fcf->fabric_name, fcf->vfid,
+ LIBFCOE_FIP_DBG(fip, "consider FCF for fab %16.16llx "
+ "VFID %d map %x val %d\n",
+ fcf->fabric_name, fcf->vfid,
fcf->fc_map, fcoe_ctlr_mtu_valid(fcf));
if (!fcoe_ctlr_fcf_usable(fcf)) {
- LIBFCOE_FIP_DBG(fip, "FCF for fab %llx map %x %svalid "
- "%savailable\n", fcf->fabric_name,
- fcf->fc_map, (fcf->flags & FIP_FL_SOL)
- ? "" : "in", (fcf->flags & FIP_FL_AVAIL)
- ? "" : "un");
+ LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx "
+ "map %x %svalid %savailable\n",
+ fcf->fabric_name, fcf->fc_map,
+ (fcf->flags & FIP_FL_SOL) ? "" : "in",
+ (fcf->flags & FIP_FL_AVAIL) ?
+ "" : "un");
continue;
}
if (!best) {
@@ -1175,7 +1182,7 @@ static void fcoe_ctlr_timeout(unsigned long arg)
"Starting FCF discovery.\n",
fip->lp->host->host_no);
fip->reset_req = 1;
- schedule_work(&fip->link_work);
+ schedule_work(&fip->timer_work);
}
}
@@ -1201,43 +1208,31 @@ static void fcoe_ctlr_timeout(unsigned long arg)
mod_timer(&fip->timer, next_timer);
}
if (fip->send_ctlr_ka || fip->send_port_ka)
- schedule_work(&fip->link_work);
+ schedule_work(&fip->timer_work);
spin_unlock_bh(&fip->lock);
}
/**
- * fcoe_ctlr_link_work() - Worker thread function for link changes
+ * fcoe_ctlr_timer_work() - Worker thread function for timer work
* @work: Handle to a FCoE controller
*
- * See if the link status has changed and if so, report it.
- *
- * This is here because fc_linkup() and fc_linkdown() must not
+ * Sends keep-alives and resets which must not
* be called from the timer directly, since they use a mutex.
*/
-static void fcoe_ctlr_link_work(struct work_struct *work)
+static void fcoe_ctlr_timer_work(struct work_struct *work)
{
struct fcoe_ctlr *fip;
struct fc_lport *vport;
u8 *mac;
- int link;
- int last_link;
int reset;
- fip = container_of(work, struct fcoe_ctlr, link_work);
+ fip = container_of(work, struct fcoe_ctlr, timer_work);
spin_lock_bh(&fip->lock);
- last_link = fip->last_link;
- link = fip->link;
- fip->last_link = link;
reset = fip->reset_req;
fip->reset_req = 0;
spin_unlock_bh(&fip->lock);
- if (last_link != link) {
- if (link)
- fc_linkup(fip->lp);
- else
- fc_linkdown(fip->lp);
- } else if (reset && link)
+ if (reset)
fc_lport_reset(fip->lp);
if (fip->send_ctlr_ka) {
@@ -1334,9 +1329,9 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport,
if (fip->state == FIP_ST_AUTO || fip->state == FIP_ST_NON_FIP) {
memcpy(fip->dest_addr, sa, ETH_ALEN);
fip->map_dest = 0;
- if (fip->state == FIP_ST_NON_FIP)
- LIBFCOE_FIP_DBG(fip, "received FLOGI REQ, "
- "using non-FIP mode\n");
+ if (fip->state == FIP_ST_AUTO)
+ LIBFCOE_FIP_DBG(fip, "received non-FIP FLOGI. "
+ "Setting non-FIP mode\n");
fip->state = FIP_ST_NON_FIP;
}
spin_unlock_bh(&fip->lock);
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 3966c71..19338e0 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -36,7 +36,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.4.0.98"
+#define DRV_VERSION "1.4.0.145"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
@@ -45,7 +45,7 @@
#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
#define FNIC_DFLT_QUEUE_DEPTH 32
#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
-#define FNIC_MAX_CMD_LEN 16 /* Supported CDB length */
+
/*
* Tag bits used for special requests.
*/
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 5259888..2b48d79 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -617,7 +617,7 @@ void fnic_flush_tx(struct fnic *fnic)
struct sk_buff *skb;
struct fc_frame *fp;
- while ((skb = skb_dequeue(&fnic->frame_queue))) {
+ while ((skb = skb_dequeue(&fnic->tx_queue))) {
fp = (struct fc_frame *)skb;
fnic_send_frame(fnic, fp);
}
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 97b2125..265e73d 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -556,7 +556,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
}
host->max_lun = fnic->config.luns_per_tgt;
host->max_id = FNIC_MAX_FCP_TARGET;
- host->max_cmd_len = FNIC_MAX_CMD_LEN;
+ host->max_cmd_len = FCOE_MAX_CMD_LEN;
fnic_get_res_counts(fnic);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 35a4b30..a765fe7 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -3842,7 +3842,7 @@ int __init option_setup(char *str)
TRACE2(("option_setup() str %s\n", str ? str:"NULL"));
- while (cur && isdigit(*cur) && i <= MAXHA) {
+ while (cur && isdigit(*cur) && i < MAXHA) {
ints[i++] = simple_strtoul(cur, NULL, 0);
if ((cur = strchr(cur, ',')) != NULL) cur++;
}
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 48f4068..18b7102 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -19,332 +19,334 @@
#include "wd33c93.h"
#include "gvp11.h"
-#include<linux/stat.h>
+#include <linux/stat.h>
-#define DMA(ptr) ((gvp11_scsiregs *)((ptr)->base))
-#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
-static irqreturn_t gvp11_intr (int irq, void *_instance)
+#define DMA(ptr) ((gvp11_scsiregs *)((ptr)->base))
+
+static irqreturn_t gvp11_intr(int irq, void *_instance)
{
- unsigned long flags;
- unsigned int status;
- struct Scsi_Host *instance = (struct Scsi_Host *)_instance;
-
- status = DMA(instance)->CNTR;
- if (!(status & GVP11_DMAC_INT_PENDING))
- return IRQ_NONE;
-
- spin_lock_irqsave(instance->host_lock, flags);
- wd33c93_intr(instance);
- spin_unlock_irqrestore(instance->host_lock, flags);
- return IRQ_HANDLED;
+ unsigned long flags;
+ unsigned int status;
+ struct Scsi_Host *instance = (struct Scsi_Host *)_instance;
+
+ status = DMA(instance)->CNTR;
+ if (!(status & GVP11_DMAC_INT_PENDING))
+ return IRQ_NONE;
+
+ spin_lock_irqsave(instance->host_lock, flags);
+ wd33c93_intr(instance);
+ spin_unlock_irqrestore(instance->host_lock, flags);
+ return IRQ_HANDLED;
}
static int gvp11_xfer_mask = 0;
-void gvp11_setup (char *str, int *ints)
+void gvp11_setup(char *str, int *ints)
{
- gvp11_xfer_mask = ints[1];
+ gvp11_xfer_mask = ints[1];
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
- unsigned short cntr = GVP11_DMAC_INT_ENABLE;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
- int bank_mask;
- static int scsi_alloc_out_of_range = 0;
-
- /* use bounce buffer if the physical address is bad */
- if (addr & HDATA(cmd->device->host)->dma_xfer_mask)
- {
- HDATA(cmd->device->host)->dma_bounce_len = (cmd->SCp.this_residual + 511)
- & ~0x1ff;
-
- if( !scsi_alloc_out_of_range ) {
- HDATA(cmd->device->host)->dma_bounce_buffer =
- kmalloc (HDATA(cmd->device->host)->dma_bounce_len, GFP_KERNEL);
- HDATA(cmd->device->host)->dma_buffer_pool = BUF_SCSI_ALLOCED;
- }
+ struct Scsi_Host *instance = cmd->device->host;
+ struct WD33C93_hostdata *hdata = shost_priv(instance);
+ unsigned short cntr = GVP11_DMAC_INT_ENABLE;
+ unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+ int bank_mask;
+ static int scsi_alloc_out_of_range = 0;
+
+ /* use bounce buffer if the physical address is bad */
+ if (addr & hdata->dma_xfer_mask) {
+ hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+
+ if (!scsi_alloc_out_of_range) {
+ hdata->dma_bounce_buffer =
+ kmalloc(hdata->dma_bounce_len, GFP_KERNEL);
+ hdata->dma_buffer_pool = BUF_SCSI_ALLOCED;
+ }
- if (scsi_alloc_out_of_range ||
- !HDATA(cmd->device->host)->dma_bounce_buffer) {
- HDATA(cmd->device->host)->dma_bounce_buffer =
- amiga_chip_alloc(HDATA(cmd->device->host)->dma_bounce_len,
- "GVP II SCSI Bounce Buffer");
+ if (scsi_alloc_out_of_range ||
+ !hdata->dma_bounce_buffer) {
+ hdata->dma_bounce_buffer =
+ amiga_chip_alloc(hdata->dma_bounce_len,
+ "GVP II SCSI Bounce Buffer");
- if(!HDATA(cmd->device->host)->dma_bounce_buffer)
- {
- HDATA(cmd->device->host)->dma_bounce_len = 0;
- return 1;
- }
+ if (!hdata->dma_bounce_buffer) {
+ hdata->dma_bounce_len = 0;
+ return 1;
+ }
- HDATA(cmd->device->host)->dma_buffer_pool = BUF_CHIP_ALLOCED;
- }
+ hdata->dma_buffer_pool = BUF_CHIP_ALLOCED;
+ }
- /* check if the address of the bounce buffer is OK */
- addr = virt_to_bus(HDATA(cmd->device->host)->dma_bounce_buffer);
-
- if (addr & HDATA(cmd->device->host)->dma_xfer_mask) {
- /* fall back to Chip RAM if address out of range */
- if( HDATA(cmd->device->host)->dma_buffer_pool == BUF_SCSI_ALLOCED) {
- kfree (HDATA(cmd->device->host)->dma_bounce_buffer);
- scsi_alloc_out_of_range = 1;
- } else {
- amiga_chip_free (HDATA(cmd->device->host)->dma_bounce_buffer);
- }
-
- HDATA(cmd->device->host)->dma_bounce_buffer =
- amiga_chip_alloc(HDATA(cmd->device->host)->dma_bounce_len,
- "GVP II SCSI Bounce Buffer");
-
- if(!HDATA(cmd->device->host)->dma_bounce_buffer)
- {
- HDATA(cmd->device->host)->dma_bounce_len = 0;
- return 1;
- }
-
- addr = virt_to_bus(HDATA(cmd->device->host)->dma_bounce_buffer);
- HDATA(cmd->device->host)->dma_buffer_pool = BUF_CHIP_ALLOCED;
- }
-
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy (HDATA(cmd->device->host)->dma_bounce_buffer,
- cmd->SCp.ptr, cmd->SCp.this_residual);
+ /* check if the address of the bounce buffer is OK */
+ addr = virt_to_bus(hdata->dma_bounce_buffer);
+
+ if (addr & hdata->dma_xfer_mask) {
+ /* fall back to Chip RAM if address out of range */
+ if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED) {
+ kfree(hdata->dma_bounce_buffer);
+ scsi_alloc_out_of_range = 1;
+ } else {
+ amiga_chip_free(hdata->dma_bounce_buffer);
+ }
+
+ hdata->dma_bounce_buffer =
+ amiga_chip_alloc(hdata->dma_bounce_len,
+ "GVP II SCSI Bounce Buffer");
+
+ if (!hdata->dma_bounce_buffer) {
+ hdata->dma_bounce_len = 0;
+ return 1;
+ }
+
+ addr = virt_to_bus(hdata->dma_bounce_buffer);
+ hdata->dma_buffer_pool = BUF_CHIP_ALLOCED;
+ }
+
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr,
+ cmd->SCp.this_residual);
+ }
}
- }
- /* setup dma direction */
- if (!dir_in)
- cntr |= GVP11_DMAC_DIR_WRITE;
+ /* setup dma direction */
+ if (!dir_in)
+ cntr |= GVP11_DMAC_DIR_WRITE;
- HDATA(cmd->device->host)->dma_dir = dir_in;
- DMA(cmd->device->host)->CNTR = cntr;
+ hdata->dma_dir = dir_in;
+ DMA(cmd->device->host)->CNTR = cntr;
- /* setup DMA *physical* address */
- DMA(cmd->device->host)->ACR = addr;
+ /* setup DMA *physical* address */
+ DMA(cmd->device->host)->ACR = addr;
- if (dir_in)
- /* invalidate any cache */
- cache_clear (addr, cmd->SCp.this_residual);
- else
- /* push any dirty cache */
- cache_push (addr, cmd->SCp.this_residual);
+ if (dir_in) {
+ /* invalidate any cache */
+ cache_clear(addr, cmd->SCp.this_residual);
+ } else {
+ /* push any dirty cache */
+ cache_push(addr, cmd->SCp.this_residual);
+ }
- if ((bank_mask = (~HDATA(cmd->device->host)->dma_xfer_mask >> 18) & 0x01c0))
- DMA(cmd->device->host)->BANK = bank_mask & (addr >> 18);
+ bank_mask = (~hdata->dma_xfer_mask >> 18) & 0x01c0;
+ if (bank_mask)
+ DMA(cmd->device->host)->BANK = bank_mask & (addr >> 18);
- /* start DMA */
- DMA(cmd->device->host)->ST_DMA = 1;
+ /* start DMA */
+ DMA(cmd->device->host)->ST_DMA = 1;
- /* return success */
- return 0;
+ /* return success */
+ return 0;
}
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
- /* stop DMA */
- DMA(instance)->SP_DMA = 1;
- /* remove write bit from CONTROL bits */
- DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
-
- /* copy from a bounce buffer, if necessary */
- if (status && HDATA(instance)->dma_bounce_buffer) {
- if (HDATA(instance)->dma_dir && SCpnt)
- memcpy (SCpnt->SCp.ptr,
- HDATA(instance)->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
-
- if (HDATA(instance)->dma_buffer_pool == BUF_SCSI_ALLOCED)
- kfree (HDATA(instance)->dma_bounce_buffer);
- else
- amiga_chip_free(HDATA(instance)->dma_bounce_buffer);
-
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
- }
+ struct WD33C93_hostdata *hdata = shost_priv(instance);
+
+ /* stop DMA */
+ DMA(instance)->SP_DMA = 1;
+ /* remove write bit from CONTROL bits */
+ DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
+
+ /* copy from a bounce buffer, if necessary */
+ if (status && hdata->dma_bounce_buffer) {
+ if (hdata->dma_dir && SCpnt)
+ memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer,
+ SCpnt->SCp.this_residual);
+
+ if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED)
+ kfree(hdata->dma_bounce_buffer);
+ else
+ amiga_chip_free(hdata->dma_bounce_buffer);
+
+ hdata->dma_bounce_buffer = NULL;
+ hdata->dma_bounce_len = 0;
+ }
}
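Dropping the HDATA() pointer cast in favour of shost_priv() reads the driver-private area that the SCSI midlayer places directly behind struct Scsi_Host, sized by the hostdata length handed to scsi_register(). A minimal sketch of the accessor:

	/* Sketch: fetch the wd33c93 per-host data via the midlayer helper. */
	struct WD33C93_hostdata *hdata = shost_priv(instance);

	hdata->dma_dir = dir_in;	/* previously HDATA(instance)->dma_dir */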
#define CHECK_WD33C93
int __init gvp11_detect(struct scsi_host_template *tpnt)
{
- static unsigned char called = 0;
- struct Scsi_Host *instance;
- unsigned long address;
- unsigned int epc;
- struct zorro_dev *z = NULL;
- unsigned int default_dma_xfer_mask;
- wd33c93_regs regs;
- int num_gvp11 = 0;
+ static unsigned char called = 0;
+ struct Scsi_Host *instance;
+ unsigned long address;
+ unsigned int epc;
+ struct zorro_dev *z = NULL;
+ unsigned int default_dma_xfer_mask;
+ struct WD33C93_hostdata *hdata;
+ wd33c93_regs regs;
+ int num_gvp11 = 0;
#ifdef CHECK_WD33C93
- volatile unsigned char *sasr_3393, *scmd_3393;
- unsigned char save_sasr;
- unsigned char q, qq;
+ volatile unsigned char *sasr_3393, *scmd_3393;
+ unsigned char save_sasr;
+ unsigned char q, qq;
#endif
- if (!MACH_IS_AMIGA || called)
- return 0;
- called = 1;
-
- tpnt->proc_name = "GVP11";
- tpnt->proc_info = &wd33c93_proc_info;
-
- while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
- /*
- * This should (hopefully) be the correct way to identify
- * all the different GVP SCSI controllers (except for the
- * SERIES I though).
- */
-
- if (z->id == ZORRO_PROD_GVP_COMBO_030_R3_SCSI ||
- z->id == ZORRO_PROD_GVP_SERIES_II)
- default_dma_xfer_mask = ~0x00ffffff;
- else if (z->id == ZORRO_PROD_GVP_GFORCE_030_SCSI ||
- z->id == ZORRO_PROD_GVP_A530_SCSI ||
- z->id == ZORRO_PROD_GVP_COMBO_030_R4_SCSI)
- default_dma_xfer_mask = ~0x01ffffff;
- else if (z->id == ZORRO_PROD_GVP_A1291 ||
- z->id == ZORRO_PROD_GVP_GFORCE_040_SCSI_1)
- default_dma_xfer_mask = ~0x07ffffff;
- else
- continue;
-
- /*
- * Rumors state that some GVP ram boards use the same product
- * code as the SCSI controllers. Therefore if the board-size
- * is not 64KB we asume it is a ram board and bail out.
- */
- if (z->resource.end-z->resource.start != 0xffff)
- continue;
-
- address = z->resource.start;
- if (!request_mem_region(address, 256, "wd33c93"))
- continue;
+ if (!MACH_IS_AMIGA || called)
+ return 0;
+ called = 1;
+
+ tpnt->proc_name = "GVP11";
+ tpnt->proc_info = &wd33c93_proc_info;
+
+ while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
+ /*
+ * This should (hopefully) be the correct way to identify
+ * all the different GVP SCSI controllers (except for the
+ * SERIES I though).
+ */
+
+ if (z->id == ZORRO_PROD_GVP_COMBO_030_R3_SCSI ||
+ z->id == ZORRO_PROD_GVP_SERIES_II)
+ default_dma_xfer_mask = ~0x00ffffff;
+ else if (z->id == ZORRO_PROD_GVP_GFORCE_030_SCSI ||
+ z->id == ZORRO_PROD_GVP_A530_SCSI ||
+ z->id == ZORRO_PROD_GVP_COMBO_030_R4_SCSI)
+ default_dma_xfer_mask = ~0x01ffffff;
+ else if (z->id == ZORRO_PROD_GVP_A1291 ||
+ z->id == ZORRO_PROD_GVP_GFORCE_040_SCSI_1)
+ default_dma_xfer_mask = ~0x07ffffff;
+ else
+ continue;
+
+ /*
+ * Rumors state that some GVP ram boards use the same product
+ * code as the SCSI controllers. Therefore if the board-size
+ * is not 64KB we assume it is a ram board and bail out.
+ */
+ if (z->resource.end - z->resource.start != 0xffff)
+ continue;
+
+ address = z->resource.start;
+ if (!request_mem_region(address, 256, "wd33c93"))
+ continue;
#ifdef CHECK_WD33C93
- /*
- * These darn GVP boards are a problem - it can be tough to tell
- * whether or not they include a SCSI controller. This is the
- * ultimate Yet-Another-GVP-Detection-Hack in that it actually
- * probes for a WD33c93 chip: If we find one, it's extremely
- * likely that this card supports SCSI, regardless of Product_
- * Code, Board_Size, etc.
- */
-
- /* Get pointers to the presumed register locations and save contents */
-
- sasr_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SASR);
- scmd_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SCMD);
- save_sasr = *sasr_3393;
-
- /* First test the AuxStatus Reg */
-
- q = *sasr_3393; /* read it */
- if (q & 0x08) /* bit 3 should always be clear */
- goto release;
- *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */
- if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */
- *sasr_3393 = save_sasr; /* Oops - restore this byte */
- goto release;
+ /*
+ * These darn GVP boards are a problem - it can be tough to tell
+ * whether or not they include a SCSI controller. This is the
+ * ultimate Yet-Another-GVP-Detection-Hack in that it actually
+ * probes for a WD33c93 chip: If we find one, it's extremely
+ * likely that this card supports SCSI, regardless of Product_
+ * Code, Board_Size, etc.
+ */
+
+ /* Get pointers to the presumed register locations and save contents */
+
+ sasr_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SASR);
+ scmd_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SCMD);
+ save_sasr = *sasr_3393;
+
+ /* First test the AuxStatus Reg */
+
+ q = *sasr_3393; /* read it */
+ if (q & 0x08) /* bit 3 should always be clear */
+ goto release;
+ *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */
+ if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */
+ *sasr_3393 = save_sasr; /* Oops - restore this byte */
+ goto release;
}
- if (*sasr_3393 != q) { /* should still read the same */
- *sasr_3393 = save_sasr; /* Oops - restore this byte */
- goto release;
+ if (*sasr_3393 != q) { /* should still read the same */
+ *sasr_3393 = save_sasr; /* Oops - restore this byte */
+ goto release;
}
- if (*scmd_3393 != q) /* and so should the image at 0x1f */
- goto release;
-
-
- /* Ok, we probably have a wd33c93, but let's check a few other places
- * for good measure. Make sure that this works for both 'A and 'B
- * chip versions.
- */
-
- *sasr_3393 = WD_SCSI_STATUS;
- q = *scmd_3393;
- *sasr_3393 = WD_SCSI_STATUS;
- *scmd_3393 = ~q;
- *sasr_3393 = WD_SCSI_STATUS;
- qq = *scmd_3393;
- *sasr_3393 = WD_SCSI_STATUS;
- *scmd_3393 = q;
- if (qq != q) /* should be read only */
- goto release;
- *sasr_3393 = 0x1e; /* this register is unimplemented */
- q = *scmd_3393;
- *sasr_3393 = 0x1e;
- *scmd_3393 = ~q;
- *sasr_3393 = 0x1e;
- qq = *scmd_3393;
- *sasr_3393 = 0x1e;
- *scmd_3393 = q;
- if (qq != q || qq != 0xff) /* should be read only, all 1's */
- goto release;
- *sasr_3393 = WD_TIMEOUT_PERIOD;
- q = *scmd_3393;
- *sasr_3393 = WD_TIMEOUT_PERIOD;
- *scmd_3393 = ~q;
- *sasr_3393 = WD_TIMEOUT_PERIOD;
- qq = *scmd_3393;
- *sasr_3393 = WD_TIMEOUT_PERIOD;
- *scmd_3393 = q;
- if (qq != (~q & 0xff)) /* should be read/write */
- goto release;
+ if (*scmd_3393 != q) /* and so should the image at 0x1f */
+ goto release;
+
+ /*
+ * Ok, we probably have a wd33c93, but let's check a few other places
+ * for good measure. Make sure that this works for both 'A and 'B
+ * chip versions.
+ */
+
+ *sasr_3393 = WD_SCSI_STATUS;
+ q = *scmd_3393;
+ *sasr_3393 = WD_SCSI_STATUS;
+ *scmd_3393 = ~q;
+ *sasr_3393 = WD_SCSI_STATUS;
+ qq = *scmd_3393;
+ *sasr_3393 = WD_SCSI_STATUS;
+ *scmd_3393 = q;
+ if (qq != q) /* should be read only */
+ goto release;
+ *sasr_3393 = 0x1e; /* this register is unimplemented */
+ q = *scmd_3393;
+ *sasr_3393 = 0x1e;
+ *scmd_3393 = ~q;
+ *sasr_3393 = 0x1e;
+ qq = *scmd_3393;
+ *sasr_3393 = 0x1e;
+ *scmd_3393 = q;
+ if (qq != q || qq != 0xff) /* should be read only, all 1's */
+ goto release;
+ *sasr_3393 = WD_TIMEOUT_PERIOD;
+ q = *scmd_3393;
+ *sasr_3393 = WD_TIMEOUT_PERIOD;
+ *scmd_3393 = ~q;
+ *sasr_3393 = WD_TIMEOUT_PERIOD;
+ qq = *scmd_3393;
+ *sasr_3393 = WD_TIMEOUT_PERIOD;
+ *scmd_3393 = q;
+ if (qq != (~q & 0xff)) /* should be read/write */
+ goto release;
#endif
- instance = scsi_register (tpnt, sizeof (struct WD33C93_hostdata));
- if(instance == NULL)
- goto release;
- instance->base = ZTWO_VADDR(address);
- instance->irq = IRQ_AMIGA_PORTS;
- instance->unique_id = z->slotaddr;
-
- if (gvp11_xfer_mask)
- HDATA(instance)->dma_xfer_mask = gvp11_xfer_mask;
- else
- HDATA(instance)->dma_xfer_mask = default_dma_xfer_mask;
-
-
- DMA(instance)->secret2 = 1;
- DMA(instance)->secret1 = 0;
- DMA(instance)->secret3 = 15;
- while (DMA(instance)->CNTR & GVP11_DMAC_BUSY) ;
- DMA(instance)->CNTR = 0;
-
- DMA(instance)->BANK = 0;
-
- epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000);
-
- /*
- * Check for 14MHz SCSI clock
- */
- regs.SASR = &(DMA(instance)->SASR);
- regs.SCMD = &(DMA(instance)->SCMD);
- HDATA(instance)->no_sync = 0xff;
- HDATA(instance)->fast = 0;
- HDATA(instance)->dma_mode = CTRL_DMA;
- wd33c93_init(instance, regs, dma_setup, dma_stop,
- (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
- : WD33C93_FS_12_15);
-
- if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, "GVP11 SCSI",
- instance))
- goto unregister;
- DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
- num_gvp11++;
- continue;
+ instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
+ if (instance == NULL)
+ goto release;
+ instance->base = ZTWO_VADDR(address);
+ instance->irq = IRQ_AMIGA_PORTS;
+ instance->unique_id = z->slotaddr;
+
+ hdata = shost_priv(instance);
+ if (gvp11_xfer_mask)
+ hdata->dma_xfer_mask = gvp11_xfer_mask;
+ else
+ hdata->dma_xfer_mask = default_dma_xfer_mask;
+
+ DMA(instance)->secret2 = 1;
+ DMA(instance)->secret1 = 0;
+ DMA(instance)->secret3 = 15;
+ while (DMA(instance)->CNTR & GVP11_DMAC_BUSY)
+ ;
+ DMA(instance)->CNTR = 0;
+
+ DMA(instance)->BANK = 0;
+
+ epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000);
+
+ /*
+ * Check for 14MHz SCSI clock
+ */
+ regs.SASR = &(DMA(instance)->SASR);
+ regs.SCMD = &(DMA(instance)->SCMD);
+ hdata->no_sync = 0xff;
+ hdata->fast = 0;
+ hdata->dma_mode = CTRL_DMA;
+ wd33c93_init(instance, regs, dma_setup, dma_stop,
+ (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
+ : WD33C93_FS_12_15);
+
+ if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED,
+ "GVP11 SCSI", instance))
+ goto unregister;
+ DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
+ num_gvp11++;
+ continue;
unregister:
- scsi_unregister(instance);
- wd33c93_release();
+ scsi_unregister(instance);
release:
- release_mem_region(address, 256);
- }
+ release_mem_region(address, 256);
+ }
- return num_gvp11;
+ return num_gvp11;
}
static int gvp11_bus_reset(struct scsi_cmnd *cmd)
@@ -389,12 +391,11 @@ static struct scsi_host_template driver_template = {
int gvp11_release(struct Scsi_Host *instance)
{
#ifdef MODULE
- DMA(instance)->CNTR = 0;
- release_mem_region(ZTWO_PADDR(instance->base), 256);
- free_irq(IRQ_AMIGA_PORTS, instance);
- wd33c93_release();
+ DMA(instance)->CNTR = 0;
+ release_mem_region(ZTWO_PADDR(instance->base), 256);
+ free_irq(IRQ_AMIGA_PORTS, instance);
#endif
- return 1;
+ return 1;
}
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h
index bf22859..e2efdf9 100644
--- a/drivers/scsi/gvp11.h
+++ b/drivers/scsi/gvp11.h
@@ -15,11 +15,11 @@ int gvp11_detect(struct scsi_host_template *);
int gvp11_release(struct Scsi_Host *);
#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 2
+#define CMD_PER_LUN 2
#endif
#ifndef CAN_QUEUE
-#define CAN_QUEUE 16
+#define CAN_QUEUE 16
#endif
#ifndef HOSTS_C
@@ -28,24 +28,24 @@ int gvp11_release(struct Scsi_Host *);
* if the transfer address ANDed with this results in a non-zero
* result, then we can't use DMA.
*/
-#define GVP11_XFER_MASK (0xff000001)
+#define GVP11_XFER_MASK (0xff000001)
typedef struct {
- unsigned char pad1[64];
- volatile unsigned short CNTR;
- unsigned char pad2[31];
- volatile unsigned char SASR;
- unsigned char pad3;
- volatile unsigned char SCMD;
- unsigned char pad4[4];
- volatile unsigned short BANK;
- unsigned char pad5[6];
- volatile unsigned long ACR;
- volatile unsigned short secret1; /* store 0 here */
- volatile unsigned short ST_DMA;
- volatile unsigned short SP_DMA;
- volatile unsigned short secret2; /* store 1 here */
- volatile unsigned short secret3; /* store 15 here */
+ unsigned char pad1[64];
+ volatile unsigned short CNTR;
+ unsigned char pad2[31];
+ volatile unsigned char SASR;
+ unsigned char pad3;
+ volatile unsigned char SCMD;
+ unsigned char pad4[4];
+ volatile unsigned short BANK;
+ unsigned char pad5[6];
+ volatile unsigned long ACR;
+ volatile unsigned short secret1; /* store 0 here */
+ volatile unsigned short ST_DMA;
+ volatile unsigned short SP_DMA;
+ volatile unsigned short secret2; /* store 1 here */
+ volatile unsigned short secret3; /* store 15 here */
} gvp11_scsiregs;
/* bits in CNTR */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 183d3a4..c016426b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2708,14 +2708,6 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.CDB[8] = (size >> 8) & 0xFF;
c->Request.CDB[9] = size & 0xFF;
break;
-
- case HPSA_READ_CAPACITY:
- c->Request.CDBLen = 10;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_READ;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = cmd;
- break;
case HPSA_CACHE_FLUSH:
c->Request.CDBLen = 12;
c->Request.Type.Attribute = ATTR_SIMPLE;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 56fb982..78de9b6 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -152,21 +152,6 @@ struct SenseSubsystem_info {
u8 reserved1[1108];
};
-#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
-struct ReadCapdata {
- u8 total_size[4]; /* Total size in blocks */
- u8 block_size[4]; /* Size of blocks in bytes */
-};
-
-#if 0
-/* 12 byte commands not implemented in firmware yet. */
-#define HPSA_READ 0xa8
-#define HPSA_WRITE 0xaa
-#endif
-
-#define HPSA_READ 0x28 /* Read(10) */
-#define HPSA_WRITE 0x2a /* Write(10) */
-
/* BMIC commands */
#define BMIC_READ 0x26
#define BMIC_WRITE 0x27
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index c2eea71..3eb2b7b 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2245,7 +2245,7 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
DECLARE_COMPLETION_ONSTACK(comp);
int wait;
unsigned long flags;
- signed long timeout = init_timeout * HZ;
+ signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
ENTER;
do {
@@ -2919,6 +2919,7 @@ static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
* ibmvfc_read_trace - Dump the adapter trace
+ * @filp: open sysfs file
* @kobj: kobject struct
* @bin_attr: bin_attribute struct
* @buf: buffer
@@ -2928,7 +2929,7 @@ static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ibmvfc_read_trace(struct kobject *kobj,
+static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -3013,6 +3014,7 @@ static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
if (crq->valid & 0x80) {
if (++async_crq->cur == async_crq->size)
async_crq->cur = 0;
+ rmb();
} else
crq = NULL;
@@ -3035,6 +3037,7 @@ static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
if (crq->valid & 0x80) {
if (++queue->cur == queue->size)
queue->cur = 0;
+ rmb();
} else
crq = NULL;
@@ -3083,12 +3086,14 @@ static void ibmvfc_tasklet(void *data)
while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
ibmvfc_handle_async(async, vhost);
async->valid = 0;
+ wmb();
}
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
ibmvfc_handle_crq(crq, vhost);
crq->valid = 0;
+ wmb();
}
vio_enable_interrupts(vdev);
@@ -3096,10 +3101,12 @@ static void ibmvfc_tasklet(void *data)
vio_disable_interrupts(vdev);
ibmvfc_handle_async(async, vhost);
async->valid = 0;
+ wmb();
} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
vio_disable_interrupts(vdev);
ibmvfc_handle_crq(crq, vhost);
crq->valid = 0;
+ wmb();
} else
done = 1;
}
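The new rmb()/wmb() calls order reads of the CRQ payload after the valid-bit test and publish the cleared valid bit only once handling is done. A generic sketch of the consumer side, assuming the queue->msgs ring layout that ibmvfc_next_crq() walks:

	/* Sketch: consume one ring entry guarded by an ownership bit. */
	struct ibmvfc_crq *crq = &queue->msgs[queue->cur];

	if (crq->valid & 0x80) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
		rmb();			/* read the payload only after valid is seen */
		ibmvfc_handle_crq(crq, vhost);
		crq->valid = 0;
		wmb();			/* release the slot before advancing */
	}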
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index d25106a..7e97427 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -38,6 +38,7 @@
#define IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT \
(IBMVFC_ADISC_TIMEOUT + IBMVFC_ADISC_CANCEL_TIMEOUT)
#define IBMVFC_INIT_TIMEOUT 120
+#define IBMVFC_ABORT_WAIT_TIMEOUT 40
#define IBMVFC_MAX_REQUESTS_DEFAULT 100
#define IBMVFC_DEBUG 0
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b90c118..6a6661c 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3120,6 +3120,7 @@ restart:
#ifdef CONFIG_SCSI_IPR_TRACE
/**
* ipr_read_trace - Dump the adapter trace
+ * @filp: open sysfs file
* @kobj: kobject struct
* @bin_attr: bin_attribute struct
* @buf: buffer
@@ -3129,7 +3130,7 @@ restart:
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_read_trace(struct kobject *kobj,
+static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -3764,6 +3765,7 @@ static struct device_attribute *ipr_ioa_attrs[] = {
#ifdef CONFIG_SCSI_IPR_DUMP
/**
* ipr_read_dump - Dump the adapter
+ * @filp: open sysfs file
* @kobj: kobject struct
* @bin_attr: bin_attribute struct
* @buf: buffer
@@ -3773,7 +3775,7 @@ static struct device_attribute *ipr_ioa_attrs[] = {
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_read_dump(struct kobject *kobj,
+static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -3927,6 +3929,7 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
/**
* ipr_write_dump - Setup dump state of adapter
+ * @filp: open sysfs file
* @kobj: kobject struct
* @bin_attr: bin_attribute struct
* @buf: buffer
@@ -3936,7 +3939,7 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_write_dump(struct kobject *kobj,
+static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 02143af..bf55d30 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -206,8 +206,10 @@ static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
}
static void
-iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_sw_tcp_conn *tcp_sw_conn)
+iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)
{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
struct sock *sk = tcp_sw_conn->sock->sk;
/* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
@@ -555,7 +557,7 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
return;
sock_hold(sock->sk);
- iscsi_sw_tcp_conn_restore_callbacks(tcp_sw_conn);
+ iscsi_sw_tcp_conn_restore_callbacks(conn);
sock_put(sock->sk);
spin_lock_bh(&session->lock);
@@ -599,9 +601,9 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
- if (sock->sk->sk_sleep) {
+ if (sk_sleep(sock->sk)) {
sock->sk->sk_err = EIO;
- wake_up_interruptible(sock->sk->sk_sleep);
+ wake_up_interruptible(sk_sleep(sock->sk));
}
iscsi_conn_stop(cls_conn, flag);
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index ca6b7bc..94644ba 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -36,7 +36,6 @@ struct iscsi_sw_tcp_send {
};
struct iscsi_sw_tcp_conn {
- struct iscsi_conn *iscsi_conn;
struct socket *sock;
struct iscsi_sw_tcp_send out;
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 1087a7f1..c7985da 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -132,7 +132,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
switch (fmt) {
case ELS_ADDR_FMT_PORT:
FC_DISC_DBG(disc, "Port address format for port "
- "(%6x)\n", ntoh24(pp->rscn_fid));
+ "(%6.6x)\n", ntoh24(pp->rscn_fid));
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (!dp) {
redisc = 1;
@@ -440,7 +440,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
ids.port_id = ntoh24(np->fp_fid);
ids.port_name = ntohll(np->fp_wwpn);
- if (ids.port_id != fc_host_port_id(lport->host) &&
+ if (ids.port_id != lport->port_id &&
ids.port_name != lport->wwpn) {
rdata = lport->tt.rport_create(lport, ids.port_id);
if (rdata) {
@@ -449,7 +449,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
} else {
printk(KERN_WARNING "libfc: Failed to allocate "
"memory for the newly discovered port "
- "(%6x)\n", ids.port_id);
+ "(%6.6x)\n", ids.port_id);
error = -ENOMEM;
}
}
@@ -607,7 +607,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
rdata->ids.port_name = port_name;
else if (rdata->ids.port_name != port_name) {
FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. "
- "Port-id %x wwpn %llx\n",
+ "Port-id %6.6x wwpn %16.16llx\n",
rdata->ids.port_id, port_name);
lport->tt.rport_logoff(rdata);
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index 53748724..e9412b7 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -63,7 +63,7 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
return NULL;
}
- fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type,
+ fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index e5df0d4..104e0fb 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -488,7 +488,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
*/
spin_lock_bh(&ep->ex_lock);
ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
- if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
+ if (f_ctl & FC_FC_SEQ_INIT)
ep->esb_stat &= ~ESB_ST_SEQ_INIT;
spin_unlock_bh(&ep->ex_lock);
return error;
@@ -676,9 +676,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
}
memset(ep, 0, sizeof(*ep));
- cpu = smp_processor_id();
+ cpu = get_cpu();
pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_bh(&pool->lock);
+ put_cpu();
index = pool->next_index;
/* allocate new exch from pool */
while (fc_exch_ptr_get(pool, index)) {
@@ -734,19 +735,14 @@ err:
* EM is selected when a NULL match function pointer is encountered
* or when a call to a match function returns true.
*/
-static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
- struct fc_frame *fp)
+static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
+ struct fc_frame *fp)
{
struct fc_exch_mgr_anchor *ema;
- struct fc_exch *ep;
- list_for_each_entry(ema, &lport->ema_list, ema_list) {
- if (!ema->match || ema->match(fp)) {
- ep = fc_exch_em_alloc(lport, ema->mp);
- if (ep)
- return ep;
- }
- }
+ list_for_each_entry(ema, &lport->ema_list, ema_list)
+ if (!ema->match || ema->match(fp))
+ return fc_exch_em_alloc(lport, ema->mp);
return NULL;
}
@@ -920,13 +916,9 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
* Find or create the sequence.
*/
if (fc_sof_is_init(fr_sof(fp))) {
- sp = fc_seq_start_next(&ep->seq);
- if (!sp) {
- reject = FC_RJT_SEQ_XS; /* exchange shortage */
- goto rel;
- }
- sp->id = fh->fh_seq_id;
+ sp = &ep->seq;
sp->ssb_stat |= SSB_ST_RESP;
+ sp->id = fh->fh_seq_id;
} else {
sp = &ep->seq;
if (sp->id != fh->fh_seq_id) {
@@ -1250,9 +1242,6 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_seq *sp = NULL;
struct fc_exch *ep = NULL;
- enum fc_sof sof;
- enum fc_eof eof;
- u32 f_ctl;
enum fc_pf_rjt_reason reject;
/* We can have the wrong fc_lport at this point with NPIV, which is a
@@ -1269,9 +1258,6 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
if (reject == FC_RJT_NONE) {
sp = fr_seq(fp); /* sequence will be held */
ep = fc_seq_exch(sp);
- sof = fr_sof(fp);
- eof = fr_eof(fp);
- f_ctl = ntoh24(fh->fh_f_ctl);
fc_seq_send_ack(sp, fp);
/*
@@ -1336,17 +1322,15 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
goto rel;
}
sof = fr_sof(fp);
+ sp = &ep->seq;
if (fc_sof_is_init(sof)) {
- sp = fc_seq_start_next(&ep->seq);
- sp->id = fh->fh_seq_id;
sp->ssb_stat |= SSB_ST_RESP;
- } else {
- sp = &ep->seq;
- if (sp->id != fh->fh_seq_id) {
- atomic_inc(&mp->stats.seq_not_found);
- goto rel;
- }
+ sp->id = fh->fh_seq_id;
+ } else if (sp->id != fh->fh_seq_id) {
+ atomic_inc(&mp->stats.seq_not_found);
+ goto rel;
}
+
f_ctl = ntoh24(fh->fh_f_ctl);
fr_seq(fp) = sp;
if (f_ctl & FC_FC_SEQ_INIT)
@@ -1763,7 +1747,6 @@ static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
fc_exch_done(sp);
goto out;
}
- sp = fc_seq_start_next(sp);
acc = fc_frame_payload_get(fp, sizeof(*acc));
memset(acc, 0, sizeof(*acc));
acc->reca_cmd = ELS_LS_ACC;
@@ -1944,7 +1927,7 @@ static void fc_exch_rrq(struct fc_exch *ep)
did = ep->sid;
fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
- fc_host_port_id(lport->host), FC_TYPE_ELS,
+ lport->port_id, FC_TYPE_ELS,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 17396c7..ec1f66c 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -97,7 +97,7 @@ static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
-static void fc_timeout_error(struct fc_fcp_pkt *);
+static void fc_fcp_recovery(struct fc_fcp_pkt *);
static void fc_fcp_timeout(unsigned long);
static void fc_fcp_rec(struct fc_fcp_pkt *);
static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
@@ -121,7 +121,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
#define FC_DATA_UNDRUN 7
#define FC_ERROR 8
#define FC_HRD_ERROR 9
-#define FC_CMD_TIME_OUT 10
+#define FC_CMD_RECOVERY 10
/*
* Error recovery timeout values.
@@ -446,9 +446,16 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
len = fr_len(fp) - sizeof(*fh);
buf = fc_frame_payload_get(fp, 0);
- /* if this I/O is ddped, update xfer len */
- fc_fcp_ddp_done(fsp);
-
+ /*
+ * if this I/O is ddped then clear it
+ * and initiate recovery since data
+ * frames are expected to be placed
+ * directly in that case.
+ */
+ if (fsp->xfer_ddp != FC_XID_UNKNOWN) {
+ fc_fcp_ddp_done(fsp);
+ goto err;
+ }
if (offset + len > fsp->data_len) {
/* this should never happen */
if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
@@ -456,8 +463,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
goto crc_err;
FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
"data_len %x\n", len, offset, fsp->data_len);
- fc_fcp_retry_cmd(fsp);
- return;
+ goto err;
}
if (offset != fsp->xfer_len)
fsp->state |= FC_SRB_DISCONTIG;
@@ -478,13 +484,14 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->ErrorFrames++;
- /* FIXME - per cpu count, not total count! */
+ /* per cpu count, not total count, but OK for limit */
if (stats->InvalidCRCCount++ < 5)
printk(KERN_WARNING "libfc: CRC error on data "
- "frame for port (%6x)\n",
- fc_host_port_id(lport->host));
+ "frame for port (%6.6x)\n",
+ lport->port_id);
+ put_cpu();
/*
* Assume the frame is total garbage.
* We may have copied it over the good part
@@ -493,7 +500,7 @@ crc_err:
* Otherwise, ignore it.
*/
if (fsp->state & FC_SRB_DISCONTIG)
- fc_fcp_retry_cmd(fsp);
+ goto err;
return;
}
}
@@ -509,6 +516,9 @@ crc_err:
if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
fc_fcp_complete_locked(fsp);
+ return;
+err:
+ fc_fcp_recovery(fsp);
}
/**
@@ -834,8 +844,7 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
* exit here
*/
return;
- } else
- goto err;
+ }
}
if (flags & FCP_SNS_LEN_VAL) {
snsl = ntohl(rp_ex->fr_sns_len);
@@ -885,7 +894,7 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
return;
}
fsp->status_code = FC_DATA_OVRRUN;
- FC_FCP_DBG(fsp, "tgt %6x xfer len %zx greater than expected, "
+ FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx greater than expected, "
"len %x, data len %x\n",
fsp->rport->port_id,
fsp->xfer_len, expected_len, fsp->data_len);
@@ -1100,7 +1109,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
rpriv = rport->dd_data;
fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
- fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP,
+ rpriv->local_port->port_id, FC_TYPE_FCP,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy,
@@ -1341,7 +1350,7 @@ static void fc_fcp_timeout(unsigned long data)
else if (fsp->state & FC_SRB_RCV_STATUS)
fc_fcp_complete_locked(fsp);
else
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
unlock:
fc_fcp_unlock_pkt(fsp);
@@ -1373,7 +1382,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
fr_seq(fp) = fsp->seq_ptr;
fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
- fc_host_port_id(rpriv->local_port->host), FC_TYPE_ELS,
+ rpriv->local_port->port_id, FC_TYPE_ELS,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC,
fc_fcp_rec_resp, fsp,
@@ -1385,7 +1394,7 @@ retry:
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
else
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
}
/**
@@ -1454,7 +1463,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
fc_fcp_retry_cmd(fsp);
break;
}
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
break;
}
} else if (opcode == ELS_LS_ACC) {
@@ -1553,7 +1562,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
break;
default:
- FC_FCP_DBG(fsp, "REC %p fid %x error unexpected error %d\n",
+ FC_FCP_DBG(fsp, "REC %p fid %6.6x error unexpected error %d\n",
fsp, fsp->rport->port_id, error);
fsp->status_code = FC_CMD_PLOGO;
/* fall through */
@@ -1563,13 +1572,13 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
* Assume REC or LS_ACC was lost.
* The exchange manager will have aborted REC, so retry.
*/
- FC_FCP_DBG(fsp, "REC fid %x error error %d retry %d/%d\n",
+ FC_FCP_DBG(fsp, "REC fid %6.6x error error %d retry %d/%d\n",
fsp->rport->port_id, error, fsp->recov_retry,
FC_MAX_RECOV_RETRY);
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
break;
}
fc_fcp_unlock_pkt(fsp);
@@ -1578,12 +1587,12 @@ out:
}
/**
- * fc_timeout_error() - Handler for fcp_pkt timeouts
- * @fsp: The FCP packt that has timed out
+ * fc_fcp_recovery() - Handler for fcp_pkt recovery
+ * @fsp: The FCP pkt that needs to be aborted
*/
-static void fc_timeout_error(struct fc_fcp_pkt *fsp)
+static void fc_fcp_recovery(struct fc_fcp_pkt *fsp)
{
- fsp->status_code = FC_CMD_TIME_OUT;
+ fsp->status_code = FC_CMD_RECOVERY;
fsp->cdb_status = 0;
fsp->io_status = 0;
/*
@@ -1631,7 +1640,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
srr->srr_rel_off = htonl(offset);
fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
- fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP,
+ rpriv->local_port->port_id, FC_TYPE_FCP,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
@@ -1689,7 +1698,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
break;
case ELS_LS_RJT:
default:
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
break;
}
fc_fcp_unlock_pkt(fsp);
@@ -1715,7 +1724,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
break;
case -FC_EX_CLOSED: /* e.g., link failure */
/* fall through */
@@ -1810,7 +1819,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
/*
* setup the data direction
*/
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
fsp->req_flags = FC_SRB_READ;
stats->InputRequests++;
@@ -1823,6 +1832,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
fsp->req_flags = 0;
stats->ControlRequests++;
}
+ put_cpu();
fsp->tgt_flags = rpriv->flags;
@@ -1907,6 +1917,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
}
break;
case FC_ERROR:
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_ERROR\n");
sc_cmd->result = DID_ERROR << 16;
break;
case FC_DATA_UNDRUN:
@@ -1915,12 +1927,19 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
* scsi status is good but transport level
* underrun.
*/
- sc_cmd->result = (fsp->state & FC_SRB_RCV_STATUS ?
- DID_OK : DID_ERROR) << 16;
+ if (fsp->state & FC_SRB_RCV_STATUS) {
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml"
+ " due to FC_DATA_UNDRUN (trans)\n");
+ sc_cmd->result = DID_ERROR << 16;
+ }
} else {
/*
* scsi got underrun, this is an error
*/
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_DATA_UNDRUN (scsi)\n");
CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
}
@@ -1929,12 +1948,16 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
/*
* overrun is an error
*/
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_DATA_OVRRUN\n");
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
break;
case FC_CMD_ABORTED:
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_CMD_ABORTED\n");
sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
break;
- case FC_CMD_TIME_OUT:
+ case FC_CMD_RECOVERY:
sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
break;
case FC_CMD_RESET:
@@ -1944,6 +1967,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
sc_cmd->result = (DID_NO_CONNECT << 16);
break;
default:
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to unknown error\n");
sc_cmd->result = (DID_ERROR << 16);
break;
}
@@ -2028,7 +2053,7 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
if (lport->state != LPORT_ST_READY)
return rc;
- FC_SCSI_DBG(lport, "Resetting rport (%6x)\n", rport->port_id);
+ FC_SCSI_DBG(lport, "Resetting rport (%6.6x)\n", rport->port_id);
fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO);
if (fsp == NULL) {
@@ -2076,12 +2101,12 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
if (fc_fcp_lport_queue_ready(lport)) {
shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
- "on port (%6x)\n", fc_host_port_id(lport->host));
+ "on port (%6.6x)\n", lport->port_id);
return SUCCESS;
} else {
shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
- "port (%6x) is not ready.\n",
- fc_host_port_id(lport->host));
+ "port (%6.6x) is not ready.\n",
+ lport->port_id);
return FAILED;
}
}
@@ -2166,7 +2191,7 @@ void fc_fcp_destroy(struct fc_lport *lport)
if (!list_empty(&si->scsi_pkt_queue))
printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
- "port (%6x)\n", fc_host_port_id(lport->host));
+ "port (%6.6x)\n", lport->port_id);
mempool_destroy(si->scsi_pkt_pool);
kfree(si);
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index 741fd5c..f5c0ca4 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -45,9 +45,9 @@ extern unsigned int fc_debug_logging;
#define FC_LPORT_DBG(lport, fmt, args...) \
FC_CHECK_LOGGING(FC_LPORT_LOGGING, \
- printk(KERN_INFO "host%u: lport %6x: " fmt, \
+ printk(KERN_INFO "host%u: lport %6.6x: " fmt, \
(lport)->host->host_no, \
- fc_host_port_id((lport)->host), ##args))
+ (lport)->port_id, ##args))
#define FC_DISC_DBG(disc, fmt, args...) \
FC_CHECK_LOGGING(FC_DISC_LOGGING, \
@@ -57,7 +57,7 @@ extern unsigned int fc_debug_logging;
#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \
FC_CHECK_LOGGING(FC_RPORT_LOGGING, \
- printk(KERN_INFO "host%u: rport %6x: " fmt, \
+ printk(KERN_INFO "host%u: rport %6.6x: " fmt, \
(lport)->host->host_no, \
(port_id), ##args))
@@ -66,7 +66,7 @@ extern unsigned int fc_debug_logging;
#define FC_FCP_DBG(pkt, fmt, args...) \
FC_CHECK_LOGGING(FC_FCP_LOGGING, \
- printk(KERN_INFO "host%u: fcp: %6x: " fmt, \
+ printk(KERN_INFO "host%u: fcp: %6.6x: " fmt, \
(pkt)->lp->host->host_no, \
pkt->rport->port_id, ##args))
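
The format-string hunks above and below replace "%x"/"%6x" with "%6.6x" when
printing FC_IDs: the added precision forces at least six hex digits, so a
24-bit port ID is always rendered zero-padded rather than space-padded. A
tiny userspace sketch, illustrative only and not part of the patch:

/*
 * Sketch: how "%6x" and "%6.6x" render 24-bit FC_IDs.
 */
#include <stdio.h>

int main(void)
{
	unsigned int port_id = 0x0101ef;
	unsigned int low_id = 0x00000a;

	printf("[%6x] vs [%6.6x]\n", port_id, port_id); /* [ 101ef] vs [0101ef] */
	printf("[%6x] vs [%6.6x]\n", low_id, low_id);   /* [     a] vs [00000a] */
	return 0;
}
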
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index d126ecf..79c9e3c 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -172,7 +172,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
struct fc_rport_priv *rdata,
enum fc_rport_event event)
{
- FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
+ FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
rdata->ids.port_id);
mutex_lock(&lport->lp_mutex);
@@ -183,7 +183,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
} else {
FC_LPORT_DBG(lport, "Received an READY event "
- "on port (%6x) for the directory "
+ "on port (%6.6x) for the directory "
"server, but the lport is not "
"in the DNS state, it's in the "
"%d state", rdata->ids.port_id,
@@ -228,9 +228,12 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
u64 remote_wwnn)
{
mutex_lock(&lport->disc.disc_mutex);
- if (lport->ptp_rdata)
+ if (lport->ptp_rdata) {
lport->tt.rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+ }
lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
+ kref_get(&lport->ptp_rdata->kref);
lport->ptp_rdata->ids.port_name = remote_wwpn;
lport->ptp_rdata->ids.node_name = remote_wwnn;
mutex_unlock(&lport->disc.disc_mutex);
@@ -241,17 +244,6 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
}
/**
- * fc_get_host_port_type() - Return the port type of the given Scsi_Host
- * @shost: The SCSI host whose port type is to be determined
- */
-void fc_get_host_port_type(struct Scsi_Host *shost)
-{
- /* TODO - currently just NPORT */
- fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
-}
-EXPORT_SYMBOL(fc_get_host_port_type);
-
-/**
* fc_get_host_port_state() - Return the port state of the given Scsi_Host
* @shost: The SCSI host whose port state is to be determined
*/
@@ -572,8 +564,8 @@ void __fc_linkup(struct fc_lport *lport)
*/
void fc_linkup(struct fc_lport *lport)
{
- printk(KERN_INFO "host%d: libfc: Link up on port (%6x)\n",
- lport->host->host_no, fc_host_port_id(lport->host));
+ printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
+ lport->host->host_no, lport->port_id);
mutex_lock(&lport->lp_mutex);
__fc_linkup(lport);
@@ -602,8 +594,8 @@ void __fc_linkdown(struct fc_lport *lport)
*/
void fc_linkdown(struct fc_lport *lport)
{
- printk(KERN_INFO "host%d: libfc: Link down on port (%6x)\n",
- lport->host->host_no, fc_host_port_id(lport->host));
+ printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
+ lport->host->host_no, lport->port_id);
mutex_lock(&lport->lp_mutex);
__fc_linkdown(lport);
@@ -704,8 +696,8 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
break;
case DISC_EV_FAILED:
printk(KERN_ERR "host%d: libfc: "
- "Discovery failed for port (%6x)\n",
- lport->host->host_no, fc_host_port_id(lport->host));
+ "Discovery failed for port (%6.6x)\n",
+ lport->host->host_no, lport->port_id);
mutex_lock(&lport->lp_mutex);
fc_lport_enter_reset(lport);
mutex_unlock(&lport->lp_mutex);
@@ -750,10 +742,14 @@ static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
struct fc_frame *fp)
{
if (port_id)
- printk(KERN_INFO "host%d: Assigned Port ID %6x\n",
+ printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
lport->host->host_no, port_id);
+ lport->port_id = port_id;
+
+ /* Update the fc_host */
fc_host_port_id(lport->host) = port_id;
+
if (lport->tt.lport_set_port_id)
lport->tt.lport_set_port_id(lport, port_id, fp);
}
@@ -797,11 +793,11 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
if (remote_wwpn == lport->wwpn) {
printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
- "with same WWPN %llx\n",
+ "with same WWPN %16.16llx\n",
lport->host->host_no, remote_wwpn);
goto out;
}
- FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);
+ FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);
/*
* XXX what is the right thing to do for FIDs?
@@ -832,7 +828,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
*/
f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
ep = fc_seq_exch(sp);
- fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, remote_fid, local_fid,
FC_TYPE_ELS, f_ctl, 0);
lport->tt.seq_send(lport, sp, fp);
@@ -947,14 +943,18 @@ static void fc_lport_reset_locked(struct fc_lport *lport)
if (lport->dns_rdata)
lport->tt.rport_logoff(lport->dns_rdata);
- lport->ptp_rdata = NULL;
+ if (lport->ptp_rdata) {
+ lport->tt.rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+ lport->ptp_rdata = NULL;
+ }
lport->tt.disc_stop(lport);
lport->tt.exch_mgr_reset(lport, 0, 0);
fc_host_fabric_name(lport->host) = 0;
- if (fc_host_port_id(lport->host))
+ if (lport->port_id)
fc_lport_set_port_id(lport, 0, NULL);
}
@@ -1492,7 +1492,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
lport->r_a_tov = 2 * e_d_tov;
fc_lport_set_port_id(lport, did, fp);
printk(KERN_INFO "host%d: libfc: "
- "Port (%6x) entered "
+ "Port (%6.6x) entered "
"point-to-point mode\n",
lport->host->host_no, did);
fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
@@ -1699,7 +1699,7 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
fh = fc_frame_header_get(fp);
fh->fh_r_ctl = FC_RCTL_ELS_REQ;
hton24(fh->fh_d_id, did);
- hton24(fh->fh_s_id, fc_host_port_id(lport->host));
+ hton24(fh->fh_s_id, lport->port_id);
fh->fh_type = FC_TYPE_ELS;
hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ |
FC_FC_END_SEQ | FC_FC_SEQ_INIT);
@@ -1759,7 +1759,7 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
fh = fc_frame_header_get(fp);
fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
hton24(fh->fh_d_id, did);
- hton24(fh->fh_s_id, fc_host_port_id(lport->host));
+ hton24(fh->fh_s_id, lport->port_id);
fh->fh_type = FC_TYPE_CT;
hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ |
FC_FC_END_SEQ | FC_FC_SEQ_INIT);
diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c
index c68f6c7..dd2b43b 100644
--- a/drivers/scsi/libfc/fc_npiv.c
+++ b/drivers/scsi/libfc/fc_npiv.c
@@ -69,12 +69,15 @@ struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id)
struct fc_lport *lport = NULL;
struct fc_lport *vn_port;
- if (fc_host_port_id(n_port->host) == port_id)
+ if (n_port->port_id == port_id)
return n_port;
+ if (port_id == FC_FID_FLOGI)
+ return n_port; /* for point-to-point */
+
mutex_lock(&n_port->lp_mutex);
list_for_each_entry(vn_port, &n_port->vports, list) {
- if (fc_host_port_id(vn_port->host) == port_id) {
+ if (vn_port->port_id == port_id) {
lport = vn_port;
break;
}
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index b37d0ff..39e440f 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1442,136 +1442,115 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
struct fc_els_spp *spp; /* response spp */
unsigned int len;
unsigned int plen;
- enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
- enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
enum fc_els_spp_resp resp;
struct fc_seq_els_data rjt_data;
u32 f_ctl;
u32 fcp_parm;
u32 roles = FC_RPORT_ROLE_UNKNOWN;
- rjt_data.fp = NULL;
+ rjt_data.fp = NULL;
fh = fc_frame_header_get(rx_fp);
FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
fc_rport_state(rdata));
- switch (rdata->rp_state) {
- case RPORT_ST_PRLI:
- case RPORT_ST_RTV:
- case RPORT_ST_READY:
- case RPORT_ST_ADISC:
- reason = ELS_RJT_NONE;
- break;
- default:
- fc_frame_free(rx_fp);
- return;
- break;
- }
len = fr_len(rx_fp) - sizeof(*fh);
pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
- if (pp == NULL) {
- reason = ELS_RJT_PROT;
- explan = ELS_EXPL_INV_LEN;
- } else {
- plen = ntohs(pp->prli.prli_len);
- if ((plen % 4) != 0 || plen > len) {
- reason = ELS_RJT_PROT;
- explan = ELS_EXPL_INV_LEN;
- } else if (plen < len) {
- len = plen;
- }
- plen = pp->prli.prli_spp_len;
- if ((plen % 4) != 0 || plen < sizeof(*spp) ||
- plen > len || len < sizeof(*pp)) {
- reason = ELS_RJT_PROT;
- explan = ELS_EXPL_INV_LEN;
- }
- rspp = &pp->spp;
+ if (!pp)
+ goto reject_len;
+ plen = ntohs(pp->prli.prli_len);
+ if ((plen % 4) != 0 || plen > len || plen < 16)
+ goto reject_len;
+ if (plen < len)
+ len = plen;
+ plen = pp->prli.prli_spp_len;
+ if ((plen % 4) != 0 || plen < sizeof(*spp) ||
+ plen > len || len < sizeof(*pp) || plen < 12)
+ goto reject_len;
+ rspp = &pp->spp;
+
+ fp = fc_frame_alloc(lport, len);
+ if (!fp) {
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_INSUF_RES;
+ goto reject;
}
- if (reason != ELS_RJT_NONE ||
- (fp = fc_frame_alloc(lport, len)) == NULL) {
- rjt_data.reason = reason;
- rjt_data.explan = explan;
- lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
- } else {
- sp = lport->tt.seq_start_next(sp);
- WARN_ON(!sp);
- pp = fc_frame_payload_get(fp, len);
- WARN_ON(!pp);
- memset(pp, 0, len);
- pp->prli.prli_cmd = ELS_LS_ACC;
- pp->prli.prli_spp_len = plen;
- pp->prli.prli_len = htons(len);
- len -= sizeof(struct fc_els_prli);
-
- /* reinitialize remote port roles */
- rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
-
- /*
- * Go through all the service parameter pages and build
- * response. If plen indicates longer SPP than standard,
- * use that. The entire response has been pre-cleared above.
- */
- spp = &pp->spp;
- while (len >= plen) {
- spp->spp_type = rspp->spp_type;
- spp->spp_type_ext = rspp->spp_type_ext;
- spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
- resp = FC_SPP_RESP_ACK;
- if (rspp->spp_flags & FC_SPP_RPA_VAL)
- resp = FC_SPP_RESP_NO_PA;
- switch (rspp->spp_type) {
- case 0: /* common to all FC-4 types */
- break;
- case FC_TYPE_FCP:
- fcp_parm = ntohl(rspp->spp_params);
- if (fcp_parm & FCP_SPPF_RETRY)
- rdata->flags |= FC_RP_FLAGS_RETRY;
- rdata->supported_classes = FC_COS_CLASS3;
- if (fcp_parm & FCP_SPPF_INIT_FCN)
- roles |= FC_RPORT_ROLE_FCP_INITIATOR;
- if (fcp_parm & FCP_SPPF_TARG_FCN)
- roles |= FC_RPORT_ROLE_FCP_TARGET;
- rdata->ids.roles = roles;
-
- spp->spp_params =
- htonl(lport->service_params);
- break;
- default:
- resp = FC_SPP_RESP_INVL;
- break;
- }
- spp->spp_flags |= resp;
- len -= plen;
- rspp = (struct fc_els_spp *)((char *)rspp + plen);
- spp = (struct fc_els_spp *)((char *)spp + plen);
- }
+ sp = lport->tt.seq_start_next(sp);
+ WARN_ON(!sp);
+ pp = fc_frame_payload_get(fp, len);
+ WARN_ON(!pp);
+ memset(pp, 0, len);
+ pp->prli.prli_cmd = ELS_LS_ACC;
+ pp->prli.prli_spp_len = plen;
+ pp->prli.prli_len = htons(len);
+ len -= sizeof(struct fc_els_prli);
- /*
- * Send LS_ACC. If this fails, the originator should retry.
- */
- f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
- f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
- ep = fc_seq_exch(sp);
- fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
- FC_TYPE_ELS, f_ctl, 0);
- lport->tt.seq_send(lport, sp, fp);
+ /* reinitialize remote port roles */
+ rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
- /*
- * Get lock and re-check state.
- */
- switch (rdata->rp_state) {
- case RPORT_ST_PRLI:
- fc_rport_enter_ready(rdata);
+ /*
+ * Go through all the service parameter pages and build
+ * response. If plen indicates longer SPP than standard,
+ * use that. The entire response has been pre-cleared above.
+ */
+ spp = &pp->spp;
+ while (len >= plen) {
+ spp->spp_type = rspp->spp_type;
+ spp->spp_type_ext = rspp->spp_type_ext;
+ spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
+ resp = FC_SPP_RESP_ACK;
+
+ switch (rspp->spp_type) {
+ case 0: /* common to all FC-4 types */
break;
- case RPORT_ST_READY:
- case RPORT_ST_ADISC:
+ case FC_TYPE_FCP:
+ fcp_parm = ntohl(rspp->spp_params);
+ if (fcp_parm & FCP_SPPF_RETRY)
+ rdata->flags |= FC_RP_FLAGS_RETRY;
+ rdata->supported_classes = FC_COS_CLASS3;
+ if (fcp_parm & FCP_SPPF_INIT_FCN)
+ roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (fcp_parm & FCP_SPPF_TARG_FCN)
+ roles |= FC_RPORT_ROLE_FCP_TARGET;
+ rdata->ids.roles = roles;
+
+ spp->spp_params = htonl(lport->service_params);
break;
default:
+ resp = FC_SPP_RESP_INVL;
break;
}
+ spp->spp_flags |= resp;
+ len -= plen;
+ rspp = (struct fc_els_spp *)((char *)rspp + plen);
+ spp = (struct fc_els_spp *)((char *)spp + plen);
+ }
+
+ /*
+ * Send LS_ACC. If this fails, the originator should retry.
+ */
+ f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
+ f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+ ep = fc_seq_exch(sp);
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+ FC_TYPE_ELS, f_ctl, 0);
+ lport->tt.seq_send(lport, sp, fp);
+
+ switch (rdata->rp_state) {
+ case RPORT_ST_PRLI:
+ fc_rport_enter_ready(rdata);
+ break;
+ default:
+ break;
}
+ goto drop;
+
+reject_len:
+ rjt_data.reason = ELS_RJT_PROT;
+ rjt_data.explan = ELS_EXPL_INV_LEN;
+reject:
+ lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+drop:
fc_frame_free(rx_fp);
}
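
The fc_rport_recv_prli_req rework above drops the accumulated reason/explan
variables in favour of early length checks that branch to the shared
reject_len/reject/drop labels. A standalone sketch of that validate-then-goto
layout, using hypothetical names rather than the libfc API:

/*
 * Sketch: early validation with shared error labels, reduced to a
 * self-contained function.
 */
#include <stdio.h>
#include <stdlib.h>

enum reject_reason { RJT_NONE, RJT_PROTOCOL, RJT_UNABLE };

static int handle(const unsigned char *buf, size_t len)
{
	enum reject_reason reason;
	unsigned char *resp;

	if (!buf || len < 16 || (len % 4) != 0)
		goto reject_len;		/* malformed request */

	resp = malloc(len);
	if (!resp) {
		reason = RJT_UNABLE;		/* resource shortage */
		goto reject;
	}
	/* ... build and send the accept response here ... */
	free(resp);
	return 0;				/* accepted */

reject_len:
	reason = RJT_PROTOCOL;
reject:
	fprintf(stderr, "rejecting request: reason %d\n", reason);
	return -1;
}

int main(void)
{
	unsigned char good[16] = { 0 };

	return handle(good, sizeof(good)) == 0 ? 0 : 1;
}
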
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 5c92620..8eeb39f 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -421,7 +421,7 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
struct iscsi_conn *conn = tcp_conn->iscsi_conn;
struct hash_desc *rx_hash = NULL;
- if (conn->datadgst_en &
+ if (conn->datadgst_en &&
!(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
rx_hash = tcp_conn->rx_hash;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 88f7446..8c496b5 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -395,12 +395,13 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev,
void sas_ata_task_abort(struct sas_task *task)
{
struct ata_queued_cmd *qc = task->uldd_task;
- struct request_queue *q = qc->scsicmd->device->request_queue;
struct completion *waiting;
- unsigned long flags;
/* Bounce SCSI-initiated commands to the SCSI EH */
if (qc->scsicmd) {
+ struct request_queue *q = qc->scsicmd->device->request_queue;
+ unsigned long flags;
+
spin_lock_irqsave(q->queue_lock, flags);
blk_abort_request(qc->scsicmd->request);
spin_unlock_irqrestore(q->queue_lock, flags);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index b71b6d4..a7890c6 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -1030,8 +1030,6 @@ int __sas_task_abort(struct sas_task *task)
void sas_task_abort(struct sas_task *task)
{
struct scsi_cmnd *sc = task->uldd_task;
- struct request_queue *q = sc->device->request_queue;
- unsigned long flags;
/* Escape for libsas internal commands */
if (!sc) {
@@ -1043,13 +1041,15 @@ void sas_task_abort(struct sas_task *task)
if (dev_is_sata(task->dev)) {
sas_ata_task_abort(task);
- return;
- }
+ } else {
+ struct request_queue *q = sc->device->request_queue;
+ unsigned long flags;
- spin_lock_irqsave(q->queue_lock, flags);
- blk_abort_request(sc->request);
- spin_unlock_irqrestore(q->queue_lock, flags);
- scsi_schedule_eh(sc->device->host);
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_abort_request(sc->request);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ scsi_schedule_eh(sc->device->host);
+ }
}
int sas_slave_alloc(struct scsi_device *scsi_dev)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 565e16d..e35a4c7 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -310,7 +310,9 @@ struct lpfc_vport {
#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
#define FC_FABRIC 0x100 /* We are fabric attached */
+#define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
+#define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
@@ -554,6 +556,7 @@ struct lpfc_hba {
struct lpfc_dmabuf slim2p;
MAILBOX_t *mbox;
+ uint32_t *mbox_ext;
uint32_t *inb_ha_copy;
uint32_t *inb_counter;
uint32_t inb_last_counter;
@@ -622,6 +625,7 @@ struct lpfc_hba {
uint32_t cfg_enable_hba_reset;
uint32_t cfg_enable_hba_heartbeat;
uint32_t cfg_enable_bg;
+ uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
uint32_t cfg_suppress_link_up;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 1849e33..bf33b31 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -869,6 +869,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
int rc = 0;
+ uint32_t max_vpi;
/*
* prevent udev from issuing mailbox commands until the port is
@@ -916,11 +917,17 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
if (axri)
*axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
phba->sli4_hba.max_cfg_param.xri_used;
+
+ /* Account for differences with SLI-3. Get vpi count from
+ * mailbox data and subtract one for max vpi value.
+ */
+ max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
+ (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
+
if (mvpi)
- *mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+ *mvpi = max_vpi;
if (avpi)
- *avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) -
- phba->sli4_hba.max_cfg_param.vpi_used;
+ *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
} else {
if (mrpi)
*mrpi = pmb->un.varRdConfig.max_rpi;
@@ -1925,13 +1932,12 @@ MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
" 2 - select SLI-2 even on SLI-3 capable HBAs,"
" 3 - select SLI-3");
-int lpfc_enable_npiv = 0;
+int lpfc_enable_npiv = 1;
module_param(lpfc_enable_npiv, int, 0);
MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality");
lpfc_param_show(enable_npiv);
-lpfc_param_init(enable_npiv, 0, 0, 1);
-static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO,
- lpfc_enable_npiv_show, NULL);
+lpfc_param_init(enable_npiv, 1, 0, 1);
+static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
/*
# lpfc_suppress_link_up: Bring link up at initialization
@@ -2637,6 +2643,7 @@ static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR,
/**
* sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
+ * @filp: sysfs file
* @kobj: Pointer to the kernel object
* @bin_attr: Attribute object
* @buff: Buffer pointer
@@ -2648,7 +2655,8 @@ static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR,
* applications.
**/
static ssize_t
-sysfs_drvr_stat_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device,
@@ -3356,6 +3364,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
/**
* sysfs_ctlreg_write - Write method for writing to ctlreg
+ * @filp: open sysfs file
* @kobj: kernel kobject that contains the kernel class device.
* @bin_attr: kernel attributes passed to us.
* @buf: contains the data to be written to the adapter IOREG space.
@@ -3373,7 +3382,8 @@ struct device_attribute *lpfc_vport_attrs[] = {
* value of count, buf contents written
**/
static ssize_t
-sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
+sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
size_t buf_off;
@@ -3409,6 +3419,7 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
/**
* sysfs_ctlreg_read - Read method for reading from ctlreg
+ * @filp: open sysfs file
* @kobj: kernel kobject that contains the kernel class device.
* @bin_attr: kernel attributes passed to us.
* @buf: if successful contains the data from the adapter IOREG space.
@@ -3425,7 +3436,8 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
* value of count, buf contents read
**/
static ssize_t
-sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
size_t buf_off;
@@ -3490,6 +3502,7 @@ sysfs_mbox_idle(struct lpfc_hba *phba)
/**
* sysfs_mbox_write - Write method for writing information via mbox
+ * @filp: open sysfs file
* @kobj: kernel kobject that contains the kernel class device.
* @bin_attr: kernel attributes passed to us.
* @buf: contains the data to be written to sysfs mbox.
@@ -3510,7 +3523,8 @@ sysfs_mbox_idle(struct lpfc_hba *phba)
* count number of bytes transferred
**/
static ssize_t
-sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
+sysfs_mbox_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -3565,6 +3579,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
/**
* sysfs_mbox_read - Read method for reading information via mbox
+ * @filp: open sysfs file
* @kobj: kernel kobject that contains the kernel class device.
* @bin_attr: kernel attributes passed to us.
* @buf: contains the data to be read from sysfs mbox.
@@ -3587,7 +3602,8 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
* count number of bytes transferred
**/
static ssize_t
-sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+sysfs_mbox_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index d62b3e4..dcf0882 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -79,6 +79,12 @@ struct lpfc_bsg_iocb {
struct lpfc_bsg_mbox {
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *mb;
+ struct lpfc_dmabuf *rxbmp; /* for BIU diags */
+ struct lpfc_dmabufext *dmp; /* for BIU diags */
+ uint8_t *ext; /* extended mailbox data */
+ uint32_t mbOffset; /* from app */
+ uint32_t inExtWLen; /* from app */
+ uint32_t outExtWLen; /* from app */
/* job waiting for this mbox command to finish */
struct fc_bsg_job *set_job;
@@ -1708,21 +1714,26 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (dmabuf) {
dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
- INIT_LIST_HEAD(&dmabuf->list);
- bpl = (struct ulp_bde64 *) dmabuf->virt;
- memset(bpl, 0, sizeof(*bpl));
- ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
- bpl->addrHigh =
- le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl)));
- bpl->addrLow =
- le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl)));
- bpl->tus.f.bdeFlags = 0;
- bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ if (dmabuf->virt) {
+ INIT_LIST_HEAD(&dmabuf->list);
+ bpl = (struct ulp_bde64 *) dmabuf->virt;
+ memset(bpl, 0, sizeof(*bpl));
+ ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
+ bpl->addrHigh =
+ le32_to_cpu(putPaddrHigh(dmabuf->phys +
+ sizeof(*bpl)));
+ bpl->addrLow =
+ le32_to_cpu(putPaddrLow(dmabuf->phys +
+ sizeof(*bpl)));
+ bpl->tus.f.bdeFlags = 0;
+ bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ }
}
if (cmdiocbq == NULL || rspiocbq == NULL ||
- dmabuf == NULL || bpl == NULL || ctreq == NULL) {
+ dmabuf == NULL || bpl == NULL || ctreq == NULL ||
+ dmabuf->virt == NULL) {
ret_val = ENOMEM;
goto err_get_xri_exit;
}
@@ -1918,9 +1929,11 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (rxbmp != NULL) {
rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
+ if (rxbmp->virt) {
+ INIT_LIST_HEAD(&rxbmp->list);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
+ }
}
if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
@@ -2174,14 +2187,16 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
if (txbmp) {
txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
- INIT_LIST_HEAD(&txbmp->list);
- txbpl = (struct ulp_bde64 *) txbmp->virt;
- if (txbpl)
+ if (txbmp->virt) {
+ INIT_LIST_HEAD(&txbmp->list);
+ txbpl = (struct ulp_bde64 *) txbmp->virt;
txbuffer = diag_cmd_data_alloc(phba,
txbpl, full_size, 0);
+ }
}
- if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) {
+ if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
+ !txbmp->virt) {
rc = -ENOMEM;
goto err_loopback_test_exit;
}
@@ -2377,35 +2392,90 @@ void
lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
- MAILBOX_t *pmb;
- MAILBOX_t *mb;
struct fc_bsg_job *job;
uint32_t size;
unsigned long flags;
+ uint8_t *to;
+ uint8_t *from;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = pmboxq->context1;
+ /* job already timed out? */
if (!dd_data) {
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
return;
}
- pmb = &dd_data->context_un.mbox.pmboxq->u.mb;
- mb = dd_data->context_un.mbox.mb;
+ /* build the outgoing buffer to do an sg copy
+ * the format is the response mailbox followed by any extended
+ * mailbox data
+ */
+ from = (uint8_t *)&pmboxq->u.mb;
+ to = (uint8_t *)dd_data->context_un.mbox.mb;
+ memcpy(to, from, sizeof(MAILBOX_t));
+ if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
+ /* copy the extended data if any, count is in words */
+ if (dd_data->context_un.mbox.outExtWLen) {
+ from = (uint8_t *)dd_data->context_un.mbox.ext;
+ to += sizeof(MAILBOX_t);
+ size = dd_data->context_un.mbox.outExtWLen *
+ sizeof(uint32_t);
+ memcpy(to, from, size);
+ } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
+ from = (uint8_t *)dd_data->context_un.mbox.
+ dmp->dma.virt;
+ to += sizeof(MAILBOX_t);
+ size = dd_data->context_un.mbox.dmp->size;
+ memcpy(to, from, size);
+ } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
+ from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
+ virt;
+ to += sizeof(MAILBOX_t);
+ size = pmboxq->u.mb.un.varWords[5];
+ memcpy(to, from, size);
+ } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
+ from = (uint8_t *)dd_data->context_un.
+ mbox.dmp->dma.virt;
+ to += sizeof(MAILBOX_t);
+ size = dd_data->context_un.mbox.dmp->size;
+ memcpy(to, from, size);
+ }
+ }
+
+ from = (uint8_t *)dd_data->context_un.mbox.mb;
job = dd_data->context_un.mbox.set_job;
- memcpy(mb, pmb, sizeof(*pmb));
- size = job->request_payload.payload_len;
+ size = job->reply_payload.payload_len;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
- mb, size);
+ from, size);
job->reply->result = 0;
+
dd_data->context_un.mbox.set_job = NULL;
job->dd_data = NULL;
job->job_done(job);
+ /* need to hold the lock until we call job done to hold off
+ * the timeout handler returning to the midlayer while
+	 * we are still processing the job
+ */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ kfree(dd_data->context_un.mbox.mb);
mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
- kfree(mb);
+ kfree(dd_data->context_un.mbox.ext);
+ if (dd_data->context_un.mbox.dmp) {
+ dma_free_coherent(&phba->pcidev->dev,
+ dd_data->context_un.mbox.dmp->size,
+ dd_data->context_un.mbox.dmp->dma.virt,
+ dd_data->context_un.mbox.dmp->dma.phys);
+ kfree(dd_data->context_un.mbox.dmp);
+ }
+ if (dd_data->context_un.mbox.rxbmp) {
+ lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
+ dd_data->context_un.mbox.rxbmp->phys);
+ kfree(dd_data->context_un.mbox.rxbmp);
+ }
kfree(dd_data);
return;
}
@@ -2464,10 +2534,12 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
case MBX_SET_DEBUG:
case MBX_WRITE_WWN:
case MBX_SLI4_CONFIG:
+ case MBX_READ_EVENT_LOG:
case MBX_READ_EVENT_LOG_STATUS:
case MBX_WRITE_EVENT_LOG:
case MBX_PORT_CAPABILITIES:
case MBX_PORT_IOV_CONTROL:
+ case MBX_RUN_BIU_DIAG64:
break;
case MBX_SET_VARIABLE:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -2482,8 +2554,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
phba->fc_topology = TOPOLOGY_PT_PT;
}
break;
- case MBX_RUN_BIU_DIAG64:
- case MBX_READ_EVENT_LOG:
case MBX_READ_SPARM64:
case MBX_READ_LA:
case MBX_READ_LA64:
@@ -2518,97 +2588,365 @@ static uint32_t
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
struct lpfc_vport *vport)
{
- LPFC_MBOXQ_t *pmboxq;
- MAILBOX_t *pmb;
- MAILBOX_t *mb;
- struct bsg_job_data *dd_data;
+ LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
+ MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
+ /* a 4k buffer to hold the mb and extended data from/to the bsg */
+ MAILBOX_t *mb = NULL;
+ struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
uint32_t size;
+ struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
+ struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
+ struct ulp_bde64 *rxbpl = NULL;
+ struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ uint8_t *ext = NULL;
int rc = 0;
+ uint8_t *from;
+
+ /* in case no data is transferred */
+ job->reply->reply_payload_rcv_len = 0;
+
+ /* check if requested extended data lengths are valid */
+ if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) ||
+ (mbox_req->outExtWLen > MAILBOX_EXT_SIZE)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2727 Failed allocation of dd_data\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto job_done;
}
- mb = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
if (!mb) {
- kfree(dd_data);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto job_done;
}
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
- kfree(dd_data);
- kfree(mb);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto job_done;
}
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
size = job->request_payload.payload_len;
- job->reply->reply_payload_rcv_len =
- sg_copy_to_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt,
- mb, size);
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ mb, size);
rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
- if (rc != 0) {
- kfree(dd_data);
- kfree(mb);
- mempool_free(pmboxq, phba->mbox_mem_pool);
- return rc; /* must be negative */
- }
+ if (rc != 0)
+ goto job_done; /* must be negative */
- memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pmb = &pmboxq->u.mb;
memcpy(pmb, mb, sizeof(*pmb));
pmb->mbxOwner = OWN_HOST;
- pmboxq->context1 = NULL;
pmboxq->vport = vport;
- if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
- rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
- if (rc != MBX_SUCCESS) {
- if (rc != MBX_TIMEOUT) {
- kfree(dd_data);
- kfree(mb);
- mempool_free(pmboxq, phba->mbox_mem_pool);
+ /* If HBA encountered an error attention, allow only DUMP
+ * or RESTART mailbox commands until the HBA is restarted.
+ */
+ if (phba->pport->stopped &&
+ pmb->mbxCommand != MBX_DUMP_MEMORY &&
+ pmb->mbxCommand != MBX_RESTART &&
+ pmb->mbxCommand != MBX_WRITE_VPARMS &&
+ pmb->mbxCommand != MBX_WRITE_WWN)
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "2797 mbox: Issued mailbox cmd "
+ "0x%x while in stopped state.\n",
+ pmb->mbxCommand);
+
+ /* Don't allow mailbox commands to be sent when blocked
+ * or when in the middle of discovery
+ */
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+ rc = -EAGAIN;
+ goto job_done;
+ }
+
+ /* extended mailbox commands will need an extended buffer */
+ if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
+ ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
+ if (!ext) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ /* any data for the device? */
+ if (mbox_req->inExtWLen) {
+ from = (uint8_t *)mb;
+ from += sizeof(MAILBOX_t);
+ memcpy((uint8_t *)ext, from,
+ mbox_req->inExtWLen * sizeof(uint32_t));
+ }
+
+ pmboxq->context2 = ext;
+ pmboxq->in_ext_byte_len =
+ mbox_req->inExtWLen * sizeof(uint32_t);
+ pmboxq->out_ext_byte_len =
+ mbox_req->outExtWLen * sizeof(uint32_t);
+ pmboxq->mbox_offset_word = mbox_req->mbOffset;
+ }
+
+ /* biu diag will need a kernel buffer to transfer the data
+ * allocate our own buffer and setup the mailbox command to
+ * use ours
+ */
+ if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
+ uint32_t transmit_length = pmb->un.varWords[1];
+ uint32_t receive_length = pmb->un.varWords[4];
+ /* transmit length cannot be greater than receive length or
+ * mailbox extension size
+ */
+ if ((transmit_length > receive_length) ||
+ (transmit_length > MAILBOX_EXT_SIZE)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
+
+ rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!rxbmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+ if (!rxbmp->virt) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&rxbmp->list);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
+ if (!dmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&dmp->dma.list);
+ pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
+ putPaddrHigh(dmp->dma.phys);
+ pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
+ putPaddrLow(dmp->dma.phys);
+
+ pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
+ putPaddrHigh(dmp->dma.phys +
+ pmb->un.varBIUdiag.un.s2.
+ xmit_bde64.tus.f.bdeSize);
+ pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
+ putPaddrLow(dmp->dma.phys +
+ pmb->un.varBIUdiag.un.s2.
+ xmit_bde64.tus.f.bdeSize);
+
+ /* copy the transmit data found in the mailbox extension area */
+ from = (uint8_t *)mb;
+ from += sizeof(MAILBOX_t);
+ memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
+ } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
+ struct READ_EVENT_LOG_VAR *rdEventLog =
+			&pmb->un.varRdEventLog;
+ uint32_t receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
+ uint32_t mode = bf_get(lpfc_event_log, rdEventLog);
+
+ /* receive length cannot be greater than mailbox
+ * extension size
+ */
+ if (receive_length > MAILBOX_EXT_SIZE) {
+ rc = -ERANGE;
+ goto job_done;
+ }
+
+ /* mode zero uses a bde like biu diags command */
+ if (mode == 0) {
+
+ /* rebuild the command for sli4 using our own buffers
+ * like we do for biu diags
+ */
+
+ rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!rxbmp) {
+ rc = -ENOMEM;
+ goto job_done;
}
- return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
+
+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ if (rxbpl) {
+ INIT_LIST_HEAD(&rxbmp->list);
+ dmp = diag_cmd_data_alloc(phba, rxbpl,
+ receive_length, 0);
+ }
+
+ if (!dmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&dmp->dma.list);
+ pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
+ pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
}
+ } else if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
+ /* rebuild the command for sli4 using our own buffers
+ * like we do for biu diags
+ */
+ uint32_t receive_length = pmb->un.varWords[2];
+ /* receive length cannot be greater than mailbox
+ * extension size
+ */
+ if ((receive_length == 0) ||
+ (receive_length > MAILBOX_EXT_SIZE)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
- memcpy(mb, pmb, sizeof(*pmb));
- job->reply->reply_payload_rcv_len =
- sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt,
- mb, size);
- kfree(dd_data);
- kfree(mb);
- mempool_free(pmboxq, phba->mbox_mem_pool);
- /* not waiting mbox already done */
- return 0;
+ rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!rxbmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+ if (!rxbmp->virt) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&rxbmp->list);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
+ 0);
+ if (!dmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&dmp->dma.list);
+ pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
+ pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
+ } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
+ pmb->un.varUpdateCfg.co) {
+ struct ulp_bde64 *bde =
+ (struct ulp_bde64 *)&pmb->un.varWords[4];
+
+ /* bde size cannot be greater than mailbox ext size */
+ if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) {
+ rc = -ERANGE;
+ goto job_done;
+ }
+
+ rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!rxbmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+ if (!rxbmp->virt) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&rxbmp->list);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ dmp = diag_cmd_data_alloc(phba, rxbpl,
+ bde->tus.f.bdeSize, 0);
+ if (!dmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&dmp->dma.list);
+ bde->addrHigh = putPaddrHigh(dmp->dma.phys);
+ bde->addrLow = putPaddrLow(dmp->dma.phys);
+
+ /* copy the transmit data found in the mailbox
+ * extension area
+ */
+ from = (uint8_t *)mb;
+ from += sizeof(MAILBOX_t);
+ memcpy((uint8_t *)dmp->dma.virt, from,
+ bde->tus.f.bdeSize);
+ }
}
+ dd_data->context_un.mbox.rxbmp = rxbmp;
+ dd_data->context_un.mbox.dmp = dmp;
+
/* setup wake call as IOCB callback */
pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
+
/* setup context field to pass wait_queue pointer to wake function */
pmboxq->context1 = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->context_un.mbox.pmboxq = pmboxq;
dd_data->context_un.mbox.mb = mb;
dd_data->context_un.mbox.set_job = job;
+ dd_data->context_un.mbox.ext = ext;
+ dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
+ dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
+ dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
job->dd_data = dd_data;
+
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+ (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
+ goto job_done;
+ }
+
+ /* job finished, copy the data */
+ memcpy(mb, pmb, sizeof(*pmb));
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ mb, size);
+ /* not waiting mbox already done */
+ rc = 0;
+ goto job_done;
+ }
+
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
- if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
- kfree(dd_data);
- kfree(mb);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
+ return 1; /* job started */
+
+job_done:
+ /* common exit for error or job completed inline */
+ kfree(mb);
+ if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
- return -EIO;
+ kfree(ext);
+ if (dmp) {
+ dma_free_coherent(&phba->pcidev->dev,
+ dmp->size, dmp->dma.virt,
+ dmp->dma.phys);
+ kfree(dmp);
}
+ if (rxbmp) {
+ lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
+ kfree(rxbmp);
+ }
+ kfree(dd_data);
- return 1;
+ return rc;
}
/**
@@ -2633,7 +2971,12 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
goto job_error;
}
- if (job->request_payload.payload_len != PAGE_SIZE) {
+ if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
rc = -EINVAL;
goto job_error;
}
@@ -3094,6 +3437,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
job->dd_data = NULL;
job->reply->reply_payload_rcv_len = 0;
job->reply->result = -EAGAIN;
+ /* the mbox completion handler can now be run */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->job_done(job);
break;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 5bc6308..a2c33e7 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -91,11 +91,12 @@ struct get_mgmt_rev_reply {
struct MgmtRevInfo info;
};
+#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
struct dfc_mbox_req {
uint32_t command;
+ uint32_t mbOffset;
uint32_t inExtWLen;
uint32_t outExtWLen;
- uint8_t mbOffset;
};
/* Used for menlo command or menlo data. The xri is only used for menlo data */
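
With this change the BSG mailbox pass-through exchanges a fixed BSG_MBOX_SIZE
(4096-byte) payload in each direction: the MAILBOX_t command image comes
first and any extended data follows immediately after it, with
inExtWLen/outExtWLen giving the extended lengths in 32-bit words. A
userspace-style sketch of that layout, using stand-in sizes instead of the
driver headers:

/*
 * Sketch: laying out a 4 KB BSG mailbox payload.  MBOX_CMD_BYTES stands in
 * for sizeof(MAILBOX_t) (32 words in this driver).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BSG_MBOX_SIZE	4096		/* whole payload */
#define MBOX_CMD_BYTES	(32 * 4)	/* mailbox command image */

int main(void)
{
	uint8_t payload[BSG_MBOX_SIZE];
	uint32_t ext_words = 8;		/* e.g. inExtWLen */

	memset(payload, 0, sizeof(payload));
	/* bytes [0, MBOX_CMD_BYTES) hold the MAILBOX_t command image */
	/* extended data starts right after the command image */
	memset(payload + MBOX_CMD_BYTES, 0xab, ext_words * sizeof(uint32_t));

	printf("command bytes %d, extended bytes %zu\n",
	       MBOX_CMD_BYTES, ext_words * sizeof(uint32_t));
	return 0;
}
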
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 5087c42..fbc9bae 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -65,6 +65,7 @@ void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
void lpfc_retry_pport_discovery(struct lpfc_hba *);
+void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 2851d75ff..36257a6 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -38,6 +38,7 @@ enum lpfc_work_type {
LPFC_EVT_ELS_RETRY,
LPFC_EVT_DEV_LOSS,
LPFC_EVT_FASTPATH_MGMT_EVT,
+ LPFC_EVT_RESET_HBA,
};
/* structure used to queue event to the discovery tasklet */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 5fbdb22..c4c7f0a 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -584,6 +584,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(shost->host_lock);
lpfc_unreg_rpi(vport, np);
}
+ lpfc_cleanup_pending_mbox(vport);
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
@@ -864,6 +865,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
+ vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
spin_unlock_irq(shost->host_lock);
/*
@@ -893,11 +895,14 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (!rc) {
/* Mark the FCF discovery process done */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS,
- "2769 FLOGI successful on FCF record: "
- "current_fcf_index:x%x, terminate FCF "
- "round robin failover process\n",
- phba->fcf.current_rec.fcf_indx);
+ if (phba->hba_flag & HBA_FIP_SUPPORT)
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
+ LOG_ELS,
+ "2769 FLOGI successful on FCF "
+ "record: current_fcf_index:"
+ "x%x, terminate FCF round "
+ "robin failover process\n",
+ phba->fcf.current_rec.fcf_indx);
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
spin_unlock_irq(&phba->hbalock);
@@ -5366,7 +5371,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
sizeof(struct lpfc_name));
pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
cmdiocbp->context2)->virt);
- lsrjt_event.command = *pcmd;
+ lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
@@ -6050,7 +6055,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
- if (vport->port_type == LPFC_PHYSICAL_PORT)
+ if (vport->port_type == LPFC_PHYSICAL_PORT
+ && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
lpfc_initial_flogi(vport);
else
lpfc_initial_fdisc(vport);
@@ -6286,6 +6292,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
+ vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
vport->fc_flag |= FC_FABRIC;
if (vport->phba->fc_topology == TOPOLOGY_LOOP)
vport->fc_flag |= FC_PUBLIC_LOOP;
@@ -6310,11 +6317,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(shost->host_lock);
lpfc_unreg_rpi(vport, np);
}
+ lpfc_cleanup_pending_mbox(vport);
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
if (phba->sli_rev == LPFC_SLI_REV4)
vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ else
+ vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
spin_unlock_irq(shost->host_lock);
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e1466ee..1f87b4f 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -475,6 +475,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
lpfc_send_fastpath_evt(phba, evtp);
free_evt = 0;
break;
+ case LPFC_EVT_RESET_HBA:
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_reset_hba(phba);
+ break;
}
if (free_evt)
kfree(evtp);
@@ -1531,7 +1535,37 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
}
/**
- * lpfc_sli4_fcf_rec_mbox_parse - parse non-embedded fcf record mailbox command
+ * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_cnt: number of eligible fcf record seen so far.
+ *
+ * This function makes a running random selection decision on which FCF
+ * record to use through a sequence of @fcf_cnt eligible FCF records with
+ * equal probability. To perform integer manipulation of random numbers with
+ * size uint32_t, the lower 16 bits of the 32-bit random number returned
+ * from random32() are taken as the random number generated.
+ *
+ * Returns true when outcome is for the newly read FCF record should be
+ * chosen; otherwise, return false when outcome is for keeping the previously
+ * chosen FCF record.
+ **/
+static bool
+lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
+{
+ uint32_t rand_num;
+
+ /* Get 16-bit uniform random number */
+ rand_num = (0xFFFF & random32());
+
+ /* Decision with probability 1/fcf_cnt */
+ if ((fcf_cnt * rand_num) < 0xFFFF)
+ return true;
+ else
+ return false;
+}
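
The helper above keeps a "reservoir" of size one: the n-th eligible FCF
record replaces the current pick with probability 1/n (approximated by the
fcf_cnt * rand_num < 0xFFFF test), which leaves every record in the sequence
equally likely to be the final selection. A userspace simulation of the same
rule, illustrative only:

/*
 * Sketch: running 1-in-n selection over four candidates; each candidate
 * should be chosen roughly 25% of the time.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	int counts[4] = { 0 };

	srand((unsigned int)time(NULL));
	for (int trial = 0; trial < 100000; trial++) {
		int chosen = 0;

		for (int n = 1; n <= 4; n++)
			if (rand() % n == 0)	/* keep with probability 1/n */
				chosen = n - 1;
		counts[chosen]++;
	}
	for (int i = 0; i < 4; i++)
		printf("candidate %d picked %d times\n", i, counts[i]);
	return 0;
}
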
+
+/**
+ * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox object.
* @next_fcf_index: pointer to holder of next fcf index.
@@ -1592,7 +1626,9 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
new_fcf_record = (struct fcf_record *)(virt_addr +
sizeof(struct lpfc_mbx_read_fcf_tbl));
lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
- sizeof(struct fcf_record));
+ offsetof(struct fcf_record, vlan_bitmap));
+ new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
+ new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);
return new_fcf_record;
}
@@ -1679,6 +1715,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
uint16_t fcf_index, next_fcf_index;
struct lpfc_fcf_rec *fcf_rec = NULL;
uint16_t vlan_id;
+ uint32_t seed;
+ bool select_new_fcf;
int rc;
/* If there is pending FCoE event restart FCF table scan */
@@ -1809,9 +1847,21 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* than the driver FCF record, use the new record.
*/
if (new_fcf_record->fip_priority < fcf_rec->priority) {
- /* Choose this FCF record */
+ /* Choose the new FCF record with lower priority */
__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
addr_mode, vlan_id, 0);
+ /* Reset running random FCF selection count */
+ phba->fcf.eligible_fcf_cnt = 1;
+ } else if (new_fcf_record->fip_priority == fcf_rec->priority) {
+ /* Update running random FCF selection count */
+ phba->fcf.eligible_fcf_cnt++;
+ select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
+ phba->fcf.eligible_fcf_cnt);
+ if (select_new_fcf)
+ /* Choose the new FCF by random selection */
+ __lpfc_update_fcf_record(phba, fcf_rec,
+ new_fcf_record,
+ addr_mode, vlan_id, 0);
}
spin_unlock_irq(&phba->hbalock);
goto read_next_fcf;
@@ -1825,6 +1875,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
addr_mode, vlan_id, (boot_flag ?
BOOT_ENABLE : 0));
phba->fcf.fcf_flag |= FCF_AVAILABLE;
+ /* Setup initial running random FCF selection count */
+ phba->fcf.eligible_fcf_cnt = 1;
+ /* Seeding the random number generator for random selection */
+ seed = (uint32_t)(0xFFFFFFFF & jiffies);
+ srandom32(seed);
}
spin_unlock_irq(&phba->hbalock);
goto read_next_fcf;
@@ -2686,11 +2741,18 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
switch (mb->mbxStatus) {
case 0x0011:
case 0x0020:
- case 0x9700:
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0911 cmpl_unreg_vpi, mb status = 0x%x\n",
mb->mbxStatus);
break;
+ /* If VPI is busy, reset the HBA */
+ case 0x9700:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
+ vport->vpi, mb->mbxStatus);
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_workq_post_event(phba, NULL, NULL,
+ LPFC_EVT_RESET_HBA);
}
spin_lock_irq(shost->host_lock);
vport->vpi_state &= ~LPFC_VPI_REGISTERED;
@@ -2965,7 +3027,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
- lpfc_start_fdiscs(phba);
+		/* when the physical port receives a LOGO, do not start
+		 * vport discovery */
+ if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
+ lpfc_start_fdiscs(phba);
+ else
+ vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
lpfc_do_scr_ns_plogi(phba, vport);
}
@@ -3177,7 +3244,6 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (new_state == NLP_STE_UNMAPPED_NODE) {
- ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
ndlp->nlp_type |= NLP_FC_NODE;
}
@@ -4935,6 +5001,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
if (ndlp)
lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
+ lpfc_cleanup_pending_mbox(vports[i]);
lpfc_mbx_unreg_vpi(vports[i]);
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 89ff7c0..e654d01 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1565,95 +1565,83 @@ enum lpfc_protgrp_type {
};
/* PDE Descriptors */
-#define LPFC_PDE1_DESCRIPTOR 0x81
-#define LPFC_PDE2_DESCRIPTOR 0x82
-#define LPFC_PDE3_DESCRIPTOR 0x83
-
-/* BlockGuard Profiles */
-enum lpfc_bg_prof_codes {
- LPFC_PROF_INVALID,
- LPFC_PROF_A1 = 128, /* Full Protection */
- LPFC_PROF_A2, /* Disabled Protection Checks:A2~A4 */
- LPFC_PROF_A3,
- LPFC_PROF_A4,
- LPFC_PROF_B1, /* Embedded DIFs: B1~B3 */
- LPFC_PROF_B2,
- LPFC_PROF_B3,
- LPFC_PROF_C1, /* Separate DIFs: C1~C3 */
- LPFC_PROF_C2,
- LPFC_PROF_C3,
- LPFC_PROF_D1, /* Full Protection */
- LPFC_PROF_D2, /* Partial Protection & Check Disabling */
- LPFC_PROF_D3,
- LPFC_PROF_E1, /* E1~E4:out - check-only, in - update apptag */
- LPFC_PROF_E2,
- LPFC_PROF_E3,
- LPFC_PROF_E4,
- LPFC_PROF_F1, /* Full Translation - F1 Prot Descriptor */
- /* F1 Translation BDE */
- LPFC_PROF_ANT1, /* TCP checksum, DIF inline with data buffers */
- LPFC_PROF_AST1, /* TCP checksum, DIF split from data buffer */
- LPFC_PROF_ANT2,
- LPFC_PROF_AST2
+#define LPFC_PDE5_DESCRIPTOR 0x85
+#define LPFC_PDE6_DESCRIPTOR 0x86
+#define LPFC_PDE7_DESCRIPTOR 0x87
+
+/* BlockGuard Opcodes */
+#define BG_OP_IN_NODIF_OUT_CRC 0x0
+#define BG_OP_IN_CRC_OUT_NODIF 0x1
+#define BG_OP_IN_NODIF_OUT_CSUM 0x2
+#define BG_OP_IN_CSUM_OUT_NODIF 0x3
+#define BG_OP_IN_CRC_OUT_CRC 0x4
+#define BG_OP_IN_CSUM_OUT_CSUM 0x5
+#define BG_OP_IN_CRC_OUT_CSUM 0x6
+#define BG_OP_IN_CSUM_OUT_CRC 0x7
+
+struct lpfc_pde5 {
+ uint32_t word0;
+#define pde5_type_SHIFT 24
+#define pde5_type_MASK 0x000000ff
+#define pde5_type_WORD word0
+#define pde5_rsvd0_SHIFT 0
+#define pde5_rsvd0_MASK 0x00ffffff
+#define pde5_rsvd0_WORD word0
+ uint32_t reftag; /* Reference Tag Value */
+ uint32_t reftagtr; /* Reference Tag Translation Value */
};
-/* BlockGuard error-control defines */
-#define BG_EC_STOP_ERR 0x00
-#define BG_EC_CONT_ERR 0x01
-#define BG_EC_IGN_UNINIT_STOP_ERR 0x10
-#define BG_EC_IGN_UNINIT_CONT_ERR 0x11
-
-/* PDE (Protection Descriptor Entry) word 0 bit masks and shifts */
-#define PDE_DESC_TYPE_MASK 0xff000000
-#define PDE_DESC_TYPE_SHIFT 24
-#define PDE_BG_PROFILE_MASK 0x00ff0000
-#define PDE_BG_PROFILE_SHIFT 16
-#define PDE_BLOCK_LEN_MASK 0x0000fffc
-#define PDE_BLOCK_LEN_SHIFT 2
-#define PDE_ERR_CTRL_MASK 0x00000003
-#define PDE_ERR_CTRL_SHIFT 0
-/* PDE word 1 bit masks and shifts */
-#define PDE_APPTAG_MASK_MASK 0xffff0000
-#define PDE_APPTAG_MASK_SHIFT 16
-#define PDE_APPTAG_VAL_MASK 0x0000ffff
-#define PDE_APPTAG_VAL_SHIFT 0
-struct lpfc_pde {
- uint32_t parms; /* bitfields of descriptor, prof, len, and ec */
- uint32_t apptag; /* bitfields of app tag maskand app tag value */
- uint32_t reftag; /* reference tag occupying all 32 bits */
+struct lpfc_pde6 {
+ uint32_t word0;
+#define pde6_type_SHIFT 24
+#define pde6_type_MASK 0x000000ff
+#define pde6_type_WORD word0
+#define pde6_rsvd0_SHIFT 0
+#define pde6_rsvd0_MASK 0x00ffffff
+#define pde6_rsvd0_WORD word0
+ uint32_t word1;
+#define pde6_rsvd1_SHIFT 26
+#define pde6_rsvd1_MASK 0x0000003f
+#define pde6_rsvd1_WORD word1
+#define pde6_na_SHIFT 25
+#define pde6_na_MASK 0x00000001
+#define pde6_na_WORD word1
+#define pde6_rsvd2_SHIFT 16
+#define pde6_rsvd2_MASK 0x000001FF
+#define pde6_rsvd2_WORD word1
+#define pde6_apptagtr_SHIFT 0
+#define pde6_apptagtr_MASK 0x0000ffff
+#define pde6_apptagtr_WORD word1
+ uint32_t word2;
+#define pde6_optx_SHIFT 28
+#define pde6_optx_MASK 0x0000000f
+#define pde6_optx_WORD word2
+#define pde6_oprx_SHIFT 24
+#define pde6_oprx_MASK 0x0000000f
+#define pde6_oprx_WORD word2
+#define pde6_nr_SHIFT 23
+#define pde6_nr_MASK 0x00000001
+#define pde6_nr_WORD word2
+#define pde6_ce_SHIFT 22
+#define pde6_ce_MASK 0x00000001
+#define pde6_ce_WORD word2
+#define pde6_re_SHIFT 21
+#define pde6_re_MASK 0x00000001
+#define pde6_re_WORD word2
+#define pde6_ae_SHIFT 20
+#define pde6_ae_MASK 0x00000001
+#define pde6_ae_WORD word2
+#define pde6_ai_SHIFT 19
+#define pde6_ai_MASK 0x00000001
+#define pde6_ai_WORD word2
+#define pde6_bs_SHIFT 16
+#define pde6_bs_MASK 0x00000007
+#define pde6_bs_WORD word2
+#define pde6_apptagval_SHIFT 0
+#define pde6_apptagval_MASK 0x0000ffff
+#define pde6_apptagval_WORD word2
};
-/* inline function to set fields in parms of PDE */
-static inline void
-lpfc_pde_set_bg_parms(struct lpfc_pde *p, u8 desc, u8 prof, u16 len, u8 ec)
-{
- uint32_t *wp = &p->parms;
-
- /* spec indicates that adapter appends two 0's to length field */
- len = len >> 2;
-
- *wp &= 0;
- *wp |= ((desc << PDE_DESC_TYPE_SHIFT) & PDE_DESC_TYPE_MASK);
- *wp |= ((prof << PDE_BG_PROFILE_SHIFT) & PDE_BG_PROFILE_MASK);
- *wp |= ((len << PDE_BLOCK_LEN_SHIFT) & PDE_BLOCK_LEN_MASK);
- *wp |= ((ec << PDE_ERR_CTRL_SHIFT) & PDE_ERR_CTRL_MASK);
- *wp = le32_to_cpu(*wp);
-}
-
-/* inline function to set apptag and reftag fields of PDE */
-static inline void
-lpfc_pde_set_dif_parms(struct lpfc_pde *p, u16 apptagmask, u16 apptagval,
- u32 reftag)
-{
- uint32_t *wp = &p->apptag;
- *wp &= 0;
- *wp |= ((apptagmask << PDE_APPTAG_MASK_SHIFT) & PDE_APPTAG_MASK_MASK);
- *wp |= ((apptagval << PDE_APPTAG_VAL_SHIFT) & PDE_APPTAG_VAL_MASK);
- *wp = le32_to_cpu(*wp);
- wp = &p->reftag;
- *wp = le32_to_cpu(reftag);
-}
-
/* Structure for MB Command LOAD_SM and DOWN_LOAD */
@@ -1744,6 +1732,17 @@ typedef struct {
} un;
} BIU_DIAG_VAR;
+/* Structure for MB command READ_EVENT_LOG (0x38) */
+struct READ_EVENT_LOG_VAR {
+ uint32_t word1;
+#define lpfc_event_log_SHIFT 29
+#define lpfc_event_log_MASK 0x00000001
+#define lpfc_event_log_WORD word1
+#define USE_MAILBOX_RESPONSE 1
+ uint32_t offset;
+ struct ulp_bde64 rcv_bde64;
+};
+
/* Structure for MB Command INIT_LINK (05) */
typedef struct {
@@ -2487,8 +2486,8 @@ typedef struct {
#define DMP_VPORT_REGION_SIZE 0x200
#define DMP_MBOX_OFFSET_WORD 0x5
-#define DMP_REGION_23 0x17 /* fcoe param and port state region */
-#define DMP_RGN23_SIZE 0x400
+#define DMP_REGION_23 0x17 /* fcoe param and port state region */
+#define DMP_RGN23_SIZE 0x400
#define WAKE_UP_PARMS_REGION_ID 4
#define WAKE_UP_PARMS_WORD_SIZE 15
@@ -2503,9 +2502,9 @@ struct vport_rec {
#define VPORT_INFO_REV 0x1
#define MAX_STATIC_VPORT_COUNT 16
struct static_vport_info {
- uint32_t signature;
+ uint32_t signature;
uint32_t rev;
- struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
+ struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
uint32_t resvd[66];
};
@@ -2934,6 +2933,12 @@ typedef struct {
/* Union of all Mailbox Command types */
#define MAILBOX_CMD_WSIZE 32
#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t))
+/* ext_wsize times 4 bytes should not be greater than max xmit size */
+#define MAILBOX_EXT_WSIZE 512
+#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t))
+#define MAILBOX_HBA_EXT_OFFSET 0x100
+/* max mbox xmit size is a page size for sysfs IO operations */
+#define MAILBOX_MAX_XMIT_SIZE PAGE_SIZE
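The two comments above encode an arithmetic constraint: MAILBOX_EXT_WSIZE words of four bytes each (512 * 4 = 2048 bytes) must fit within the one-page transmit limit used for sysfs mailbox I/O. A minimal compile-time check expressing that relationship could look as follows; the helper name and its placement are illustrative only and are not part of this patch.

static inline void lpfc_mbox_ext_size_check(void)
{
	/* 512 words * 4 bytes = 2048 bytes of extension data, which must
	 * not exceed the page-sized per-transfer limit for sysfs IO.
	 */
	BUILD_BUG_ON(MAILBOX_EXT_SIZE > MAILBOX_MAX_XMIT_SIZE);
}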
typedef union {
uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
@@ -2972,6 +2977,9 @@ typedef union {
REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */
+ struct READ_EVENT_LOG_VAR varRdEventLog; /* cmd = 0x38
+ * (READ_EVENT_LOG)
+ */
struct config_msi_var varCfgMSI;/* cmd = x30 (CONFIG_MSI) */
} MAILVARIANTS;
@@ -3652,7 +3660,8 @@ typedef struct _IOCB { /* IOCB structure */
/* Maximum IOCBs that will fit in SLI2 slim */
#define MAX_SLI2_IOCB 498
#define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \
- (sizeof(MAILBOX_t) + sizeof(PCB_t)))
+ (sizeof(MAILBOX_t) + sizeof(PCB_t) + \
+ sizeof(uint32_t) * MAILBOX_EXT_WSIZE))
/* HBQ entries are 4 words each = 4k */
#define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) * \
@@ -3660,6 +3669,7 @@ typedef struct _IOCB { /* IOCB structure */
struct lpfc_sli2_slim {
MAILBOX_t mbx;
+ uint32_t mbx_ext_words[MAILBOX_EXT_WSIZE];
PCB_t pcb;
IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE];
};
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 820015f..bbdcf96 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -41,8 +41,14 @@
* Or clear that bit field:
* bf_set(example_bit_field, &t1, 0);
*/
+#define bf_get_le32(name, ptr) \
+ ((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
#define bf_get(name, ptr) \
(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
+#define bf_set_le32(name, ptr, value) \
+ ((ptr)->name##_WORD = cpu_to_le32(((((value) & \
+ name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \
+ ~(name##_MASK << name##_SHIFT)))))
#define bf_set(name, ptr, value) \
((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
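The new _le32 accessors mirror bf_get()/bf_set() but byte-swap the backing word, which is what lets the queue-entry code later in this patch read and clear fields directly in hardware-written EQEs and CQEs. A condensed usage sketch, reusing the lpfc_eqe_valid and lpfc_eqe_resource_id field definitions assumed from elsewhere in this header (the helper itself is illustrative, not part of the patch):

static inline uint16_t lpfc_eqe_consume_sketch(struct lpfc_eqe *eqe)
{
	/* Fields are stored little-endian by the port, so go through the
	 * _le32 accessors rather than bf_get()/bf_set().
	 */
	uint16_t cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	bf_set_le32(lpfc_eqe_valid, eqe, 0);	/* mark the entry consumed */
	return cqid;
}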
@@ -781,6 +787,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
+#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
/* FCoE Opcodes */
#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
@@ -1102,6 +1109,39 @@ struct lpfc_mbx_mq_create {
} u;
};
+struct lpfc_mbx_mq_create_ext {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0
+#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_ext_num_pages_WORD word0
+ uint32_t async_evt_bmap;
+#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK
+#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_fcfste_SHIFT LPFC_TRAILER_CODE_FCOE
+#define lpfc_mbx_mq_create_ext_async_evt_fcfste_MASK 0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_fcfste_WORD async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_group5_SHIFT LPFC_TRAILER_CODE_GRP5
+#define lpfc_mbx_mq_create_ext_async_evt_group5_MASK 0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_group5_WORD async_evt_bmap
+ struct mq_context context;
+ struct dma_address page[LPFC_MAX_MQ_PAGE];
+ } request;
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_mq_create_q_id_SHIFT 0
+#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_q_id_WORD word0
+ } response;
+ } u;
+#define LPFC_ASYNC_EVENT_LINK_STATE 0x2
+#define LPFC_ASYNC_EVENT_FCF_STATE 0x4
+#define LPFC_ASYNC_EVENT_GROUP5 0x20
+};
+
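Note that the async_evt_bmap bit positions are the LPFC_TRAILER_CODE_* values themselves, so subscribing the mailbox queue to an event class is a single bf_set on the correspondingly named bit; for example (mirroring what lpfc_mq_create() does later in this patch):

	/* request delivery of GRP5 (logical link speed) async events */
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);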
struct lpfc_mbx_mq_destroy {
struct mbox_header header;
union {
@@ -1428,8 +1468,8 @@ struct lpfc_mbx_reg_vfi {
#define lpfc_reg_vfi_fcfi_WORD word2
uint32_t wwn[2];
struct ulp_bde64 bde;
- uint32_t word8_rsvd;
- uint32_t word9_rsvd;
+ uint32_t e_d_tov;
+ uint32_t r_a_tov;
uint32_t word10;
#define lpfc_reg_vfi_nport_id_SHIFT 0
#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF
@@ -1940,6 +1980,7 @@ struct lpfc_mbx_sli4_params {
#define rdma_MASK 0x00000001
#define rdma_WORD word3
uint32_t sge_supp_len;
+#define SLI4_PAGE_SIZE 4096
uint32_t word5;
#define if_page_sz_SHIFT 0
#define if_page_sz_MASK 0x0000ffff
@@ -2041,6 +2082,7 @@ struct lpfc_mqe {
struct lpfc_mbx_reg_fcfi reg_fcfi;
struct lpfc_mbx_unreg_fcfi unreg_fcfi;
struct lpfc_mbx_mq_create mq_create;
+ struct lpfc_mbx_mq_create_ext mq_create_ext;
struct lpfc_mbx_eq_create eq_create;
struct lpfc_mbx_cq_create cq_create;
struct lpfc_mbx_wq_create wq_create;
@@ -2099,6 +2141,7 @@ struct lpfc_mcqe {
#define LPFC_TRAILER_CODE_LINK 0x1
#define LPFC_TRAILER_CODE_FCOE 0x2
#define LPFC_TRAILER_CODE_DCBX 0x3
+#define LPFC_TRAILER_CODE_GRP5 0x5
};
struct lpfc_acqe_link {
@@ -2168,6 +2211,19 @@ struct lpfc_acqe_dcbx {
uint32_t trailer;
};
+struct lpfc_acqe_grp5 {
+ uint32_t word0;
+#define lpfc_acqe_grp5_pport_SHIFT 0
+#define lpfc_acqe_grp5_pport_MASK 0x000000FF
+#define lpfc_acqe_grp5_pport_WORD word0
+ uint32_t word1;
+#define lpfc_acqe_grp5_llink_spd_SHIFT 16
+#define lpfc_acqe_grp5_llink_spd_MASK 0x0000FFFF
+#define lpfc_acqe_grp5_llink_spd_WORD word1
+ uint32_t event_tag;
+ uint32_t trailer;
+};
+
/*
* Define the bootstrap mailbox (bmbx) region used to communicate
* mailbox command between the host and port. The mailbox consists
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 774663e..cd9697e 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2566,7 +2566,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->max_cmd_len = 16;
if (phba->sli_rev == LPFC_SLI_REV4) {
shost->dma_boundary =
- phba->sli4_hba.pc_sli4_params.sge_supp_len;
+ phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
shost->sg_tablesize = phba->cfg_sg_seg_cnt;
}
@@ -2600,15 +2600,6 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
init_timer(&vport->els_tmofunc);
vport->els_tmofunc.function = lpfc_els_timeout;
vport->els_tmofunc.data = (unsigned long)vport;
- if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
- phba->menlo_flag |= HBA_MENLO_SUPPORT;
- /* check for menlo minimum sg count */
- if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
- phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
- shost->sg_tablesize = phba->cfg_sg_seg_cnt;
- }
- }
-
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
goto out_put_shost;
@@ -3236,12 +3227,26 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
if (!vport)
return NULL;
- ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp)
- return NULL;
phba = vport->phba;
if (!phba)
return NULL;
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp) {
+ /* Cannot find existing Fabric ndlp, so allocate a new one */
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ return 0;
+ lpfc_nlp_init(vport, ndlp, Fabric_DID);
+ /* Set the node type */
+ ndlp->nlp_type |= NLP_FABRIC;
+ /* Put ndlp onto node list */
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ /* re-setup ndlp without removing from node list */
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ return 0;
+ }
if (phba->pport->port_state <= LPFC_FLOGI)
return NULL;
/* If virtual link is not yet instantiated ignore CVL */
@@ -3304,11 +3309,20 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
switch (event_type) {
case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
- "2546 New FCF found/FCF parameter modified event: "
- "evt_tag:x%x, fcf_index:x%x\n",
- acqe_fcoe->event_tag, acqe_fcoe->index);
-
+ if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+ LOG_DISCOVERY,
+ "2546 New FCF found event: "
+ "evt_tag:x%x, fcf_index:x%x\n",
+ acqe_fcoe->event_tag,
+ acqe_fcoe->index);
+ else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
+ LOG_DISCOVERY,
+ "2788 FCF parameter modified event: "
+ "evt_tag:x%x, fcf_index:x%x\n",
+ acqe_fcoe->event_tag,
+ acqe_fcoe->index);
spin_lock_irq(&phba->hbalock);
if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
(phba->hba_flag & FCF_DISC_INPROGRESS)) {
@@ -3517,6 +3531,32 @@ lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_grp5: pointer to the async grp5 completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
+ * is an asynchronous notification of a logical link speed change. The Port
+ * reports the logical link speed in units of 10Mbps.
+ **/
+static void
+lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
+ struct lpfc_acqe_grp5 *acqe_grp5)
+{
+ uint16_t prev_ll_spd;
+
+ phba->fc_eventTag = acqe_grp5->event_tag;
+ phba->fcoe_eventtag = acqe_grp5->event_tag;
+ prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
+ phba->sli4_hba.link_state.logical_speed =
+ (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2789 GRP5 Async Event: Updating logical link speed "
+ "from %dMbps to %dMbps\n", (prev_ll_spd * 10),
+ (phba->sli4_hba.link_state.logical_speed*10));
+}
+
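Because the port reports the logical link speed in units of 10 Mbps, the log message above multiplies both the previous and the new value by ten. The conversion, spelled out as a tiny helper (illustrative only; no such helper exists in the driver):

static inline uint32_t lpfc_grp5_logical_speed_to_mbps(uint16_t reported)
{
	/* GRP5 ACQEs carry the logical link speed in 10 Mbps units */
	return (uint32_t)reported * 10;
}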
+/**
* lpfc_sli4_async_event_proc - Process all the pending asynchronous event
* @phba: pointer to lpfc hba data structure.
*
@@ -3552,6 +3592,10 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
lpfc_sli4_async_dcbx_evt(phba,
&cq_event->cqe.acqe_dcbx);
break;
+ case LPFC_TRAILER_CODE_GRP5:
+ lpfc_sli4_async_grp5_evt(phba,
+ &cq_event->cqe.acqe_grp5);
+ break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"1804 Invalid asynchrous event code: "
@@ -3813,6 +3857,13 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
+ if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
+ phba->menlo_flag |= HBA_MENLO_SUPPORT;
+ /* check for menlo minimum sg count */
+ if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
+ phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
+ }
+
/*
* Since the sg_tablesize is module parameter, the sg_dma_buf_size
* used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -4030,6 +4081,43 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
if (unlikely(rc))
goto out_free_bsmbx;
+ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mboxq) {
+ rc = -ENOMEM;
+ goto out_free_bsmbx;
+ }
+
+ /* Get the Supported Pages. It is always available. */
+ lpfc_supported_pages(mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ goto out_free_bsmbx;
+ }
+
+ mqe = &mboxq->u.mqe;
+ memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+ LPFC_MAX_SUPPORTED_PAGES);
+ for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+ switch (pn_page[i]) {
+ case LPFC_SLI4_PARAMETERS:
+ phba->sli4_hba.pc_sli4_params.supported = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Read the port's SLI4 Parameters capabilities if supported. */
+ if (phba->sli4_hba.pc_sli4_params.supported)
+ rc = lpfc_pc_sli4_params_get(phba, mboxq);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (rc) {
+ rc = -EIO;
+ goto out_free_bsmbx;
+ }
/* Create all the SLI4 queues */
rc = lpfc_sli4_queue_create(phba);
if (rc)
@@ -4090,43 +4178,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_fcp_eq_hdl;
}
- mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL);
- if (!mboxq) {
- rc = -ENOMEM;
- goto out_free_fcp_eq_hdl;
- }
-
- /* Get the Supported Pages. It is always available. */
- lpfc_supported_pages(mboxq);
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- if (unlikely(rc)) {
- rc = -EIO;
- mempool_free(mboxq, phba->mbox_mem_pool);
- goto out_free_fcp_eq_hdl;
- }
-
- mqe = &mboxq->u.mqe;
- memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
- LPFC_MAX_SUPPORTED_PAGES);
- for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
- switch (pn_page[i]) {
- case LPFC_SLI4_PARAMETERS:
- phba->sli4_hba.pc_sli4_params.supported = 1;
- break;
- default:
- break;
- }
- }
-
- /* Read the port's SLI4 Parameters capabilities if supported. */
- if (phba->sli4_hba.pc_sli4_params.supported)
- rc = lpfc_pc_sli4_params_get(phba, mboxq);
- mempool_free(mboxq, phba->mbox_mem_pool);
- if (rc) {
- rc = -EIO;
- goto out_free_fcp_eq_hdl;
- }
return rc;
out_free_fcp_eq_hdl:
@@ -5050,6 +5101,8 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
+ phba->mbox_ext = (phba->slim2p.virt +
+ offsetof(struct lpfc_sli2_slim, mbx_ext_words));
phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
phba->IOCBs = (phba->slim2p.virt +
offsetof(struct lpfc_sli2_slim, IOCBs));
@@ -7753,21 +7806,23 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
* @phba: pointer to lpfc hba data structure.
*
* This routine is called to prepare the SLI3 device for PCI slot recover. It
- * aborts and stops all the on-going I/Os on the pci device.
+ * aborts all the outstanding SCSI I/Os to the pci device.
**/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2723 PCI channel I/O abort preparing for recovery\n");
- /* Prepare for bringing HBA offline */
- lpfc_offline_prep(phba);
- /* Clear sli active flag to prevent sysfs access to HBA */
- spin_lock_irq(&phba->hbalock);
- phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
- spin_unlock_irq(&phba->hbalock);
- /* Stop and flush all I/Os and bring HBA offline */
- lpfc_offline(phba);
+
+ /*
+ * There may be errored I/Os through the HBA; abort all I/Os on the txcmplq
+ * and let the SCSI mid-layer retry them to recover.
+ */
+ pring = &psli->ring[psli->fcp_ring];
+ lpfc_sli_abort_iocb_ring(phba, pring);
}
/**
@@ -7781,21 +7836,20 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring;
-
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2710 PCI channel disable preparing for reset\n");
+
+ /* Block all SCSI devices' I/Os on the host */
+ lpfc_scsi_dev_block(phba);
+
+ /* stop all timers */
+ lpfc_stop_hba_timers(phba);
+
/* Disable interrupt and pci device */
lpfc_sli_disable_intr(phba);
pci_disable_device(phba->pcidev);
- /*
- * There may be I/Os dropped by the firmware.
- * Error iocb (I/O) on txcmplq and let the SCSI layer
- * retry it after re-establishing link.
- */
- pring = &psli->ring[psli->fcp_ring];
- lpfc_sli_abort_iocb_ring(phba, pring);
+ /* Flush all of the driver's outstanding SCSI I/Os as we are about to reset */
+ lpfc_sli_flush_fcp_rings(phba);
}
/**
@@ -7811,6 +7865,12 @@ lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2711 PCI channel permanent disable for failure\n");
+ /* Block all SCSI devices' I/Os on the host */
+ lpfc_scsi_dev_block(phba);
+
+ /* stop all timers */
+ lpfc_stop_hba_timers(phba);
+
/* Clean up all driver's outstanding SCSI I/Os */
lpfc_sli_flush_fcp_rings(phba);
}
@@ -7839,9 +7899,6 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- /* Block all SCSI devices' I/Os on the host */
- lpfc_scsi_dev_block(phba);
-
switch (state) {
case pci_channel_io_normal:
/* Non-fatal error, prepare for recovery */
@@ -7948,7 +8005,7 @@ lpfc_io_resume_s3(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- /* Bring the device online */
+ /* Bring device online; it will be a no-op for a non-fatal error resume */
lpfc_online(phba);
/* Clean up Advanced Error Reporting (AER) if needed */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 72e6adb..e84dc33 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1216,7 +1216,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->pcb->feature = FEATURE_INITIAL_SLI2;
/* Setup Mailbox pointers */
- phba->pcb->mailBoxSize = sizeof(MAILBOX_t);
+ phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
pdma_addr = phba->slim2p.phys + offset;
phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
@@ -1272,28 +1272,41 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*
*/
- if (phba->sli_rev == 3) {
- phba->host_gp = &mb_slim->us.s3.host[0];
- phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
- } else {
- phba->host_gp = &mb_slim->us.s2.host[0];
+ if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
+ phba->host_gp = &phba->mbox->us.s2.host[0];
phba->hbq_put = NULL;
- }
+ offset = (uint8_t *)&phba->mbox->us.s2.host -
+ (uint8_t *)phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
+ phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
+ phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
+ } else {
+ /* The Host Group Pointer is always in SLIM */
+ mb->un.varCfgPort.hps = 1;
- /* mask off BAR0's flag bits 0 - 3 */
- phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
- (void __iomem *)phba->host_gp -
- (void __iomem *)phba->MBslimaddr;
- if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
- phba->pcb->hgpAddrHigh = bar_high;
- else
- phba->pcb->hgpAddrHigh = 0;
- /* write HGP data to SLIM at the required longword offset */
- memset(&hgp, 0, sizeof(struct lpfc_hgp));
+ if (phba->sli_rev == 3) {
+ phba->host_gp = &mb_slim->us.s3.host[0];
+ phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
+ } else {
+ phba->host_gp = &mb_slim->us.s2.host[0];
+ phba->hbq_put = NULL;
+ }
- for (i=0; i < phba->sli.num_rings; i++) {
- lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
+ /* mask off BAR0's flag bits 0 - 3 */
+ phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
+ (void __iomem *)phba->host_gp -
+ (void __iomem *)phba->MBslimaddr;
+ if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ phba->pcb->hgpAddrHigh = bar_high;
+ else
+ phba->pcb->hgpAddrHigh = 0;
+ /* write HGP data to SLIM at the required longword offset */
+ memset(&hgp, 0, sizeof(struct lpfc_hgp));
+
+ for (i = 0; i < phba->sli.num_rings; i++) {
+ lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
sizeof(*phba->host_gp));
+ }
}
/* Setup Port Group offset */
@@ -1598,7 +1611,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
for (sgentry = 0; sgentry < sgecount; sgentry++) {
lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
- dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
mbox->sge_array->addr[sgentry], phyaddr);
}
/* Free the sge address array memory */
@@ -1656,7 +1669,7 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
}
/* Setup for the none-embedded mbox command */
- pcount = (PAGE_ALIGN(length))/PAGE_SIZE;
+ pcount = (PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
/* Allocate record for keeping SGE virtual addresses */
@@ -1671,24 +1684,24 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
/* The DMA memory is always allocated in the length of a
* page even though the last SGE might not fill up to a
- * page, this is used as a priori size of PAGE_SIZE for
+ * page, this is used as an a priori size of SLI4_PAGE_SIZE for
* the later DMA memory free.
*/
- viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE,
+ viraddr = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
&phyaddr, GFP_KERNEL);
/* In case of malloc fails, proceed with whatever we have */
if (!viraddr)
break;
- memset(viraddr, 0, PAGE_SIZE);
+ memset(viraddr, 0, SLI4_PAGE_SIZE);
mbox->sge_array->addr[pagen] = viraddr;
/* Keep the first page for later sub-header construction */
if (pagen == 0)
cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
resid_len = length - alloc_len;
- if (resid_len > PAGE_SIZE) {
+ if (resid_len > SLI4_PAGE_SIZE) {
lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
- PAGE_SIZE);
- alloc_len += PAGE_SIZE;
+ SLI4_PAGE_SIZE);
+ alloc_len += SLI4_PAGE_SIZE;
} else {
lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
resid_len);
@@ -1886,6 +1899,8 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
+ reg_vfi->e_d_tov = vport->phba->fc_edtov;
+ reg_vfi->r_a_tov = vport->phba->fc_ratov;
reg_vfi->bde.addrHigh = putPaddrHigh(phys);
reg_vfi->bde.addrLow = putPaddrLow(phys);
reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index e331204..b90820a 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -493,6 +493,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_vport **vports;
+ int i, active_vlink_present = 0;
/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
@@ -505,15 +508,44 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
else
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
- if ((ndlp->nlp_DID == Fabric_DID) &&
- vport->port_type == LPFC_NPIV_PORT) {
+ if (ndlp->nlp_DID == Fabric_DID) {
+ if (vport->port_state <= LPFC_FDISC)
+ goto out;
lpfc_linkdown_port(vport);
- mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_DELAY_TMO;
+ vport->fc_flag |= FC_VPORT_LOGO_RCVD;
spin_unlock_irq(shost->host_lock);
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+ i++) {
+ if ((!(vports[i]->fc_flag &
+ FC_VPORT_LOGO_RCVD)) &&
+ (vports[i]->port_state > LPFC_FDISC)) {
+ active_vlink_present = 1;
+ break;
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ }
- ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+ if (active_vlink_present) {
+ /*
+ * If there are other active VLinks present,
+ * re-instantiate the Vlink using FDISC.
+ */
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+ ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+ vport->port_state = LPFC_FDISC;
+ } else {
+ spin_lock_irq(shost->host_lock);
+ phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_retry_pport_discovery(phba);
+ }
} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
((ndlp->nlp_type & NLP_FCP_TARGET) ||
!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
@@ -526,6 +558,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
}
+out:
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
@@ -604,11 +637,55 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_rpi(vport, ndlp);
return 0;
}
+/**
+ * lpfc_release_rpi - Release an RPI by issuing an unreg_login mailbox cmd.
+ * @phba : Pointer to lpfc_hba structure.
+ * @vport: Pointer to lpfc_vport structure.
+ * @rpi : rpi to be released.
+ *
+ * This function will send an unreg_login mailbox command to the firmware
+ * to release an rpi.
+ **/
+void
+lpfc_release_rpi(struct lpfc_hba *phba,
+ struct lpfc_vport *vport,
+ uint16_t rpi)
+{
+ LPFC_MBOXQ_t *pmb;
+ int rc;
+
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!pmb)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "2796 mailbox memory allocation failed \n");
+ else {
+ lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(pmb, phba->mbox_mem_pool);
+ }
+}
static uint32_t
lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba;
+ LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+ MAILBOX_t *mb;
+ uint16_t rpi;
+
+ phba = vport->phba;
+ /* Release the RPI if reglogin completing */
+ if (!(phba->pport->load_flag & FC_UNLOADING) &&
+ (evt == NLP_EVT_CMPL_REG_LOGIN) &&
+ (!pmb->u.mb.mbxStatus)) {
+ mb = &pmb->u.mb;
+ rpi = pmb->u.mb.un.varWords[0];
+ lpfc_release_rpi(phba, vport, rpi);
+ }
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0271 Illegal State Transition: node x%x "
"event x%x, state x%x Data: x%x x%x\n",
@@ -944,6 +1021,18 @@ static uint32_t
lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba;
+ LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+ MAILBOX_t *mb = &pmb->u.mb;
+ uint16_t rpi;
+
+ phba = vport->phba;
+ /* Release the RPI */
+ if (!(phba->pport->load_flag & FC_UNLOADING) &&
+ !mb->mbxStatus) {
+ rpi = pmb->u.mb.un.varWords[0];
+ lpfc_release_rpi(phba, vport, rpi);
+ }
return ndlp->nlp_state;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index dccdb82..f4a3b2e 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1141,37 +1141,47 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
}
/*
- * Given a scsi cmnd, determine the BlockGuard profile to be used
- * with the cmd
+ * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it
+ * @sc: The SCSI command to examine
+ * @txop: (out) BlockGuard operation for transmitted data
+ * @rxop: (out) BlockGuard operation for received data
+ *
+ * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
+ *
*/
static int
-lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
+lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ uint8_t *txop, uint8_t *rxop)
{
uint8_t guard_type = scsi_host_get_guard(sc->device->host);
- uint8_t ret_prof = LPFC_PROF_INVALID;
+ uint8_t ret = 0;
if (guard_type == SHOST_DIX_GUARD_IP) {
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
- ret_prof = LPFC_PROF_AST2;
+ *txop = BG_OP_IN_CSUM_OUT_NODIF;
+ *rxop = BG_OP_IN_NODIF_OUT_CSUM;
break;
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
- ret_prof = LPFC_PROF_A1;
+ *txop = BG_OP_IN_NODIF_OUT_CRC;
+ *rxop = BG_OP_IN_CRC_OUT_NODIF;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- ret_prof = LPFC_PROF_AST1;
+ *txop = BG_OP_IN_CSUM_OUT_CRC;
+ *rxop = BG_OP_IN_CRC_OUT_CSUM;
break;
case SCSI_PROT_NORMAL:
default:
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9063 BLKGRD:Bad op/guard:%d/%d combination\n",
+ "9063 BLKGRD: Bad op/guard:%d/%d combination\n",
scsi_get_prot_op(sc), guard_type);
+ ret = 1;
break;
}
@@ -1179,12 +1189,14 @@ lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
- ret_prof = LPFC_PROF_A1;
+ *txop = BG_OP_IN_NODIF_OUT_CRC;
+ *rxop = BG_OP_IN_CRC_OUT_NODIF;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- ret_prof = LPFC_PROF_C1;
+ *txop = BG_OP_IN_CRC_OUT_CRC;
+ *rxop = BG_OP_IN_CRC_OUT_CRC;
break;
case SCSI_PROT_READ_INSERT:
@@ -1194,6 +1206,7 @@ lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
"9075 BLKGRD: Bad op/guard:%d/%d combination\n",
scsi_get_prot_op(sc), guard_type);
+ ret = 1;
break;
}
} else {
@@ -1201,7 +1214,7 @@ lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
BUG();
}
- return ret_prof;
+ return ret;
}
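The BPL setup routines below now obtain a transmit/receive opcode pair instead of a single SLI profile. A stripped-down sketch of the calling pattern (names other than lpfc_sc_to_bg_opcodes() and the BG_OP_* constants are illustrative):

static int lpfc_bg_opcodes_sketch(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	uint8_t txop, rxop;

	/* non-zero means the op/guard combination is unsupported */
	if (lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop))
		return 1;

	/* e.g. DIX IP guard + SCSI_PROT_WRITE_PASS yields
	 * txop = BG_OP_IN_CSUM_OUT_CRC, rxop = BG_OP_IN_CRC_OUT_CSUM,
	 * which later populate the pde6_optx/pde6_oprx fields.
	 */
	return 0;
}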
struct scsi_dif_tuple {
@@ -1266,7 +1279,9 @@ lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
* The buffer list consists of just one protection group described
* below:
* +-------------------------+
- * start of prot group --> | PDE_1 |
+ * start of prot group --> | PDE_5 |
+ * +-------------------------+
+ * | PDE_6 |
* +-------------------------+
* | Data BDE |
* +-------------------------+
@@ -1284,30 +1299,49 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct ulp_bde64 *bpl, int datasegcnt)
{
struct scatterlist *sgde = NULL; /* s/g data entry */
- struct lpfc_pde *pde1 = NULL;
+ struct lpfc_pde5 *pde5 = NULL;
+ struct lpfc_pde6 *pde6 = NULL;
dma_addr_t physaddr;
- int i = 0, num_bde = 0;
+ int i = 0, num_bde = 0, status;
int datadir = sc->sc_data_direction;
- int prof = LPFC_PROF_INVALID;
unsigned blksize;
uint32_t reftag;
uint16_t apptagmask, apptagval;
+ uint8_t txop, rxop;
- pde1 = (struct lpfc_pde *) bpl;
- prof = lpfc_sc_to_sli_prof(phba, sc);
-
- if (prof == LPFC_PROF_INVALID)
+ status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+ if (status)
goto out;
- /* extract some info from the scsi command for PDE1*/
+ /* extract some info from the scsi command for pde */
blksize = lpfc_cmd_blksize(sc);
lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
- /* setup PDE1 with what we have */
- lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
- BG_EC_STOP_ERR);
- lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
+ /* setup PDE5 with what we have */
+ pde5 = (struct lpfc_pde5 *) bpl;
+ memset(pde5, 0, sizeof(struct lpfc_pde5));
+ bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
+ pde5->reftag = reftag;
+ /* advance bpl and increment bde count */
+ num_bde++;
+ bpl++;
+ pde6 = (struct lpfc_pde6 *) bpl;
+
+ /* setup PDE6 with the rest of the info */
+ memset(pde6, 0, sizeof(struct lpfc_pde6));
+ bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
+ bf_set(pde6_optx, pde6, txop);
+ bf_set(pde6_oprx, pde6, rxop);
+ if (datadir == DMA_FROM_DEVICE) {
+ bf_set(pde6_ce, pde6, 1);
+ bf_set(pde6_re, pde6, 1);
+ bf_set(pde6_ae, pde6, 1);
+ }
+ bf_set(pde6_ai, pde6, 1);
+ bf_set(pde6_apptagval, pde6, apptagval);
+
+ /* advance bpl and increment bde count */
num_bde++;
bpl++;
@@ -1342,15 +1376,17 @@ out:
* The buffer list for this type consists of one or more of the
* protection groups described below:
* +-------------------------+
- * start of first prot group --> | PDE_1 |
+ * start of first prot group --> | PDE_5 |
+ * +-------------------------+
+ * | PDE_6 |
* +-------------------------+
- * | PDE_3 (Prot BDE) |
+ * | PDE_7 (Prot BDE) |
* +-------------------------+
* | Data BDE |
* +-------------------------+
* |more Data BDE's ... (opt)|
* +-------------------------+
- * start of new prot group --> | PDE_1 |
+ * start of new prot group --> | PDE_5 |
* +-------------------------+
* | ... |
* +-------------------------+
@@ -1369,19 +1405,21 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
{
struct scatterlist *sgde = NULL; /* s/g data entry */
struct scatterlist *sgpe = NULL; /* s/g prot entry */
- struct lpfc_pde *pde1 = NULL;
+ struct lpfc_pde5 *pde5 = NULL;
+ struct lpfc_pde6 *pde6 = NULL;
struct ulp_bde64 *prot_bde = NULL;
dma_addr_t dataphysaddr, protphysaddr;
unsigned short curr_data = 0, curr_prot = 0;
unsigned int split_offset, protgroup_len;
unsigned int protgrp_blks, protgrp_bytes;
unsigned int remainder, subtotal;
- int prof = LPFC_PROF_INVALID;
+ int status;
int datadir = sc->sc_data_direction;
unsigned char pgdone = 0, alldone = 0;
unsigned blksize;
uint32_t reftag;
uint16_t apptagmask, apptagval;
+ uint8_t txop, rxop;
int num_bde = 0;
sgpe = scsi_prot_sglist(sc);
@@ -1394,31 +1432,47 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
return 0;
}
- prof = lpfc_sc_to_sli_prof(phba, sc);
- if (prof == LPFC_PROF_INVALID)
+ status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+ if (status)
goto out;
- /* extract some info from the scsi command for PDE1*/
+ /* extract some info from the scsi command */
blksize = lpfc_cmd_blksize(sc);
lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
split_offset = 0;
do {
- /* setup the first PDE_1 */
- pde1 = (struct lpfc_pde *) bpl;
-
- lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
- BG_EC_STOP_ERR);
- lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
+ /* setup PDE5 with what we have */
+ pde5 = (struct lpfc_pde5 *) bpl;
+ memset(pde5, 0, sizeof(struct lpfc_pde5));
+ bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
+ pde5->reftag = reftag;
+ /* advance bpl and increment bde count */
+ num_bde++;
+ bpl++;
+ pde6 = (struct lpfc_pde6 *) bpl;
+
+ /* setup PDE6 with the rest of the info */
+ memset(pde6, 0, sizeof(struct lpfc_pde6));
+ bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
+ bf_set(pde6_optx, pde6, txop);
+ bf_set(pde6_oprx, pde6, rxop);
+ bf_set(pde6_ce, pde6, 1);
+ bf_set(pde6_re, pde6, 1);
+ bf_set(pde6_ae, pde6, 1);
+ bf_set(pde6_ai, pde6, 1);
+ bf_set(pde6_apptagval, pde6, apptagval);
+
+ /* advance bpl and increment bde count */
num_bde++;
bpl++;
/* setup the first BDE that points to protection buffer */
prot_bde = (struct ulp_bde64 *) bpl;
protphysaddr = sg_dma_address(sgpe);
- prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
- prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
+ prot_bde->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr));
+ prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
protgroup_len = sg_dma_len(sgpe);
@@ -1429,10 +1483,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
protgrp_bytes = protgrp_blks * blksize;
prot_bde->tus.f.bdeSize = protgroup_len;
- if (datadir == DMA_TO_DEVICE)
- prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- else
- prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR;
prot_bde->tus.w = le32_to_cpu(bpl->tus.w);
curr_prot++;
@@ -1484,6 +1535,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* Move to the next s/g segment if possible */
sgde = sg_next(sgde);
+
}
/* are we done ? */
@@ -1506,7 +1558,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
out:
-
return num_bde;
}
/*
@@ -1828,8 +1879,8 @@ out:
* field of @lpfc_cmd for device with SLI-4 interface spec.
*
* Return codes:
- * 1 - Error
- * 0 - Success
+ * 1 - Error
+ * 0 - Success
**/
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
@@ -1937,8 +1988,8 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* lpfc_hba struct.
*
* Return codes:
- * 1 - Error
- * 0 - Success
+ * 1 - Error
+ * 0 - Success
**/
static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 049fb9a..7a61455 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -212,7 +212,7 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
/* If the next EQE is not valid then we are done */
- if (!bf_get(lpfc_eqe_valid, eqe))
+ if (!bf_get_le32(lpfc_eqe_valid, eqe))
return NULL;
/* If the host has not yet processed the next entry then we are done */
if (((q->hba_index + 1) % q->entry_count) == q->host_index)
@@ -247,7 +247,7 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
/* while there are valid entries */
while (q->hba_index != q->host_index) {
temp_eqe = q->qe[q->host_index].eqe;
- bf_set(lpfc_eqe_valid, temp_eqe, 0);
+ bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
released++;
q->host_index = ((q->host_index + 1) % q->entry_count);
}
@@ -285,7 +285,7 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
struct lpfc_cqe *cqe;
/* If the next CQE is not valid then we are done */
- if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
+ if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
return NULL;
/* If the host has not yet processed the next entry then we are done */
if (((q->hba_index + 1) % q->entry_count) == q->host_index)
@@ -321,7 +321,7 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
/* while there are valid entries */
while (q->hba_index != q->host_index) {
temp_qe = q->qe[q->host_index].cqe;
- bf_set(lpfc_cqe_valid, temp_qe, 0);
+ bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
released++;
q->host_index = ((q->host_index + 1) % q->entry_count);
}
@@ -1659,6 +1659,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
case MBX_INIT_VPI:
case MBX_INIT_VFI:
case MBX_RESUME_RPI:
+ case MBX_READ_EVENT_LOG_STATUS:
+ case MBX_READ_EVENT_LOG:
ret = mbxCommand;
break;
default:
@@ -4296,7 +4298,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"2570 Failed to read FCoE parameters\n");
/* Issue READ_REV to collect vpd and FW information. */
- vpd_size = PAGE_SIZE;
+ vpd_size = SLI4_PAGE_SIZE;
vpd = kzalloc(vpd_size, GFP_KERNEL);
if (!vpd) {
rc = -ENOMEM;
@@ -4891,9 +4893,34 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
mb->mbxOwner = OWN_CHIP;
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
- /* First copy command data to host SLIM area */
+ /* Populate mbox extension offset word. */
+ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
+ *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ = (uint8_t *)phba->mbox_ext
+ - (uint8_t *)phba->mbox;
+ }
+
+ /* Copy the mailbox extension data */
+ if (pmbox->in_ext_byte_len && pmbox->context2) {
+ lpfc_sli_pcimem_bcopy(pmbox->context2,
+ (uint8_t *)phba->mbox_ext,
+ pmbox->in_ext_byte_len);
+ }
+ /* Copy command data to host SLIM area */
lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
} else {
+ /* Populate mbox extension offset word. */
+ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
+ *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ = MAILBOX_HBA_EXT_OFFSET;
+
+ /* Copy the mailbox extension data */
+ if (pmbox->in_ext_byte_len && pmbox->context2) {
+ lpfc_memcpy_to_slim(phba->MBslimaddr +
+ MAILBOX_HBA_EXT_OFFSET,
+ pmbox->context2, pmbox->in_ext_byte_len);
+
+ }
if (mb->mbxCommand == MBX_CONFIG_PORT) {
/* copy command data into host mbox for cmpl */
lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
@@ -5003,15 +5030,22 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* copy results back to user */
lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
+ /* Copy the mailbox extension data */
+ if (pmbox->out_ext_byte_len && pmbox->context2) {
+ lpfc_sli_pcimem_bcopy(phba->mbox_ext,
+ pmbox->context2,
+ pmbox->out_ext_byte_len);
+ }
} else {
/* First copy command data */
lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
MAILBOX_CMD_SIZE);
- if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
- pmbox->context2) {
- lpfc_memcpy_from_slim((void *)pmbox->context2,
- phba->MBslimaddr + DMP_RSP_OFFSET,
- mb->un.varDmp.word_cnt);
+ /* Copy the mailbox extension data */
+ if (pmbox->out_ext_byte_len && pmbox->context2) {
+ lpfc_memcpy_from_slim(pmbox->context2,
+ phba->MBslimaddr +
+ MAILBOX_HBA_EXT_OFFSET,
+ pmbox->out_ext_byte_len);
}
}
@@ -7104,13 +7138,11 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
list_del_init(&abort_iocb->list);
pring->txcmplq_cnt--;
- spin_unlock_irq(&phba->hbalock);
/* Firmware could still be in progress of DMAing
* payload, so don't free data buffer till after
* a hbeat.
*/
- spin_lock_irq(&phba->hbalock);
abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
spin_unlock_irq(&phba->hbalock);
@@ -7118,7 +7150,8 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
- }
+ } else
+ spin_unlock_irq(&phba->hbalock);
}
lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -8133,6 +8166,12 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
if (pmb->mbox_cmpl) {
lpfc_sli_pcimem_bcopy(mbox, pmbox,
MAILBOX_CMD_SIZE);
+ if (pmb->out_ext_byte_len &&
+ pmb->context2)
+ lpfc_sli_pcimem_bcopy(
+ phba->mbox_ext,
+ pmb->context2,
+ pmb->out_ext_byte_len);
}
if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
@@ -8983,17 +9022,17 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
int ecount = 0;
uint16_t cqid;
- if (bf_get(lpfc_eqe_major_code, eqe) != 0) {
+ if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0359 Not a valid slow-path completion "
"event: majorcode=x%x, minorcode=x%x\n",
- bf_get(lpfc_eqe_major_code, eqe),
- bf_get(lpfc_eqe_minor_code, eqe));
+ bf_get_le32(lpfc_eqe_major_code, eqe),
+ bf_get_le32(lpfc_eqe_minor_code, eqe));
return;
}
/* Get the reference to the corresponding CQ */
- cqid = bf_get(lpfc_eqe_resource_id, eqe);
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
/* Search for completion queue pointer matching this cqid */
speq = phba->sli4_hba.sp_eq;
@@ -9221,12 +9260,12 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
uint16_t cqid;
int ecount = 0;
- if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) {
+ if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0366 Not a valid fast-path completion "
"event: majorcode=x%x, minorcode=x%x\n",
- bf_get(lpfc_eqe_major_code, eqe),
- bf_get(lpfc_eqe_minor_code, eqe));
+ bf_get_le32(lpfc_eqe_major_code, eqe),
+ bf_get_le32(lpfc_eqe_minor_code, eqe));
return;
}
@@ -9239,7 +9278,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
}
/* Get the reference to the corresponding CQ */
- cqid = bf_get(lpfc_eqe_resource_id, eqe);
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
if (unlikely(cqid != cq->queue_id)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0368 Miss-matched fast-path completion "
@@ -9506,7 +9545,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
while (!list_empty(&queue->page_list)) {
list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
list);
- dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
}
@@ -9532,13 +9571,17 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
struct lpfc_dmabuf *dmabuf;
int x, total_qe_count;
void *dma_pointer;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
queue = kzalloc(sizeof(struct lpfc_queue) +
(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
if (!queue)
return NULL;
- queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
+ queue->page_count = (ALIGN(entry_size * entry_count,
+ hw_page_size))/hw_page_size;
INIT_LIST_HEAD(&queue->list);
INIT_LIST_HEAD(&queue->page_list);
INIT_LIST_HEAD(&queue->child_list);
@@ -9547,19 +9590,19 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
if (!dmabuf)
goto out_fail;
dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
- PAGE_SIZE, &dmabuf->phys,
+ hw_page_size, &dmabuf->phys,
GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
goto out_fail;
}
- memset(dmabuf->virt, 0, PAGE_SIZE);
+ memset(dmabuf->virt, 0, hw_page_size);
dmabuf->buffer_tag = x;
list_add_tail(&dmabuf->list, &queue->page_list);
/* initialize queue's entry array */
dma_pointer = dmabuf->virt;
for (; total_qe_count < entry_count &&
- dma_pointer < (PAGE_SIZE + dmabuf->virt);
+ dma_pointer < (hw_page_size + dmabuf->virt);
total_qe_count++, dma_pointer += entry_size) {
queue->qe[total_qe_count].address = dma_pointer;
}
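With the hardware page size now taken from the port's reported SLI4 parameters (falling back to SLI4_PAGE_SIZE when they are not supported), the page count is a plain round-up. A worked example with illustrative numbers:

/* Illustrative only: a CQ of 256 entries, 64 bytes each, needs
 * 256 * 64 = 16384 bytes.  With hw_page_size = 4096:
 *
 *   page_count = ALIGN(16384, 4096) / 4096 = 4
 *
 * so four pages are dma_alloc_coherent()'d, zeroed, and carved into
 * 64-byte queue entries.
 */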
@@ -9604,6 +9647,10 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
uint16_t dmult;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -9653,6 +9700,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
break;
}
list_for_each_entry(dmabuf, &eq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -9715,6 +9763,11 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -9752,6 +9805,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
break;
}
list_for_each_entry(dmabuf, &cq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -9791,9 +9845,70 @@ out:
}
/**
+ * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ * @mbox: An allocated pointer to type LPFC_MBOXQ_t
+ * @cq: The completion queue to associate with this cq.
+ *
+ * This function provides failback (fb) functionality when the
+ * mq_create_ext fails on older FW generations. Its purpose is identical
+ * to mq_create_ext otherwise.
+ *
+ * This routine cannot fail as all attributes were previously accessed and
+ * initialized in mq_create_ext.
+ **/
+static void
+lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
+ LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
+{
+ struct lpfc_mbx_mq_create *mq_create;
+ struct lpfc_dmabuf *dmabuf;
+ int length;
+
+ length = (sizeof(struct lpfc_mbx_mq_create) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_MQ_CREATE,
+ length, LPFC_SLI4_MBX_EMBED);
+ mq_create = &mbox->u.mqe.un.mq_create;
+ bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+ mq->page_count);
+ bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
+ cq->queue_id);
+ bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+ switch (mq->entry_count) {
+ case 16:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_16);
+ break;
+ case 32:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_32);
+ break;
+ case 64:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_64);
+ break;
+ case 128:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_128);
+ break;
+ }
+ list_for_each_entry(dmabuf, &mq->page_list, list) {
+ mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+}
+
+/**
* lpfc_mq_create - Create a mailbox Queue on the HBA
* @phba: HBA structure that indicates port to create a queue on.
* @mq: The queue structure to use to create the mailbox queue.
+ * @cq: The completion queue to associate with this mq.
+ * @subtype: The queue's subtype.
*
* This function creates a mailbox queue, as detailed in @mq, on a port,
* described by @phba by sending a MQ_CREATE mailbox command to the HBA.
@@ -9809,31 +9924,43 @@ out:
* memory this function will return ENOMEM. If the queue create mailbox command
* fails this function will return ENXIO.
**/
-uint32_t
+int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
struct lpfc_queue *cq, uint32_t subtype)
{
struct lpfc_mbx_mq_create *mq_create;
+ struct lpfc_mbx_mq_create_ext *mq_create_ext;
struct lpfc_dmabuf *dmabuf;
LPFC_MBOXQ_t *mbox;
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
- length = (sizeof(struct lpfc_mbx_mq_create) -
+ length = (sizeof(struct lpfc_mbx_mq_create_ext) -
sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
- LPFC_MBOX_OPCODE_MQ_CREATE,
+ LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
length, LPFC_SLI4_MBX_EMBED);
- mq_create = &mbox->u.mqe.un.mq_create;
- bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+
+ mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
+ bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request,
mq->page_count);
- bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
- cq->queue_id);
- bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request,
+ 1);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste,
+ &mq_create_ext->u.request, 1);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
+ &mq_create_ext->u.request, 1);
+ bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
+ cq->queue_id);
+ bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
switch (mq->entry_count) {
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -9843,31 +9970,47 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
return -EINVAL;
/* otherwise default to smallest count (drop through) */
case 16:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
LPFC_MQ_CNT_16);
break;
case 32:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
LPFC_MQ_CNT_32);
break;
case 64:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
LPFC_MQ_CNT_64);
break;
case 128:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
LPFC_MQ_CNT_128);
break;
}
list_for_each_entry(dmabuf, &mq->page_list, list) {
- mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ memset(dmabuf->virt, 0, hw_page_size);
+ mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
- mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
+ mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+ &mq_create_ext->u.response);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2795 MQ_CREATE_EXT failed with "
+ "status x%x. Failback to MQ_CREATE.\n",
+ rc);
+ lpfc_mq_create_fb_init(phba, mq, mbox, cq);
+ mq_create = &mbox->u.mqe.un.mq_create;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
+ mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+ &mq_create->u.response);
+ }
+
/* The IOCTL status is embedded in the mailbox subheader. */
- shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
@@ -9878,7 +10021,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
status = -ENXIO;
goto out;
}
- mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
if (mq->queue_id == 0xFFFF) {
status = -ENXIO;
goto out;
@@ -9927,6 +10069,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -9942,6 +10088,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
cq->queue_id);
list_for_each_entry(dmabuf, &wq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -10010,6 +10157,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
if (hrq->entry_count != drq->entry_count)
return -EINVAL;
@@ -10054,6 +10205,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
LPFC_HDR_BUF_SIZE);
list_for_each_entry(dmabuf, &hrq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -10626,7 +10778,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
- if (reqlen > PAGE_SIZE) {
+ if (reqlen > SLI4_PAGE_SIZE) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"2559 Block sgl registration required DMA "
"size (%d) great than a page\n", reqlen);
@@ -10732,7 +10884,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
/* Calculate the requested length of the dma memory */
reqlen = cnt * sizeof(struct sgl_page_pairs) +
sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
- if (reqlen > PAGE_SIZE) {
+ if (reqlen > SLI4_PAGE_SIZE) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0217 Block sgl registration required DMA "
"size (%d) great than a page\n", reqlen);
@@ -11568,8 +11720,8 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
*
* This routine is invoked to post rpi header templates to the
* HBA consistent with the SLI-4 interface spec. This routine
- * posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers.
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
*
* This routine does not require any locks. Its usage is expected
* to be driver load or reset recovery when the driver is
@@ -11672,8 +11824,8 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
*
* This routine is invoked to post rpi header templates to the
* HBA consistent with the SLI-4 interface spec. This routine
- * posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers.
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
*
* Returns
* A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
@@ -12040,9 +12192,11 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
phba->hba_flag |= FCF_DISC_INPROGRESS;
spin_unlock_irq(&phba->hbalock);
/* Reset FCF round robin index bmask for new scan */
- if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
+ if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) {
memset(phba->fcf.fcf_rr_bmask, 0,
sizeof(*phba->fcf.fcf_rr_bmask));
+ phba->fcf.eligible_fcf_cnt = 0;
+ }
error = 0;
}
fail_fcf_scan:
@@ -12507,6 +12661,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
@@ -12523,6 +12678,11 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
__lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
+ ndlp = (struct lpfc_nodelist *) mb->context2;
+ if (ndlp) {
+ lpfc_nlp_put(ndlp);
+ mb->context2 = NULL;
+ }
}
list_del(&mb->list);
mempool_free(mb, phba->mbox_mem_pool);
@@ -12532,6 +12692,15 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
(mb->u.mb.mbxCommand == MBX_REG_VPI))
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+ ndlp = (struct lpfc_nodelist *) mb->context2;
+ if (ndlp) {
+ lpfc_nlp_put(ndlp);
+ mb->context2 = NULL;
+ }
+ /* Unregister the RPI when mailbox complete */
+ mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
+ }
}
spin_unlock_irq(&phba->hbalock);
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index b4a639c..e379215 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -36,6 +36,7 @@ struct lpfc_cq_event {
struct lpfc_acqe_link acqe_link;
struct lpfc_acqe_fcoe acqe_fcoe;
struct lpfc_acqe_dcbx acqe_dcbx;
+ struct lpfc_acqe_grp5 acqe_grp5;
struct lpfc_rcqe rcqe_cmpl;
struct sli4_wcqe_xri_aborted wcqe_axri;
struct lpfc_wcqe_complete wcqe_cmpl;
@@ -110,6 +111,9 @@ typedef struct lpfcMboxq {
void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
uint8_t mbox_flag;
+ uint16_t in_ext_byte_len;
+ uint16_t out_ext_byte_len;
+ uint8_t mbox_offset_word;
struct lpfc_mcqe mcqe;
struct lpfc_mbx_nembed_sge_virt *sge_array;
} LPFC_MBOXQ_t;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 4a35e7b..58bb4c8 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -162,6 +162,7 @@ struct lpfc_fcf {
#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
uint32_t addr_mode;
uint16_t fcf_rr_init_indx;
+ uint32_t eligible_fcf_cnt;
struct lpfc_fcf_rec current_rec;
struct lpfc_fcf_rec failover_rec;
struct timer_list redisc_wait;
@@ -492,8 +493,8 @@ void lpfc_sli4_queue_free(struct lpfc_queue *);
uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
-uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
- struct lpfc_queue *, uint32_t);
+int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_queue *, uint32_t);
uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t);
uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 013deec..5294c3a 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.10"
+#define LPFC_DRIVER_VERSION "8.3.12"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index ffd575c..ab91359 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -763,7 +763,9 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
- lpfc_printf_vlog(port_iterator, KERN_WARNING, LOG_VPORT,
+ if (!(port_iterator->load_flag & FC_UNLOADING))
+ lpfc_printf_vlog(port_iterator, KERN_ERR,
+ LOG_VPORT,
"1801 Create vport work array FAILED: "
"cannot do scsi_host_get\n");
continue;
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig
index ba8e128..bbb7e4b 100644
--- a/drivers/scsi/mpt2sas/Kconfig
+++ b/drivers/scsi/mpt2sas/Kconfig
@@ -2,7 +2,7 @@
# Kernel configuration file for the MPT2SAS
#
# This code is based on drivers/scsi/mpt2sas/Kconfig
-# Copyright (C) 2007-2009 LSI Corporation
+# Copyright (C) 2007-2010 LSI Corporation
# (mailto:DL-MPTFusionLinux@lsi.com)
# This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 9958d84..dada0a1 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2010 LSI Corporation.
*
*
* Name: mpi2.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index cf0ac9f..d4e9d6f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2010 LSI Corporation.
*
*
* Name: mpi2_cnfg.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
index c4adf76..bd6c92b 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
@@ -2,7 +2,7 @@
Fusion-MPT MPI 2.0 Header File Change History
==============================
- Copyright (c) 2000-2009 LSI Corporation.
+ Copyright (c) 2000-2010 LSI Corporation.
---------------------------------------
Header Set Release Version: 02.00.14
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 6541945..220bf65 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2010 LSI Corporation.
*
*
* Name: mpi2_init.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 7549384..f18f114 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2010 LSI Corporation.
*
*
* Name: mpi2_ioc.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 73fcdbf..686b09b 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2010 LSI Corporation.
*
*
* Name: mpi2_tool.h
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 88e6eeb..b830d61 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3,7 +3,7 @@
* for access to MPT (Message Passing Technology) firmware.
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -58,6 +58,7 @@
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/time.h>
+#include <linux/aer.h>
#include "mpt2sas_base.h"
@@ -285,6 +286,9 @@ _base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
return;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ return;
+
switch (ioc_status) {
/****************************************************************************
@@ -517,8 +521,18 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
desc = "IR Operation Status";
break;
case MPI2_EVENT_SAS_DISCOVERY:
- desc = "Discovery";
- break;
+ {
+ Mpi2EventDataSasDiscovery_t *event_data =
+ (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
+ printk(MPT2SAS_INFO_FMT "Discovery: (%s)", ioc->name,
+ (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop");
+ if (event_data->DiscoveryStatus)
+ printk("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ printk("\n");
+ return;
+ }
case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
desc = "SAS Broadcast Primitive";
break;
@@ -1243,6 +1257,9 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
goto out_fail;
}
+ /* AER (Advanced Error Reporting) hooks */
+ pci_enable_pcie_error_reporting(pdev);
+
pci_set_master(pdev);
if (_base_config_dma_addressing(ioc, pdev) != 0) {
@@ -1253,7 +1270,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
}
for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
- if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
if (pio_sz)
continue;
pio_chip = (u64)pci_resource_start(pdev, i);
@@ -1261,15 +1278,18 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
} else {
if (memap_sz)
continue;
- ioc->chip_phys = pci_resource_start(pdev, i);
- chip_phys = (u64)ioc->chip_phys;
- memap_sz = pci_resource_len(pdev, i);
- ioc->chip = ioremap(ioc->chip_phys, memap_sz);
- if (ioc->chip == NULL) {
- printk(MPT2SAS_ERR_FMT "unable to map adapter "
- "memory!\n", ioc->name);
- r = -EINVAL;
- goto out_fail;
+ /* verify memory resource is valid before using */
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ ioc->chip_phys = pci_resource_start(pdev, i);
+ chip_phys = (u64)ioc->chip_phys;
+ memap_sz = pci_resource_len(pdev, i);
+ ioc->chip = ioremap(ioc->chip_phys, memap_sz);
+ if (ioc->chip == NULL) {
+ printk(MPT2SAS_ERR_FMT "unable to map "
+ "adapter memory!\n", ioc->name);
+ r = -EINVAL;
+ goto out_fail;
+ }
}
}
}
@@ -1295,6 +1315,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
ioc->chip_phys = 0;
ioc->pci_irq = -1;
pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
return r;
}
@@ -1898,7 +1919,10 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
ioc->config_page, ioc->config_page_dma);
}
- kfree(ioc->scsi_lookup);
+ if (ioc->scsi_lookup) {
+ free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
+ ioc->scsi_lookup = NULL;
+ }
kfree(ioc->hpr_lookup);
kfree(ioc->internal_lookup);
}
@@ -2110,11 +2134,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
ioc->name, (unsigned long long) ioc->request_dma));
total_sz += sz;
- ioc->scsi_lookup = kcalloc(ioc->scsiio_depth,
- sizeof(struct request_tracker), GFP_KERNEL);
+ sz = ioc->scsiio_depth * sizeof(struct request_tracker);
+ ioc->scsi_lookup_pages = get_order(sz);
+ ioc->scsi_lookup = (struct request_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->scsi_lookup_pages);
if (!ioc->scsi_lookup) {
- printk(MPT2SAS_ERR_FMT "scsi_lookup: kcalloc failed\n",
- ioc->name);
+ printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
+ "sz(%d)\n", ioc->name, (int)sz);
goto out;
}
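
The hunk above replaces the kcalloc() of the scsi_lookup table with __get_free_pages(GFP_KERNEL, get_order(sz)), so the tracker array is now sized in whole pages rounded up to a power-of-two order (and freed with free_pages() in the matching release hunk earlier in this patch). A small userspace sketch of the order arithmetic, assuming 4 KiB pages; get_order_demo() is a local stand-in for the kernel helper.

    #include <stdio.h>

    #define PAGE_SHIFT 12                 /* assume 4 KiB pages for the illustration */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* stand-in for the kernel's get_order(): smallest order such that
     * (PAGE_SIZE << order) >= size, for size >= 1 */
    static int get_order_demo(unsigned long size)
    {
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
            order++;
            size >>= 1;
        }
        return order;
    }

    int main(void)
    {
        unsigned long sz = 10000UL * 24;  /* e.g. scsiio_depth * sizeof(struct request_tracker) */
        int order = get_order_demo(sz);

        printf("sz=%lu bytes -> order %d -> %lu bytes allocated\n",
               sz, order, PAGE_SIZE << order);
        return 0;
    }
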
@@ -3006,8 +3032,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
* since epoch ~ midnight January 1, 1970.
*/
do_gettimeofday(&current_time);
- mpi_request.TimeStamp = (current_time.tv_sec * 1000) +
- (current_time.tv_usec >> 3);
+ mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
+ (current_time.tv_usec / 1000));
if (ioc->logging_level & MPT_DEBUG_INIT) {
u32 *mfp;
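
The TimeStamp hunk above replaces the old "seconds * 1000 + usec >> 3" expression, which neither yielded milliseconds nor byte-swapped the result, with a 64-bit millisecond count wrapped in cpu_to_le64(). A quick userspace check of the arithmetic, assuming ordinary gettimeofday() semantics:

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/time.h>

    int main(void)
    {
        struct timeval tv;
        uint64_t ms;

        gettimeofday(&tv, NULL);
        /* milliseconds since the epoch, as the fixed driver computes it */
        ms = (uint64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
        printf("timestamp: %llu ms\n", (unsigned long long)ms);
        return 0;
    }
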
@@ -3179,7 +3205,7 @@ _base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
mpi_request->VP_ID = 0;
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
mpi_request->EventMasks[i] =
- le32_to_cpu(ioc->event_masks[i]);
+ cpu_to_le32(ioc->event_masks[i]);
mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->base_cmds.done);
timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
@@ -3516,7 +3542,9 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
__func__));
_base_mask_interrupts(ioc);
+ ioc->shost_recovery = 1;
_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ ioc->shost_recovery = 0;
if (ioc->pci_irq) {
synchronize_irq(pdev->irq);
free_irq(ioc->pci_irq, ioc);
@@ -3527,6 +3555,7 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
ioc->pci_irq = -1;
ioc->chip_phys = 0;
pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
return;
}
@@ -3560,8 +3589,10 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
sizeof(Mpi2PortFactsReply_t), GFP_KERNEL);
- if (!ioc->pfacts)
+ if (!ioc->pfacts) {
+ r = -ENOMEM;
goto out_free_resources;
+ }
for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
r = _base_get_port_facts(ioc, i, CAN_SLEEP);
@@ -3607,6 +3638,15 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
mutex_init(&ioc->ctl_cmds.mutex);
+ if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
+ !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
+ !ioc->config_cmds.reply || !ioc->ctl_cmds.reply) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ init_completion(&ioc->shost_recovery_done);
+
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
ioc->event_masks[i] = -1;
@@ -3639,6 +3679,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
pci_set_drvdata(ioc->pdev, NULL);
kfree(ioc->tm_cmds.reply);
kfree(ioc->transport_cmds.reply);
+ kfree(ioc->scsih_cmds.reply);
kfree(ioc->config_cmds.reply);
kfree(ioc->base_cmds.reply);
kfree(ioc->ctl_cmds.reply);
@@ -3646,6 +3687,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
ioc->ctl_cmds.reply = NULL;
ioc->base_cmds.reply = NULL;
ioc->tm_cmds.reply = NULL;
+ ioc->scsih_cmds.reply = NULL;
ioc->transport_cmds.reply = NULL;
ioc->config_cmds.reply = NULL;
ioc->pfacts = NULL;
@@ -3675,6 +3717,7 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
kfree(ioc->base_cmds.reply);
kfree(ioc->tm_cmds.reply);
kfree(ioc->transport_cmds.reply);
+ kfree(ioc->scsih_cmds.reply);
kfree(ioc->config_cmds.reply);
}
@@ -3811,9 +3854,8 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
ioc->shost_recovery = 0;
+ complete(&ioc->shost_recovery_done);
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
- if (!r)
- _base_reset_handler(ioc, MPT2_IOC_RUNNING);
return r;
}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index e18b054..b4afe43 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -3,7 +3,7 @@
* for access to MPT (Message Passing Technology) firmware.
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.h
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -69,11 +69,11 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "04.100.01.00"
-#define MPT2SAS_MAJOR_VERSION 04
+#define MPT2SAS_DRIVER_VERSION "05.100.00.02"
+#define MPT2SAS_MAJOR_VERSION 05
#define MPT2SAS_MINOR_VERSION 100
-#define MPT2SAS_BUILD_VERSION 01
-#define MPT2SAS_RELEASE_VERSION 00
+#define MPT2SAS_BUILD_VERSION 00
+#define MPT2SAS_RELEASE_VERSION 02
/*
* Set MPT2SAS_SG_DEPTH value based on user input.
@@ -119,7 +119,6 @@
#define MPT2_IOC_PRE_RESET 1 /* prior to host reset */
#define MPT2_IOC_AFTER_RESET 2 /* just after host reset */
#define MPT2_IOC_DONE_RESET 3 /* links re-initialized */
-#define MPT2_IOC_RUNNING 4 /* shost running */
/*
* logging format
@@ -260,16 +259,6 @@ struct _internal_cmd {
u16 smid;
};
-/*
- * SAS Topology Structures
- */
-
-#define MPTSAS_STATE_TR_SEND 0x0001
-#define MPTSAS_STATE_TR_COMPLETE 0x0002
-#define MPTSAS_STATE_CNTRL_SEND 0x0004
-#define MPTSAS_STATE_CNTRL_COMPLETE 0x0008
-
-#define MPT2SAS_REQ_SAS_CNTRL 0x0010
/**
* struct _sas_device - attached device information
@@ -307,7 +296,6 @@ struct _sas_device {
u16 slot;
u8 hidden_raid_component;
u8 responding;
- u16 state;
};
/**
@@ -378,6 +366,7 @@ struct _sas_port {
* @phy_id: unique phy id
* @handle: device handle for this phy
* @attached_handle: device handle for attached device
+ * @phy_belongs_to_port: port has been created for this phy
*/
struct _sas_phy {
struct list_head port_siblings;
@@ -387,6 +376,7 @@ struct _sas_phy {
u8 phy_id;
u16 handle;
u16 attached_handle;
+ u8 phy_belongs_to_port;
};
/**
@@ -603,7 +593,6 @@ struct MPT2SAS_ADAPTER {
/* fw event handler */
char firmware_event_name[20];
struct workqueue_struct *firmware_event_thread;
- u8 fw_events_off;
spinlock_t fw_event_lock;
struct list_head fw_event_list;
@@ -611,6 +600,7 @@ struct MPT2SAS_ADAPTER {
int aen_event_read_flag;
u8 broadcast_aen_busy;
u8 shost_recovery;
+ struct completion shost_recovery_done;
spinlock_t ioc_reset_in_progress_lock;
u8 ioc_link_reset_in_progress;
u8 ignore_loginfos;
@@ -688,7 +678,8 @@ struct MPT2SAS_ADAPTER {
dma_addr_t request_dma;
u32 request_dma_sz;
struct request_tracker *scsi_lookup;
- spinlock_t scsi_lookup_lock;
+ ulong scsi_lookup_pages;
+ spinlock_t scsi_lookup_lock;
struct list_head free_list;
int pending_io_count;
wait_queue_head_t reset_wq;
@@ -700,7 +691,7 @@ struct MPT2SAS_ADAPTER {
u16 max_sges_in_chain_message;
u16 chains_needed_per_io;
u16 chain_offset_value_for_main_message;
- u16 chain_depth;
+ u32 chain_depth;
/* hi-priority queue */
u16 hi_priority_smid;
@@ -814,8 +805,9 @@ void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
/* scsih shared API */
u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
-void mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
- u8 type, u16 smid_task, ulong timeout);
+int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, uint lun, u8 type, u16 smid_task,
+ ulong timeout, struct scsi_cmnd *scmd);
void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index cf44b35..e762dd3 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -2,7 +2,7 @@
* This module provides common API for accessing firmware configuration pages
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -1390,12 +1390,12 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
goto out;
for (i = 0; i < config_page->NumElements; i++) {
- if ((config_page->ConfigElement[i].ElementFlags &
+ if ((le16_to_cpu(config_page->ConfigElement[i].ElementFlags) &
MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE) !=
MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT)
continue;
- if (config_page->ConfigElement[i].PhysDiskDevHandle ==
- pd_handle) {
+ if (le16_to_cpu(config_page->ConfigElement[i].
+ PhysDiskDevHandle) == pd_handle) {
*volume_handle = le16_to_cpu(config_page->
ConfigElement[i].VolDevHandle);
r = 0;
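
The mpt2sas_config.c hunk above adds the le16_to_cpu() conversions that were missing before the ElementFlags and PhysDiskDevHandle comparisons; without them the checks only behave correctly on little-endian hosts. A userspace analogue of the idea, using the glibc <endian.h> helpers in place of the kernel ones:

    #include <stdio.h>
    #include <stdint.h>
    #include <endian.h>

    int main(void)
    {
        /* a 16-bit device handle as the firmware stores it on the wire (little endian) */
        uint16_t wire_handle = htole16(0x0011);
        uint16_t pd_handle = 0x0011;

        /* comparing the raw wire value breaks on big-endian hosts, so convert
         * to CPU byte order first, as le16_to_cpu() does in the kernel */
        if (le16toh(wire_handle) == pd_handle)
            printf("handle 0x%04x matches\n", pd_handle);
        return 0;
    }
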
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index fa9bf83..d88e975 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -3,7 +3,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -533,7 +533,7 @@ _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
if (!found) {
dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
- desc, tm_request->DevHandle, lun));
+ desc, le16_to_cpu(tm_request->DevHandle), lun));
tm_reply = ioc->ctl_cmds.reply;
tm_reply->DevHandle = tm_request->DevHandle;
tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -551,7 +551,8 @@ _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
- desc, tm_request->DevHandle, lun, tm_request->TaskMID));
+ desc, le16_to_cpu(tm_request->DevHandle), lun,
+ le16_to_cpu(tm_request->TaskMID)));
return 0;
}
@@ -647,9 +648,9 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
- if (!mpi_request->FunctionDependent1 ||
- mpi_request->FunctionDependent1 >
- cpu_to_le16(ioc->facts.MaxDevHandle)) {
+ if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
+ le16_to_cpu(mpi_request->FunctionDependent1) >
+ ioc->facts.MaxDevHandle) {
ret = -EINVAL;
mpt2sas_base_free_smid(ioc, smid);
goto out;
@@ -743,8 +744,11 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
mpt2sas_base_get_sense_buffer_dma(ioc, smid);
priv_sense = mpt2sas_base_get_sense_buffer(ioc, smid);
memset(priv_sense, 0, SCSI_SENSE_BUFFERSIZE);
- mpt2sas_base_put_smid_scsi_io(ioc, smid,
- le16_to_cpu(mpi_request->FunctionDependent1));
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
+ mpt2sas_base_put_smid_scsi_io(ioc, smid,
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ else
+ mpt2sas_base_put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SCSI_TASK_MGMT:
@@ -752,6 +756,10 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
Mpi2SCSITaskManagementRequest_t *tm_request =
(Mpi2SCSITaskManagementRequest_t *)mpi_request;
+ dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "TASK_MGMT: "
+ "handle(0x%04x), task_type(0x%02x)\n", ioc->name,
+ le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
+
if (tm_request->TaskType ==
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
tm_request->TaskType ==
@@ -762,7 +770,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
}
}
- mutex_lock(&ioc->tm_cmds.mutex);
mpt2sas_scsih_set_tm_flag(ioc, le16_to_cpu(
tm_request->DevHandle));
mpt2sas_base_put_smid_hi_priority(ioc, smid);
@@ -818,7 +825,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
Mpi2SCSITaskManagementRequest_t *tm_request =
(Mpi2SCSITaskManagementRequest_t *)mpi_request;
- mutex_unlock(&ioc->tm_cmds.mutex);
mpt2sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
tm_request->DevHandle));
} else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
@@ -897,14 +903,13 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
printk(MPT2SAS_INFO_FMT "issue target reset: handle "
"= (0x%04x)\n", ioc->name,
- mpi_request->FunctionDependent1);
+ le16_to_cpu(mpi_request->FunctionDependent1));
mpt2sas_halt_firmware(ioc);
- mutex_lock(&ioc->tm_cmds.mutex);
mpt2sas_scsih_issue_tm(ioc,
- mpi_request->FunctionDependent1, 0,
- MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10);
+ le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
+ 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10,
+ NULL);
ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->tm_cmds.mutex);
} else
mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
@@ -1373,7 +1378,8 @@ _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: diag_buffer(0x%p), "
"dma(0x%llx), sz(%d)\n", ioc->name, __func__, request_data,
- (unsigned long long)request_data_dma, mpi_request->BufferLength));
+ (unsigned long long)request_data_dma,
+ le32_to_cpu(mpi_request->BufferLength)));
for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++)
mpi_request->ProductSpecific[i] =
@@ -2334,8 +2340,8 @@ _ctl_version_nvdata_persistent_show(struct device *cdev,
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%02xh\n",
- le16_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
}
static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
_ctl_version_nvdata_persistent_show, NULL);
@@ -2354,8 +2360,8 @@ _ctl_version_nvdata_default_show(struct device *cdev,
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%02xh\n",
- le16_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
}
static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
_ctl_version_nvdata_default_show, NULL);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index 8a5eeb1..69916e4 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -3,7 +3,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
index 5308a25..3dcddfe 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_debug.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -2,7 +2,7 @@
* Logging Support for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index be171ed..c5ff26a 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2,7 +2,7 @@
* Scsi Host Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -52,6 +52,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/aer.h>
#include <linux/raid_class.h>
#include <linux/slab.h>
@@ -109,14 +110,16 @@ struct sense_info {
};
+#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
+
/**
* struct fw_event_work - firmware event struct
* @list: link list framework
* @work: work object (ioc->fault_reset_work_q)
+ * @cancel_pending_work: flag set during reset handling
* @ioc: per adapter object
* @VF_ID: virtual function id
* @VP_ID: virtual port id
- * @host_reset_handling: handling events during host reset
* @ignore: flag meaning this event has been marked to ignore
* @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h
* @event_data: reply event data payload follows
@@ -125,11 +128,11 @@ struct sense_info {
*/
struct fw_event_work {
struct list_head list;
- struct work_struct work;
+ u8 cancel_pending_work;
+ struct delayed_work delayed_work;
struct MPT2SAS_ADAPTER *ioc;
u8 VF_ID;
u8 VP_ID;
- u8 host_reset_handling;
u8 ignore;
u16 event;
void *event_data;
@@ -482,27 +485,17 @@ struct _sas_device *
mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
u64 sas_address)
{
- struct _sas_device *sas_device, *r;
+ struct _sas_device *sas_device;
- r = NULL;
- /* check the sas_device_init_list */
- list_for_each_entry(sas_device, &ioc->sas_device_init_list,
- list) {
- if (sas_device->sas_address != sas_address)
- continue;
- r = sas_device;
- goto out;
- }
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->sas_address == sas_address)
+ return sas_device;
- /* then check the sas_device_list */
- list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
- if (sas_device->sas_address != sas_address)
- continue;
- r = sas_device;
- goto out;
- }
- out:
- return r;
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->sas_address == sas_address)
+ return sas_device;
+
+ return NULL;
}
/**
@@ -517,28 +510,17 @@ mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
static struct _sas_device *
_scsih_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
- struct _sas_device *sas_device, *r;
+ struct _sas_device *sas_device;
- r = NULL;
- if (ioc->wait_for_port_enable_to_complete) {
- list_for_each_entry(sas_device, &ioc->sas_device_init_list,
- list) {
- if (sas_device->handle != handle)
- continue;
- r = sas_device;
- goto out;
- }
- } else {
- list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
- if (sas_device->handle != handle)
- continue;
- r = sas_device;
- goto out;
- }
- }
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->handle == handle)
+ return sas_device;
- out:
- return r;
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->handle == handle)
+ return sas_device;
+
+ return NULL;
}
/**
@@ -555,10 +537,15 @@ _scsih_sas_device_remove(struct MPT2SAS_ADAPTER *ioc,
{
unsigned long flags;
+ if (!sas_device)
+ return;
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- list_del(&sas_device->list);
- memset(sas_device, 0, sizeof(struct _sas_device));
- kfree(sas_device);
+ if (mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device->sas_address)) {
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
@@ -988,7 +975,7 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
u32 chain_offset;
u32 chain_length;
u32 chain_flags;
- u32 sges_left;
+ int sges_left;
u32 sges_in_segment;
u32 sgl_flags;
u32 sgl_flags_last_element;
@@ -1009,7 +996,7 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
- if (!sges_left) {
+ if (sges_left < 0) {
sdev_printk(KERN_ERR, scmd->device, "pci_map_sg"
" failed: request for %d bytes!\n", scsi_bufflen(scmd));
return -ENOMEM;
@@ -1395,7 +1382,7 @@ _scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
}
flags = le16_to_cpu(sas_device_pg0.Flags);
- device_info = le16_to_cpu(sas_device_pg0.DeviceInfo);
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
sdev_printk(KERN_INFO, sdev,
"atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
@@ -1963,65 +1950,78 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
}
}
+
/**
* mpt2sas_scsih_issue_tm - main routine for sending tm requests
* @ioc: per adapter struct
* @device_handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
* @lun: lun number
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
* @smid_task: smid assigned to the task
* @timeout: timeout in seconds
- * Context: The calling function needs to acquire the tm_cmds.mutex
+ * Context: user
*
* A generic API for sending task management requests to firmware.
*
- * The ioc->tm_cmds.status flag should be MPT2_CMD_NOT_USED before calling
- * this API.
- *
* The callback index is set inside `ioc->tm_cb_idx`.
*
- * Return nothing.
+ * Return SUCCESS or FAILED.
*/
-void
-mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
- u8 type, u16 smid_task, ulong timeout)
+int
+mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
+ uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
+ struct scsi_cmnd *scmd)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
u16 smid = 0;
u32 ioc_state;
unsigned long timeleft;
+ struct scsi_cmnd *scmd_lookup;
+ int rc;
+ mutex_lock(&ioc->tm_cmds.mutex);
if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) {
printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n",
__func__, ioc->name);
- return;
+ rc = FAILED;
+ goto err_out;
}
- if (ioc->shost_recovery) {
+ if (ioc->shost_recovery || ioc->remove_host) {
printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
__func__, ioc->name);
- return;
+ rc = FAILED;
+ goto err_out;
}
ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
if (ioc_state & MPI2_DOORBELL_USED) {
dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "unexpected doorbell "
"active!\n", ioc->name));
- goto issue_host_reset;
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = SUCCESS;
+ goto err_out;
}
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
mpt2sas_base_fault_info(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
- goto issue_host_reset;
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = SUCCESS;
+ goto err_out;
}
smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
if (!smid) {
printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
- return;
+ rc = FAILED;
+ goto err_out;
}
dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "sending tm: handle(0x%04x),"
@@ -2035,21 +2035,24 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = type;
mpi_request->TaskMID = cpu_to_le16(smid_task);
- mpi_request->VP_ID = 0; /* TODO */
- mpi_request->VF_ID = 0;
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt2sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
mpt2sas_base_put_smid_hi_priority(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
- mpt2sas_scsih_clear_tm_flag(ioc, handle);
if (!(ioc->tm_cmds.status & MPT2_CMD_COMPLETE)) {
printk(MPT2SAS_ERR_FMT "%s: timeout\n",
ioc->name, __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2SCSITaskManagementRequest_t)/4);
- if (!(ioc->tm_cmds.status & MPT2_CMD_RESET))
- goto issue_host_reset;
+ if (!(ioc->tm_cmds.status & MPT2_CMD_RESET)) {
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = SUCCESS;
+ ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+ mpt2sas_scsih_clear_tm_flag(ioc, handle);
+ goto err_out;
+ }
}
if (ioc->tm_cmds.status & MPT2_CMD_REPLY_VALID) {
@@ -2059,12 +2062,57 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
le32_to_cpu(mpi_reply->IOCLogInfo),
le32_to_cpu(mpi_reply->TerminationCount)));
- if (ioc->logging_level & MPT_DEBUG_TM)
+ if (ioc->logging_level & MPT_DEBUG_TM) {
_scsih_response_code(ioc, mpi_reply->ResponseCode);
+ if (mpi_reply->IOCStatus)
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4);
+ }
}
- return;
- issue_host_reset:
- mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER);
+
+ /* sanity check:
+ * Check to see whether the commands were terminated.
+ * This is only needed for eh callbacks, hence the scmd check.
+ */
+ rc = FAILED;
+ if (scmd == NULL)
+ goto bypass_sanity_checks;
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ scmd_lookup = _scsih_scsi_lookup_get(ioc, smid_task);
+ if (scmd_lookup && (scmd_lookup->serial_number ==
+ scmd->serial_number))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (_scsih_scsi_lookup_find_by_target(ioc, id, channel))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+ }
+
+ bypass_sanity_checks:
+
+ mpt2sas_scsih_clear_tm_flag(ioc, handle);
+ ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+ mutex_unlock(&ioc->tm_cmds.mutex);
+
+ return rc;
+
+ err_out:
+ mutex_unlock(&ioc->tm_cmds.mutex);
+ return rc;
}
/**
@@ -2081,7 +2129,6 @@ _scsih_abort(struct scsi_cmnd *scmd)
u16 smid;
u16 handle;
int r;
- struct scsi_cmnd *scmd_lookup;
printk(MPT2SAS_INFO_FMT "attempting task abort! scmd(%p)\n",
ioc->name, scmd);
@@ -2116,19 +2163,10 @@ _scsih_abort(struct scsi_cmnd *scmd)
mpt2sas_halt_firmware(ioc);
- mutex_lock(&ioc->tm_cmds.mutex);
handle = sas_device_priv_data->sas_target->handle;
- mpt2sas_scsih_issue_tm(ioc, handle, sas_device_priv_data->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30);
-
- /* sanity check - see whether command actually completed */
- scmd_lookup = _scsih_scsi_lookup_get(ioc, smid);
- if (scmd_lookup && (scmd_lookup->serial_number == scmd->serial_number))
- r = FAILED;
- else
- r = SUCCESS;
- ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->tm_cmds.mutex);
+ r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, scmd);
out:
printk(MPT2SAS_INFO_FMT "task abort: %s scmd(%p)\n",
@@ -2185,22 +2223,9 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
goto out;
}
- mutex_lock(&ioc->tm_cmds.mutex);
- mpt2sas_scsih_issue_tm(ioc, handle, 0,
- MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, scmd->device->lun,
- 30);
-
- /*
- * sanity check see whether all commands to this device been
- * completed
- */
- if (_scsih_scsi_lookup_find_by_lun(ioc, scmd->device->id,
- scmd->device->lun, scmd->device->channel))
- r = FAILED;
- else
- r = SUCCESS;
- ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->tm_cmds.mutex);
+ r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, scmd);
out:
printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n",
@@ -2257,21 +2282,9 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
goto out;
}
- mutex_lock(&ioc->tm_cmds.mutex);
- mpt2sas_scsih_issue_tm(ioc, handle, 0,
- MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);
-
- /*
- * sanity check see whether all commands to this target been
- * completed
- */
- if (_scsih_scsi_lookup_find_by_target(ioc, scmd->device->id,
- scmd->device->channel))
- r = FAILED;
- else
- r = SUCCESS;
- ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->tm_cmds.mutex);
+ r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 30, scmd);
out:
printk(MPT2SAS_INFO_FMT "target reset: %s scmd(%p)\n",
@@ -2325,8 +2338,9 @@ _scsih_fw_event_add(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
spin_lock_irqsave(&ioc->fw_event_lock, flags);
list_add_tail(&fw_event->list, &ioc->fw_event_list);
- INIT_WORK(&fw_event->work, _firmware_event_work);
- queue_work(ioc->firmware_event_thread, &fw_event->work);
+ INIT_DELAYED_WORK(&fw_event->delayed_work, _firmware_event_work);
+ queue_delayed_work(ioc->firmware_event_thread,
+ &fw_event->delayed_work, 0);
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
@@ -2353,61 +2367,53 @@ _scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
+
/**
- * _scsih_fw_event_add - requeue an event
+ * _scsih_queue_rescan - queue a topology rescan from user context
* @ioc: per adapter object
- * @fw_event: object describing the event
- * Context: This function will acquire ioc->fw_event_lock.
*
* Return nothing.
*/
static void
-_scsih_fw_event_requeue(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
- *fw_event, unsigned long delay)
+_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc)
{
- unsigned long flags;
- if (ioc->firmware_event_thread == NULL)
- return;
+ struct fw_event_work *fw_event;
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- queue_work(ioc->firmware_event_thread, &fw_event->work);
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (ioc->wait_for_port_enable_to_complete)
+ return;
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
}
/**
- * _scsih_fw_event_off - turn flag off preventing event handling
+ * _scsih_fw_event_cleanup_queue - cleanup event queue
* @ioc: per adapter object
*
- * Used to prevent handling of firmware events during adapter reset
- * driver unload.
+ * Walk the firmware event queue, either killing timers, or waiting
+ * for outstanding events to complete
*
* Return nothing.
*/
static void
-_scsih_fw_event_off(struct MPT2SAS_ADAPTER *ioc)
+_scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc)
{
- unsigned long flags;
+ struct fw_event_work *fw_event, *next;
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- ioc->fw_events_off = 1;
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
-
-}
-
-/**
- * _scsih_fw_event_on - turn flag on allowing firmware event handling
- * @ioc: per adapter object
- *
- * Returns nothing.
- */
-static void
-_scsih_fw_event_on(struct MPT2SAS_ADAPTER *ioc)
-{
- unsigned long flags;
+ if (list_empty(&ioc->fw_event_list) ||
+ !ioc->firmware_event_thread || in_interrupt())
+ return;
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- ioc->fw_events_off = 0;
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
+ if (cancel_delayed_work(&fw_event->delayed_work)) {
+ _scsih_fw_event_free(ioc, fw_event);
+ continue;
+ }
+ fw_event->cancel_pending_work = 1;
+ }
}
/**
@@ -2571,25 +2577,24 @@ static void
_scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
- struct MPT2SAS_TARGET *sas_target_priv_data;
u16 smid;
struct _sas_device *sas_device;
unsigned long flags;
struct _tr_list *delayed_tr;
- if (ioc->shost_recovery) {
- printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ if (ioc->shost_recovery || ioc->remove_host) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
+ "progress!\n", __func__, ioc->name));
return;
}
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
- /* skip is hidden raid component */
- if (sas_device && sas_device->hidden_raid_component)
+ if (sas_device && sas_device->hidden_raid_component) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
return;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
if (!smid) {
@@ -2598,36 +2603,16 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
return;
INIT_LIST_HEAD(&delayed_tr->list);
delayed_tr->handle = handle;
- delayed_tr->state = MPT2SAS_REQ_SAS_CNTRL;
- list_add_tail(&delayed_tr->list,
- &ioc->delayed_tr_list);
- if (sas_device && sas_device->starget) {
- dewtprintk(ioc, starget_printk(KERN_INFO,
- sas_device->starget, "DELAYED:tr:handle(0x%04x), "
- "(open)\n", handle));
- } else {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "DELAYED:tr:handle(0x%04x), (open)\n",
- ioc->name, handle));
- }
- return;
- }
-
- if (sas_device) {
- sas_device->state |= MPTSAS_STATE_TR_SEND;
- sas_device->state |= MPT2SAS_REQ_SAS_CNTRL;
- if (sas_device->starget && sas_device->starget->hostdata) {
- sas_target_priv_data = sas_device->starget->hostdata;
- sas_target_priv_data->tm_busy = 1;
- dewtprintk(ioc, starget_printk(KERN_INFO,
- sas_device->starget, "tr:handle(0x%04x), (open)\n",
- handle));
- }
- } else {
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "tr:handle(0x%04x), (open)\n", ioc->name, handle));
+ "DELAYED:tr:handle(0x%04x), (open)\n",
+ ioc->name, handle));
+ return;
}
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "tr_send:handle(0x%04x), "
+ "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid,
+ ioc->tm_tr_cb_idx));
mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -2657,35 +2642,15 @@ static u8
_scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
u8 msix_index, u32 reply)
{
- unsigned long flags;
- u16 handle;
- struct _sas_device *sas_device;
Mpi2SasIoUnitControlReply_t *mpi_reply =
mpt2sas_base_get_reply_virt_addr(ioc, reply);
- handle = le16_to_cpu(mpi_reply->DevHandle);
-
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
- if (sas_device) {
- sas_device->state |= MPTSAS_STATE_CNTRL_COMPLETE;
- if (sas_device->starget)
- dewtprintk(ioc, starget_printk(KERN_INFO,
- sas_device->starget,
- "sc_complete:handle(0x%04x), "
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- handle, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo)));
- } else {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "sc_complete:handle(0x%04x), "
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, handle, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo)));
- }
-
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "sc_complete:handle(0x%04x), (open) "
+ "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo)));
return 1;
}
@@ -2709,87 +2674,63 @@ static u8
_scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply)
{
- unsigned long flags;
u16 handle;
- struct _sas_device *sas_device;
+ Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
Mpi2SCSITaskManagementReply_t *mpi_reply =
mpt2sas_base_get_reply_virt_addr(ioc, reply);
Mpi2SasIoUnitControlRequest_t *mpi_request;
u16 smid_sas_ctrl;
- struct MPT2SAS_TARGET *sas_target_priv_data;
struct _tr_list *delayed_tr;
- u8 rc;
- handle = le16_to_cpu(mpi_reply->DevHandle);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
- if (sas_device) {
- sas_device->state |= MPTSAS_STATE_TR_COMPLETE;
- if (sas_device->starget) {
- dewtprintk(ioc, starget_printk(KERN_INFO,
- sas_device->starget, "tr_complete:handle(0x%04x), "
- "(%s) ioc_status(0x%04x), loginfo(0x%08x), "
- "completed(%d)\n", sas_device->handle,
- (sas_device->state & MPT2SAS_REQ_SAS_CNTRL) ?
- "open" : "active",
- le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
- if (sas_device->starget->hostdata) {
- sas_target_priv_data =
- sas_device->starget->hostdata;
- sas_target_priv_data->tm_busy = 0;
- }
- }
- } else {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "tr_complete:handle(0x%04x), (open) ioc_status(0x%04x), "
- "loginfo(0x%08x), completed(%d)\n", ioc->name,
- handle, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
+ if (ioc->shost_recovery || ioc->remove_host) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
+ "progress!\n", __func__, ioc->name));
+ return 1;
}
- if (!list_empty(&ioc->delayed_tr_list)) {
- delayed_tr = list_entry(ioc->delayed_tr_list.next,
- struct _tr_list, list);
- mpt2sas_base_free_smid(ioc, smid);
- if (delayed_tr->state & MPT2SAS_REQ_SAS_CNTRL)
- _scsih_tm_tr_send(ioc, delayed_tr->handle);
- list_del(&delayed_tr->list);
- kfree(delayed_tr);
- rc = 0; /* tells base_interrupt not to free mf */
- } else
- rc = 1;
-
- if (sas_device && !(sas_device->state & MPT2SAS_REQ_SAS_CNTRL))
- return rc;
-
- if (ioc->shost_recovery) {
- printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
- return rc;
+ mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid);
+ handle = le16_to_cpu(mpi_request_tm->DevHandle);
+ if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "spurious interrupt: "
+ "handle(0x%04x:0x%04x), smid(%d)!!!\n", ioc->name, handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
+ return 0;
}
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
+ "loginfo(0x%08x), completed(%d)\n", ioc->name,
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+
smid_sas_ctrl = mpt2sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
if (!smid_sas_ctrl) {
printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
- return rc;
+ return 1;
}
- if (sas_device)
- sas_device->state |= MPTSAS_STATE_CNTRL_SEND;
-
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sc_send:handle(0x%04x), "
+ "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid_sas_ctrl,
+ ioc->tm_sas_control_cb_idx));
mpi_request = mpt2sas_base_get_msg_frame(ioc, smid_sas_ctrl);
memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
- mpi_request->DevHandle = mpi_reply->DevHandle;
+ mpi_request->DevHandle = mpi_request_tm->DevHandle;
mpt2sas_base_put_smid_default(ioc, smid_sas_ctrl);
- return rc;
+
+ if (!list_empty(&ioc->delayed_tr_list)) {
+ delayed_tr = list_entry(ioc->delayed_tr_list.next,
+ struct _tr_list, list);
+ mpt2sas_base_free_smid(ioc, smid);
+ _scsih_tm_tr_send(ioc, delayed_tr->handle);
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ return 0; /* tells base_interrupt not to free mf */
+ }
+ return 1;
}
/**
@@ -3021,25 +2962,32 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
scmd->scsi_done = done;
sas_device_priv_data = scmd->device->hostdata;
- if (!sas_device_priv_data) {
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
}
sas_target_priv_data = sas_device_priv_data->sas_target;
- if (!sas_target_priv_data || sas_target_priv_data->handle ==
- MPT2SAS_INVALID_DEVICE_HANDLE || sas_target_priv_data->deleted) {
+ /* invalid device handle */
+ if (sas_target_priv_data->handle == MPT2SAS_INVALID_DEVICE_HANDLE) {
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
}
- /* see if we are busy with task managment stuff */
- if (sas_device_priv_data->block || sas_target_priv_data->tm_busy)
- return SCSI_MLQUEUE_DEVICE_BUSY;
- else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
+ /* host recovery or link resets sent via IOCTLs */
+ if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
return SCSI_MLQUEUE_HOST_BUSY;
+ /* device busy with task management */
+ else if (sas_device_priv_data->block || sas_target_priv_data->tm_busy)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ /* device has been deleted */
+ else if (sas_target_priv_data->deleted) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
mpi_control = MPI2_SCSIIO_CONTROL_READ;
@@ -3110,8 +3058,11 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
}
}
- mpt2sas_base_put_smid_scsi_io(ioc, smid,
- sas_device_priv_data->sas_target->handle);
+ if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST))
+ mpt2sas_base_put_smid_scsi_io(ioc, smid,
+ sas_device_priv_data->sas_target->handle);
+ else
+ mpt2sas_base_put_smid_default(ioc, smid);
return 0;
out:
@@ -3301,8 +3252,8 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct sense_info data;
_scsih_normalize_sense(scmd->sense_buffer, &data);
printk(MPT2SAS_WARN_FMT "\t[sense_key,asc,ascq]: "
- "[0x%02x,0x%02x,0x%02x]\n", ioc->name, data.skey,
- data.asc, data.ascq);
+ "[0x%02x,0x%02x,0x%02x], count(%d)\n", ioc->name, data.skey,
+ data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
}
if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
@@ -3356,7 +3307,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
mpi_request.SlotStatus =
- MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
+ cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
mpi_request.DevHandle = cpu_to_le16(handle);
mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
@@ -4008,6 +3959,134 @@ _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
}
/**
+ * _scsih_check_access_status - check access flags
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle: sas device handle
+ * @access_flags: errors returned during discovery of the device
+ *
+ * Return 0 for success, else failure
+ */
+static u8
+_scsih_check_access_status(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
+ u16 handle, u8 access_status)
+{
+ u8 rc = 1;
+ char *desc = NULL;
+
+ switch (access_status) {
+ case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
+ rc = 0;
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
+ desc = "sata capability failed";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
+ desc = "sata affiliation conflict";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
+ desc = "route not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
+ desc = "smp error not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
+ desc = "device blocked";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
+ desc = "sata initialization failed";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+
+ if (!rc)
+ return 0;
+
+ printk(MPT2SAS_ERR_FMT "discovery errors(%s): sas_address(0x%016llx), "
+ "handle(0x%04x)\n", ioc->name, desc,
+ (unsigned long long)sas_address, handle);
+ return rc;
+}
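+
+The helper above collapses the firmware's per-device access status into a
+pass/fail result plus a printable description. Below is a minimal standalone
+sketch of that pattern; the enum values are illustrative placeholders, not
+the MPI2_SAS_DEVICE0_ASTATUS_* codes.
+
+/* Minimal sketch of the access-status check pattern, compiled standalone.
+ * The status codes below are illustrative placeholders, not MPI2 values. */
+#include <stdio.h>
+
+enum access_status {
+	ASTATUS_NO_ERRORS,
+	ASTATUS_NEEDS_INIT,
+	ASTATUS_CAPABILITY_FAILED,
+	ASTATUS_DEVICE_BLOCKED,
+};
+
+/* Return 0 when the device is usable; otherwise log a description and
+ * return 1, mirroring the driver's "0 for success, else failure" rule. */
+static int check_access_status(unsigned long long sas_address,
+			       unsigned short handle, enum access_status status)
+{
+	const char *desc;
+
+	switch (status) {
+	case ASTATUS_NO_ERRORS:
+	case ASTATUS_NEEDS_INIT:	/* benign: device still comes up */
+		return 0;
+	case ASTATUS_CAPABILITY_FAILED:
+		desc = "sata capability failed";
+		break;
+	case ASTATUS_DEVICE_BLOCKED:
+		desc = "device blocked";
+		break;
+	default:
+		desc = "unknown";
+		break;
+	}
+	printf("discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
+	       desc, sas_address, handle);
+	return 1;
+}
+
+int main(void)
+{
+	return check_access_status(0x5000c50012345678ULL, 0x0009,
+				   ASTATUS_DEVICE_BLOCKED);
+}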
+
+static void
+_scsih_check_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ struct _sas_device *sas_device;
+ u32 ioc_status;
+ unsigned long flags;
+ u64 sas_address;
+ struct scsi_target *starget;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ u32 device_info;
+
+ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
+ return;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ return;
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+
+ if (!sas_device) {
+ printk(MPT2SAS_ERR_FMT "device is not present "
+ "handle(0x%04x), no sas_device!!!\n", ioc->name, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ if (unlikely(sas_device->handle != handle)) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ starget_printk(KERN_INFO, starget, "handle changed from(0x%04x)"
+ " to (0x%04x)!!!\n", sas_device->handle, handle);
+ sas_target_priv_data->handle = handle;
+ sas_device->handle = handle;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ /* check if device is present */
+ if (!(le16_to_cpu(sas_device_pg0.Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+ printk(MPT2SAS_ERR_FMT "device is not present "
+ "handle(0x%04x), flags!!!\n", ioc->name, handle);
+ return;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus))
+ return;
+ _scsih_ublock_io_device(ioc, handle);
+
+}
+
+/**
* _scsih_add_device - creating sas device object
* @ioc: per adapter object
* @handle: sas device handle
@@ -4045,6 +4124,8 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
return -1;
}
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+
/* check if device is present */
if (!(le16_to_cpu(sas_device_pg0.Flags) &
MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
@@ -4055,15 +4136,10 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
return -1;
}
- /* check if there were any issus with discovery */
- if (sas_device_pg0.AccessStatus ==
- MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED) {
- printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- printk(MPT2SAS_ERR_FMT "AccessStatus = 0x%02x\n",
- ioc->name, sas_device_pg0.AccessStatus);
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus))
return -1;
- }
/* check if this is end device */
device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
@@ -4073,17 +4149,14 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
return -1;
}
- sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
sas_address);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device) {
- _scsih_ublock_io_device(ioc, handle);
+ if (sas_device)
return 0;
- }
sas_device = kzalloc(sizeof(struct _sas_device),
GFP_KERNEL);
@@ -4126,67 +4199,38 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
}
/**
- * _scsih_remove_device - removing sas device object
+ * _scsih_remove_pd_device - removing sas device pd object
* @ioc: per adapter object
- * @sas_device: the sas_device object
+ * @sas_device_delete: the sas_device object
*
+ * For hidden raid components, the driver-firmware handshake is done
+ * from the hotplug work threads.
* Return nothing.
*/
static void
-_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device
- *sas_device)
+_scsih_remove_pd_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device
+ sas_device)
{
- struct MPT2SAS_TARGET *sas_target_priv_data;
Mpi2SasIoUnitControlReply_t mpi_reply;
Mpi2SasIoUnitControlRequest_t mpi_request;
- u16 device_handle, handle;
-
- if (!sas_device)
- return;
+ u16 vol_handle, handle;
- handle = sas_device->handle;
+ handle = sas_device.handle;
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: handle(0x%04x),"
" sas_addr(0x%016llx)\n", ioc->name, __func__, handle,
- (unsigned long long) sas_device->sas_address));
-
- if (sas_device->starget && sas_device->starget->hostdata) {
- sas_target_priv_data = sas_device->starget->hostdata;
- sas_target_priv_data->deleted = 1;
- }
-
- if (ioc->remove_host || ioc->shost_recovery || !handle)
- goto out;
+ (unsigned long long) sas_device.sas_address));
- if ((sas_device->state & MPTSAS_STATE_TR_COMPLETE)) {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "\tskip "
- "target_reset handle(0x%04x)\n", ioc->name,
- handle));
- goto skip_tr;
- }
-
- /* Target Reset to flush out all the outstanding IO */
- device_handle = (sas_device->hidden_raid_component) ?
- sas_device->volume_handle : handle;
- if (device_handle) {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset: "
- "handle(0x%04x)\n", ioc->name, device_handle));
- mutex_lock(&ioc->tm_cmds.mutex);
- mpt2sas_scsih_issue_tm(ioc, device_handle, 0,
- MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10);
- ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->tm_cmds.mutex);
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset "
- "done: handle(0x%04x)\n", ioc->name, device_handle));
- if (ioc->shost_recovery)
- goto out;
- }
- skip_tr:
-
- if ((sas_device->state & MPTSAS_STATE_CNTRL_COMPLETE)) {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "\tskip "
- "sas_cntrl handle(0x%04x)\n", ioc->name, handle));
- goto out;
- }
+ vol_handle = sas_device.volume_handle;
+ if (!vol_handle)
+ return;
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset: "
+ "handle(0x%04x)\n", ioc->name, vol_handle));
+ mpt2sas_scsih_issue_tm(ioc, vol_handle, 0, 0, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, NULL);
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset "
+ "done: handle(0x%04x)\n", ioc->name, vol_handle));
+ if (ioc->shost_recovery)
+ return;
/* SAS_IO_UNIT_CNTR - send REMOVE_DEVICE */
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sas_iounit: handle"
@@ -4194,34 +4238,68 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device
memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
- mpi_request.DevHandle = handle;
- mpi_request.VF_ID = 0; /* TODO */
- mpi_request.VP_ID = 0;
+ mpi_request.DevHandle = cpu_to_le16(handle);
if ((mpt2sas_base_sas_iounit_control(ioc, &mpi_reply,
- &mpi_request)) != 0) {
+ &mpi_request)) != 0)
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
- }
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sas_iounit: ioc_status"
"(0x%04x), loginfo(0x%08x)\n", ioc->name,
le16_to_cpu(mpi_reply.IOCStatus),
le32_to_cpu(mpi_reply.IOCLogInfo)));
- out:
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: handle(0x%04x),"
+ " sas_addr(0x%016llx)\n", ioc->name, __func__, handle,
+ (unsigned long long) sas_device.sas_address));
+}
- _scsih_ublock_io_device(ioc, handle);
+/**
+ * _scsih_remove_device - removing sas device object
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ struct _sas_device sas_device_backup;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
- mpt2sas_transport_port_remove(ioc, sas_device->sas_address,
- sas_device->sas_address_parent);
+ if (!sas_device)
+ return;
- printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
- "(0x%016llx)\n", ioc->name, handle,
- (unsigned long long) sas_device->sas_address);
+ memcpy(&sas_device_backup, sas_device, sizeof(struct _sas_device));
_scsih_sas_device_remove(ioc, sas_device);
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: handle"
- "(0x%04x)\n", ioc->name, __func__, handle));
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: "
+ "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
+ sas_device_backup.handle, (unsigned long long)
+ sas_device_backup.sas_address));
+
+ if (sas_device_backup.starget && sas_device_backup.starget->hostdata) {
+ sas_target_priv_data = sas_device_backup.starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ }
+
+ if (sas_device_backup.hidden_raid_component)
+ _scsih_remove_pd_device(ioc, sas_device_backup);
+
+ _scsih_ublock_io_device(ioc, sas_device_backup.handle);
+
+ mpt2sas_transport_port_remove(ioc, sas_device_backup.sas_address,
+ sas_device_backup.sas_address_parent);
+
+ printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
+ "(0x%016llx)\n", ioc->name, sas_device_backup.handle,
+ (unsigned long long) sas_device_backup.sas_address);
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: "
+ "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
+ sas_device_backup.handle, (unsigned long long)
+ sas_device_backup.sas_address));
}
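
The rewritten _scsih_remove_device copies the descriptor to a stack backup before unlinking and freeing it, then finishes teardown from the copy so nothing touches freed memory. A minimal standalone sketch of that backup-then-remove pattern follows; the struct layout is illustrative only.

/* Standalone sketch of the backup-then-remove pattern: copy the descriptor
 * to the stack before it is unlinked and freed, then finish teardown from
 * the copy. Struct layout is illustrative, not the driver's. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sas_device {
	unsigned short handle;
	unsigned long long sas_address;
};

static void sas_device_remove(struct sas_device *dev)
{
	free(dev);		/* after this, dev must not be touched */
}

static void remove_device(struct sas_device *dev)
{
	struct sas_device backup;

	if (!dev)
		return;
	memcpy(&backup, dev, sizeof(backup));
	sas_device_remove(dev);	/* original memory is gone */

	/* remaining teardown uses only the stack copy */
	printf("removing handle(0x%04x), sas_addr(0x%016llx)\n",
	       backup.handle, backup.sas_address);
}

int main(void)
{
	struct sas_device *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 1;
	dev->handle = 0x0011;
	dev->sas_address = 0x5000c50012345678ULL;
	remove_device(dev);
	return 0;
}
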
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -4331,7 +4409,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
_scsih_sas_topology_change_event_debug(ioc, event_data);
#endif
- if (ioc->shost_recovery)
+ if (ioc->shost_recovery || ioc->remove_host)
return;
if (!ioc->sas_hba.num_phys)
@@ -4370,7 +4448,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
"expander event\n", ioc->name));
return;
}
- if (ioc->shost_recovery)
+ if (ioc->shost_recovery || ioc->remove_host)
return;
phy_number = event_data->StartPhyNum + i;
reason_code = event_data->PHY[i].PhyStatus &
@@ -4393,8 +4471,10 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
mpt2sas_transport_update_links(ioc, sas_address,
handle, phy_number, link_rate);
- if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)
- _scsih_ublock_io_device(ioc, handle);
+ if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+ break;
+
+ _scsih_check_device(ioc, handle);
break;
case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
@@ -4520,10 +4600,10 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
event_data);
#endif
- if (!(event_data->ReasonCode ==
+ if (event_data->ReasonCode !=
MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
- event_data->ReasonCode ==
- MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET))
+ event_data->ReasonCode !=
+ MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
return;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
@@ -4630,7 +4710,6 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
__func__));
- mutex_lock(&ioc->tm_cmds.mutex);
termination_count = 0;
query_count = 0;
mpi_reply = ioc->tm_cmds.reply;
@@ -4654,8 +4733,8 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
lun = sas_device_priv_data->lun;
query_count++;
- mpt2sas_scsih_issue_tm(ioc, handle, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30);
+ mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL);
ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
& MPI2_IOCSTATUS_MASK;
@@ -4666,13 +4745,11 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
continue;
- mpt2sas_scsih_issue_tm(ioc, handle, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30);
- ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+ mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30, NULL);
termination_count += le32_to_cpu(mpi_reply->TerminationCount);
}
ioc->broadcast_aen_busy = 0;
- mutex_unlock(&ioc->tm_cmds.mutex);
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT
"%s - exit, query_count = %d termination_count = %d\n",
@@ -5442,6 +5519,26 @@ _scsih_task_set_full(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
}
/**
+ * _scsih_prep_device_scan - initialize parameters prior to device scan
+ * @ioc: per adapter object
+ *
+ * Set the deleted flag prior to device scan. If the device is found during
+ * the scan, then we clear the deleted flag.
+ */
+static void
+_scsih_prep_device_scan(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (sas_device_priv_data && sas_device_priv_data->sas_target)
+ sas_device_priv_data->sas_target->deleted = 1;
+ }
+}
+
+/**
* _scsih_mark_responding_sas_device - mark a sas_devices as responding
* @ioc: per adapter object
* @sas_address: sas address
@@ -5467,10 +5564,13 @@ _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
if (sas_device->sas_address == sas_address &&
sas_device->slot == slot && sas_device->starget) {
sas_device->responding = 1;
- sas_device->state = 0;
starget = sas_device->starget;
- sas_target_priv_data = starget->hostdata;
- sas_target_priv_data->tm_busy = 0;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->tm_busy = 0;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
starget_printk(KERN_INFO, sas_device->starget,
"handle(0x%04x), sas_addr(0x%016llx), enclosure "
"logical id(0x%016llx), slot(%d)\n", handle,
@@ -5483,7 +5583,8 @@ _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
sas_device->handle);
sas_device->handle = handle;
- sas_target_priv_data->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
goto out;
}
}
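
Taken together, _scsih_prep_device_scan, the mark-responding helpers, and _scsih_remove_unresponding_sas_devices implement a mark-and-sweep rescan after host reset: mark every target deleted, clear the flag on targets that answer, then sweep the rest. A minimal standalone sketch of that flow, with made-up handles:

/* Standalone sketch of the mark-and-sweep rescan used after a host reset:
 * mark every known target deleted, clear the flag on targets that respond,
 * then remove whatever is still marked. Data set is illustrative. */
#include <stdio.h>

struct target { unsigned short handle; int deleted; int responds; };

int main(void)
{
	struct target targets[] = {
		{ 0x0009, 0, 1 },
		{ 0x000a, 0, 0 },	/* will not respond after reset */
		{ 0x000b, 0, 1 },
	};
	int i, n = sizeof(targets) / sizeof(targets[0]);

	/* prep_device_scan step: assume everything is gone */
	for (i = 0; i < n; i++)
		targets[i].deleted = 1;

	/* mark-responding step: clear the flag on devices found again */
	for (i = 0; i < n; i++)
		if (targets[i].responds)
			targets[i].deleted = 0;

	/* remove-unresponding step: sweep what is left */
	for (i = 0; i < n; i++)
		if (targets[i].deleted)
			printf("removing stale handle(0x%04x)\n",
			       targets[i].handle);
	return 0;
}
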
@@ -5558,6 +5659,12 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
spin_lock_irqsave(&ioc->raid_device_lock, flags);
list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
if (raid_device->wwid == wwid && raid_device->starget) {
+ starget = raid_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
raid_device->responding = 1;
starget_printk(KERN_INFO, raid_device->starget,
"handle(0x%04x), wwid(0x%016llx)\n", handle,
@@ -5567,9 +5674,8 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
raid_device->handle);
raid_device->handle = handle;
- starget = raid_device->starget;
- sas_target_priv_data = starget->hostdata;
- sas_target_priv_data->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
goto out;
}
}
@@ -5694,13 +5800,13 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
}
/**
- * _scsih_remove_unresponding_devices - removing unresponding devices
+ * _scsih_remove_unresponding_sas_devices - removing unresponding devices
* @ioc: per adapter object
*
* Return nothing.
*/
static void
-_scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
+_scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
{
struct _sas_device *sas_device, *sas_device_next;
struct _sas_node *sas_expander;
@@ -5722,8 +5828,6 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
(unsigned long long)
sas_device->enclosure_logical_id,
sas_device->slot);
- /* invalidate the device handle */
- sas_device->handle = 0;
_scsih_remove_device(ioc, sas_device);
}
@@ -5774,32 +5878,33 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
case MPT2_IOC_PRE_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
- _scsih_fw_event_off(ioc);
break;
case MPT2_IOC_AFTER_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->scsih_cmds.status & MPT2_CMD_PENDING) {
+ ioc->scsih_cmds.status |= MPT2_CMD_RESET;
+ mpt2sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
+ complete(&ioc->scsih_cmds.done);
+ }
if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
ioc->tm_cmds.status |= MPT2_CMD_RESET;
mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
complete(&ioc->tm_cmds.done);
}
- _scsih_fw_event_on(ioc);
+ _scsih_fw_event_cleanup_queue(ioc);
_scsih_flush_running_cmds(ioc);
+ _scsih_queue_rescan(ioc);
break;
case MPT2_IOC_DONE_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
_scsih_sas_host_refresh(ioc);
+ _scsih_prep_device_scan(ioc);
_scsih_search_responding_sas_devices(ioc);
_scsih_search_responding_raid_devices(ioc);
_scsih_search_responding_expanders(ioc);
break;
- case MPT2_IOC_RUNNING:
- dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
- "MPT2_IOC_RUNNING\n", ioc->name, __func__));
- _scsih_remove_unresponding_devices(ioc);
- break;
}
}
@@ -5815,21 +5920,28 @@ static void
_firmware_event_work(struct work_struct *work)
{
struct fw_event_work *fw_event = container_of(work,
- struct fw_event_work, work);
+ struct fw_event_work, delayed_work.work);
unsigned long flags;
struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
/* the queue is being flushed so ignore this event */
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- if (ioc->fw_events_off || ioc->remove_host) {
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (ioc->remove_host || fw_event->cancel_pending_work) {
_scsih_fw_event_free(ioc, fw_event);
return;
}
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
- if (ioc->shost_recovery) {
- _scsih_fw_event_requeue(ioc, fw_event, 1000);
+ if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) {
+ _scsih_fw_event_free(ioc, fw_event);
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ if (ioc->shost_recovery) {
+ init_completion(&ioc->shost_recovery_done);
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
+ flags);
+ wait_for_completion(&ioc->shost_recovery_done);
+ } else
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
+ flags);
+ _scsih_remove_unresponding_sas_devices(ioc);
return;
}
@@ -5891,16 +6003,12 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
{
struct fw_event_work *fw_event;
Mpi2EventNotificationReply_t *mpi_reply;
- unsigned long flags;
u16 event;
+ u16 sz;
/* events turned off due to host reset or driver unloading */
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- if (ioc->fw_events_off || ioc->remove_host) {
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (ioc->remove_host)
return 1;
- }
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
event = le16_to_cpu(mpi_reply->Event);
@@ -5947,8 +6055,8 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
ioc->name, __FILE__, __LINE__, __func__);
return 1;
}
- fw_event->event_data =
- kzalloc(mpi_reply->EventDataLength*4, GFP_ATOMIC);
+ sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
+ fw_event->event_data = kzalloc(sz, GFP_ATOMIC);
if (!fw_event->event_data) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
@@ -5957,7 +6065,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
}
memcpy(fw_event->event_data, mpi_reply->EventData,
- mpi_reply->EventDataLength*4);
+ sz);
fw_event->ioc = ioc;
fw_event->VF_ID = mpi_reply->VF_ID;
fw_event->VP_ID = mpi_reply->VP_ID;
@@ -6158,6 +6266,18 @@ _scsih_shutdown(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ ioc->remove_host = 1;
+ _scsih_fw_event_cleanup_queue(ioc);
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ wq = ioc->firmware_event_thread;
+ ioc->firmware_event_thread = NULL;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
_scsih_ir_shutdown(ioc);
mpt2sas_base_detach(ioc);
@@ -6184,7 +6304,7 @@ _scsih_remove(struct pci_dev *pdev)
unsigned long flags;
ioc->remove_host = 1;
- _scsih_fw_event_off(ioc);
+ _scsih_fw_event_cleanup_queue(ioc);
spin_lock_irqsave(&ioc->fw_event_lock, flags);
wq = ioc->firmware_event_thread;
@@ -6557,6 +6677,122 @@ _scsih_resume(struct pci_dev *pdev)
}
#endif /* CONFIG_PM */
+/**
+ * _scsih_pci_error_detected - Called when a PCI error is detected.
+ * @pdev: PCI device struct
+ * @state: PCI channel state
+ *
+ * Description: Called when a PCI error is detected.
+ *
+ * Return value:
+ * PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or
+ * PCI_ERS_RESULT_DISCONNECT
+ */
+static pci_ers_result_t
+_scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ printk(MPT2SAS_INFO_FMT "PCI error: detected callback, state(%d)!!\n",
+ ioc->name, state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ scsi_block_requests(ioc->shost);
+ mpt2sas_base_stop_watchdog(ioc);
+ mpt2sas_base_free_resources(ioc);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ _scsih_remove(pdev);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * _scsih_pci_slot_reset - Called when PCI slot has been reset.
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called by the pci error recovery
+ * code after the PCI slot has been reset, just before we
+ * should resume normal operations.
+ */
+static pci_ers_result_t
+_scsih_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ int rc;
+
+ printk(MPT2SAS_INFO_FMT "PCI error: slot reset callback!!\n",
+ ioc->name);
+
+ ioc->pdev = pdev;
+ rc = mpt2sas_base_map_resources(ioc);
+ if (rc)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+
+ rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ printk(MPT2SAS_WARN_FMT "hard reset: %s\n", ioc->name,
+ (rc == 0) ? "success" : "failed");
+
+ if (!rc)
+ return PCI_ERS_RESULT_RECOVERED;
+ else
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * _scsih_pci_resume() - resume normal ops after PCI reset
+ * @pdev: pointer to PCI device
+ *
+ * Called when the error recovery driver tells us that it's
+ * OK to resume normal operation. Use completion to allow
+ * halted scsi ops to resume.
+ */
+static void
+_scsih_pci_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ printk(MPT2SAS_INFO_FMT "PCI error: resume callback!!\n", ioc->name);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ mpt2sas_base_start_watchdog(ioc);
+ scsi_unblock_requests(ioc->shost);
+}
+
+/**
+ * _scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
+ * @pdev: pointer to PCI device
+ */
+static pci_ers_result_t
+_scsih_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ printk(MPT2SAS_INFO_FMT "PCI error: mmio enabled callback!!\n",
+ ioc->name);
+
+ /* TODO - dump whatever for debugging purposes */
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static struct pci_error_handlers _scsih_err_handler = {
+ .error_detected = _scsih_pci_error_detected,
+ .mmio_enabled = _scsih_pci_mmio_enabled,
+ .slot_reset = _scsih_pci_slot_reset,
+ .resume = _scsih_pci_resume,
+};
static struct pci_driver scsih_driver = {
.name = MPT2SAS_DRIVER_NAME,
@@ -6564,6 +6800,7 @@ static struct pci_driver scsih_driver = {
.probe = _scsih_probe,
.remove = __devexit_p(_scsih_remove),
.shutdown = _scsih_shutdown,
+ .err_handler = &_scsih_err_handler,
#ifdef CONFIG_PM
.suspend = _scsih_suspend,
.resume = _scsih_resume,
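
The new err_handler hooks into the PCI AER recovery sequence: error_detected() decides whether to recover in place, request a slot reset, or disconnect; slot_reset() re-maps resources and hard-resets the adapter; resume() restarts the watchdog and unblocks requests. Below is a minimal standalone sketch of that sequencing only; the enum mirrors pci_ers_result_t in spirit, not by value.

/* Standalone sketch of the AER recovery sequencing the err_handler hooks
 * into. All names and values here are simplified stand-ins. */
#include <stdio.h>

enum ers_result { ERS_CAN_RECOVER, ERS_NEED_RESET, ERS_RECOVERED, ERS_DISCONNECT };

static enum ers_result error_detected(int channel_frozen)
{
	if (!channel_frozen)
		return ERS_CAN_RECOVER;	/* keep going, just report */
	/* block requests, free resources, ask for a slot reset */
	return ERS_NEED_RESET;
}

static enum ers_result slot_reset(void)
{
	int hard_reset_ok = 1;	/* stand-in for the adapter hard reset */

	return hard_reset_ok ? ERS_RECOVERED : ERS_DISCONNECT;
}

int main(void)
{
	enum ers_result rc = error_detected(1);

	if (rc == ERS_NEED_RESET)
		rc = slot_reset();
	if (rc == ERS_RECOVERED)
		printf("resume: restart watchdog, unblock requests\n");
	else if (rc == ERS_DISCONNECT)
		printf("disconnect: remove the device\n");
	return 0;
}
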
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index bd7ca2b..2727c3b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -2,7 +2,7 @@
* SAS Transport Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -465,6 +465,85 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
return rc;
}
+
+/**
+ * _transport_delete_duplicate_port - remove a port claiming phys already in use
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @sas_address: sas address of device being added
+ * @phy_num: phy number
+ *
+ * This function is called when attempting to add a new port that is claiming
+ * the same phy resources already in use by another port. If we don't release
+ * the claimed phy resources, the sas transport layer will hang from the BUG
+ * in sas_port_add_phy.
+ *
+ * The reason we would hit this issue is because someone is changing the
+ * sas address of a device on the fly, while the controller firmware sends
+ * EVENTs out of order when removing the previous instance of the device.
+ */
+static void
+_transport_delete_duplicate_port(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, u64 sas_address, int phy_num)
+{
+ struct _sas_port *mpt2sas_port, *mpt2sas_port_duplicate;
+ struct _sas_phy *mpt2sas_phy;
+
+ printk(MPT2SAS_ERR_FMT "new device located at sas_addr(0x%016llx), "
+ "phy_id(%d)\n", ioc->name, (unsigned long long)sas_address,
+ phy_num);
+
+ mpt2sas_port_duplicate = NULL;
+ list_for_each_entry(mpt2sas_port, &sas_node->sas_port_list, port_list) {
+ dev_printk(KERN_ERR, &mpt2sas_port->port->dev,
+ "existing device at sas_addr(0x%016llx), num_phys(%d)\n",
+ (unsigned long long)
+ mpt2sas_port->remote_identify.sas_address,
+ mpt2sas_port->num_phys);
+ list_for_each_entry(mpt2sas_phy, &mpt2sas_port->phy_list,
+ port_siblings) {
+ dev_printk(KERN_ERR, &mpt2sas_phy->phy->dev,
+ "phy_number(%d)\n", mpt2sas_phy->phy_id);
+ if (mpt2sas_phy->phy_id == phy_num)
+ mpt2sas_port_duplicate = mpt2sas_port;
+ }
+ }
+
+ if (!mpt2sas_port_duplicate)
+ return;
+
+ dev_printk(KERN_ERR, &mpt2sas_port_duplicate->port->dev,
+ "deleting duplicate device at sas_addr(0x%016llx), phy(%d)!!!!\n",
+ (unsigned long long)
+ mpt2sas_port_duplicate->remote_identify.sas_address, phy_num);
+ ioc->logging_level |= MPT_DEBUG_TRANSPORT;
+ mpt2sas_transport_port_remove(ioc,
+ mpt2sas_port_duplicate->remote_identify.sas_address,
+ sas_node->sas_address);
+ ioc->logging_level &= ~MPT_DEBUG_TRANSPORT;
+}
+
+/**
+ * _transport_sanity_check - sanity check when adding a new port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @sas_address: sas address of device being added
+ *
+ * See the explanation above in _transport_delete_duplicate_port
+ */
+static void
+_transport_sanity_check(struct MPT2SAS_ADAPTER *ioc, struct _sas_node *sas_node,
+ u64 sas_address)
+{
+ int i;
+
+ for (i = 0; i < sas_node->num_phys; i++)
+ if (sas_node->phy[i].remote_identify.sas_address == sas_address)
+ if (sas_node->phy[i].phy_belongs_to_port)
+ _transport_delete_duplicate_port(ioc, sas_node,
+ sas_address, i);
+}
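+
+The sanity check above walks the parent node's phys and, when the incoming
+SAS address is already claimed by a phy that belongs to a port, tears down
+the stale port first so sas_port_add_phy cannot trip its BUG. A minimal
+standalone sketch of that check, with made-up addresses and a fixed-size
+phy array:
+
+/* Standalone sketch of the duplicate-port sanity check. Data is made up. */
+#include <stdio.h>
+
+#define NUM_PHYS 4
+
+struct phy {
+	unsigned long long remote_sas_address;
+	int belongs_to_port;
+};
+
+static void delete_duplicate_port(struct phy *phy, int phy_num)
+{
+	printf("deleting duplicate port on phy(%d), sas_addr(0x%016llx)\n",
+	       phy_num, phy->remote_sas_address);
+	phy->belongs_to_port = 0;	/* phy is free to be claimed again */
+}
+
+static void sanity_check(struct phy *phys, unsigned long long new_sas_address)
+{
+	int i;
+
+	for (i = 0; i < NUM_PHYS; i++)
+		if (phys[i].remote_sas_address == new_sas_address &&
+		    phys[i].belongs_to_port)
+			delete_duplicate_port(&phys[i], i);
+}
+
+int main(void)
+{
+	struct phy phys[NUM_PHYS] = {
+		{ 0x5000c50012345678ULL, 1 },	/* stale claim */
+		{ 0, 0 }, { 0, 0 }, { 0, 0 },
+	};
+
+	sanity_check(phys, 0x5000c50012345678ULL);
+	return 0;
+}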
+
/**
* mpt2sas_transport_port_add - insert port to the list
* @ioc: per adapter object
@@ -522,6 +601,9 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
goto out_fail;
}
+ _transport_sanity_check(ioc, sas_node,
+ mpt2sas_port->remote_identify.sas_address);
+
for (i = 0; i < sas_node->num_phys; i++) {
if (sas_node->phy[i].remote_identify.sas_address !=
mpt2sas_port->remote_identify.sas_address)
@@ -553,6 +635,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
mpt2sas_port->remote_identify.sas_address,
mpt2sas_phy->phy_id);
sas_port_add_phy(port, mpt2sas_phy->phy);
+ mpt2sas_phy->phy_belongs_to_port = 1;
}
mpt2sas_port->port = port;
@@ -651,6 +734,7 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
(unsigned long long)
mpt2sas_port->remote_identify.sas_address,
mpt2sas_phy->phy_id);
+ mpt2sas_phy->phy_belongs_to_port = 0;
sas_port_delete_phy(mpt2sas_port->port, mpt2sas_phy->phy);
list_del(&mpt2sas_phy->port_siblings);
}
@@ -1341,7 +1425,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
req->sense_len = sizeof(*mpi_reply);
req->resid_len = 0;
- rsp->resid_len -= mpi_reply->ResponseDataLength;
+ rsp->resid_len -=
+ le16_to_cpu(mpi_reply->ResponseDataLength);
} else {
dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
"%s - no reply\n", ioc->name, __func__));
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index d722235..716d178 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -13,112 +13,116 @@
#include "wd33c93.h"
#include "mvme147.h"
-#include<linux/stat.h>
+#include <linux/stat.h>
-#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
static struct Scsi_Host *mvme147_host = NULL;
-static irqreturn_t mvme147_intr (int irq, void *dummy)
+static irqreturn_t mvme147_intr(int irq, void *dummy)
{
- if (irq == MVME147_IRQ_SCSI_PORT)
- wd33c93_intr (mvme147_host);
- else
- m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
- return IRQ_HANDLED;
+ if (irq == MVME147_IRQ_SCSI_PORT)
+ wd33c93_intr(mvme147_host);
+ else
+ m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
+ return IRQ_HANDLED;
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
- unsigned char flags = 0x01;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
-
- /* setup dma direction */
- if (!dir_in)
- flags |= 0x04;
-
- /* remember direction */
- HDATA(mvme147_host)->dma_dir = dir_in;
-
- if (dir_in)
- /* invalidate any cache */
- cache_clear (addr, cmd->SCp.this_residual);
- else
- /* push any dirty cache */
- cache_push (addr, cmd->SCp.this_residual);
-
- /* start DMA */
- m147_pcc->dma_bcr = cmd->SCp.this_residual | (1<<24);
- m147_pcc->dma_dadr = addr;
- m147_pcc->dma_cntrl = flags;
-
- /* return success */
- return 0;
+ struct WD33C93_hostdata *hdata = shost_priv(mvme147_host);
+ unsigned char flags = 0x01;
+ unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+
+ /* setup dma direction */
+ if (!dir_in)
+ flags |= 0x04;
+
+ /* remember direction */
+ hdata->dma_dir = dir_in;
+
+ if (dir_in) {
+ /* invalidate any cache */
+ cache_clear(addr, cmd->SCp.this_residual);
+ } else {
+ /* push any dirty cache */
+ cache_push(addr, cmd->SCp.this_residual);
+ }
+
+ /* start DMA */
+ m147_pcc->dma_bcr = cmd->SCp.this_residual | (1 << 24);
+ m147_pcc->dma_dadr = addr;
+ m147_pcc->dma_cntrl = flags;
+
+ /* return success */
+ return 0;
}
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
- int status)
+ int status)
{
- m147_pcc->dma_cntrl = 0;
+ m147_pcc->dma_cntrl = 0;
}
int mvme147_detect(struct scsi_host_template *tpnt)
{
- static unsigned char called = 0;
- wd33c93_regs regs;
-
- if (!MACH_IS_MVME147 || called)
- return 0;
- called++;
-
- tpnt->proc_name = "MVME147";
- tpnt->proc_info = &wd33c93_proc_info;
-
- mvme147_host = scsi_register (tpnt, sizeof(struct WD33C93_hostdata));
- if (!mvme147_host)
- goto err_out;
-
- mvme147_host->base = 0xfffe4000;
- mvme147_host->irq = MVME147_IRQ_SCSI_PORT;
- regs.SASR = (volatile unsigned char *)0xfffe4000;
- regs.SCMD = (volatile unsigned char *)0xfffe4001;
- HDATA(mvme147_host)->no_sync = 0xff;
- HDATA(mvme147_host)->fast = 0;
- HDATA(mvme147_host)->dma_mode = CTRL_DMA;
- wd33c93_init(mvme147_host, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
-
- if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0, "MVME147 SCSI PORT", mvme147_intr))
- goto err_unregister;
- if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0, "MVME147 SCSI DMA", mvme147_intr))
- goto err_free_irq;
+ static unsigned char called = 0;
+ wd33c93_regs regs;
+ struct WD33C93_hostdata *hdata;
+
+ if (!MACH_IS_MVME147 || called)
+ return 0;
+ called++;
+
+ tpnt->proc_name = "MVME147";
+ tpnt->proc_info = &wd33c93_proc_info;
+
+ mvme147_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
+ if (!mvme147_host)
+ goto err_out;
+
+ mvme147_host->base = 0xfffe4000;
+ mvme147_host->irq = MVME147_IRQ_SCSI_PORT;
+ regs.SASR = (volatile unsigned char *)0xfffe4000;
+ regs.SCMD = (volatile unsigned char *)0xfffe4001;
+ hdata = shost_priv(mvme147_host);
+ hdata->no_sync = 0xff;
+ hdata->fast = 0;
+ hdata->dma_mode = CTRL_DMA;
+ wd33c93_init(mvme147_host, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
+
+ if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0,
+ "MVME147 SCSI PORT", mvme147_intr))
+ goto err_unregister;
+ if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0,
+ "MVME147 SCSI DMA", mvme147_intr))
+ goto err_free_irq;
#if 0 /* Disabled; causes problems booting */
- m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */
- udelay(100);
- m147_pcc->scsi_interrupt = 0x00; /* Negate SCSI bus reset */
- udelay(2000);
- m147_pcc->scsi_interrupt = 0x40; /* Clear bus reset interrupt */
+ m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */
+ udelay(100);
+ m147_pcc->scsi_interrupt = 0x00; /* Negate SCSI bus reset */
+ udelay(2000);
+ m147_pcc->scsi_interrupt = 0x40; /* Clear bus reset interrupt */
#endif
- m147_pcc->scsi_interrupt = 0x09; /* Enable interrupt */
+ m147_pcc->scsi_interrupt = 0x09; /* Enable interrupt */
- m147_pcc->dma_cntrl = 0x00; /* ensure DMA is stopped */
- m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
+ m147_pcc->dma_cntrl = 0x00; /* ensure DMA is stopped */
+ m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
- return 1;
+ return 1;
- err_free_irq:
- free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
- err_unregister:
- wd33c93_release();
- scsi_unregister(mvme147_host);
- err_out:
- return 0;
+err_free_irq:
+ free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
+err_unregister:
+ scsi_unregister(mvme147_host);
+err_out:
+ return 0;
}
static int mvme147_bus_reset(struct scsi_cmnd *cmd)
{
/* FIXME perform bus-specific reset */
- /* FIXME 2: kill this function, and let midlayer fallback to
+ /* FIXME 2: kill this function, and let midlayer fallback to
the same result, calling wd33c93_host_reset() */
spin_lock_irq(cmd->device->host->host_lock);
@@ -154,10 +158,9 @@ static struct scsi_host_template driver_template = {
int mvme147_release(struct Scsi_Host *instance)
{
#ifdef MODULE
- /* XXX Make sure DMA is stopped! */
- wd33c93_release();
- free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
- free_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr);
+ /* XXX Make sure DMA is stopped! */
+ free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
+ free_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr);
#endif
- return 1;
+ return 1;
}
diff --git a/drivers/scsi/mvme147.h b/drivers/scsi/mvme147.h
index 32aee85..bfd4566 100644
--- a/drivers/scsi/mvme147.h
+++ b/drivers/scsi/mvme147.h
@@ -14,11 +14,11 @@ int mvme147_detect(struct scsi_host_template *);
int mvme147_release(struct Scsi_Host *);
#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 2
+#define CMD_PER_LUN 2
#endif
#ifndef CAN_QUEUE
-#define CAN_QUEUE 16
+#define CAN_QUEUE 16
#endif
#endif /* MVME147_H */
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index 10a5077..afc7f6f 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -132,9 +132,9 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
tmp &= ~PHYEV_RDY_CH;
mvs_write_port_irq_stat(mvi, phy_id, tmp);
tmp = mvs_read_phy_ctl(mvi, phy_id);
- if (hard)
+ if (hard == 1)
tmp |= PHY_RST_HARD;
- else
+ else if (hard == 0)
tmp |= PHY_RST;
mvs_write_phy_ctl(mvi, phy_id, tmp);
if (hard) {
@@ -144,6 +144,26 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
}
}
+void mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+ if (clear_all) {
+ tmp = mr32(MVS_INT_STAT_SRS_0);
+ if (tmp) {
+ printk(KERN_DEBUG "check SRS 0 %08X.\n", tmp);
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+ }
+ } else {
+ tmp = mr32(MVS_INT_STAT_SRS_0);
+ if (tmp & (1 << (reg_set % 32))) {
+ printk(KERN_DEBUG "register set 0x%x was stopped.\n",
+ reg_set);
+ mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
+ }
+ }
+}
+
static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
@@ -761,6 +781,7 @@ const struct mvs_dispatch mvs_64xx_dispatch = {
mvs_write_port_irq_mask,
mvs_get_sas_addr,
mvs_64xx_command_active,
+ mvs_64xx_clear_srs_irq,
mvs_64xx_issue_stop,
mvs_start_delivery,
mvs_rx_update,
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 0940fae..eed4c5c 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -616,6 +616,15 @@ void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
}
#endif
+/*
+ * FIXME JEJB: temporary nop clear_srs_irq to make 94xx still work
+ * with 64xx fixes
+ */
+static void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set,
+ u8 clear_all)
+{
+}
+
const struct mvs_dispatch mvs_94xx_dispatch = {
"mv94xx",
mvs_94xx_init,
@@ -640,6 +649,7 @@ const struct mvs_dispatch mvs_94xx_dispatch = {
mvs_write_port_irq_mask,
mvs_get_sas_addr,
mvs_94xx_command_active,
+ mvs_94xx_clear_srs_irq,
mvs_94xx_issue_stop,
mvs_start_delivery,
mvs_rx_update,
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index cae6b2c..19ad34f 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -37,6 +37,7 @@ static const struct mvs_chip_info mvs_chips[] = {
};
#define SOC_SAS_NUM 2
+#define SG_MX 64
static struct scsi_host_template mvs_sht = {
.module = THIS_MODULE,
@@ -53,10 +54,10 @@ static struct scsi_host_template mvs_sht = {
.can_queue = 1,
.cmd_per_lun = 1,
.this_id = -1,
- .sg_tablesize = SG_ALL,
+ .sg_tablesize = SG_MX,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
- .eh_device_reset_handler = sas_eh_device_reset_handler,
+ .eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
.slave_alloc = mvs_slave_alloc,
.target_destroy = sas_target_destroy,
@@ -65,19 +66,17 @@ static struct scsi_host_template mvs_sht = {
static struct sas_domain_function_template mvs_transport_ops = {
.lldd_dev_found = mvs_dev_found,
- .lldd_dev_gone = mvs_dev_gone,
-
+ .lldd_dev_gone = mvs_dev_gone,
.lldd_execute_task = mvs_queue_command,
.lldd_control_phy = mvs_phy_control,
.lldd_abort_task = mvs_abort_task,
.lldd_abort_task_set = mvs_abort_task_set,
.lldd_clear_aca = mvs_clear_aca,
- .lldd_clear_task_set = mvs_clear_task_set,
+ .lldd_clear_task_set = mvs_clear_task_set,
.lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
.lldd_lu_reset = mvs_lu_reset,
.lldd_query_task = mvs_query_task,
-
.lldd_port_formed = mvs_port_formed,
.lldd_port_deformed = mvs_port_deformed,
@@ -213,7 +212,7 @@ static irqreturn_t mvs_interrupt(int irq, void *opaque)
static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
{
- int i, slot_nr;
+ int i = 0, slot_nr;
if (mvi->flags & MVF_FLAG_SOC)
slot_nr = MVS_SOC_SLOTS;
@@ -232,6 +231,7 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
mvi->devices[i].dev_type = NO_DEVICE;
mvi->devices[i].device_id = i;
mvi->devices[i].dev_status = MVS_DEV_NORMAL;
+ init_timer(&mvi->devices[i].timer);
}
/*
@@ -437,6 +437,7 @@ static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
sha->sas_phy = arr_phy;
sha->sas_port = arr_port;
+ sha->core.shost = shost;
sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
if (!sha->lldd_ha)
@@ -574,6 +575,10 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
}
nhost++;
} while (nhost < chip->n_host);
+#ifdef MVS_USE_TASKLET
+ tasklet_init(&mv_tasklet, mvs_tasklet,
+ (unsigned long)SHOST_TO_SAS_HA(shost));
+#endif
mvs_post_sas_ha_init(shost, chip);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 0d21386..f5e3217 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -259,8 +259,6 @@ static inline void mvs_free_reg_set(struct mvs_info *mvi,
mv_printk("device has been free.\n");
return;
}
- if (dev->runing_req != 0)
- return;
if (dev->taskfileset == MVS_ID_NOT_MAPPED)
return;
MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
@@ -762,8 +760,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
}
if (is_tmf)
flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
- else
- flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
hdr->tags = cpu_to_le32(tag);
hdr->data_len = cpu_to_le32(task->total_xfer_len);
@@ -878,14 +874,15 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
struct mvs_slot_info *slot;
u32 tag = 0xdeadbeef, rc, n_elem = 0;
u32 n = num, pass = 0;
- unsigned long flags = 0;
+ unsigned long flags = 0, flags_libsas = 0;
if (!dev->port) {
struct task_status_struct *tsm = &t->task_status;
tsm->resp = SAS_TASK_UNDELIVERED;
tsm->stat = SAS_PHY_DOWN;
- t->task_done(t);
+ if (dev->dev_type != SATA_DEV)
+ t->task_done(t);
return 0;
}
@@ -910,12 +907,25 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
else
tei.port = &mvi->port[dev->port->id];
- if (!tei.port->port_attached) {
+ if (tei.port && !tei.port->port_attached) {
if (sas_protocol_ata(t->task_proto)) {
+ struct task_status_struct *ts = &t->task_status;
+
mv_dprintk("port %d does not"
"attached device.\n", dev->port->id);
- rc = SAS_PHY_DOWN;
- goto out_done;
+ ts->stat = SAS_PROTO_RESPONSE;
+ ts->stat = SAS_PHY_DOWN;
+ spin_unlock_irqrestore(dev->sata_dev.ap->lock,
+ flags_libsas);
+ spin_unlock_irqrestore(&mvi->lock, flags);
+ t->task_done(t);
+ spin_lock_irqsave(&mvi->lock, flags);
+ spin_lock_irqsave(dev->sata_dev.ap->lock,
+ flags_libsas);
+ if (n > 1)
+ t = list_entry(t->list.next,
+ struct sas_task, list);
+ continue;
} else {
struct task_status_struct *ts = &t->task_status;
ts->resp = SAS_TASK_UNDELIVERED;
@@ -973,8 +983,8 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
break;
default:
dev_printk(KERN_ERR, mvi->dev,
- "unknown sas_task proto: 0x%x\n",
- t->task_proto);
+ "unknown sas_task proto: 0x%x\n",
+ t->task_proto);
rc = -EINVAL;
break;
}
@@ -993,11 +1003,15 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
spin_unlock(&t->task_state_lock);
mvs_hba_memory_dump(mvi, tag, t->task_proto);
- mvi_dev->runing_req++;
+ mvi_dev->running_req++;
++pass;
mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
if (n > 1)
t = list_entry(t->list.next, struct sas_task, list);
+ if (likely(pass))
+ MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
+ (MVS_CHIP_SLOT_SZ - 1));
+
} while (--n);
rc = 0;
goto out_done;
@@ -1012,10 +1026,6 @@ err_out:
dma_unmap_sg(mvi->dev, t->scatter, n_elem,
t->data_dir);
out_done:
- if (likely(pass)) {
- MVS_CHIP_DISP->start_delivery(mvi,
- (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
- }
spin_unlock_irqrestore(&mvi->lock, flags);
return rc;
}
@@ -1187,7 +1197,7 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
MVS_CHIP_DISP->phy_reset(mvi, i, 0);
goto out_done;
}
- } else if (phy->phy_type & PORT_TYPE_SAS
+ } else if (phy->phy_type & PORT_TYPE_SAS
|| phy->att_dev_info & PORT_SSP_INIT_MASK) {
phy->phy_attached = 1;
phy->identify.device_type =
@@ -1256,7 +1266,20 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
{
- /*Nothing*/
+ struct domain_device *dev;
+ struct mvs_phy *phy = sas_phy->lldd_phy;
+ struct mvs_info *mvi = phy->mvi;
+ struct asd_sas_port *port = sas_phy->port;
+ int phy_no = 0;
+
+ while (phy != &mvi->phy[phy_no]) {
+ phy_no++;
+ if (phy_no >= MVS_MAX_PHYS)
+ return;
+ }
+ list_for_each_entry(dev, &port->dev_list, dev_list_node)
+ mvs_do_release_task(phy->mvi, phy_no, NULL);
+
}
@@ -1316,6 +1339,7 @@ int mvs_dev_found_notify(struct domain_device *dev, int lock)
goto found_out;
}
dev->lldd_dev = mvi_device;
+ mvi_device->dev_status = MVS_DEV_NORMAL;
mvi_device->dev_type = dev->dev_type;
mvi_device->mvi_info = mvi;
if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
@@ -1351,18 +1375,18 @@ int mvs_dev_found(struct domain_device *dev)
return mvs_dev_found_notify(dev, 1);
}
-void mvs_dev_gone_notify(struct domain_device *dev, int lock)
+void mvs_dev_gone_notify(struct domain_device *dev)
{
unsigned long flags = 0;
struct mvs_device *mvi_dev = dev->lldd_dev;
struct mvs_info *mvi = mvi_dev->mvi_info;
- if (lock)
- spin_lock_irqsave(&mvi->lock, flags);
+ spin_lock_irqsave(&mvi->lock, flags);
if (mvi_dev) {
mv_dprintk("found dev[%d:%x] is gone.\n",
mvi_dev->device_id, mvi_dev->dev_type);
+ mvs_release_task(mvi, dev);
mvs_free_reg_set(mvi, mvi_dev);
mvs_free_dev(mvi_dev);
} else {
@@ -1370,14 +1394,13 @@ void mvs_dev_gone_notify(struct domain_device *dev, int lock)
}
dev->lldd_dev = NULL;
- if (lock)
- spin_unlock_irqrestore(&mvi->lock, flags);
+ spin_unlock_irqrestore(&mvi->lock, flags);
}
void mvs_dev_gone(struct domain_device *dev)
{
- mvs_dev_gone_notify(dev, 1);
+ mvs_dev_gone_notify(dev);
}
static struct sas_task *mvs_alloc_task(void)
@@ -1540,7 +1563,7 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
num = mvs_find_dev_phyno(dev, phyno);
spin_lock_irqsave(&mvi->lock, flags);
for (i = 0; i < num; i++)
- mvs_release_task(mvi, phyno[i], dev);
+ mvs_release_task(mvi, dev);
spin_unlock_irqrestore(&mvi->lock, flags);
}
/* If failed, fall-through I_T_Nexus reset */
@@ -1552,8 +1575,8 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
int mvs_I_T_nexus_reset(struct domain_device *dev)
{
unsigned long flags;
- int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
- struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
struct mvs_info *mvi = mvi_dev->mvi_info;
if (mvi_dev->dev_status != MVS_DEV_EH)
@@ -1563,10 +1586,8 @@ int mvs_I_T_nexus_reset(struct domain_device *dev)
__func__, mvi_dev->device_id, rc);
/* housekeeper */
- num = mvs_find_dev_phyno(dev, phyno);
spin_lock_irqsave(&mvi->lock, flags);
- for (i = 0; i < num; i++)
- mvs_release_task(mvi, phyno[i], dev);
+ mvs_release_task(mvi, dev);
spin_unlock_irqrestore(&mvi->lock, flags);
return rc;
@@ -1603,6 +1624,9 @@ int mvs_query_task(struct sas_task *task)
case TMF_RESP_FUNC_FAILED:
case TMF_RESP_FUNC_COMPLETE:
break;
+ default:
+ rc = TMF_RESP_FUNC_COMPLETE;
+ break;
}
}
mv_printk("%s:rc= %d\n", __func__, rc);
@@ -1621,8 +1645,11 @@ int mvs_abort_task(struct sas_task *task)
unsigned long flags;
u32 tag;
- if (mvi->exp_req)
- mvi->exp_req--;
+ if (!mvi_dev) {
+ mv_printk("%s:%d TMF_RESP_FUNC_FAILED\n", __func__, __LINE__);
+ rc = TMF_RESP_FUNC_FAILED;
+ }
+
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -1630,6 +1657,7 @@ int mvs_abort_task(struct sas_task *task)
goto out;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
+ mvi_dev->dev_status = MVS_DEV_EH;
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
@@ -1654,12 +1682,31 @@ int mvs_abort_task(struct sas_task *task)
if (task->lldd_task) {
slot = task->lldd_task;
slot_no = (u32) (slot - mvi->slot_info);
+ spin_lock_irqsave(&mvi->lock, flags);
mvs_slot_complete(mvi, slot_no, 1);
+ spin_unlock_irqrestore(&mvi->lock, flags);
}
}
+
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
/* to do free register_set */
+ if (SATA_DEV == dev->dev_type) {
+ struct mvs_slot_info *slot = task->lldd_task;
+ struct task_status_struct *tstat;
+ u32 slot_idx = (u32)(slot - mvi->slot_info);
+ tstat = &task->task_status;
+ mv_dprintk(KERN_DEBUG "mv_abort_task() mvi=%p task=%p "
+ "slot=%p slot_idx=x%x\n",
+ mvi, task, slot, slot_idx);
+ tstat->stat = SAS_ABORTED_TASK;
+ if (mvi_dev && mvi_dev->running_req)
+ mvi_dev->running_req--;
+ if (sas_protocol_ata(task->task_proto))
+ mvs_free_reg_set(mvi, mvi_dev);
+ mvs_slot_task_free(mvi, task, slot, slot_idx);
+ return -1;
+ }
} else {
/* SMP */
@@ -1717,8 +1764,13 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
sizeof(struct dev_to_host_fis));
tstat->buf_valid_size = sizeof(*resp);
- if (unlikely(err))
- stat = SAS_PROTO_RESPONSE;
+ if (unlikely(err)) {
+ if (unlikely(err & CMD_ISS_STPD))
+ stat = SAS_OPEN_REJECT;
+ else
+ stat = SAS_PROTO_RESPONSE;
+ }
+
return stat;
}
@@ -1753,9 +1805,7 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
mv_printk("find reserved error, why?\n");
task->ata_task.use_ncq = 0;
- stat = SAS_PROTO_RESPONSE;
- mvs_sata_done(mvi, task, slot_idx, 1);
-
+ mvs_sata_done(mvi, task, slot_idx, err_dw0);
}
break;
default:
@@ -1772,18 +1822,20 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
struct sas_task *task = slot->task;
struct mvs_device *mvi_dev = NULL;
struct task_status_struct *tstat;
+ struct domain_device *dev;
+ u32 aborted;
- bool aborted;
void *to;
enum exec_status sts;
if (mvi->exp_req)
mvi->exp_req--;
- if (unlikely(!task || !task->lldd_task))
+ if (unlikely(!task || !task->lldd_task || !task->dev))
return -1;
tstat = &task->task_status;
- mvi_dev = task->dev->lldd_dev;
+ dev = task->dev;
+ mvi_dev = dev->lldd_dev;
mvs_hba_cq_dump(mvi);
@@ -1800,8 +1852,8 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
if (unlikely(aborted)) {
tstat->stat = SAS_ABORTED_TASK;
- if (mvi_dev)
- mvi_dev->runing_req--;
+ if (mvi_dev && mvi_dev->running_req)
+ mvi_dev->running_req--;
if (sas_protocol_ata(task->task_proto))
mvs_free_reg_set(mvi, mvi_dev);
@@ -1809,24 +1861,17 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
return -1;
}
- if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) {
- mv_dprintk("port has not device.\n");
+ if (unlikely(!mvi_dev || flags)) {
+ if (!mvi_dev)
+ mv_dprintk("port has not device.\n");
tstat->stat = SAS_PHY_DOWN;
goto out;
}
- /*
- if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) {
- mv_dprintk("Find device[%016llx] RXQ_ERR %X,
- err info:%016llx\n",
- SAS_ADDR(task->dev->sas_addr),
- rx_desc, (u64)(*(u64 *) slot->response));
- }
- */
-
/* error info record present */
if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
tstat->stat = mvs_slot_err(mvi, task, slot_idx);
+ tstat->resp = SAS_TASK_COMPLETE;
goto out;
}
@@ -1868,11 +1913,16 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
tstat->stat = SAM_CHECK_COND;
break;
}
+ if (!slot->port->port_attached) {
+ mv_dprintk("port %d has removed.\n", slot->port->sas_port.id);
+ tstat->stat = SAS_PHY_DOWN;
+ }
+
out:
- if (mvi_dev) {
- mvi_dev->runing_req--;
- if (sas_protocol_ata(task->task_proto))
+ if (mvi_dev && mvi_dev->running_req) {
+ mvi_dev->running_req--;
+ if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
mvs_free_reg_set(mvi, mvi_dev);
}
mvs_slot_task_free(mvi, task, slot, slot_idx);
@@ -1888,10 +1938,10 @@ out:
return sts;
}
-void mvs_release_task(struct mvs_info *mvi,
+void mvs_do_release_task(struct mvs_info *mvi,
int phy_no, struct domain_device *dev)
{
- int i = 0; u32 slot_idx;
+ u32 slot_idx;
struct mvs_phy *phy;
struct mvs_port *port;
struct mvs_slot_info *slot, *slot2;
@@ -1900,6 +1950,10 @@ void mvs_release_task(struct mvs_info *mvi,
port = phy->port;
if (!port)
return;
+ /* clean cmpl queue in case request is already finished */
+ mvs_int_rx(mvi, false);
+
+
list_for_each_entry_safe(slot, slot2, &port->list, entry) {
struct sas_task *task;
@@ -1911,18 +1965,22 @@ void mvs_release_task(struct mvs_info *mvi,
mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
slot_idx, slot->slot_tag, task);
-
- if (task->task_proto & SAS_PROTOCOL_SSP) {
- mv_printk("attached with SSP task CDB[");
- for (i = 0; i < 16; i++)
- mv_printk(" %02x", task->ssp_task.cdb[i]);
- mv_printk(" ]\n");
- }
+ MVS_CHIP_DISP->command_active(mvi, slot_idx);
mvs_slot_complete(mvi, slot_idx, 1);
}
}
+void mvs_release_task(struct mvs_info *mvi,
+ struct domain_device *dev)
+{
+ int i, phyno[WIDE_PORT_MAX_PHY], num;
+ /* housekeeper */
+ num = mvs_find_dev_phyno(dev, phyno);
+ for (i = 0; i < num; i++)
+ mvs_do_release_task(mvi, phyno[i], dev);
+}
+
static void mvs_phy_disconnected(struct mvs_phy *phy)
{
phy->phy_attached = 0;
@@ -2029,16 +2087,18 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
* we need check the interrupt status which belongs to per port.
*/
- if (phy->irq_status & PHYEV_DCDR_ERR)
+ if (phy->irq_status & PHYEV_DCDR_ERR) {
mv_dprintk("port %d STP decoding error.\n",
- phy_no+mvi->id*mvi->chip->n_phy);
+ phy_no + mvi->id*mvi->chip->n_phy);
+ }
if (phy->irq_status & PHYEV_POOF) {
if (!(phy->phy_event & PHY_PLUG_OUT)) {
int dev_sata = phy->phy_type & PORT_TYPE_SATA;
int ready;
- mvs_release_task(mvi, phy_no, NULL);
+ mvs_do_release_task(mvi, phy_no, NULL);
phy->phy_event |= PHY_PLUG_OUT;
+ MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
mvs_handle_event(mvi,
(void *)(unsigned long)phy_no,
PHY_PLUG_EVENT);
@@ -2085,6 +2145,11 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
phy_no, tmp);
}
mvs_update_phyinfo(mvi, phy_no, 0);
+ if (phy->phy_type & PORT_TYPE_SAS) {
+ MVS_CHIP_DISP->phy_reset(mvi, phy_no, 2);
+ mdelay(10);
+ }
+
mvs_bytes_dmaed(mvi, phy_no);
/* whether driver is going to handle hot plug */
if (phy->phy_event & PHY_PLUG_OUT) {
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 885858b..77ddc7c 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <scsi/libsas.h>
+#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
#include <linux/version.h>
@@ -49,7 +50,7 @@
#define _MV_DUMP 0
#define MVS_ID_NOT_MAPPED 0x7f
/* #define DISABLE_HOTPLUG_DMA_FIX */
-#define MAX_EXP_RUNNING_REQ 2
+// #define MAX_EXP_RUNNING_REQ 2
#define WIDE_PORT_MAX_PHY 4
#define MV_DISABLE_NCQ 0
#define mv_printk(fmt, arg ...) \
@@ -129,6 +130,7 @@ struct mvs_dispatch {
void (*get_sas_addr)(void *buf, u32 buflen);
void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
+ void (*clear_srs_irq)(struct mvs_info *mvi, u8 reg_set, u8 clear_all);
void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
u32 tfs);
void (*start_delivery)(struct mvs_info *mvi, u32 tx);
@@ -236,9 +238,10 @@ struct mvs_device {
enum sas_dev_type dev_type;
struct mvs_info *mvi_info;
struct domain_device *sas_device;
+ struct timer_list timer;
u32 attached_phy;
u32 device_id;
- u32 runing_req;
+ u32 running_req;
u8 taskfileset;
u8 dev_status;
u16 reserved;
@@ -397,7 +400,9 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun);
int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags);
int mvs_I_T_nexus_reset(struct domain_device *dev);
int mvs_query_task(struct sas_task *task);
-void mvs_release_task(struct mvs_info *mvi, int phy_no,
+void mvs_release_task(struct mvs_info *mvi,
+ struct domain_device *dev);
+void mvs_do_release_task(struct mvs_info *mvi, int phy_no,
struct domain_device *dev);
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 909c00e..5ff8261 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4390,7 +4390,6 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
return -ENOMEM;
}
}
- memset(buffer, 0, fw_control->len);
memcpy(buffer, fw_control->buffer, fw_control->len);
flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index bff4f51..cd02cea 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -885,11 +885,13 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
u32 tag;
struct pm8001_hba_info *pm8001_ha;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
- u32 device_id = pm8001_dev->device_id;
+
pm8001_ha = pm8001_find_ha_by_dev(dev);
spin_lock_irqsave(&pm8001_ha->lock, flags);
pm8001_tag_alloc(pm8001_ha, &tag);
if (pm8001_dev) {
+ u32 device_id = pm8001_dev->device_id;
+
PM8001_DISC_DBG(pm8001_ha,
pm8001_printk("found dev[%d:%x] is gone.\n",
pm8001_dev->device_id, pm8001_dev->dev_type));
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 53aefff..c44e4ab 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3751,12 +3751,6 @@ static int pmcraid_check_ioctl_buffer(
return -EINVAL;
}
- /* buffer length can't be negetive */
- if (hdr->buffer_length < 0) {
- pmcraid_err("ioctl: invalid buffer length specified\n");
- return -EINVAL;
- }
-
/* check for appropriate buffer access */
if ((_IOC_DIR(cmd) & _IOC_READ) == _IOC_READ)
access = VERIFY_WRITE;
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index c51fd1f..5df782f 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,4 +1,5 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
- qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o
+ qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
+ qla_nx.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 1c7ef55..1e4cafa 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -12,13 +12,11 @@
#include <linux/delay.h>
static int qla24xx_vport_disable(struct fc_vport *, bool);
-static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
-int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
-static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
+
/* SYSFS attributes --------------------------------------------------------- */
static ssize_t
-qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
+qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -34,7 +32,7 @@ qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
}
static ssize_t
-qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
+qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -43,6 +41,12 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
int reading;
+ if (IS_QLA82XX(ha)) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "Firmware dump not supported for ISP82xx\n"));
+ return count;
+ }
+
if (off != 0)
return (0);
@@ -88,7 +92,7 @@ static struct bin_attribute sysfs_fw_dump_attr = {
};
static ssize_t
-qla2x00_sysfs_read_nvram(struct kobject *kobj,
+qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -107,7 +111,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
}
static ssize_t
-qla2x00_sysfs_write_nvram(struct kobject *kobj,
+qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -173,7 +177,7 @@ static struct bin_attribute sysfs_nvram_attr = {
};
static ssize_t
-qla2x00_sysfs_read_optrom(struct kobject *kobj,
+qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -189,7 +193,7 @@ qla2x00_sysfs_read_optrom(struct kobject *kobj,
}
static ssize_t
-qla2x00_sysfs_write_optrom(struct kobject *kobj,
+qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -220,7 +224,7 @@ static struct bin_attribute sysfs_optrom_attr = {
};
static ssize_t
-qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
+qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -277,6 +281,12 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
return count;
}
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+ qla_printk(KERN_WARNING, ha,
+ "HBA not online, failing NVRAM update.\n");
+ return -EAGAIN;
+ }
+
DEBUG2(qla_printk(KERN_INFO, ha,
"Reading flash region -- 0x%x/0x%x.\n",
ha->optrom_region_start, ha->optrom_region_size));
@@ -315,8 +325,8 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
else if (start == (ha->flt_region_boot * 4) ||
start == (ha->flt_region_fw * 4))
valid = 1;
- else if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
- valid = 1;
+ else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
+ valid = 1;
if (!valid) {
qla_printk(KERN_WARNING, ha,
"Invalid start region 0x%x/0x%x.\n", start, size);
@@ -377,7 +387,7 @@ static struct bin_attribute sysfs_optrom_ctl_attr = {
};
static ssize_t
-qla2x00_sysfs_read_vpd(struct kobject *kobj,
+qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -398,7 +408,7 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
}
static ssize_t
-qla2x00_sysfs_write_vpd(struct kobject *kobj,
+qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -451,7 +461,7 @@ static struct bin_attribute sysfs_vpd_attr = {
};
static ssize_t
-qla2x00_sysfs_read_sfp(struct kobject *kobj,
+qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -512,13 +522,14 @@ static struct bin_attribute sysfs_sfp_attr = {
};
static ssize_t
-qla2x00_sysfs_write_reset(struct kobject *kobj,
+qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
int type;
if (off != 0)
@@ -553,6 +564,20 @@ qla2x00_sysfs_write_reset(struct kobject *kobj,
"MPI reset failed on (%ld).\n", vha->host_no);
scsi_unblock_requests(vha->host);
break;
+ case 0x2025e:
+ if (!IS_QLA82XX(ha) || vha != base_vha) {
+ qla_printk(KERN_INFO, ha,
+ "FCoE ctx reset not supported for host%ld.\n",
+ vha->host_no);
+ return count;
+ }
+
+ qla_printk(KERN_INFO, ha,
+ "Issuing FCoE CTX reset on host%ld.\n", vha->host_no);
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_fcoe_ctx_reset(vha);
+ break;
}
return count;
}
@@ -567,7 +592,7 @@ static struct bin_attribute sysfs_reset_attr = {
};
static ssize_t
-qla2x00_sysfs_write_edc(struct kobject *kobj,
+qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -625,7 +650,7 @@ static struct bin_attribute sysfs_edc_attr = {
};
static ssize_t
-qla2x00_sysfs_write_edc_status(struct kobject *kobj,
+qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -675,7 +700,7 @@ qla2x00_sysfs_write_edc_status(struct kobject *kobj,
}
static ssize_t
-qla2x00_sysfs_read_edc_status(struct kobject *kobj,
+qla2x00_sysfs_read_edc_status(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -705,7 +730,7 @@ static struct bin_attribute sysfs_edc_status_attr = {
};
static ssize_t
-qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
+qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -757,7 +782,7 @@ static struct bin_attribute sysfs_xgmac_stats_attr = {
};
static ssize_t
-qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
+qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -838,7 +863,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
continue;
- if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
+ if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
continue;
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@@ -862,7 +887,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
continue;
- if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
+ if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
continue;
sysfs_remove_bin_file(&host->shost_gendev.kobj,
@@ -968,7 +993,8 @@ qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
int len = 0;
if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
- atomic_read(&vha->loop_state) == LOOP_DEAD)
+ atomic_read(&vha->loop_state) == LOOP_DEAD ||
+ vha->device_flags & DFLG_NO_CABLE)
len = snprintf(buf, PAGE_SIZE, "Link Down\n");
else if (atomic_read(&vha->loop_state) != LOOP_READY ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
@@ -1179,15 +1205,15 @@ qla24xx_84xx_fw_version_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (IS_QLA84XX(ha) && ha->cs84xx) {
- if (ha->cs84xx->op_fw_version == 0) {
- rval = qla84xx_verify_chip(vha, status);
- }
+ if (!IS_QLA84XX(ha))
+ return snprintf(buf, PAGE_SIZE, "\n");
+
+ if (ha->cs84xx && ha->cs84xx->op_fw_version == 0)
+ rval = qla84xx_verify_chip(vha, status);
if ((rval == QLA_SUCCESS) && (status[0] == 0))
return snprintf(buf, PAGE_SIZE, "%u\n",
(uint32_t)ha->cs84xx->op_fw_version);
- }
return snprintf(buf, PAGE_SIZE, "\n");
}
@@ -1237,7 +1263,7 @@ qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA8XXX_TYPE(vha->hw))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
@@ -1249,7 +1275,7 @@ qla2x00_vn_port_mac_address_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA8XXX_TYPE(vha->hw))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -1706,6 +1732,22 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
}
+ if (IS_QLA25XX(ha) && ql2xenabledif) {
+ if (ha->fw_attributes & BIT_4) {
+ vha->flags.difdix_supported = 1;
+ DEBUG18(qla_printk(KERN_INFO, ha,
+ "Registering for DIF/DIX type 1 and 3"
+ " protection.\n"));
+ scsi_host_set_prot(vha->host,
+ SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION
+ | SHOST_DIX_TYPE1_PROTECTION
+ | SHOST_DIX_TYPE3_PROTECTION);
+ scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC);
+ } else
+ vha->flags.difdix_supported = 0;
+ }
+
if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
&ha->pdev->dev)) {
DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
@@ -1825,582 +1867,6 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
return 0;
}
-/* BSG support for ELS/CT pass through */
-inline srb_t *
-qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
-{
- srb_t *sp;
- struct qla_hw_data *ha = vha->hw;
- struct srb_bsg_ctx *ctx;
-
- sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
- if (!sp)
- goto done;
- ctx = kzalloc(size, GFP_KERNEL);
- if (!ctx) {
- mempool_free(sp, ha->srb_mempool);
- goto done;
- }
-
- memset(sp, 0, sizeof(*sp));
- sp->fcport = fcport;
- sp->ctx = ctx;
-done:
- return sp;
-}
-
-static int
-qla2x00_process_els(struct fc_bsg_job *bsg_job)
-{
- struct fc_rport *rport;
- fc_port_t *fcport;
- struct Scsi_Host *host;
- scsi_qla_host_t *vha;
- struct qla_hw_data *ha;
- srb_t *sp;
- const char *type;
- int req_sg_cnt, rsp_sg_cnt;
- int rval = (DRIVER_ERROR << 16);
- uint16_t nextlid = 0;
- struct srb_bsg *els;
-
- /* Multiple SG's are not supported for ELS requests */
- if (bsg_job->request_payload.sg_cnt > 1 ||
- bsg_job->reply_payload.sg_cnt > 1) {
- DEBUG2(printk(KERN_INFO
- "multiple SG's are not supported for ELS requests"
- " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt,
- bsg_job->reply_payload.sg_cnt));
- rval = -EPERM;
- goto done;
- }
-
- /* ELS request for rport */
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
- rport = bsg_job->rport;
- fcport = *(fc_port_t **) rport->dd_data;
- host = rport_to_shost(rport);
- vha = shost_priv(host);
- ha = vha->hw;
- type = "FC_BSG_RPT_ELS";
-
- /* make sure the rport is logged in,
- * if not perform fabric login
- */
- if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "failed to login port %06X for ELS passthru\n",
- fcport->d_id.b24));
- rval = -EIO;
- goto done;
- }
- } else {
- host = bsg_job->shost;
- vha = shost_priv(host);
- ha = vha->hw;
- type = "FC_BSG_HST_ELS_NOLOGIN";
-
- /* Allocate a dummy fcport structure, since functions
- * preparing the IOCB and mailbox command retrieves port
- * specific information from fcport structure. For Host based
- * ELS commands there will be no fcport structure allocated
- */
- fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
- if (!fcport) {
- rval = -ENOMEM;
- goto done;
- }
-
- /* Initialize all required fields of fcport */
- fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
- fcport->d_id.b.al_pa =
- bsg_job->request->rqst_data.h_els.port_id[0];
- fcport->d_id.b.area =
- bsg_job->request->rqst_data.h_els.port_id[1];
- fcport->d_id.b.domain =
- bsg_job->request->rqst_data.h_els.port_id[2];
- fcport->loop_id =
- (fcport->d_id.b.al_pa == 0xFD) ?
- NPH_FABRIC_CONTROLLER : NPH_F_PORT;
- }
-
- if (!vha->flags.online) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "host not online\n"));
- rval = -EIO;
- goto done;
- }
-
- req_sg_cnt =
- dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- if (!req_sg_cnt) {
- rval = -ENOMEM;
- goto done_free_fcport;
- }
- rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- if (!rsp_sg_cnt) {
- rval = -ENOMEM;
- goto done_free_fcport;
- }
-
- if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
- (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
- {
- DEBUG2(printk(KERN_INFO
- "dma mapping resulted in different sg counts \
- [request_sg_cnt: %x dma_request_sg_cnt: %x\
- reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt, req_sg_cnt,
- bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
- rval = -EAGAIN;
- goto done_unmap_sg;
- }
-
- /* Alloc SRB structure */
- sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
- if (!sp) {
- rval = -ENOMEM;
- goto done_unmap_sg;
- }
-
- els = sp->ctx;
- els->ctx.type =
- (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
- SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
- els->bsg_job = bsg_job;
-
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
- "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
- bsg_job->request->rqst_data.h_els.command_code,
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS) {
- kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
- rval = -EIO;
- goto done_unmap_sg;
- }
- return rval;
-
-done_unmap_sg:
- dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- goto done_free_fcport;
-
-done_free_fcport:
- if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
- kfree(fcport);
-done:
- return rval;
-}
-
-static int
-qla2x00_process_ct(struct fc_bsg_job *bsg_job)
-{
- srb_t *sp;
- struct Scsi_Host *host = bsg_job->shost;
- scsi_qla_host_t *vha = shost_priv(host);
- struct qla_hw_data *ha = vha->hw;
- int rval = (DRIVER_ERROR << 16);
- int req_sg_cnt, rsp_sg_cnt;
- uint16_t loop_id;
- struct fc_port *fcport;
- char *type = "FC_BSG_HST_CT";
- struct srb_bsg *ct;
-
- /* pass through is supported only for ISP 4Gb or higher */
- if (!IS_FWI2_CAPABLE(ha)) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld):Firmware is not capable to support FC "
- "CT pass thru\n", vha->host_no));
- rval = -EPERM;
- goto done;
- }
-
- req_sg_cnt =
- dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- if (!req_sg_cnt) {
- rval = -ENOMEM;
- goto done;
- }
-
- rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- if (!rsp_sg_cnt) {
- rval = -ENOMEM;
- goto done;
- }
-
- if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
- (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
- {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "dma mapping resulted in different sg counts \
- [request_sg_cnt: %x dma_request_sg_cnt: %x\
- reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt, req_sg_cnt,
- bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
- rval = -EAGAIN;
- goto done_unmap_sg;
- }
-
- if (!vha->flags.online) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "host not online\n"));
- rval = -EIO;
- goto done_unmap_sg;
- }
-
- loop_id =
- (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
- >> 24;
- switch (loop_id) {
- case 0xFC:
- loop_id = cpu_to_le16(NPH_SNS);
- break;
- case 0xFA:
- loop_id = vha->mgmt_svr_loop_id;
- break;
- default:
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Unknown loop id: %x\n", loop_id));
- rval = -EINVAL;
- goto done_unmap_sg;
- }
-
- /* Allocate a dummy fcport structure, since functions preparing the
- * IOCB and mailbox command retrieves port specific information
- * from fcport structure. For Host based ELS commands there will be
- * no fcport structure allocated
- */
- fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
- if (!fcport)
- {
- rval = -ENOMEM;
- goto done_unmap_sg;
- }
-
- /* Initialize all required fields of fcport */
- fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
- fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
- fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
- fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
- fcport->loop_id = loop_id;
-
- /* Alloc SRB structure */
- sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
- if (!sp) {
- rval = -ENOMEM;
- goto done_free_fcport;
- }
-
- ct = sp->ctx;
- ct->ctx.type = SRB_CT_CMD;
- ct->bsg_job = bsg_job;
-
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
- "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
- (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS) {
- kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
- rval = -EIO;
- goto done_free_fcport;
- }
- return rval;
-
-done_free_fcport:
- kfree(fcport);
-done_unmap_sg:
- dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-done:
- return rval;
-}
-
-static int
-qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
-{
- struct Scsi_Host *host = bsg_job->shost;
- scsi_qla_host_t *vha = shost_priv(host);
- struct qla_hw_data *ha = vha->hw;
- int rval;
- uint8_t command_sent;
- uint32_t vendor_cmd;
- char *type;
- struct msg_echo_lb elreq;
- uint16_t response[MAILBOX_REGISTER_COUNT];
- uint8_t* fw_sts_ptr;
- uint8_t *req_data;
- dma_addr_t req_data_dma;
- uint32_t req_data_len;
- uint8_t *rsp_data;
- dma_addr_t rsp_data_dma;
- uint32_t rsp_data_len;
-
- if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
- test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
- rval = -EBUSY;
- goto done;
- }
-
- if (!vha->flags.online) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "host not online\n"));
- rval = -EIO;
- goto done;
- }
-
- elreq.req_sg_cnt =
- dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- if (!elreq.req_sg_cnt) {
- rval = -ENOMEM;
- goto done;
- }
- elreq.rsp_sg_cnt =
- dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- if (!elreq.rsp_sg_cnt) {
- rval = -ENOMEM;
- goto done;
- }
-
- if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
- (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
- {
- DEBUG2(printk(KERN_INFO
- "dma mapping resulted in different sg counts \
- [request_sg_cnt: %x dma_request_sg_cnt: %x\
- reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
- bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
- rval = -EAGAIN;
- goto done_unmap_sg;
- }
- req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
- req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
- &req_data_dma, GFP_KERNEL);
-
- rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
- &rsp_data_dma, GFP_KERNEL);
-
- /* Copy the request buffer in req_data now */
- sg_copy_to_buffer(bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, req_data,
- req_data_len);
-
- elreq.send_dma = req_data_dma;
- elreq.rcv_dma = rsp_data_dma;
- elreq.transfer_size = req_data_len;
-
- /* Vendor cmd : loopback or ECHO diagnostic
- * Options:
- * Loopback : Either internal or external loopback
- * ECHO: ECHO ELS or Vendor specific FC4 link data
- */
- vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
- elreq.options =
- *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
- + 1);
-
- switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
- case QL_VND_LOOPBACK:
- if (ha->current_topology != ISP_CFG_F) {
- type = "FC_BSG_HST_VENDOR_LOOPBACK";
-
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
- vha->host_no, type, vendor_cmd, elreq.options));
-
- command_sent = INT_DEF_LB_LOOPBACK_CMD;
- rval = qla2x00_loopback_test(vha, &elreq, response);
- if (IS_QLA81XX(ha)) {
- if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
- DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
- "ISP\n", __func__, vha->host_no));
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
- }
- }
- } else {
- type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
- vha->host_no, type, vendor_cmd, elreq.options));
-
- command_sent = INT_DEF_LB_ECHO_CMD;
- rval = qla2x00_echo_test(vha, &elreq, response);
- }
- break;
- case QLA84_RESET:
- if (!IS_QLA84XX(vha->hw)) {
- rval = -EINVAL;
- DEBUG16(printk(
- "%s(%ld): 8xxx exiting.\n",
- __func__, vha->host_no));
- return rval;
- }
- rval = qla84xx_reset(vha, &elreq, bsg_job);
- break;
- case QLA84_MGMT_CMD:
- if (!IS_QLA84XX(vha->hw)) {
- rval = -EINVAL;
- DEBUG16(printk(
- "%s(%ld): 8xxx exiting.\n",
- __func__, vha->host_no));
- return rval;
- }
- rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
- break;
- default:
- rval = -ENOSYS;
- }
-
- if (rval != QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
- rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
- bsg_job->reply->reply_payload_rcv_len = 0;
- fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
- memcpy( fw_sts_ptr, response, sizeof(response));
- fw_sts_ptr += sizeof(response);
- *fw_sts_ptr = command_sent;
- } else {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
- rval = bsg_job->reply->result = 0;
- bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
- bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
- fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
- memcpy(fw_sts_ptr, response, sizeof(response));
- fw_sts_ptr += sizeof(response);
- *fw_sts_ptr = command_sent;
- sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, rsp_data,
- rsp_data_len);
- }
- bsg_job->job_done(bsg_job);
-
-done_unmap_sg:
-
- if(req_data)
- dma_free_coherent(&ha->pdev->dev, req_data_len,
- req_data, req_data_dma);
- dma_unmap_sg(&ha->pdev->dev,
- bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- dma_unmap_sg(&ha->pdev->dev,
- bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-
-done:
- return rval;
-}
-
-static int
-qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
-{
- int ret = -EINVAL;
-
- switch (bsg_job->request->msgcode) {
- case FC_BSG_RPT_ELS:
- case FC_BSG_HST_ELS_NOLOGIN:
- ret = qla2x00_process_els(bsg_job);
- break;
- case FC_BSG_HST_CT:
- ret = qla2x00_process_ct(bsg_job);
- break;
- case FC_BSG_HST_VENDOR:
- ret = qla2x00_process_vendor_specific(bsg_job);
- break;
- case FC_BSG_HST_ADD_RPORT:
- case FC_BSG_HST_DEL_RPORT:
- case FC_BSG_RPT_CT:
- default:
- DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
- break;
- }
- return ret;
-}
-
-static int
-qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
-{
- scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
- struct qla_hw_data *ha = vha->hw;
- srb_t *sp;
- int cnt, que;
- unsigned long flags;
- struct req_que *req;
- struct srb_bsg *sp_bsg;
-
- /* find the bsg job from the active list of commands */
- spin_lock_irqsave(&ha->hardware_lock, flags);
- for (que = 0; que < ha->max_req_queues; que++) {
- req = ha->req_q_map[que];
- if (!req)
- continue;
-
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++ ) {
- sp = req->outstanding_cmds[cnt];
-
- if (sp) {
- sp_bsg = (struct srb_bsg*)sp->ctx;
-
- if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
- (sp_bsg->ctx.type == SRB_ELS_CMD_RPT)
- || ( sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
- (sp_bsg->bsg_job == bsg_job)) {
- if (ha->isp_ops->abort_command(sp)) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld): mbx abort_command failed\n", vha->host_no));
- bsg_job->req->errors = bsg_job->reply->result = -EIO;
- } else {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld): mbx abort_command success\n", vha->host_no));
- bsg_job->req->errors = bsg_job->reply->result = 0;
- }
- goto done;
- }
- }
- }
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) SRB not found to abort\n", vha->host_no));
- bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
- return 0;
-
-done:
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (bsg_job->request->msgcode == FC_BSG_HST_CT)
- kfree(sp->fcport);
- kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
- return 0;
-}
-
struct fc_function_template qla2xxx_transport_functions = {
.show_host_node_name = 1,
@@ -2502,7 +1968,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
speed = FC_PORTSPEED_10GBIT;
else if (IS_QLA25XX(ha))
speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
@@ -2516,125 +1982,3 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
speed = FC_PORTSPEED_1GBIT;
fc_host_supported_speeds(vha->host) = speed;
}
-static int
-qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
-{
- int ret = 0;
- int cmd;
- uint16_t cmd_status;
-
- DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
-
- cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
- == A84_RESET_FLAG_ENABLE_DIAG_FW ?
- A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
- ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
- &cmd_status);
- return ret;
-}
-
-static int
-qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
-{
- struct access_chip_84xx *mn;
- dma_addr_t mn_dma, mgmt_dma;
- void *mgmt_b = NULL;
- int ret = 0;
- int rsp_hdr_len, len = 0;
- struct qla84_msg_mgmt *ql84_mgmt;
-
- ql84_mgmt = (struct qla84_msg_mgmt *) vmalloc(sizeof(struct qla84_msg_mgmt));
- ql84_mgmt->cmd =
- *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
- ql84_mgmt->mgmtp.u.mem.start_addr =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
- ql84_mgmt->len =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
- ql84_mgmt->mgmtp.u.config.id =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
- ql84_mgmt->mgmtp.u.config.param0 =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
- ql84_mgmt->mgmtp.u.config.param1 =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
- ql84_mgmt->mgmtp.u.info.type =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
- ql84_mgmt->mgmtp.u.info.context =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
-
- rsp_hdr_len = bsg_job->request_payload.payload_len;
-
- mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
- if (mn == NULL) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
- "failed%lu\n", __func__, ha->host_no));
- return -ENOMEM;
- }
-
- memset(mn, 0, sizeof (struct access_chip_84xx));
-
- mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
- mn->entry_count = 1;
-
- switch (ql84_mgmt->cmd) {
- case QLA84_MGMT_READ_MEM:
- mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
- mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
- break;
- case QLA84_MGMT_WRITE_MEM:
- mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
- mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
- break;
- case QLA84_MGMT_CHNG_CONFIG:
- mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
- mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
- mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
- mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
- break;
- case QLA84_MGMT_GET_INFO:
- mn->options = cpu_to_le16(ACO_REQUEST_INFO);
- mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
- mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
- break;
- default:
- ret = -EIO;
- goto exit_mgmt0;
- }
-
- if ((len == ql84_mgmt->len) &&
- ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
- mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
- &mgmt_dma, GFP_KERNEL);
- if (mgmt_b == NULL) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
- "failed%lu\n", __func__, ha->host_no));
- ret = -ENOMEM;
- goto exit_mgmt0;
- }
- mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
- mn->dseg_count = cpu_to_le16(1);
- mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
- mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
- mn->dseg_length = cpu_to_le32(len);
-
- if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
- memcpy(mgmt_b, ql84_mgmt->payload, len);
- }
- }
-
- ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
- if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
- || (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
- if (ret != QLA_SUCCESS)
- DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
- __func__, ha->host_no));
- } else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
- (ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
- }
-
- if (mgmt_b)
- dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
-
-exit_mgmt0:
- dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
- return ret;
-}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
new file mode 100644
index 0000000..b905dfe
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -0,0 +1,1212 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2008 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+
+/* BSG support for ELS/CT pass through */
+inline srb_t *
+qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
+{
+ srb_t *sp;
+ struct qla_hw_data *ha = vha->hw;
+ struct srb_ctx *ctx;
+
+ sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
+ if (!sp)
+ goto done;
+ ctx = kzalloc(size, GFP_KERNEL);
+ if (!ctx) {
+ mempool_free(sp, ha->srb_mempool);
+ sp = NULL;
+ goto done;
+ }
+
+ memset(sp, 0, sizeof(*sp));
+ sp->fcport = fcport;
+ sp->ctx = ctx;
+done:
+ return sp;
+}
+
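+/*
+ * qla24xx_fcp_prio_cfg_valid() - sanity-check FCP priority configuration data.
+ * Returns 1 if the buffer carries the "HQOS" signature and, when @flag is
+ * set, at least one priority entry is tagged valid; returns 0 otherwise.
+ */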
+int
+qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
+{
+ int i, ret, num_valid;
+ uint8_t *bcode;
+ struct qla_fcp_prio_entry *pri_entry;
+
+ ret = 1;
+ num_valid = 0;
+ bcode = (uint8_t *)pri_cfg;
+
+ if (bcode[0x0] != 'H' || bcode[0x1] != 'Q' || bcode[0x2] != 'O' ||
+ bcode[0x3] != 'S') {
+ return 0;
+ }
+ if (flag != 1)
+ return ret;
+
+ pri_entry = &pri_cfg->entry[0];
+ for (i = 0; i < pri_cfg->num_entries; i++) {
+ if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
+ num_valid++;
+ pri_entry++;
+ }
+
+ if (num_valid == 0)
+ ret = 0;
+
+ return ret;
+}
+
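+/*
+ * qla24xx_proc_fcp_prio_cfg_cmd() - service the FCP priority vendor command.
+ * Dispatches the QLFC_FCP_PRIO_{DISABLE,ENABLE,GET_CONFIG,SET_CONFIG}
+ * sub-commands: enable/disable toggle the feature and push the change to
+ * all ports, get/set copy the priority configuration between the BSG
+ * payload and ha->fcp_prio_cfg (validating the data on a set).
+ */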
+static int
+qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int ret = 0;
+ uint32_t len;
+ uint32_t oper;
+
+ bsg_job->reply->reply_payload_rcv_len = 0;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ ret = -EBUSY;
+ goto exit_fcp_prio_cfg;
+ }
+
+ /* Get the sub command */
+ oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ /* Only set config is allowed if config memory is not allocated */
+ if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
+ ret = -EINVAL;
+ goto exit_fcp_prio_cfg;
+ }
+ switch (oper) {
+ case QLFC_FCP_PRIO_DISABLE:
+ if (ha->flags.fcp_prio_enabled) {
+ ha->flags.fcp_prio_enabled = 0;
+ ha->fcp_prio_cfg->attributes &=
+ ~FCP_PRIO_ATTR_ENABLE;
+ qla24xx_update_all_fcp_prio(vha);
+ bsg_job->reply->result = DID_OK;
+ } else {
+ ret = -EINVAL;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ goto exit_fcp_prio_cfg;
+ }
+ break;
+
+ case QLFC_FCP_PRIO_ENABLE:
+ if (!ha->flags.fcp_prio_enabled) {
+ if (ha->fcp_prio_cfg) {
+ ha->flags.fcp_prio_enabled = 1;
+ ha->fcp_prio_cfg->attributes |=
+ FCP_PRIO_ATTR_ENABLE;
+ qla24xx_update_all_fcp_prio(vha);
+ bsg_job->reply->result = DID_OK;
+ } else {
+ ret = -EINVAL;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ goto exit_fcp_prio_cfg;
+ }
+ }
+ break;
+
+ case QLFC_FCP_PRIO_GET_CONFIG:
+ len = bsg_job->reply_payload.payload_len;
+ if (!len || len > FCP_PRIO_CFG_SIZE) {
+ ret = -EINVAL;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ goto exit_fcp_prio_cfg;
+ }
+
+ bsg_job->reply->result = DID_OK;
+ bsg_job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
+ len);
+
+ break;
+
+ case QLFC_FCP_PRIO_SET_CONFIG:
+ len = bsg_job->request_payload.payload_len;
+ if (!len || len > FCP_PRIO_CFG_SIZE) {
+ bsg_job->reply->result = (DID_ERROR << 16);
+ ret = -EINVAL;
+ goto exit_fcp_prio_cfg;
+ }
+
+ if (!ha->fcp_prio_cfg) {
+ ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
+ if (!ha->fcp_prio_cfg) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to allocate memory "
+ "for fcp prio config data (%x).\n",
+ FCP_PRIO_CFG_SIZE);
+ bsg_job->reply->result = (DID_ERROR << 16);
+ ret = -ENOMEM;
+ goto exit_fcp_prio_cfg;
+ }
+ }
+
+ memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
+ FCP_PRIO_CFG_SIZE);
+
+ /* validate fcp priority data */
+ if (!qla24xx_fcp_prio_cfg_valid(
+ (struct qla_fcp_prio_cfg *)
+ ha->fcp_prio_cfg, 1)) {
+ bsg_job->reply->result = (DID_ERROR << 16);
+ ret = -EINVAL;
+ /* If the buffer was invalid, the
+ * fcp_prio_cfg data is of no use
+ */
+ vfree(ha->fcp_prio_cfg);
+ ha->fcp_prio_cfg = NULL;
+ goto exit_fcp_prio_cfg;
+ }
+
+ ha->flags.fcp_prio_enabled = 0;
+ if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
+ ha->flags.fcp_prio_enabled = 1;
+ qla24xx_update_all_fcp_prio(vha);
+ bsg_job->reply->result = DID_OK;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+exit_fcp_prio_cfg:
+ bsg_job->job_done(bsg_job);
+ return ret;
+}
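+
+/*
+ * qla2x00_process_els() - handle an ELS pass-through BSG request.
+ * Logs in the rport (or builds a temporary fcport for host-based,
+ * no-login requests), maps the request/reply scatterlists, allocates
+ * an SRB context and hands the command to qla2x00_start_sp().
+ */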
+static int
+qla2x00_process_els(struct fc_bsg_job *bsg_job)
+{
+ struct fc_rport *rport;
+ fc_port_t *fcport;
+ struct Scsi_Host *host;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ srb_t *sp;
+ const char *type;
+ int req_sg_cnt, rsp_sg_cnt;
+ int rval = (DRIVER_ERROR << 16);
+ uint16_t nextlid = 0;
+ struct srb_ctx *els;
+
+ /* Multiple SG's are not supported for ELS requests */
+ if (bsg_job->request_payload.sg_cnt > 1 ||
+ bsg_job->reply_payload.sg_cnt > 1) {
+ DEBUG2(printk(KERN_INFO
+ "multiple SG's are not supported for ELS requests"
+ " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt,
+ bsg_job->reply_payload.sg_cnt));
+ rval = -EPERM;
+ goto done;
+ }
+
+ /* ELS request for rport */
+ if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
+ rport = bsg_job->rport;
+ fcport = *(fc_port_t **) rport->dd_data;
+ host = rport_to_shost(rport);
+ vha = shost_priv(host);
+ ha = vha->hw;
+ type = "FC_BSG_RPT_ELS";
+
+ /* make sure the rport is logged in,
+ * if not perform fabric login
+ */
+ if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "failed to login port %06X for ELS passthru\n",
+ fcport->d_id.b24));
+ rval = -EIO;
+ goto done;
+ }
+ } else {
+ host = bsg_job->shost;
+ vha = shost_priv(host);
+ ha = vha->hw;
+ type = "FC_BSG_HST_ELS_NOLOGIN";
+
+ /* Allocate a dummy fcport structure, since functions
+ * preparing the IOCB and mailbox command retrieves port
+ * specific information from fcport structure. For Host based
+ * ELS commands there will be no fcport structure allocated
+ */
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (!fcport) {
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ /* Initialize all required fields of fcport */
+ fcport->vha = vha;
+ fcport->vp_idx = vha->vp_idx;
+ fcport->d_id.b.al_pa =
+ bsg_job->request->rqst_data.h_els.port_id[0];
+ fcport->d_id.b.area =
+ bsg_job->request->rqst_data.h_els.port_id[1];
+ fcport->d_id.b.domain =
+ bsg_job->request->rqst_data.h_els.port_id[2];
+ fcport->loop_id =
+ (fcport->d_id.b.al_pa == 0xFD) ?
+ NPH_FABRIC_CONTROLLER : NPH_F_PORT;
+ }
+
+ if (!vha->flags.online) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "host not online\n"));
+ rval = -EIO;
+ goto done;
+ }
+
+ req_sg_cnt =
+ dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!req_sg_cnt) {
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!rsp_sg_cnt) {
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts \
+ [request_sg_cnt: %x dma_request_sg_cnt: %x\
+ reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
+ if (!sp) {
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ els = sp->ctx;
+ els->type =
+ (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
+ SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
+ els->name =
+ (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
+ "bsg_els_rpt" : "bsg_els_hst");
+ els->u.bsg_job = bsg_job;
+
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
+ "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
+ bsg_job->request->rqst_data.h_els.command_code,
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa));
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ rval = -EIO;
+ goto done_unmap_sg;
+ }
+ return rval;
+
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ goto done_free_fcport;
+
+done_free_fcport:
+ if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
+ kfree(fcport);
+done:
+ return rval;
+}
+
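+/*
+ * qla2x00_process_ct() - handle a CT pass-through BSG request.
+ * Rejected on pre-FWI2 adapters; maps the payload scatterlists, resolves
+ * the destination loop id from the CT preamble, allocates a dummy fcport
+ * plus SRB context and issues the command via qla2x00_start_sp().
+ */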
+static int
+qla2x00_process_ct(struct fc_bsg_job *bsg_job)
+{
+ srb_t *sp;
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = (DRIVER_ERROR << 16);
+ int req_sg_cnt, rsp_sg_cnt;
+ uint16_t loop_id;
+ struct fc_port *fcport;
+ char *type = "FC_BSG_HST_CT";
+ struct srb_ctx *ct;
+
+ /* pass through is supported only for ISP 4Gb or higher */
+ if (!IS_FWI2_CAPABLE(ha)) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld):Firmware is not capable to support FC "
+ "CT pass thru\n", vha->host_no));
+ rval = -EPERM;
+ goto done;
+ }
+
+ req_sg_cnt =
+ dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!req_sg_cnt) {
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!rsp_sg_cnt) {
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "[request_sg_cnt: %x dma_request_sg_cnt: %x\
+ reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ if (!vha->flags.online) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "host not online\n"));
+ rval = -EIO;
+ goto done_unmap_sg;
+ }
+
+ loop_id =
+ (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
+ >> 24;
+ switch (loop_id) {
+ case 0xFC:
+ loop_id = cpu_to_le16(NPH_SNS);
+ break;
+ case 0xFA:
+ loop_id = vha->mgmt_svr_loop_id;
+ break;
+ default:
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "Unknown loop id: %x\n", loop_id));
+ rval = -EINVAL;
+ goto done_unmap_sg;
+ }
+
+ /* Allocate a dummy fcport structure, since functions preparing the
+ * IOCB and mailbox command retrieves port specific information
+ * from fcport structure. For Host based ELS commands there will be
+ * no fcport structure allocated
+ */
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (!fcport) {
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ /* Initialize all required fields of fcport */
+ fcport->vha = vha;
+ fcport->vp_idx = vha->vp_idx;
+ fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
+ fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
+ fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
+ fcport->loop_id = loop_id;
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
+ if (!sp) {
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ ct = sp->ctx;
+ ct->type = SRB_CT_CMD;
+ ct->name = "bsg_ct";
+ ct->u.bsg_job = bsg_job;
+
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
+ "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
+ (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa));
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ rval = -EIO;
+ goto done_free_fcport;
+ }
+ return rval;
+
+done_free_fcport:
+ kfree(fcport);
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done:
+ return rval;
+}
+
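+/*
+ * qla2x00_process_loopback() - run a vendor loopback/ECHO diagnostic.
+ * Copies the request payload into a DMA buffer and issues either a
+ * loopback test (when not on an F-port topology) or an ELS ECHO test,
+ * then returns the firmware mailbox status and any response data in
+ * the BSG reply.
+ */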
+static int
+qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+ uint8_t command_sent;
+ char *type;
+ struct msg_echo_lb elreq;
+ uint16_t response[MAILBOX_REGISTER_COUNT];
+ uint8_t *fw_sts_ptr;
+ uint8_t *req_data = NULL;
+ dma_addr_t req_data_dma;
+ uint32_t req_data_len;
+ uint8_t *rsp_data = NULL;
+ dma_addr_t rsp_data_dma;
+ uint32_t rsp_data_len;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ return -EBUSY;
+
+ if (!vha->flags.online) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n"));
+ return -EIO;
+ }
+
+ elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
+ DMA_TO_DEVICE);
+
+ if (!elreq.req_sg_cnt)
+ return -ENOMEM;
+
+ elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
+ DMA_FROM_DEVICE);
+
+ if (!elreq.rsp_sg_cnt) {
+ rval = -ENOMEM;
+ goto done_unmap_req_sg;
+ }
+
+ if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts "
+ "[request_sg_cnt: %x dma_request_sg_cnt: %x "
+ "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+ req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
+ req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
+ &req_data_dma, GFP_KERNEL);
+ if (!req_data) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data "
+ "failed for host=%lu\n", __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
+ &rsp_data_dma, GFP_KERNEL);
+ if (!rsp_data) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data "
+ "failed for host=%lu\n", __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_free_dma_req;
+ }
+
+ /* Copy the request buffer in req_data now */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, req_data, req_data_len);
+
+ elreq.send_dma = req_data_dma;
+ elreq.rcv_dma = rsp_data_dma;
+ elreq.transfer_size = req_data_len;
+
+ elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ if (ha->current_topology != ISP_CFG_F) {
+ type = "FC_BSG_HST_VENDOR_LOOPBACK";
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld) bsg rqst type: %s\n",
+ vha->host_no, type));
+
+ command_sent = INT_DEF_LB_LOOPBACK_CMD;
+ rval = qla2x00_loopback_test(vha, &elreq, response);
+ if (IS_QLA81XX(ha)) {
+ if (response[0] == MBS_COMMAND_ERROR &&
+ response[1] == MBS_LB_RESET) {
+ DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
+ "ISP\n", __func__, vha->host_no));
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ }
+ } else {
+ type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type));
+ command_sent = INT_DEF_LB_ECHO_CMD;
+ rval = qla2x00_echo_test(vha, &elreq, response);
+ }
+
+ if (rval) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request %s failed\n", vha->host_no, type));
+
+ fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+ sizeof(struct fc_bsg_reply);
+
+ memcpy(fw_sts_ptr, response, sizeof(response));
+ fw_sts_ptr += sizeof(response);
+ *fw_sts_ptr = command_sent;
+ rval = 0;
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ } else {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request %s completed\n", vha->host_no, type));
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+ sizeof(response) + sizeof(uint8_t);
+ bsg_job->reply->reply_payload_rcv_len =
+ bsg_job->reply_payload.payload_len;
+ fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+ sizeof(struct fc_bsg_reply);
+ memcpy(fw_sts_ptr, response, sizeof(response));
+ fw_sts_ptr += sizeof(response);
+ *fw_sts_ptr = command_sent;
+ bsg_job->reply->result = DID_OK;
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, rsp_data,
+ rsp_data_len);
+ }
+ bsg_job->job_done(bsg_job);
+
+ dma_free_coherent(&ha->pdev->dev, rsp_data_len,
+ rsp_data, rsp_data_dma);
+done_free_dma_req:
+ dma_free_coherent(&ha->pdev->dev, req_data_len,
+ req_data, req_data_dma);
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done_unmap_req_sg:
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ return rval;
+}
+
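+/*
+ * qla84xx_reset() - reset the ISP84xx companion chip on request.
+ * Valid only on QLA84xx adapters; selects operational vs. diagnostic
+ * firmware from the vendor command flag and reports the outcome in
+ * the BSG reply.
+ */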
+static int
+qla84xx_reset(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ uint32_t flag;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ return -EBUSY;
+
+ if (!IS_QLA84XX(ha)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
+ "exiting.\n", vha->host_no));
+ return -EINVAL;
+ }
+
+ flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
+
+ if (rval) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx reset failed\n", vha->host_no));
+ rval = bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+
+ } else {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx reset completed\n", vha->host_no));
+ bsg_job->reply->result = DID_OK;
+ }
+
+ bsg_job->job_done(bsg_job);
+ return rval;
+}
+
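+/*
+ * qla84xx_updatefw() - flash new firmware to the ISP84xx companion chip.
+ * Copies the BSG request payload into a DMA buffer, builds a VERIFY_CHIP
+ * IOCB (optionally selecting the diagnostic firmware image) and issues it
+ * with an extended 120 second timeout.
+ */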
+static int
+qla84xx_updatefw(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ struct verify_chip_entry_84xx *mn = NULL;
+ dma_addr_t mn_dma, fw_dma;
+ void *fw_buf = NULL;
+ int rval = 0;
+ uint32_t sg_cnt;
+ uint32_t data_len;
+ uint16_t options;
+ uint32_t flag;
+ uint32_t fw_ver;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ return -EBUSY;
+
+ if (!IS_QLA84XX(ha)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
+ "exiting.\n", vha->host_no));
+ return -EINVAL;
+ }
+
+ sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!sg_cnt)
+ return -ENOMEM;
+
+ if (sg_cnt != bsg_job->request_payload.sg_cnt) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts "
+ "request_sg_cnt: %x dma_request_sg_cnt: %x ",
+ bsg_job->request_payload.sg_cnt, sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ data_len = bsg_job->request_payload.payload_len;
+ fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
+ &fw_dma, GFP_KERNEL);
+ if (!fw_buf) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf "
+ "failed for host=%lu\n", __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, fw_buf, data_len);
+
+ mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
+ if (!mn) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
+ "failed for host=%lu\n", __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_free_fw_buf;
+ }
+
+ flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
+
+ memset(mn, 0, sizeof(struct access_chip_84xx));
+ mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
+ mn->entry_count = 1;
+
+ options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
+ if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
+ options |= VCO_DIAG_FW;
+
+ mn->options = cpu_to_le16(options);
+ mn->fw_ver = cpu_to_le32(fw_ver);
+ mn->fw_size = cpu_to_le32(data_len);
+ mn->fw_seq_size = cpu_to_le32(data_len);
+ mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
+ mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
+ mn->dseg_length = cpu_to_le32(data_len);
+ mn->data_seg_cnt = cpu_to_le16(1);
+
+ rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
+
+ if (rval) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx updatefw failed\n", vha->host_no));
+
+ rval = bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+
+ } else {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx updatefw completed\n", vha->host_no));
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK;
+ }
+
+ bsg_job->job_done(bsg_job);
+ dma_pool_free(ha->s_dma_pool, mn, mn_dma);
+
+done_free_fw_buf:
+ dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
+
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ return rval;
+}
+
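+/*
+ * qla84xx_mgmt_cmd() - ISP84xx management pass-through.
+ * Builds an ACCESS_CHIP IOCB for the READ_MEM/WRITE_MEM/GET_INFO/
+ * CHNG_CONFIG sub-commands carried after the fc_bsg_request header,
+ * mapping the request or reply payload as required and copying any
+ * returned data back into the BSG reply.
+ */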
+static int
+qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ struct access_chip_84xx *mn = NULL;
+ dma_addr_t mn_dma, mgmt_dma;
+ void *mgmt_b = NULL;
+ int rval = 0;
+ struct qla_bsg_a84_mgmt *ql84_mgmt;
+ uint32_t sg_cnt;
+ uint32_t data_len = 0;
+ uint32_t dma_direction = DMA_NONE;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ return -EBUSY;
+
+ if (!IS_QLA84XX(ha)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
+ "exiting.\n", vha->host_no));
+ return -EINVAL;
+ }
+
+ ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
+ sizeof(struct fc_bsg_request));
+ if (!ql84_mgmt) {
+ DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
+ __func__, vha->host_no));
+ return -EINVAL;
+ }
+
+ mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
+ if (!mn) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
+ "failed for host=%lu\n", __func__, vha->host_no));
+ return -ENOMEM;
+ }
+
+ memset(mn, 0, sizeof(struct access_chip_84xx));
+ mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
+ mn->entry_count = 1;
+
+ switch (ql84_mgmt->mgmt.cmd) {
+ case QLA84_MGMT_READ_MEM:
+ case QLA84_MGMT_GET_INFO:
+ sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!sg_cnt) {
+ rval = -ENOMEM;
+ goto exit_mgmt;
+ }
+
+ dma_direction = DMA_FROM_DEVICE;
+
+ if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts "
+ "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
+ bsg_job->reply_payload.sg_cnt, sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ data_len = bsg_job->reply_payload.payload_len;
+
+ mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
+ &mgmt_dma, GFP_KERNEL);
+ if (!mgmt_b) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
+ "failed for host=%lu\n",
+ __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
+ mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
+ mn->parameter1 =
+ cpu_to_le32(
+ ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
+
+ } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
+ mn->options = cpu_to_le16(ACO_REQUEST_INFO);
+ mn->parameter1 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
+
+ mn->parameter2 =
+ cpu_to_le32(
+ ql84_mgmt->mgmt.mgmtp.u.info.context);
+ }
+ break;
+
+ case QLA84_MGMT_WRITE_MEM:
+ sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ if (!sg_cnt) {
+ rval = -ENOMEM;
+ goto exit_mgmt;
+ }
+
+ dma_direction = DMA_TO_DEVICE;
+
+ if (sg_cnt != bsg_job->request_payload.sg_cnt) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts "
+ "request_sg_cnt: %x dma_request_sg_cnt: %x ",
+ bsg_job->request_payload.sg_cnt, sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ data_len = bsg_job->request_payload.payload_len;
+ mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
+ &mgmt_dma, GFP_KERNEL);
+ if (!mgmt_b) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
+ "failed for host=%lu\n",
+ __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
+
+ mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
+ mn->parameter1 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
+ break;
+
+ case QLA84_MGMT_CHNG_CONFIG:
+ mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
+ mn->parameter1 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
+
+ mn->parameter2 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
+
+ mn->parameter3 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
+ break;
+
+ default:
+ rval = -EIO;
+ goto exit_mgmt;
+ }
+
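+	/* Except for CHNG_CONFIG, the command carries a payload; describe it in the IOCB. */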
+ if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
+ mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
+ mn->dseg_count = cpu_to_le16(1);
+ mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
+ mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
+ mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
+ }
+
+ rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
+
+ if (rval) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx mgmt failed\n", vha->host_no));
+
+ rval = bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+
+ } else {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx mgmt completed\n", vha->host_no));
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK;
+
+ if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
+ (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
+ bsg_job->reply->reply_payload_rcv_len =
+ bsg_job->reply_payload.payload_len;
+
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, mgmt_b,
+ data_len);
+ }
+ }
+
+ bsg_job->job_done(bsg_job);
+
+done_unmap_sg:
+ if (mgmt_b)
+ dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
+
+ if (dma_direction == DMA_TO_DEVICE)
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ else if (dma_direction == DMA_FROM_DEVICE)
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+exit_mgmt:
+ dma_pool_free(ha->s_dma_pool, mn, mn_dma);
+
+ return rval;
+}
+
+static int
+qla24xx_iidma(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ struct qla_port_param *port_param = NULL;
+ fc_port_t *fcport = NULL;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ uint8_t *rsp_ptr = NULL;
+
+ bsg_job->reply->reply_payload_rcv_len = 0;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ return -EBUSY;
+
+ if (!IS_IIDMA_CAPABLE(vha->hw)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not "
+ "supported\n", __func__, vha->host_no));
+ return -EINVAL;
+ }
+
+ port_param = (struct qla_port_param *)((char *)bsg_job->request +
+ sizeof(struct fc_bsg_request));
+ if (!port_param) {
+ DEBUG2(printk("%s(%ld): port_param header not provided, "
+ "exiting.\n", __func__, vha->host_no));
+ return -EINVAL;
+ }
+
+ if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
+ DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n",
+ __func__, vha->host_no));
+ return -EINVAL;
+ }
+
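+	/* Find the target port whose WWPN matches the request. */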
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->port_type != FCT_TARGET)
+ continue;
+
+ if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
+ fcport->port_name, sizeof(fcport->port_name)))
+ continue;
+ break;
+ }
+
+ if (!fcport) {
+ DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n",
+ __func__, vha->host_no));
+ return -EINVAL;
+ }
+
+ if (port_param->mode)
+ rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
+ port_param->speed, mb);
+ else
+ rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
+ &port_param->speed, mb);
+
+ if (rval) {
+ DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for "
+ "%02x%02x%02x%02x%02x%02x%02x%02x -- "
+ "%04x %x %04x %04x.\n",
+ vha->host_no, fcport->port_name[0],
+ fcport->port_name[1],
+ fcport->port_name[2], fcport->port_name[3],
+ fcport->port_name[4], fcport->port_name[5],
+ fcport->port_name[6], fcport->port_name[7], rval,
+ fcport->fp_speed, mb[0], mb[1]));
+ rval = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+
+ } else {
+ if (!port_param->mode) {
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+ sizeof(struct qla_port_param);
+
+ rsp_ptr = ((uint8_t *)bsg_job->reply) +
+ sizeof(struct fc_bsg_reply);
+
+ memcpy(rsp_ptr, port_param,
+ sizeof(struct qla_port_param));
+ }
+
+ bsg_job->reply->result = DID_OK;
+ }
+
+ bsg_job->job_done(bsg_job);
+ return rval;
+}
+
+static int
+qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
+{
+ switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
+ case QL_VND_LOOPBACK:
+ return qla2x00_process_loopback(bsg_job);
+
+ case QL_VND_A84_RESET:
+ return qla84xx_reset(bsg_job);
+
+ case QL_VND_A84_UPDATE_FW:
+ return qla84xx_updatefw(bsg_job);
+
+ case QL_VND_A84_MGMT_CMD:
+ return qla84xx_mgmt_cmd(bsg_job);
+
+ case QL_VND_IIDMA:
+ return qla24xx_iidma(bsg_job);
+
+ case QL_VND_FCP_PRIO_CFG_CMD:
+ return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
+
+ default:
+ bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_job->job_done(bsg_job);
+ return -ENOSYS;
+ }
+}
+
+int
+qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
+{
+ int ret = -EINVAL;
+
+ switch (bsg_job->request->msgcode) {
+ case FC_BSG_RPT_ELS:
+ case FC_BSG_HST_ELS_NOLOGIN:
+ ret = qla2x00_process_els(bsg_job);
+ break;
+ case FC_BSG_HST_CT:
+ ret = qla2x00_process_ct(bsg_job);
+ break;
+ case FC_BSG_HST_VENDOR:
+ ret = qla2x00_process_vendor_specific(bsg_job);
+ break;
+ case FC_BSG_HST_ADD_RPORT:
+ case FC_BSG_HST_DEL_RPORT:
+ case FC_BSG_RPT_CT:
+ default:
+ DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
+ break;
+ }
+ return ret;
+}
+
+int
+qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
+{
+ scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ int cnt, que;
+ unsigned long flags;
+ struct req_que *req;
+ struct srb_ctx *sp_bsg;
+
+ /* find the bsg job from the active list of commands */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ continue;
+
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp) {
+ sp_bsg = sp->ctx;
+
+ if (((sp_bsg->type == SRB_CT_CMD) ||
+ (sp_bsg->type == SRB_ELS_CMD_HST))
+ && (sp_bsg->u.bsg_job == bsg_job)) {
+ if (ha->isp_ops->abort_command(sp)) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx "
+ "abort_command failed\n",
+ vha->host_no));
+ bsg_job->req->errors =
+ bsg_job->reply->result = -EIO;
+ } else {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx "
+ "abort_command success\n",
+ vha->host_no));
+ bsg_job->req->errors =
+ bsg_job->reply->result = 0;
+ }
+ goto done;
+ }
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld) SRB not found to abort\n", vha->host_no));
+ bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
+ return 0;
+
+done:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
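+	/* CT pass-through jobs use a driver-allocated fcport; release it with the ctx. */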
+ if (bsg_job->request->msgcode == FC_BSG_HST_CT)
+ kfree(sp->fcport);
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ return 0;
+}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
new file mode 100644
index 0000000..76ed92d
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -0,0 +1,135 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2008 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_BSG_H
+#define __QLA_BSG_H
+
+/* BSG Vendor specific commands */
+#define QL_VND_LOOPBACK 0x01
+#define QL_VND_A84_RESET 0x02
+#define QL_VND_A84_UPDATE_FW 0x03
+#define QL_VND_A84_MGMT_CMD 0x04
+#define QL_VND_IIDMA 0x05
+#define QL_VND_FCP_PRIO_CFG_CMD 0x06
+
+/* BSG definitions for interpreting CommandSent field */
+#define INT_DEF_LB_LOOPBACK_CMD 0
+#define INT_DEF_LB_ECHO_CMD 1
+
+/* BSG Vendor specific definitions */
+#define A84_ISSUE_WRITE_TYPE_CMD 0
+#define A84_ISSUE_READ_TYPE_CMD 1
+#define A84_CLEANUP_CMD 2
+#define A84_ISSUE_RESET_OP_FW 3
+#define A84_ISSUE_RESET_DIAG_FW 4
+#define A84_ISSUE_UPDATE_OPFW_CMD 5
+#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
+
+struct qla84_mgmt_param {
+ union {
+ struct {
+ uint32_t start_addr;
+ } mem; /* for QLA84_MGMT_READ/WRITE_MEM */
+ struct {
+ uint32_t id;
+#define QLA84_MGMT_CONFIG_ID_UIF 1
+#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
+#define QLA84_MGMT_CONFIG_ID_PAUSE 3
+#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
+
+ uint32_t param0;
+ uint32_t param1;
+ } config; /* for QLA84_MGMT_CHNG_CONFIG */
+
+ struct {
+ uint32_t type;
+#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
+#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
+#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
+#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
+#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
+#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
+#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
+
+ uint32_t context;
+/*
+ * context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
+ */
+#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
+#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
+#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
+#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
+#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
+#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
+#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
+#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
+#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
+#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
+
+/*
+ * context definitions for QLA84_MGMT_INFO_PORT_STAT
+ */
+#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
+#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
+#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
+#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
+#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
+#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
+
+
+/*
+ * context definitions for QLA84_MGMT_INFO_LIF_STAT
+ */
+#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
+#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
+#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
+#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
+#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
+
+ } info; /* for QLA84_MGMT_GET_INFO */
+ } u;
+};
+
+struct qla84_msg_mgmt {
+ uint16_t cmd;
+#define QLA84_MGMT_READ_MEM 0x00
+#define QLA84_MGMT_WRITE_MEM 0x01
+#define QLA84_MGMT_CHNG_CONFIG 0x02
+#define QLA84_MGMT_GET_INFO 0x03
+ uint16_t rsrvd;
+ struct qla84_mgmt_param mgmtp;/* parameters for cmd */
+ uint32_t len; /* bytes in payload following this struct */
+ uint8_t payload[0]; /* payload for cmd */
+};
+
+struct qla_bsg_a84_mgmt {
+ struct qla84_msg_mgmt mgmt;
+} __attribute__ ((packed));
+
+struct qla_scsi_addr {
+ uint16_t bus;
+ uint16_t target;
+} __attribute__ ((packed));
+
+struct qla_ext_dest_addr {
+ union {
+ uint8_t wwnn[8];
+ uint8_t wwpn[8];
+ uint8_t id[4];
+ struct qla_scsi_addr scsi_addr;
+ } dest_addr;
+ uint16_t dest_type;
+#define EXT_DEF_TYPE_WWPN 2
+ uint16_t lun;
+ uint16_t padding[2];
+} __attribute__ ((packed));
+
+struct qla_port_param {
+ struct qla_ext_dest_addr fc_scsi_addr;
+ uint16_t mode;
+ uint16_t speed;
+} __attribute__ ((packed));
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index cb2eca4..2afc8a3 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -769,6 +769,9 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
void *nxt;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ if (IS_QLA82XX(ha))
+ return;
+
risc_address = ext_mem_cnt = 0;
flags = 0;
@@ -1660,4 +1663,62 @@ qla2x00_dump_buffer(uint8_t * b, uint32_t size)
printk("\n");
}
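+/*
+ * Hex-dump @size bytes at @b, 16 per line, collapsing runs of identical
+ * 16-byte lines into a single line plus a repeat count.
+ */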
+void
+qla2x00_dump_buffer_zipped(uint8_t *b, uint32_t size)
+{
+ uint32_t cnt;
+ uint8_t c;
+ uint8_t last16[16], cur16[16];
+ uint32_t lc = 0, num_same16 = 0, j;
+
+ printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 "
+ "Ah Bh Ch Dh Eh Fh\n");
+ printk(KERN_DEBUG "----------------------------------------"
+ "----------------------\n");
+
+ for (cnt = 0; cnt < size;) {
+ c = *b++;
+ cur16[lc++] = c;
+
+ cnt++;
+ if (cnt % 16)
+ continue;
+
+ /* We have 16 now */
+ lc = 0;
+ if (num_same16 == 0) {
+ memcpy(last16, cur16, 16);
+ num_same16++;
+ continue;
+ }
+ if (memcmp(cur16, last16, 16) == 0) {
+ num_same16++;
+ continue;
+ }
+ for (j = 0; j < 16; j++)
+ printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
+ printk(KERN_DEBUG "\n");
+
+ if (num_same16 > 1)
+ printk(KERN_DEBUG "> prev pattern repeats (%u)"
+ "more times\n", num_same16-1);
+ memcpy(last16, cur16, 16);
+ num_same16 = 1;
+ }
+
+ if (num_same16) {
+ for (j = 0; j < 16; j++)
+ printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
+ printk(KERN_DEBUG "\n");
+
+ if (num_same16 > 1)
+ printk(KERN_DEBUG "> prev pattern repeats (%u)"
+ "more times\n", num_same16-1);
+ }
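+	/* Dump any trailing bytes that did not fill a complete 16-byte line. */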
+ if (lc) {
+ for (j = 0; j < lc; j++)
+ printk(KERN_DEBUG "%02x ", (uint32_t)cur16[j]);
+ printk(KERN_DEBUG "\n");
+ }
+}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index d6d9c86..916c81f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -27,6 +27,9 @@
/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
+/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
+
+/* #define QL_PRINTK_BUF */ /* Captures printk to buffer */
/*
* Macros use for debugging the driver.
@@ -139,6 +142,13 @@
#define DEBUG17(x) do {} while (0)
#endif
+#if defined(QL_DEBUG_LEVEL_18)
+#define DEBUG18(x) do {if (ql2xextended_error_logging) x; } while (0)
+#else
+#define DEBUG18(x) do {} while (0)
+#endif
+
+
/*
* Firmware Dump structure definition
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index afa9561..8396109 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -33,7 +33,10 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
-#define QLA2XXX_DRIVER_NAME "qla2xxx"
+#include "qla_bsg.h"
+#include "qla_nx.h"
+#define QLA2XXX_DRIVER_NAME "qla2xxx"
+#define QLA2XXX_APIDEV "ql2xapidev"
/*
* We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
@@ -186,6 +189,16 @@
struct req_que;
/*
+ * (sd.h is not exported, hence this local definition)
+ * Data Integrity Field tuple.
+ */
+struct sd_dif_tuple {
+ __be16 guard_tag; /* Checksum */
+ __be16 app_tag; /* Opaque storage */
+ __be32 ref_tag; /* Target LBA or indirect LBA */
+};
+
+/*
* SCSI Request Block
*/
typedef struct srb {
@@ -205,40 +218,73 @@ typedef struct srb {
/*
* SRB flag definitions
*/
-#define SRB_DMA_VALID BIT_0 /* Command sent to ISP */
+#define SRB_DMA_VALID BIT_0 /* Command sent to ISP */
+#define SRB_FCP_CMND_DMA_VALID BIT_12 /* DIF: DSD List valid */
+#define SRB_CRC_CTX_DMA_VALID BIT_2 /* DIF: context DMA valid */
+#define SRB_CRC_PROT_DMA_VALID BIT_4 /* DIF: prot DMA valid */
+#define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */
+
+/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
+#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
/*
* SRB extensions.
*/
-struct srb_ctx {
-#define SRB_LOGIN_CMD 1
-#define SRB_LOGOUT_CMD 2
- uint16_t type;
- struct timer_list timer;
-
- void (*free)(srb_t *sp);
- void (*timeout)(srb_t *sp);
-};
-
-struct srb_logio {
- struct srb_ctx ctx;
-
+struct srb_iocb {
+ union {
+ struct {
+ uint16_t flags;
#define SRB_LOGIN_RETRIED BIT_0
#define SRB_LOGIN_COND_PLOGI BIT_1
#define SRB_LOGIN_SKIP_PRLI BIT_2
- uint16_t flags;
+ uint16_t data[2];
+ } logio;
+ struct {
+ /*
+ * Values for flags field below are as
+ * defined in tsk_mgmt_entry struct
+ * for control_flags field in qla_fw.h.
+ */
+ uint32_t flags;
+ uint32_t lun;
+ uint32_t data;
+ } tmf;
+ struct {
+ /*
+ * values for modif field below are as
+ * defined in mrk_entry_24xx struct
+ * for the modifier field in qla_fw.h.
+ */
+ uint8_t modif;
+ uint16_t lun;
+ uint32_t data;
+ } marker;
+ } u;
+
+ struct timer_list timer;
+
+ void (*done)(srb_t *);
+ void (*free)(srb_t *);
+ void (*timeout)(srb_t *);
};
-struct srb_bsg_ctx {
+/* Values for srb_ctx type */
+#define SRB_LOGIN_CMD 1
+#define SRB_LOGOUT_CMD 2
#define SRB_ELS_CMD_RPT 3
#define SRB_ELS_CMD_HST 4
-#define SRB_CT_CMD 5
- uint16_t type;
-};
+#define SRB_CT_CMD 5
+#define SRB_ADISC_CMD 6
+#define SRB_TM_CMD 7
+#define SRB_MARKER_CMD 8
-struct srb_bsg {
- struct srb_bsg_ctx ctx;
- struct fc_bsg_job *bsg_job;
+struct srb_ctx {
+ uint16_t type;
+ char *name;
+ union {
+ struct srb_iocb *iocb_cmd;
+ struct fc_bsg_job *bsg_job;
+ } u;
};
struct msg_echo_lb {
@@ -416,6 +462,7 @@ typedef union {
struct device_reg_2xxx isp;
struct device_reg_24xx isp24;
struct device_reg_25xxmq isp25mq;
+ struct device_reg_82xx isp82;
} device_reg_t;
#define ISP_REQ_Q_IN(ha, reg) \
@@ -1299,6 +1346,66 @@ typedef struct {
uint32_t dseg_4_length; /* Data segment 4 length. */
} cont_a64_entry_t;
+#define PO_MODE_DIF_INSERT 0
+#define PO_MODE_DIF_REMOVE BIT_0
+#define PO_MODE_DIF_PASS BIT_1
+#define PO_MODE_DIF_REPLACE (BIT_0 + BIT_1)
+#define PO_ENABLE_DIF_BUNDLING BIT_8
+#define PO_ENABLE_INCR_GUARD_SEED BIT_3
+#define PO_DISABLE_INCR_REF_TAG BIT_5
+#define PO_DISABLE_GUARD_CHECK BIT_4
+/*
+ * ISP queue - 64-Bit addressing, continuation crc entry structure definition.
+ */
+struct crc_context {
+ uint32_t handle; /* System handle. */
+ uint32_t ref_tag;
+ uint16_t app_tag;
+ uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
+ uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
+ uint16_t guard_seed; /* Initial Guard Seed */
+ uint16_t prot_opts; /* Requested Data Protection Mode */
+ uint16_t blk_size; /* Data size in bytes */
+ uint16_t runt_blk_guard; /* Guard value for runt block (tape
+ * only) */
+	uint32_t byte_count;		/* Total byte count / total data
+ * transfer count */
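+	/* DSD layout differs depending on whether DIF bundling is enabled. */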
+ union {
+ struct {
+ uint32_t reserved_1;
+ uint16_t reserved_2;
+ uint16_t reserved_3;
+ uint32_t reserved_4;
+ uint32_t data_address[2];
+ uint32_t data_length;
+ uint32_t reserved_5[2];
+ uint32_t reserved_6;
+ } nobundling;
+ struct {
+ uint32_t dif_byte_count; /* Total DIF byte
+ * count */
+ uint16_t reserved_1;
+ uint16_t dseg_count; /* Data segment count */
+ uint32_t reserved_2;
+ uint32_t data_address[2];
+ uint32_t data_length;
+ uint32_t dif_address[2];
+ uint32_t dif_length; /* Data segment 0
+ * length */
+ } bundling;
+ } u;
+
+ struct fcp_cmnd fcp_cmnd;
+ dma_addr_t crc_ctx_dma;
+ /* List of DMA context transfers */
+ struct list_head dsd_list;
+
+ /* This structure should not exceed 512 bytes */
+};
+
+#define CRC_CONTEXT_LEN_FW (offsetof(struct crc_context, fcp_cmnd.lun))
+#define CRC_CONTEXT_FCPCMND_OFF (offsetof(struct crc_context, fcp_cmnd.lun))
+
/*
* ISP queue - status entry structure definition.
*/
@@ -1359,6 +1466,7 @@ typedef struct {
#define CS_ABORTED 0x5 /* System aborted command. */
#define CS_TIMEOUT 0x6 /* Timeout error. */
#define CS_DATA_OVERRUN 0x7 /* Data overrun. */
+#define CS_DIF_ERROR 0xC /* DIF error detected */
#define CS_DATA_UNDERRUN 0x15 /* Data Underrun. */
#define CS_QUEUE_FULL 0x1C /* Queue Full. */
@@ -1579,6 +1687,8 @@ typedef struct fc_port {
uint16_t loop_id;
uint16_t old_loop_id;
+ uint8_t fcp_prio;
+
uint8_t fabric_port_name[WWN_SIZE];
uint16_t fp_speed;
@@ -1611,6 +1721,7 @@ typedef struct fc_port {
#define FCF_FABRIC_DEVICE BIT_0
#define FCF_LOGIN_NEEDED BIT_1
#define FCF_FCP2_DEVICE BIT_2
+#define FCF_ASYNC_SENT BIT_3
/* No loop ID flag. */
#define FC_NO_LOOP_ID 0x1000
@@ -2109,6 +2220,7 @@ struct isp_operations {
int (*get_flash_version) (struct scsi_qla_host *, void *);
int (*start_scsi) (srb_t *);
+ int (*abort_isp) (struct scsi_qla_host *);
};
/* MSI-X Support *************************************************************/
@@ -2143,6 +2255,8 @@ enum qla_work_type {
QLA_EVT_ASYNC_LOGIN_DONE,
QLA_EVT_ASYNC_LOGOUT,
QLA_EVT_ASYNC_LOGOUT_DONE,
+ QLA_EVT_ASYNC_ADISC,
+ QLA_EVT_ASYNC_ADISC_DONE,
QLA_EVT_UEVENT,
};
@@ -2295,6 +2409,7 @@ struct qla_hw_data {
uint32_t eeh_busy :1;
uint32_t cpu_affinity_enabled :1;
uint32_t disable_msix_handshake :1;
+ uint32_t fcp_prio_enabled :1;
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -2382,7 +2497,8 @@ struct qla_hw_data {
#define DT_ISP2532 BIT_11
#define DT_ISP8432 BIT_12
#define DT_ISP8001 BIT_13
-#define DT_ISP_LAST (DT_ISP8001 << 1)
+#define DT_ISP8021 BIT_14
+#define DT_ISP_LAST (DT_ISP8021 << 1)
#define DT_IIDMA BIT_26
#define DT_FWI2 BIT_27
@@ -2405,6 +2521,7 @@ struct qla_hw_data {
#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
#define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001)
+#define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021)
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2415,8 +2532,10 @@ struct qla_hw_data {
#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
IS_QLA84XX(ha))
#define IS_QLA81XX(ha) (IS_QLA8001(ha))
+#define IS_QLA8XXX_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha))
#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
- IS_QLA25XX(ha) || IS_QLA81XX(ha))
+ IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
+ IS_QLA82XX(ha))
#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
(ha)->flags.msix_enabled)
@@ -2496,6 +2615,9 @@ struct qla_hw_data {
dma_addr_t ex_init_cb_dma;
struct ex_init_cb_81xx *ex_init_cb;
+ void *async_pd;
+ dma_addr_t async_pd_dma;
+
/* These are used by mailbox operations. */
volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
@@ -2598,6 +2720,8 @@ struct qla_hw_data {
uint32_t flt_region_nvram;
uint32_t flt_region_npiv_conf;
uint32_t flt_region_gold_fw;
+ uint32_t flt_region_fcp_prio;
+ uint32_t flt_region_bootload;
/* Needed for BEACON */
uint16_t beacon_blink_led;
@@ -2626,6 +2750,39 @@ struct qla_hw_data {
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
struct qlfc_fw fw_buf;
+
+ /* FCP_CMND priority support */
+ struct qla_fcp_prio_cfg *fcp_prio_cfg;
+
+ struct dma_pool *dl_dma_pool;
+#define DSD_LIST_DMA_POOL_SIZE 512
+
+ struct dma_pool *fcp_cmnd_dma_pool;
+ mempool_t *ctx_mempool;
+#define FCP_CMND_DMA_POOL_SIZE 512
+
+ unsigned long nx_pcibase; /* Base I/O address */
+ uint8_t *nxdb_rd_ptr; /* Doorbell read pointer */
+	unsigned long nxdb_wr_ptr;	/* Doorbell write pointer */
+
+ uint32_t crb_win;
+ uint32_t curr_window;
+ uint32_t ddr_mn_window;
+ unsigned long mn_win_crb;
+ unsigned long ms_win_crb;
+ int qdr_sn_window;
+ uint32_t nx_dev_init_timeout;
+ uint32_t nx_reset_timeout;
+ rwlock_t hw_lock;
+ uint16_t portnum; /* port number */
+ int link_width;
+ struct fw_blob *hablob;
+ struct qla82xx_legacy_intr_set nx_legacy_intr;
+
+ uint16_t gbl_dsd_inuse;
+ uint16_t gbl_dsd_avail;
+ struct list_head gbl_dsd_list;
+#define NUM_DSD_CHAIN 4096
};
/*
@@ -2650,6 +2807,7 @@ typedef struct scsi_qla_host {
uint32_t management_server_logged_in :1;
uint32_t process_response_queue :1;
+ uint32_t difdix_supported:1;
} flags;
atomic_t loop_state;
@@ -2678,10 +2836,13 @@ typedef struct scsi_qla_host {
#define VP_DPC_NEEDED 14 /* wake up for VP dpc handling */
#define UNLOADING 15
#define NPIV_CONFIG_NEEDED 16
+#define ISP_UNRECOVERABLE 17
+#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
#define DFLG_NO_CABLE BIT_1
+#define DFLG_DEV_FAILED BIT_5
/* ISP configuration data. */
uint16_t loop_id; /* Host adapter loop id */
@@ -2739,6 +2900,8 @@ typedef struct scsi_qla_host {
#define VP_ERR_ADAP_NORESOURCES 5
struct qla_hw_data *hw;
struct req_que *req;
+ int fw_heartbeat_counter;
+ int seconds_since_last_heartbeat;
} scsi_qla_host_t;
/*
@@ -2791,134 +2954,16 @@ typedef struct scsi_qla_host {
#define OPTROM_SIZE_24XX 0x100000
#define OPTROM_SIZE_25XX 0x200000
#define OPTROM_SIZE_81XX 0x400000
+#define OPTROM_SIZE_82XX 0x800000
+
+#define OPTROM_BURST_SIZE 0x1000
+#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
+
+#define QLA_DSDS_PER_IOCB 37
#include "qla_gbl.h"
#include "qla_dbg.h"
#include "qla_inline.h"
#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
-
-/*
- * BSG Vendor specific commands
- */
-
-#define QL_VND_LOOPBACK 0x01
-#define QLA84_RESET 0x02
-#define QLA84_UPDATE_FW 0x03
-#define QLA84_MGMT_CMD 0x04
-
-/* BSG definations for interpreting CommandSent field */
-#define INT_DEF_LB_LOOPBACK_CMD 0
-#define INT_DEF_LB_ECHO_CMD 1
-
-/* BSG Vendor specific definations */
-typedef struct _A84_RESET {
- uint16_t Flags;
- uint16_t Reserved;
-#define A84_RESET_FLAG_ENABLE_DIAG_FW 1
-} __attribute__((packed)) A84_RESET, *PA84_RESET;
-
-#define A84_ISSUE_WRITE_TYPE_CMD 0
-#define A84_ISSUE_READ_TYPE_CMD 1
-#define A84_CLEANUP_CMD 2
-#define A84_ISSUE_RESET_OP_FW 3
-#define A84_ISSUE_RESET_DIAG_FW 4
-#define A84_ISSUE_UPDATE_OPFW_CMD 5
-#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
-
-struct qla84_mgmt_param {
- union {
- struct {
- uint32_t start_addr;
- } mem; /* for QLA84_MGMT_READ/WRITE_MEM */
- struct {
- uint32_t id;
-#define QLA84_MGMT_CONFIG_ID_UIF 1
-#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
-#define QLA84_MGMT_CONFIG_ID_PAUSE 3
-#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
-
- uint32_t param0;
- uint32_t param1;
- } config; /* for QLA84_MGMT_CHNG_CONFIG */
-
- struct {
- uint32_t type;
-#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
-#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
-#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
-#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
-#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
-#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
-#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
-
- uint32_t context;
-/*
-* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
-*/
-#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
-#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
-#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
-#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
-#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
-#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
-#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
-#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
-#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
-#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
-
-/*
-* context definitions for QLA84_MGMT_INFO_PORT_STAT
-*/
-#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
-#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
-#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
-#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
-#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
-#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
-
-
-/*
-* context definitions for QLA84_MGMT_INFO_LIF_STAT
-*/
-#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
-#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
-#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
-#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
-#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
-
- } info; /* for QLA84_MGMT_GET_INFO */
- } u;
-};
-
-struct qla84_msg_mgmt {
- uint16_t cmd;
-#define QLA84_MGMT_READ_MEM 0x00
-#define QLA84_MGMT_WRITE_MEM 0x01
-#define QLA84_MGMT_CHNG_CONFIG 0x02
-#define QLA84_MGMT_GET_INFO 0x03
- uint16_t rsrvd;
- struct qla84_mgmt_param mgmtp;/* parameters for cmd */
- uint32_t len; /* bytes in payload following this struct */
- uint8_t payload[0]; /* payload for cmd */
-};
-
-struct msg_update_fw {
- /*
- * diag_fw = 0 operational fw
- * otherwise diagnostic fw
- * offset, len, fw_len are present to overcome the current limitation
- * of 128Kb xfer size. The fw is sent in smaller chunks. Each chunk
- * specifies the byte "offset" where it fits in the fw buffer. The
- * number of bytes in each chunk is specified in "len". "fw_len"
- * is the total size of fw. The first chunk should start at offset = 0.
- * When offset+len == fw_len, the fw is written to the HBA.
- */
- uint32_t diag_fw;
- uint32_t offset;/* start offset */
- uint32_t len; /* num bytes in cur xfer */
- uint32_t fw_len; /* size of fw in bytes */
- uint8_t fw_bytes[0];
-};
-
#endif
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 42c5587..93f8339 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -400,6 +400,7 @@ struct cmd_type_6 {
struct scsi_lun lun; /* FCP LUN (BE). */
uint16_t control_flags; /* Control flags. */
+#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
#define CF_READ_DATA BIT_1
#define CF_WRITE_DATA BIT_0
@@ -466,6 +467,43 @@ struct cmd_type_7 {
uint32_t dseg_0_len; /* Data segment 0 length. */
};
+#define COMMAND_TYPE_CRC_2 0x6A /* Command Type CRC_2 (Type 6)
+ * (T10-DIF) */
+struct cmd_type_crc_2 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t nport_handle; /* N_PORT handle. */
+ uint16_t timeout; /* Command timeout. */
+
+ uint16_t dseg_count; /* Data segment count. */
+
+ uint16_t fcp_rsp_dseg_len; /* FCP_RSP DSD length. */
+
+ struct scsi_lun lun; /* FCP LUN (BE). */
+
+ uint16_t control_flags; /* Control flags. */
+
+ uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ uint32_t fcp_cmnd_dseg_address[2]; /* Data segment address. */
+
+ uint32_t fcp_rsp_dseg_address[2]; /* Data segment address. */
+
+ uint32_t byte_count; /* Total byte count. */
+
+ uint8_t port_id[3]; /* PortID of destination port. */
+ uint8_t vp_index;
+
+ uint32_t crc_context_address[2]; /* Data segment address. */
+ uint16_t crc_context_len; /* Data segment length. */
+ uint16_t reserved_1; /* MUST be set to 0. */
+};
+
+
/*
* ISP queue - status entry structure definition.
*/
@@ -496,10 +534,17 @@ struct sts_entry_24xx {
uint32_t sense_len; /* FCP SENSE length. */
uint32_t rsp_data_len; /* FCP response data length. */
-
uint8_t data[28]; /* FCP response/sense information. */
+ /*
+ * If DIF Error is set in comp_status, these additional fields are
+ * defined:
+ * &data[10] : uint8_t report_runt_bg[2]; - computed guard
+ * &data[12] : uint8_t actual_dif[8]; - DIF Data recieved
+ * &data[20] : uint8_t expected_dif[8]; - DIF Data computed
+ */
};
+
/*
* Status entry completion status
*/
@@ -841,6 +886,8 @@ struct device_reg_24xx {
#define FA_HW_EVENT_ENTRY_SIZE 4
#define FA_NPIV_CONF0_ADDR 0x5C000
#define FA_NPIV_CONF1_ADDR 0x5D000
+#define FA_FCP_PRIO0_ADDR 0x10000
+#define FA_FCP_PRIO1_ADDR 0x12000
/*
* Flash Error Log Event Codes.
@@ -1274,6 +1321,8 @@ struct qla_flt_header {
#define FLT_REG_NPIV_CONF_0 0x29
#define FLT_REG_NPIV_CONF_1 0x2a
#define FLT_REG_GOLD_FW 0x2f
+#define FLT_REG_FCP_PRIO_0 0x87
+#define FLT_REG_FCP_PRIO_1 0x88
struct qla_flt_region {
uint32_t code;
@@ -1750,6 +1799,61 @@ struct ex_init_cb_81xx {
#define FARX_ACCESS_FLASH_CONF_81XX 0x7FFD0000
#define FARX_ACCESS_FLASH_DATA_81XX 0x7F800000
+/* FCP priority config defines *************************************/
+/* operations */
+#define QLFC_FCP_PRIO_DISABLE 0x0
+#define QLFC_FCP_PRIO_ENABLE 0x1
+#define QLFC_FCP_PRIO_GET_CONFIG 0x2
+#define QLFC_FCP_PRIO_SET_CONFIG 0x3
+
+struct qla_fcp_prio_entry {
+ uint16_t flags; /* Describes parameter(s) in FCP */
+ /* priority entry that are valid */
+#define FCP_PRIO_ENTRY_VALID 0x1
+#define FCP_PRIO_ENTRY_TAG_VALID 0x2
+#define FCP_PRIO_ENTRY_SPID_VALID 0x4
+#define FCP_PRIO_ENTRY_DPID_VALID 0x8
+#define FCP_PRIO_ENTRY_LUNB_VALID 0x10
+#define FCP_PRIO_ENTRY_LUNE_VALID 0x20
+#define FCP_PRIO_ENTRY_SWWN_VALID 0x40
+#define FCP_PRIO_ENTRY_DWWN_VALID 0x80
+ uint8_t tag; /* Priority value */
+ uint8_t reserved; /* Reserved for future use */
+ uint32_t src_pid; /* Src port id. high order byte */
+ /* unused; -1 (wild card) */
+	uint32_t dst_pid;	/* Dst port id. high order byte */
+ /* unused; -1 (wild card) */
+ uint16_t lun_beg; /* 1st lun num of lun range. */
+ /* -1 (wild card) */
+ uint16_t lun_end; /* 2nd lun num of lun range. */
+ /* -1 (wild card) */
+ uint8_t src_wwpn[8]; /* Source WWPN: -1 (wild card) */
+ uint8_t dst_wwpn[8]; /* Destination WWPN: -1 (wild card) */
+};
+
+struct qla_fcp_prio_cfg {
+ uint8_t signature[4]; /* "HQOS" signature of config data */
+ uint16_t version; /* 1: Initial version */
+ uint16_t length; /* config data size in num bytes */
+ uint16_t checksum; /* config data bytes checksum */
+ uint16_t num_entries; /* Number of entries */
+ uint16_t size_of_entry; /* Size of each entry in num bytes */
+ uint8_t attributes; /* enable/disable, persistence */
+#define FCP_PRIO_ATTR_DISABLE 0x0
+#define FCP_PRIO_ATTR_ENABLE 0x1
+#define FCP_PRIO_ATTR_PERSIST 0x2
+ uint8_t reserved; /* Reserved for future use */
+#define FCP_PRIO_CFG_HDR_SIZE 0x10
+ struct qla_fcp_prio_entry entry[1]; /* fcp priority entries */
+#define FCP_PRIO_CFG_ENTRY_SIZE 0x20
+};
+
+#define FCP_PRIO_CFG_SIZE	(32*1024) /* fcp prio data per port */
+
+/* 25XX Support ****************************************************/
+#define FA_FCP_PRIO0_ADDR_25 0x3C000
+#define FA_FCP_PRIO1_ADDR_25 0x3E000
+
/* 81XX Flash locations -- occupies second 2MB region. */
#define FA_BOOT_CODE_ADDR_81 0x80000
#define FA_RISC_CODE_ADDR_81 0xA0000
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 3a89bc5..8217c3b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -44,6 +44,7 @@ extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
extern void qla2x00_update_fcports(scsi_qla_host_t *);
extern int qla2x00_abort_isp(scsi_qla_host_t *);
+extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
@@ -55,10 +56,20 @@ extern void qla84xx_put_chip(struct scsi_qla_host *);
extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
-extern int qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
+extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
+extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
+extern int qla2x00_async_marker(fc_port_t *, uint16_t, uint8_t);
+extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
+extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *,
+ struct srb_iocb *);
+extern void qla2x00_async_marker_done(struct scsi_qla_host *, fc_port_t *,
+ struct srb_iocb *);
extern fc_port_t *
qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
@@ -79,6 +90,13 @@ extern int ql2xmaxqueues;
extern int ql2xmultique_tag;
extern int ql2xfwloadbin;
extern int ql2xetsenable;
+extern int ql2xshiftctondsd;
+extern int ql2xdbwr;
+extern int ql2xdontresethba;
+extern int ql2xasynctmfenable;
+extern int ql2xenabledif;
+extern int ql2xenablehba_err_chk;
+extern int ql2xtargetreset;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -93,6 +111,10 @@ extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
fc_port_t *, uint16_t *);
+extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
+ fc_port_t *, uint16_t *);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
@@ -135,6 +157,7 @@ extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
extern int qla2x00_wait_for_chip_reset(scsi_qla_host_t *);
+extern int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *);
extern void qla2xxx_wake_dpc(struct scsi_qla_host *);
extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *);
@@ -157,6 +180,10 @@ int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
uint16_t, uint16_t, uint8_t);
extern int qla2x00_start_sp(srb_t *);
extern void qla2x00_ctx_sp_free(srb_t *);
+extern uint16_t qla24xx_calc_iocbs(uint16_t);
+extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
+extern int qla24xx_dif_start_scsi(srb_t *);
+
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -328,6 +355,9 @@ extern int
qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
extern int qla2x00_get_data_rate(scsi_qla_host_t *);
+extern int qla24xx_set_fcp_prio(scsi_qla_host_t *, uint16_t, uint16_t,
+ uint16_t *);
+
/*
* Global Function Prototypes in qla_isr.c source file.
*/
@@ -340,6 +370,7 @@ qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *);
extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
extern void qla2x00_free_irqs(scsi_qla_host_t *);
+extern int qla2x00_get_data_rate(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_sup.c source file.
*/
@@ -384,6 +415,7 @@ extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
+extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_dbg.c source file.
@@ -395,6 +427,7 @@ extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
extern void qla2x00_dump_regs(scsi_qla_host_t *);
extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
+extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
/*
* Global Function Prototypes in qla_gs.c source file.
@@ -430,7 +463,10 @@ extern void qla2x00_init_host_attr(scsi_qla_host_t *);
extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
-extern int qla2x00_echo_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
+extern int qla2x00_echo_test(scsi_qla_host_t *,
+ struct msg_echo_lb *, uint16_t *);
+extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
+extern int qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *, uint8_t);
/*
* Global Function Prototypes in qla_dfs.c source file.
@@ -459,4 +495,88 @@ extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
+/* qla82xx related functions */
+
+/* PCI related functions */
+extern int qla82xx_pci_config(struct scsi_qla_host *);
+extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int);
+extern int qla82xx_pci_mem_write_2M(struct qla_hw_data *, u64, void *, int);
+extern char *qla82xx_pci_info_str(struct scsi_qla_host *, char *);
+extern int qla82xx_pci_region_offset(struct pci_dev *, int);
+extern int qla82xx_pci_region_len(struct pci_dev *, int);
+extern int qla82xx_iospace_config(struct qla_hw_data *);
+
+/* Initialization related functions */
+extern void qla82xx_reset_chip(struct scsi_qla_host *);
+extern void qla82xx_config_rings(struct scsi_qla_host *);
+extern int qla82xx_nvram_config(struct scsi_qla_host *);
+extern int qla82xx_pinit_from_rom(scsi_qla_host_t *);
+extern int qla82xx_load_firmware(scsi_qla_host_t *);
+extern int qla82xx_reset_hw(scsi_qla_host_t *);
+extern int qla82xx_load_risc_blob(scsi_qla_host_t *, uint32_t *);
+extern void qla82xx_watchdog(scsi_qla_host_t *);
+
+/* Firmware and flash related functions */
+extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *);
+extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+
+/* Mailbox related functions */
+extern int qla82xx_abort_isp(scsi_qla_host_t *);
+extern int qla82xx_restart_isp(scsi_qla_host_t *);
+
+/* IOCB related functions */
+extern int qla82xx_start_scsi(srb_t *);
+
+/* Interrupt related */
+extern irqreturn_t qla82xx_intr_handler(int, void *);
+extern irqreturn_t qla82xx_msi_handler(int, void *);
+extern irqreturn_t qla82xx_msix_default(int, void *);
+extern irqreturn_t qla82xx_msix_rsp_q(int, void *);
+extern void qla82xx_enable_intrs(struct qla_hw_data *);
+extern void qla82xx_disable_intrs(struct qla_hw_data *);
+extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
+extern void qla82xx_poll(int, void *);
+extern void qla82xx_init_flags(struct qla_hw_data *);
+
+/* ISP 8021 hardware related */
+extern int qla82xx_crb_win_lock(struct qla_hw_data *);
+extern void qla82xx_crb_win_unlock(struct qla_hw_data *);
+extern int qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *, ulong *);
+extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
+extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
+extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
+extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int);
+extern int qla82xx_check_for_bad_spd(struct qla_hw_data *);
+extern int qla82xx_load_fw(scsi_qla_host_t *);
+extern int qla82xx_rom_lock(struct qla_hw_data *);
+extern void qla82xx_rom_unlock(struct qla_hw_data *);
+extern int qla82xx_rom_fast_read(struct qla_hw_data *, int , int *);
+extern int qla82xx_do_rom_fast_read(struct qla_hw_data *, int, int *);
+extern unsigned long qla82xx_decode_crb_addr(unsigned long);
+
+/* ISP 8021 IDC */
+extern void qla82xx_clear_drv_active(struct qla_hw_data *);
+extern int qla82xx_idc_lock(struct qla_hw_data *);
+extern void qla82xx_idc_unlock(struct qla_hw_data *);
+extern int qla82xx_device_state_handler(scsi_qla_host_t *);
+
+extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
+ size_t, char *);
+extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
+extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
+extern void qla82xx_start_iocbs(srb_t *);
+extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
+extern void qla82xx_wait_for_pending_commands(scsi_qla_host_t *);
+
+/* BSG related functions */
+extern int qla24xx_bsg_request(struct fc_bsg_job *);
+extern int qla24xx_bsg_timeout(struct fc_bsg_job *);
+extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t);
+extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
+ dma_addr_t, size_t, uint32_t);
+extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
+ uint16_t *, uint16_t *);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4647015..872c55f 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1535,7 +1535,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
eiter->len = __constant_cpu_to_be16(4 + 4);
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(
FDMI_PORT_SPEED_10GB);
else if (IS_QLA25XX(ha))
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 4229bb4..ab2cc71 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -48,6 +48,7 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
{
srb_t *sp = (srb_t *)__data;
struct srb_ctx *ctx;
+ struct srb_iocb *iocb;
fc_port_t *fcport = sp->fcport;
struct qla_hw_data *ha = fcport->vha->hw;
struct req_que *req;
@@ -57,17 +58,21 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
req = ha->req_q_map[0];
req->outstanding_cmds[sp->handle] = NULL;
ctx = sp->ctx;
- ctx->timeout(sp);
+ iocb = ctx->u.iocb_cmd;
+ iocb->timeout(sp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- ctx->free(sp);
+ iocb->free(sp);
}
void
qla2x00_ctx_sp_free(srb_t *sp)
{
struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *iocb = ctx->u.iocb_cmd;
+ del_timer_sync(&iocb->timer);
+ kfree(iocb);
kfree(ctx);
mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
}
@@ -79,6 +84,7 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
struct srb_ctx *ctx;
+ struct srb_iocb *iocb;
sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
if (!sp)
@@ -86,21 +92,30 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
ctx = kzalloc(size, GFP_KERNEL);
if (!ctx) {
mempool_free(sp, ha->srb_mempool);
+ sp = NULL;
+ goto done;
+ }
+ iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL);
+ if (!iocb) {
+ mempool_free(sp, ha->srb_mempool);
+ sp = NULL;
+ kfree(ctx);
goto done;
}
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->ctx = ctx;
- ctx->free = qla2x00_ctx_sp_free;
+ ctx->u.iocb_cmd = iocb;
+ iocb->free = qla2x00_ctx_sp_free;
- init_timer(&ctx->timer);
+ init_timer(&iocb->timer);
if (!tmo)
goto done;
- ctx->timer.expires = jiffies + tmo * HZ;
- ctx->timer.data = (unsigned long)sp;
- ctx->timer.function = qla2x00_ctx_sp_timeout;
- add_timer(&ctx->timer);
+ iocb->timer.expires = jiffies + tmo * HZ;
+ iocb->timer.data = (unsigned long)sp;
+ iocb->timer.function = qla2x00_ctx_sp_timeout;
+ add_timer(&iocb->timer);
done:
return sp;
}
@@ -110,41 +125,56 @@ done:
#define ELS_TMO_2_RATOV(ha) ((ha)->r_a_tov / 10 * 2)
static void
-qla2x00_async_logio_timeout(srb_t *sp)
+qla2x00_async_iocb_timeout(srb_t *sp)
{
fc_port_t *fcport = sp->fcport;
- struct srb_logio *lio = sp->ctx;
+ struct srb_ctx *ctx = sp->ctx;
DEBUG2(printk(KERN_WARNING
"scsi(%ld:%x): Async-%s timeout.\n",
- fcport->vha->host_no, sp->handle,
- lio->ctx.type == SRB_LOGIN_CMD ? "login": "logout"));
+ fcport->vha->host_no, sp->handle, ctx->name));
- if (lio->ctx.type == SRB_LOGIN_CMD)
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ if (ctx->type == SRB_LOGIN_CMD)
qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
}
+static void
+qla2x00_async_login_ctx_done(srb_t *sp)
+{
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *lio = ctx->u.iocb_cmd;
+
+ qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ lio->free(sp);
+}
+
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
- struct srb_logio *lio;
+ struct srb_ctx *ctx;
+ struct srb_iocb *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
- sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_logio),
+ sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
ELS_TMO_2_RATOV(ha) + 2);
if (!sp)
goto done;
- lio = sp->ctx;
- lio->ctx.type = SRB_LOGIN_CMD;
- lio->ctx.timeout = qla2x00_async_logio_timeout;
- lio->flags |= SRB_LOGIN_COND_PLOGI;
+ ctx = sp->ctx;
+ ctx->type = SRB_LOGIN_CMD;
+ ctx->name = "login";
+ lio = ctx->u.iocb_cmd;
+ lio->timeout = qla2x00_async_iocb_timeout;
+ lio->done = qla2x00_async_login_ctx_done;
+ lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
- lio->flags |= SRB_LOGIN_RETRIED;
+ lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
@@ -157,29 +187,43 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
return rval;
done_free_sp:
- del_timer_sync(&lio->ctx.timer);
- lio->ctx.free(sp);
+ lio->free(sp);
done:
return rval;
}
+static void
+qla2x00_async_logout_ctx_done(srb_t *sp)
+{
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *lio = ctx->u.iocb_cmd;
+
+ qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ lio->free(sp);
+}
+
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
- struct srb_logio *lio;
+ struct srb_ctx *ctx;
+ struct srb_iocb *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
- sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_logio),
+ sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
ELS_TMO_2_RATOV(ha) + 2);
if (!sp)
goto done;
- lio = sp->ctx;
- lio->ctx.type = SRB_LOGOUT_CMD;
- lio->ctx.timeout = qla2x00_async_logio_timeout;
+ ctx = sp->ctx;
+ ctx->type = SRB_LOGOUT_CMD;
+ ctx->name = "logout";
+ lio = ctx->u.iocb_cmd;
+ lio->timeout = qla2x00_async_iocb_timeout;
+ lio->done = qla2x00_async_logout_ctx_done;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
@@ -191,30 +235,186 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- del_timer_sync(&lio->ctx.timer);
- lio->ctx.free(sp);
+ lio->free(sp);
done:
return rval;
}
+static void
+qla2x00_async_adisc_ctx_done(srb_t *sp)
+{
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *lio = ctx->u.iocb_cmd;
+
+ qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ lio->free(sp);
+}
+
int
+qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
+ uint16_t *data)
+{
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ struct srb_ctx *ctx;
+ struct srb_iocb *lio;
+ int rval;
+
+ rval = QLA_FUNCTION_FAILED;
+ sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
+ ELS_TMO_2_RATOV(ha) + 2);
+ if (!sp)
+ goto done;
+
+ ctx = sp->ctx;
+ ctx->type = SRB_ADISC_CMD;
+ ctx->name = "adisc";
+ lio = ctx->u.iocb_cmd;
+ lio->timeout = qla2x00_async_iocb_timeout;
+ lio->done = qla2x00_async_adisc_ctx_done;
+ if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+ lio->u.logio.flags |= SRB_LOGIN_RETRIED;
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ DEBUG2(printk(KERN_DEBUG
+ "scsi(%ld:%x): Async-adisc - loop-id=%x portid=%02x%02x%02x.\n",
+ fcport->vha->host_no, sp->handle, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
+
+ return rval;
+
+done_free_sp:
+ lio->free(sp);
+done:
+ return rval;
+}
+
+static void
+qla2x00_async_tm_cmd_ctx_done(srb_t *sp)
+{
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
+
+ qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb);
+ iocb->free(sp);
+}
+
+int
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+ uint32_t tag)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ struct srb_ctx *ctx;
+ struct srb_iocb *tcf;
+ int rval;
+
+ rval = QLA_FUNCTION_FAILED;
+ sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
+ ELS_TMO_2_RATOV(ha) + 2);
+ if (!sp)
+ goto done;
+
+ ctx = sp->ctx;
+ ctx->type = SRB_TM_CMD;
+ ctx->name = "tmf";
+ tcf = ctx->u.iocb_cmd;
+ tcf->u.tmf.flags = flags;
+ tcf->u.tmf.lun = lun;
+ tcf->u.tmf.data = tag;
+ tcf->timeout = qla2x00_async_iocb_timeout;
+ tcf->done = qla2x00_async_tm_cmd_ctx_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ DEBUG2(printk(KERN_DEBUG
+ "scsi(%ld:%x): Async-tmf - loop-id=%x portid=%02x%02x%02x.\n",
+ fcport->vha->host_no, sp->handle, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
+
+ return rval;
+
+done_free_sp:
+ tcf->free(sp);
+done:
+ return rval;
+}
+
+static void
+qla2x00_async_marker_ctx_done(srb_t *sp)
+{
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
+
+ qla2x00_async_marker_done(sp->fcport->vha, sp->fcport, iocb);
+ iocb->free(sp);
+}
+
+int
+qla2x00_async_marker(fc_port_t *fcport, uint16_t lun, uint8_t modif)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ srb_t *sp;
+ struct srb_ctx *ctx;
+ struct srb_iocb *mrk;
+ int rval;
+
+ rval = QLA_FUNCTION_FAILED;
+ sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 0);
+ if (!sp)
+ goto done;
+
+ ctx = sp->ctx;
+ ctx->type = SRB_MARKER_CMD;
+ ctx->name = "marker";
+ mrk = ctx->u.iocb_cmd;
+ mrk->u.marker.lun = lun;
+ mrk->u.marker.modif = modif;
+ mrk->timeout = qla2x00_async_iocb_timeout;
+ mrk->done = qla2x00_async_marker_ctx_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ DEBUG2(printk(KERN_DEBUG
+ "scsi(%ld:%x): Async-marker - loop-id=%x "
+ "portid=%02x%02x%02x.\n",
+ fcport->vha->host_no, sp->handle, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa));
+
+ return rval;
+
+done_free_sp:
+ mrk->free(sp);
+done:
+ return rval;
+}
+
+void
qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
int rval;
- uint8_t opts = 0;
switch (data[0]) {
case MBS_COMMAND_COMPLETE:
- if (fcport->flags & FCF_FCP2_DEVICE)
- opts |= BIT_1;
- rval = qla2x00_get_port_database(vha, fcport, opts);
- if (rval != QLA_SUCCESS)
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
- else
- qla2x00_update_fcport(vha, fcport);
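+		/* For FCP-2 devices, verify the session with an ADISC before updating the fcport. */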
+ if (fcport->flags & FCF_FCP2_DEVICE) {
+ fcport->flags |= FCF_ASYNC_SENT;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
+ break;
+ }
+ qla2x00_update_fcport(vha, fcport);
break;
case MBS_COMMAND_ERROR:
+ fcport->flags &= ~FCF_ASYNC_SENT;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
else
@@ -228,21 +428,84 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->loop_id++;
rval = qla2x00_find_new_loop_id(vha, fcport);
if (rval != QLA_SUCCESS) {
+ fcport->flags &= ~FCF_ASYNC_SENT;
qla2x00_mark_device_lost(vha, fcport, 1, 0);
break;
}
qla2x00_post_async_login_work(vha, fcport, NULL);
break;
}
- return QLA_SUCCESS;
+ return;
}
-int
+void
qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
qla2x00_mark_device_lost(vha, fcport, 1, 0);
- return QLA_SUCCESS;
+ return;
+}
+
+void
+qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+ uint16_t *data)
+{
+ if (data[0] == MBS_COMMAND_COMPLETE) {
+ qla2x00_update_fcport(vha, fcport);
+
+ return;
+ }
+
+ /* Retry login. */
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ else
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
+
+ return;
+}
+
+void
+qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+ struct srb_iocb *iocb)
+{
+ int rval;
+ uint32_t flags;
+ uint16_t lun;
+
+ flags = iocb->u.tmf.flags;
+ lun = (uint16_t)iocb->u.tmf.lun;
+
+ /* Issue Marker IOCB */
+ rval = qla2x00_async_marker(fcport, lun,
+ flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
+
+ if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
+ DEBUG2_3_11(printk(KERN_WARNING
+ "%s(%ld): TM IOCB failed (%x).\n",
+ __func__, vha->host_no, rval));
+ }
+
+ return;
+}
+
+void
+qla2x00_async_marker_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+ struct srb_iocb *iocb)
+{
+ /*
+	 * Currently we don't have any specific post-response processing
+	 * for this IOCB. We simply report success or failure depending
+	 * on whether the IOCB command succeeded or failed.
+ */
+ if (iocb->u.tmf.data) {
+ DEBUG2_3_11(printk(KERN_WARNING
+ "%s(%ld): Marker IOCB failed (%x).\n",
+ __func__, vha->host_no, iocb->u.tmf.data));
+ }
+
+ return;
}
/****************************************************************************/
@@ -328,6 +591,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (rval)
return (rval);
}
+
if (IS_QLA84XX(ha)) {
ha->cs84xx = qla84xx_get_chip(vha);
if (!ha->cs84xx) {
@@ -340,7 +604,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
ha->flags.chip_reset_done = 1;
if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
- /* Issue verify 84xx FW IOCB to complete 84xx initialization */
+ /* Issue verify 84xx FW IOCB to complete 84xx initialization */
rval = qla84xx_init_chip(vha);
if (rval != QLA_SUCCESS) {
qla_printk(KERN_ERR, ha,
@@ -349,6 +613,12 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
}
}
+ if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) {
+ if (qla24xx_read_fcp_prio_cfg(vha))
+ qla_printk(KERN_ERR, ha,
+ "Unable to read FCP priority data.\n");
+ }
+
return (rval);
}
@@ -955,6 +1225,9 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
+ if (IS_QLA82XX(ha))
+ return QLA_SUCCESS;
+
ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
rval = qla2x00_mbx_reg_test(vha);
@@ -1177,6 +1450,12 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
unsigned long flags;
uint16_t fw_major_version;
+ if (IS_QLA82XX(ha)) {
+ rval = ha->isp_ops->load_risc(vha, &srisc_address);
+ if (rval == QLA_SUCCESS)
+ goto enable_82xx_npiv;
+ }
+
if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
/* Disable SRAM, Instruction RAM and GP RAM parity. */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1202,6 +1481,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
rval = qla2x00_execute_fw(vha, srisc_address);
/* Retrieve firmware information. */
if (rval == QLA_SUCCESS) {
+enable_82xx_npiv:
fw_major_version = ha->fw_major_version;
rval = qla2x00_get_fw_version(vha,
&ha->fw_major_version,
@@ -1226,8 +1506,10 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
&ha->fw_xcb_count, NULL, NULL,
&ha->max_npiv_vports, NULL);
- if (!fw_major_version && ql2xallocfwdump)
- qla2x00_alloc_fw_dump(vha);
+ if (!fw_major_version && ql2xallocfwdump) {
+ if (!IS_QLA82XX(ha))
+ qla2x00_alloc_fw_dump(vha);
+ }
}
} else {
DEBUG2(printk(KERN_INFO
@@ -1384,6 +1666,9 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
int rval;
struct qla_hw_data *ha = vha->hw;
+ if (IS_QLA82XX(ha))
+ return;
+
/* Update Serial Link options. */
if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
return;
@@ -1818,7 +2103,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
return(rval);
}
-static inline void
+inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
char *def)
{
@@ -1826,7 +2111,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
uint16_t index;
struct qla_hw_data *ha = vha->hw;
int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
- !IS_QLA81XX(ha);
+ !IS_QLA8XXX_TYPE(ha);
if (memcmp(model, BINZERO, len) != 0) {
strncpy(ha->model_number, model, len);
@@ -2017,6 +2302,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
if (IS_QLA23XX(ha)) {
nv->firmware_options[0] |= BIT_2;
nv->firmware_options[0] &= ~BIT_3;
+ nv->firmware_options[0] &= ~BIT_6;
nv->add_firmware_options[1] |= BIT_5 | BIT_4;
if (IS_QLA2300(ha)) {
@@ -2635,7 +2921,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
PORT_RETRY_TIME;
atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
PORT_RETRY_TIME);
- fcport->flags &= ~FCF_LOGIN_NEEDED;
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
qla2x00_iidma_fcport(vha, fcport);
@@ -2864,7 +3150,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
sw_info_t *swl;
int swl_idx;
int first_dev, last_dev;
- port_id_t wrap, nxt_d_id;
+ port_id_t wrap = {}, nxt_d_id;
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
struct scsi_qla_host *tvp;
@@ -3167,7 +3453,7 @@ qla2x00_device_resync(scsi_qla_host_t *vha)
uint32_t rscn_entry;
uint8_t rscn_out_iter;
uint8_t format;
- port_id_t d_id;
+ port_id_t d_id = {};
rval = QLA_RSCNS_HANDLED;
@@ -3281,11 +3567,15 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
retry = 0;
if (IS_ALOGIO_CAPABLE(ha)) {
+ if (fcport->flags & FCF_ASYNC_SENT)
+ return rval;
+ fcport->flags |= FCF_ASYNC_SENT;
rval = qla2x00_post_async_login_work(vha, fcport, NULL);
if (!rval)
return rval;
}
+ fcport->flags &= ~FCF_ASYNC_SENT;
rval = qla2x00_fabric_login(vha, fcport, next_loopid);
if (rval == QLA_SUCCESS) {
/* Send an ADISC to FCP2 devices.*/
@@ -3546,6 +3836,45 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
qla2x00_rport_del(fcport);
}
+void
+qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
+ struct scsi_qla_host *tvp;
+
+ vha->flags.online = 0;
+ ha->flags.chip_reset_done = 0;
+ clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ ha->qla_stats.total_isp_aborts++;
+
+ qla_printk(KERN_INFO, ha,
+ "Performing ISP error recovery - ha= %p.\n", ha);
+
+ /* Chip reset does not apply to 82XX */
+ if (!IS_QLA82XX(ha))
+ ha->isp_ops->reset_chip(vha);
+
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ qla2x00_mark_all_devices_lost(vha, 0);
+ list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list)
+ qla2x00_mark_all_devices_lost(vp, 0);
+ } else {
+ if (!atomic_read(&vha->loop_down_timer))
+ atomic_set(&vha->loop_down_timer,
+ LOOP_DOWN_TIME);
+ }
+
+ /* Make sure for ISP 82XX IO DMA is complete */
+ if (IS_QLA82XX(ha))
+ qla82xx_wait_for_pending_commands(vha);
+
+ /* Requeue all commands in outstanding command list. */
+ qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+}
+
/*
* qla2x00_abort_isp
* Resets ISP and aborts all outstanding commands.
@@ -3567,27 +3896,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
struct req_que *req = ha->req_q_map[0];
if (vha->flags.online) {
- vha->flags.online = 0;
- ha->flags.chip_reset_done = 0;
- clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- ha->qla_stats.total_isp_aborts++;
-
- qla_printk(KERN_INFO, ha,
- "Performing ISP error recovery - ha= %p.\n", ha);
- ha->isp_ops->reset_chip(vha);
-
- atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
- if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
- atomic_set(&vha->loop_state, LOOP_DOWN);
- qla2x00_mark_all_devices_lost(vha, 0);
- } else {
- if (!atomic_read(&vha->loop_down_timer))
- atomic_set(&vha->loop_down_timer,
- LOOP_DOWN_TIME);
- }
-
- /* Requeue all commands in outstanding command list. */
- qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ qla2x00_abort_isp_cleanup(vha);
if (unlikely(pci_channel_offline(ha->pdev) &&
ha->flags.pci_channel_io_perm_failure)) {
@@ -3843,6 +4152,9 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ if (IS_QLA82XX(ha))
+ return;
+
vha->flags.online = 0;
ha->isp_ops->disable_intrs(ha);
@@ -3906,6 +4218,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
}
ha->nvram_size = sizeof(struct nvram_24xx);
ha->vpd_size = FA_NVRAM_VPD_SIZE;
+ if (IS_QLA82XX(ha))
+ ha->vpd_size = FA_VPD_SIZE_82XX;
/* Get VPD data into cache */
ha->vpd = ha->nvram + VPD_OFFSET;
@@ -4769,7 +5083,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
* Setup driver NVRAM options.
*/
qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
- "QLE81XX");
+ "QLE8XXX");
/* Use alternate WWN? */
if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
@@ -4892,6 +5206,114 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
return (rval);
}
+int
+qla82xx_restart_isp(scsi_qla_host_t *vha)
+{
+ int status, rval;
+ uint32_t wait_time;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+ struct scsi_qla_host *vp;
+ struct scsi_qla_host *tvp;
+
+ status = qla2x00_init_rings(vha);
+ if (!status) {
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+ ha->flags.chip_reset_done = 1;
+
+ status = qla2x00_fw_ready(vha);
+ if (!status) {
+ qla_printk(KERN_INFO, ha,
+ "%s(): Start configure loop, "
+ "status = %d\n", __func__, status);
+
+ /* Issue a marker after FW becomes ready. */
+ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
+
+ vha->flags.online = 1;
+ /* Wait at most MAX_TARGET RSCNs for a stable link. */
+ wait_time = 256;
+ do {
+ clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2x00_configure_loop(vha);
+ wait_time--;
+ } while (!atomic_read(&vha->loop_down_timer) &&
+ !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) &&
+ wait_time &&
+ (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)));
+ }
+
+ /* if no cable then assume it's good */
+ if ((vha->device_flags & DFLG_NO_CABLE))
+ status = 0;
+
+ qla_printk(KERN_INFO, ha,
+ "%s(): Configure loop done, status = 0x%x\n",
+ __func__, status);
+ }
+
+ if (!status) {
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+
+ if (!atomic_read(&vha->loop_down_timer)) {
+ /*
+ * Issue marker command only when we are going
+ * to start the I/O.
+ */
+ vha->marker_needed = 1;
+ }
+
+ vha->flags.online = 1;
+
+ ha->isp_ops->enable_intrs(ha);
+
+ ha->isp_abort_cnt = 0;
+ clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+
+ if (ha->fce) {
+ ha->flags.fce_enabled = 1;
+ memset(ha->fce, 0,
+ fce_calc_size(ha->fce_bufs));
+ rval = qla2x00_enable_fce_trace(vha,
+ ha->fce_dma, ha->fce_bufs, ha->fce_mb,
+ &ha->fce_bufs);
+ if (rval) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to reinitialize FCE "
+ "(%d).\n", rval);
+ ha->flags.fce_enabled = 0;
+ }
+ }
+
+ if (ha->eft) {
+ memset(ha->eft, 0, EFT_SIZE);
+ rval = qla2x00_enable_eft_trace(vha,
+ ha->eft_dma, EFT_NUM_BUFFERS);
+ if (rval) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to reinitialize EFT "
+ "(%d).\n", rval);
+ }
+ }
+ }
+
+ if (!status) {
+ DEBUG(printk(KERN_INFO
+ "qla82xx_restart_isp(%ld): succeeded.\n",
+ vha->host_no));
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
+ if (vp->vp_idx)
+ qla2x00_vp_abort_isp(vp);
+ }
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "qla82xx_restart_isp: **** FAILED ****\n");
+ }
+
+ return status;
+}
+
void
qla81xx_update_fw_options(scsi_qla_host_t *vha)
{
@@ -4905,3 +5327,165 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] |= BIT_9;
qla2x00_set_fw_options(vha, ha->fw_options);
}
+
+/*
+ * qla24xx_get_fcp_prio
+ * Gets the fcp cmd priority value for the logged in port.
+ * Looks for a match of the port descriptors within
+ * each of the fcp prio config entries. If a match is found,
+ * the tag (priority) value is returned.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * fcport = port structure pointer.
+ *
+ * Return:
+ * non-zero (if found)
+ * 0 (if not found)
+ *
+ * Context:
+ * Kernel context
+ */
+uint8_t
+qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int i, entries;
+ uint8_t pid_match, wwn_match;
+ uint8_t priority;
+ uint32_t pid1, pid2;
+ uint64_t wwn1, wwn2;
+ struct qla_fcp_prio_entry *pri_entry;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
+ return 0;
+
+ priority = 0;
+ entries = ha->fcp_prio_cfg->num_entries;
+ pri_entry = &ha->fcp_prio_cfg->entry[0];
+
+ for (i = 0; i < entries; i++) {
+ pid_match = wwn_match = 0;
+
+ if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
+ pri_entry++;
+ continue;
+ }
+
+ /* check source pid for a match */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
+ pid1 = pri_entry->src_pid & INVALID_PORT_ID;
+ pid2 = vha->d_id.b24 & INVALID_PORT_ID;
+ if (pid1 == INVALID_PORT_ID)
+ pid_match++;
+ else if (pid1 == pid2)
+ pid_match++;
+ }
+
+ /* check destination pid for a match */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
+ pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
+ pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
+ if (pid1 == INVALID_PORT_ID)
+ pid_match++;
+ else if (pid1 == pid2)
+ pid_match++;
+ }
+
+ /* check source WWN for a match */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
+ wwn1 = wwn_to_u64(vha->port_name);
+ wwn2 = wwn_to_u64(pri_entry->src_wwpn);
+ if (wwn2 == (uint64_t)-1)
+ wwn_match++;
+ else if (wwn1 == wwn2)
+ wwn_match++;
+ }
+
+ /* check destination WWN for a match */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
+ wwn1 = wwn_to_u64(fcport->port_name);
+ wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
+ if (wwn2 == (uint64_t)-1)
+ wwn_match++;
+ else if (wwn1 == wwn2)
+ wwn_match++;
+ }
+
+ if (pid_match == 2 || wwn_match == 2) {
+ /* Found a matching entry */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
+ priority = pri_entry->tag;
+ break;
+ }
+
+ pri_entry++;
+ }
+
+ return priority;
+}
+
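A matching entry needs pid_match or wwn_match to reach 2, i.e. both the source and the destination descriptor of the entry must match, where a wildcard (INVALID_PORT_ID for port IDs, all-ones for WWPNs) counts as a match. A minimal user-space sketch of the PID half of that rule, with hypothetical types standing in for qla_fcp_prio_entry:

#include <stdint.h>
#include <stdio.h>

#define INVALID_PORT_ID 0xFFFFFF          /* 24-bit wildcard, as in the driver */

/* Hypothetical, simplified stand-in for a priority-table entry. */
struct prio_entry {
    uint32_t src_pid, dst_pid;            /* 24-bit port IDs */
    uint8_t  tag;                         /* priority returned on a match */
};

/* Returns the tag when both PIDs match (wildcards allowed), else 0. */
static uint8_t fcp_prio_lookup(const struct prio_entry *e, int n,
                               uint32_t local_pid, uint32_t remote_pid)
{
    int i;

    for (i = 0; i < n; i++) {
        int match = 0;

        if (e[i].src_pid == INVALID_PORT_ID || e[i].src_pid == local_pid)
            match++;
        if (e[i].dst_pid == INVALID_PORT_ID || e[i].dst_pid == remote_pid)
            match++;
        if (match == 2)
            return e[i].tag;
    }
    return 0;
}

int main(void)
{
    struct prio_entry tbl[] = {
        { 0x010200, 0x010300, 3 },            /* exact initiator/target pair */
        { INVALID_PORT_ID, 0x010400, 5 },     /* any initiator -> one target */
    };

    printf("%u\n", fcp_prio_lookup(tbl, 2, 0x010200, 0x010300)); /* 3 */
    printf("%u\n", fcp_prio_lookup(tbl, 2, 0x999999, 0x010400)); /* 5 */
    printf("%u\n", fcp_prio_lookup(tbl, 2, 0x010200, 0x010500)); /* 0 */
    return 0;
}
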
+/*
+ * qla24xx_update_fcport_fcp_prio
+ * Activates fcp priority for the logged in fc port
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * fcport = port structure pointer.
+ *
+ * Return:
+ * QLA_SUCCESS or QLA_FUNCTION_FAILED
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *ha, fc_port_t *fcport)
+{
+ int ret;
+ uint8_t priority;
+ uint16_t mb[5];
+
+ if (atomic_read(&fcport->state) == FCS_UNCONFIGURED ||
+ fcport->port_type != FCT_TARGET ||
+ fcport->loop_id == FC_NO_LOOP_ID)
+ return QLA_FUNCTION_FAILED;
+
+ priority = qla24xx_get_fcp_prio(ha, fcport);
+ ret = qla24xx_set_fcp_prio(ha, fcport->loop_id, priority, mb);
+ if (ret == QLA_SUCCESS)
+ fcport->fcp_prio = priority;
+ else
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld): Unable to activate fcp priority, "
+ " ret=0x%x\n", ha->host_no, ret));
+
+ return ret;
+}
+
+/*
+ * qla24xx_update_all_fcp_prio
+ * Activates fcp priority for all the logged in ports
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Return:
+ * QLA_SUCCESS or QLA_FUNCTION_FAILED
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
+{
+ int ret;
+ fc_port_t *fcport;
+
+ ret = QLA_FUNCTION_FAILED;
+ /* We need to set priority for all logged in ports */
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
+
+ return ret;
+}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 5e0a709..84c2fea 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -37,7 +37,10 @@ qla2x00_poll(struct rsp_que *rsp)
unsigned long flags;
struct qla_hw_data *ha = rsp->hw;
local_irq_save(flags);
- ha->isp_ops->intr_handler(0, rsp);
+ if (IS_QLA82XX(ha))
+ qla82xx_poll(0, rsp);
+ else
+ ha->isp_ops->intr_handler(0, rsp);
local_irq_restore(flags);
}
@@ -64,3 +67,19 @@ qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
}
+
+static inline void
+qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
+{
+ struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+ /* clean up allocated prev pool */
+ list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
+ &((struct crc_context *)sp->ctx)->dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
+ dsd_ptr->dsd_list_dma);
+ list_del(&dsd_ptr->list);
+ kfree(dsd_ptr);
+ }
+ INIT_LIST_HEAD(&((struct crc_context *)sp->ctx)->dsd_list);
+}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 8299a98..8ef9453 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -145,7 +145,49 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
return (cont_pkt);
}
-/**
+static inline int
+qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
+{
+ uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);
+
+ /* We only support T10 DIF right now */
+ if (guard != SHOST_DIX_GUARD_CRC) {
+ DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard));
+ return 0;
+ }
+
+ /* We always use DIF bundling for best performance */
+ *fw_prot_opts = 0;
+
+ /* Translate SCSI opcode to a protection opcode */
+ switch (scsi_get_prot_op(sp->cmd)) {
+ case SCSI_PROT_READ_STRIP:
+ *fw_prot_opts |= PO_MODE_DIF_REMOVE;
+ break;
+ case SCSI_PROT_WRITE_INSERT:
+ *fw_prot_opts |= PO_MODE_DIF_INSERT;
+ break;
+ case SCSI_PROT_READ_INSERT:
+ *fw_prot_opts |= PO_MODE_DIF_INSERT;
+ break;
+ case SCSI_PROT_WRITE_STRIP:
+ *fw_prot_opts |= PO_MODE_DIF_REMOVE;
+ break;
+ case SCSI_PROT_READ_PASS:
+ *fw_prot_opts |= PO_MODE_DIF_PASS;
+ break;
+ case SCSI_PROT_WRITE_PASS:
+ *fw_prot_opts |= PO_MODE_DIF_PASS;
+ break;
+ default: /* Normal Request */
+ *fw_prot_opts |= PO_MODE_DIF_PASS;
+ break;
+ }
+
+ return scsi_prot_sg_count(sp->cmd);
+}
+
+/*
* qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
* capable IOCB types.
*
@@ -506,7 +548,10 @@ qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
cnt = (uint16_t)
RD_REG_DWORD(&reg->isp25mq.req_q_out);
else {
- if (IS_FWI2_CAPABLE(ha))
+ if (IS_QLA82XX(ha))
+ cnt = (uint16_t)RD_REG_DWORD(
+ &reg->isp82.req_q_out);
+ else if (IS_FWI2_CAPABLE(ha))
cnt = (uint16_t)RD_REG_DWORD(
&reg->isp24.req_q_out);
else
@@ -579,11 +624,29 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
req->ring_ptr++;
/* Set chip new ring index. */
- if (ha->mqenable) {
+ if (IS_QLA82XX(ha)) {
+ uint32_t dbval = 0x04 | (ha->portnum << 5);
+
+ /* write, read and verify logic */
+ dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+ if (ql2xdbwr)
+ qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+ else {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ WRT_REG_DWORD((unsigned long __iomem *)
+ ha->nxdb_wr_ptr, dbval);
+ wmb();
+ }
+ }
+ } else if (ha->mqenable) {
+ /* Set chip new ring index. */
WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
RD_REG_DWORD(&ioreg->hccr);
- }
- else {
+ } else {
if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
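On ISP82xx the request queue is rung through NX doorbell memory rather than a chip register, and the written value is read back until it sticks. The 32-bit doorbell packs a fixed low value (0x04), the PCI function number from bit 5, the queue id from bit 8 and the new ring index in the upper 16 bits. A small user-space sketch of just the packing (illustrative helper, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Pack the ISP82xx request-queue doorbell the way qla2x00_isp_cmd() does:
 * 0x04 in the low bits, function number at bit 5, queue id at bit 8 and
 * the new ring index in the upper half of the word. */
static uint32_t nx_doorbell(uint8_t portnum, uint8_t req_id, uint16_t ring_index)
{
    uint32_t dbval = 0x04 | ((uint32_t)portnum << 5);

    return dbval | ((uint32_t)req_id << 8) | ((uint32_t)ring_index << 16);
}

int main(void)
{
    printf("0x%08x\n", nx_doorbell(1, 0, 42));   /* 0x002a0024 */
    return 0;
}
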
@@ -604,7 +667,7 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
*
* Returns the number of IOCB entries needed to store @dsds.
*/
-static inline uint16_t
+inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
uint16_t iocbs;
@@ -615,6 +678,8 @@ qla24xx_calc_iocbs(uint16_t dsds)
if ((dsds - 1) % 5)
iocbs++;
}
+ DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
+ __func__, iocbs));
return iocbs;
}
@@ -626,7 +691,7 @@ qla24xx_calc_iocbs(uint16_t dsds)
* @cmd_pkt: Command type 3 IOCB
* @tot_dsds: Total number of segments to transfer
*/
-static inline void
+inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
uint16_t tot_dsds)
{
@@ -695,6 +760,453 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
}
}
+struct fw_dif_context {
+ uint32_t ref_tag;
+ uint16_t app_tag;
+ uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
+ uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
+};
+
+/*
+ * qla24xx_set_t10dif_tags - Extract Ref and App tags from the SCSI command.
+ */
+static inline void
+qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
+ unsigned int protcnt)
+{
+ struct sd_dif_tuple *spt;
+ unsigned char op = scsi_get_prot_op(cmd);
+
+ switch (scsi_get_prot_type(cmd)) {
+ /* For TYPE 0 protection: no checking */
+ case SCSI_PROT_DIF_TYPE0:
+ pkt->ref_tag_mask[0] = 0x00;
+ pkt->ref_tag_mask[1] = 0x00;
+ pkt->ref_tag_mask[2] = 0x00;
+ pkt->ref_tag_mask[3] = 0x00;
+ break;
+
+ /*
+ * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
+ * match LBA in CDB + N
+ */
+ case SCSI_PROT_DIF_TYPE2:
+ break;
+
+ /* For Type 3 protection: 16 bit GUARD only */
+ case SCSI_PROT_DIF_TYPE3:
+ pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
+ pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
+ 0x00;
+ break;
+
+ /*
+ * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
+ * 16 bit app tag.
+ */
+ case SCSI_PROT_DIF_TYPE1:
+ if (!ql2xenablehba_err_chk)
+ break;
+
+ if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
+ op == SCSI_PROT_WRITE_PASS)) {
+ spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
+ scsi_prot_sglist(cmd)[0].offset;
+ DEBUG18(printk(KERN_DEBUG
+ "%s(): LBA from user %p, lba = 0x%x\n",
+ __func__, spt, (int)spt->ref_tag));
+ pkt->ref_tag = swab32(spt->ref_tag);
+ pkt->app_tag_mask[0] = 0x0;
+ pkt->app_tag_mask[1] = 0x0;
+ } else {
+ pkt->ref_tag = cpu_to_le32((uint32_t)
+ (0xffffffff & scsi_get_lba(cmd)));
+ pkt->app_tag = __constant_cpu_to_le16(0);
+ pkt->app_tag_mask[0] = 0x0;
+ pkt->app_tag_mask[1] = 0x0;
+ }
+ /* enable ALL bytes of the ref tag */
+ pkt->ref_tag_mask[0] = 0xff;
+ pkt->ref_tag_mask[1] = 0xff;
+ pkt->ref_tag_mask[2] = 0xff;
+ pkt->ref_tag_mask[3] = 0xff;
+ break;
+ }
+
+ DEBUG18(printk(KERN_DEBUG
+ "%s(): Setting protection Tags: (BIG) ref tag = 0x%x,"
+ " app tag = 0x%x, prot SG count %d , cmd lba 0x%x,"
+ " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt,
+ (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd)));
+}
+
+
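The ref/app tag masks above tell the firmware which tuple bytes to validate or replace: Type 1 enables all four reference-tag bytes (0xff) while Type 0 and Type 3 clear them, and the app-tag mask is zeroed so the application tag is ignored. A user-space sketch of how such a per-byte mask gates a comparison — a model of the idea only, not the firmware's exact semantics:

#include <stdint.h>
#include <stdio.h>

/* Compare two 32-bit reference tags, honouring a per-byte enable mask
 * (0xff = check this byte, 0x00 = ignore it). */
static int ref_tag_ok(uint32_t expected, uint32_t actual, const uint8_t mask[4])
{
    uint32_t m = (uint32_t)mask[0] | (uint32_t)mask[1] << 8 |
                 (uint32_t)mask[2] << 16 | (uint32_t)mask[3] << 24;

    return (expected & m) == (actual & m);
}

int main(void)
{
    uint8_t type1[4] = { 0xff, 0xff, 0xff, 0xff };   /* check everything */
    uint8_t type3[4] = { 0x00, 0x00, 0x00, 0x00 };   /* ignore ref tag   */

    printf("%d\n", ref_tag_ok(100, 101, type1));     /* 0: mismatch      */
    printf("%d\n", ref_tag_ok(100, 101, type3));     /* 1: not checked   */
    return 0;
}
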
+static int
+qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
+ uint16_t tot_dsds)
+{
+ void *next_dsd;
+ uint8_t avail_dsds = 0;
+ uint32_t dsd_list_len;
+ struct dsd_dma *dsd_ptr;
+ struct scatterlist *sg;
+ uint32_t *cur_dsd = dsd;
+ int i;
+ uint16_t used_dsds = tot_dsds;
+
+ uint8_t *cp;
+
+ scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : used_dsds;
+ dsd_list_len = (avail_dsds + 1) * 12;
+ used_dsds -= avail_dsds;
+
+ /* allocate tracking DS */
+ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+ if (!dsd_ptr)
+ return 1;
+
+ /* allocate new list */
+ dsd_ptr->dsd_addr = next_dsd =
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+ &dsd_ptr->dsd_list_dma);
+
+ if (!next_dsd) {
+ /*
+ * Need to cleanup only this dsd_ptr, rest
+ * will be done by sp_free_dma()
+ */
+ kfree(dsd_ptr);
+ return 1;
+ }
+
+ list_add_tail(&dsd_ptr->list,
+ &((struct crc_context *)sp->ctx)->dsd_list);
+
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+
+ /* add new list to cmd iocb or last list */
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = dsd_list_len;
+ cur_dsd = (uint32_t *)next_dsd;
+ }
+ sle_dma = sg_dma_address(sg);
+ DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x,"
+ " len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma),
+ MSD(sle_dma), sg_dma_len(sg)));
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+
+ if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+ cp = page_address(sg_page(sg)) + sg->offset;
+ DEBUG18(printk("%s(): User Data buffer= %p:\n",
+ __func__ , cp));
+ }
+ }
+ /* Null termination */
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ return 0;
+}
+
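Each DSD is 12 bytes — two 32-bit address words plus a 32-bit length — and every continuation list allocated from dl_dma_pool holds at most QLA_DSDS_PER_IOCB of them plus one extra slot that ends up holding either the link to the next list or the final null terminator, hence dsd_list_len = (avail_dsds + 1) * 12. A quick model of that sizing arithmetic (the per-list limit below is only an assumed illustrative value):

#include <stdio.h>

#define DSD_SIZE      12   /* two 32-bit address words + 32-bit length */
#define DSDS_PER_LIST 37   /* assumed stand-in for QLA_DSDS_PER_IOCB   */

/* Bytes needed for one continuation list covering 'remaining' segments:
 * up to DSDS_PER_LIST data descriptors plus one slot for chain/null. */
static unsigned int dsd_list_bytes(unsigned int remaining)
{
    unsigned int avail = remaining > DSDS_PER_LIST ? DSDS_PER_LIST : remaining;

    return (avail + 1) * DSD_SIZE;
}

int main(void)
{
    unsigned int remaining = 100, lists = 0, bytes = 0;

    while (remaining) {
        unsigned int chunk = remaining > DSDS_PER_LIST ?
                             DSDS_PER_LIST : remaining;

        bytes += dsd_list_bytes(remaining);
        remaining -= chunk;
        lists++;
    }
    printf("%u continuation lists, %u bytes from the DSD pool\n",
           lists, bytes);
    return 0;
}
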
+static int
+qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
+ uint32_t *dsd,
+ uint16_t tot_dsds)
+{
+ void *next_dsd;
+ uint8_t avail_dsds = 0;
+ uint32_t dsd_list_len;
+ struct dsd_dma *dsd_ptr;
+ struct scatterlist *sg;
+ int i;
+ struct scsi_cmnd *cmd;
+ uint32_t *cur_dsd = dsd;
+ uint16_t used_dsds = tot_dsds;
+
+ uint8_t *cp;
+
+
+ cmd = sp->cmd;
+ scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : used_dsds;
+ dsd_list_len = (avail_dsds + 1) * 12;
+ used_dsds -= avail_dsds;
+
+ /* allocate tracking DS */
+ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+ if (!dsd_ptr)
+ return 1;
+
+ /* allocate new list */
+ dsd_ptr->dsd_addr = next_dsd =
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+ &dsd_ptr->dsd_list_dma);
+
+ if (!next_dsd) {
+ /*
+ * Need to cleanup only this dsd_ptr, rest
+ * will be done by sp_free_dma()
+ */
+ kfree(dsd_ptr);
+ return 1;
+ }
+
+ list_add_tail(&dsd_ptr->list,
+ &((struct crc_context *)sp->ctx)->dsd_list);
+
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+
+ /* add new list to cmd iocb or last list */
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = dsd_list_len;
+ cur_dsd = (uint32_t *)next_dsd;
+ }
+ sle_dma = sg_dma_address(sg);
+ if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+ DEBUG18(printk(KERN_DEBUG
+ "%s(): %p, sg entry %d - addr =0x%x"
+ "0x%x, len =%d\n", __func__ , cur_dsd, i,
+ LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)));
+ }
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+
+ if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+ cp = page_address(sg_page(sg)) + sg->offset;
+ DEBUG18(printk("%s(): Protection Data buffer = %p:\n",
+ __func__ , cp));
+ }
+ avail_dsds--;
+ }
+ /* Null termination */
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ return 0;
+}
+
+/**
+ * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
+ * Type 6 IOCB types.
+ *
+ * @sp: SRB command to process
+ * @cmd_pkt: Command Type CRC_2 IOCB
+ * @tot_dsds: Total number of segments to transfer
+ */
+static inline int
+qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
+ uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
+{
+ uint32_t *cur_dsd, *fcp_dl;
+ scsi_qla_host_t *vha;
+ struct scsi_cmnd *cmd;
+ struct scatterlist *cur_seg;
+ int sgc;
+ uint32_t total_bytes;
+ uint32_t data_bytes;
+ uint32_t dif_bytes;
+ uint8_t bundling = 1;
+ uint16_t blk_size;
+ uint8_t *clr_ptr;
+ struct crc_context *crc_ctx_pkt = NULL;
+ struct qla_hw_data *ha;
+ uint8_t additional_fcpcdb_len;
+ uint16_t fcp_cmnd_len;
+ struct fcp_cmnd *fcp_cmnd;
+ dma_addr_t crc_ctx_dma;
+
+ cmd = sp->cmd;
+
+ sgc = 0;
+ /* Update entry type to indicate Command Type CRC_2 IOCB */
+ *((uint32_t *)(&cmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
+
+ /* No data transfer */
+ data_bytes = scsi_bufflen(cmd);
+ if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
+ DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
+ __func__, data_bytes));
+ cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return QLA_SUCCESS;
+ }
+
+ vha = sp->fcport->vha;
+ ha = vha->hw;
+
+ DEBUG18(printk(KERN_DEBUG
+ "%s(%ld): Executing cmd sp %p, pid=%ld, prot_op=%u.\n", __func__,
+ vha->host_no, sp, cmd->serial_number, scsi_get_prot_op(sp->cmd)));
+
+ cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_WRITE_DATA);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_READ_DATA);
+ }
+
+ tot_prot_dsds = scsi_prot_sg_count(cmd);
+ if (!tot_prot_dsds)
+ bundling = 0;
+
+ /* Allocate CRC context from global pool */
+ crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
+ GFP_ATOMIC, &crc_ctx_dma);
+
+ if (!crc_ctx_pkt)
+ goto crc_queuing_error;
+
+ /* Zero out CTX area. */
+ clr_ptr = (uint8_t *)crc_ctx_pkt;
+ memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
+
+ crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
+
+ sp->flags |= SRB_CRC_CTX_DMA_VALID;
+
+ /* Set handle */
+ crc_ctx_pkt->handle = cmd_pkt->handle;
+
+ INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
+
+ qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
+ &crc_ctx_pkt->ref_tag, tot_prot_dsds);
+
+ cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
+ cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
+ cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+
+ /* Determine SCSI command length -- align to 4 byte boundary */
+ if (cmd->cmd_len > 16) {
+ DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
+ __func__));
+ additional_fcpcdb_len = cmd->cmd_len - 16;
+ if ((cmd->cmd_len % 4) != 0) {
+ /* SCSI cmd > 16 bytes must be multiple of 4 */
+ goto crc_queuing_error;
+ }
+ fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_fcpcdb_len = 0;
+ fcp_cmnd_len = 12 + 16 + 4;
+ }
+
+ fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
+
+ fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ fcp_cmnd->additional_cdb_len |= 2;
+
+ int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
+ memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
+ cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
+ LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
+ cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
+ MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
+ fcp_cmnd->task_attribute = 0;
+ fcp_cmnd->task_managment = 0;
+
+ cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
+
+ DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
+ "entries %d, data bytes %d, Protection entries %d\n",
+ __func__, vha->host_no, tot_dsds, (tot_dsds-tot_prot_dsds),
+ data_bytes, tot_prot_dsds));
+
+ /* Compute dif len and adjust data len to include protection */
+ total_bytes = data_bytes;
+ dif_bytes = 0;
+ blk_size = cmd->device->sector_size;
+ if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE1) {
+ dif_bytes = (data_bytes / blk_size) * 8;
+ total_bytes += dif_bytes;
+ }
+
+ if (!ql2xenablehba_err_chk)
+ fw_prot_opts |= 0x10; /* Disable Guard tag checking */
+
+ if (!bundling) {
+ cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+ } else {
+ /*
+ * Configure Bundling if we need to fetch interleaving
+ * protection PCI accesses
+ */
+ fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
+ crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
+ crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
+ tot_prot_dsds);
+ cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+ }
+
+ /* Finish the common fields of CRC pkt */
+ crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
+ crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
+ crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
+ crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
+ /* Fibre channel byte count */
+ cmd_pkt->byte_count = cpu_to_le32(total_bytes);
+ fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
+ additional_fcpcdb_len);
+ *fcp_dl = htonl(total_bytes);
+
+ DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
+ " = 0x%x (%d), dat block size =0x%x (%d)\n", __func__,
+ vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
+ crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));
+
+ /* Walks data segments */
+
+ cmd_pkt->control_flags |=
+ __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
+ if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
+ (tot_dsds - tot_prot_dsds)))
+ goto crc_queuing_error;
+
+ if (bundling && tot_prot_dsds) {
+ /* Walks dif segments */
+ cur_seg = scsi_prot_sglist(cmd);
+ cmd_pkt->control_flags |=
+ __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
+ cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+ if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
+ tot_prot_dsds))
+ goto crc_queuing_error;
+ }
+ return QLA_SUCCESS;
+
+crc_queuing_error:
+ DEBUG18(qla_printk(KERN_INFO, ha,
+ "CMD sent FAILED crc_q error:sp = %p\n", sp));
+ /* Cleanup will be performed by the caller */
+
+ return QLA_FUNCTION_FAILED;
+}
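For Type 1 protection the interleaved 8-byte DIF tuples travel with the data, so the fibre-channel byte count and fcp_dl are grown by one tuple per logical block: total = data + (data / blk_size) * 8. A worked example of that accounting in plain user-space arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Total wire bytes when an 8-byte DIF tuple rides with every block. */
static uint32_t dif_total_bytes(uint32_t data_bytes, uint32_t blk_size)
{
    return data_bytes + (data_bytes / blk_size) * 8;
}

int main(void)
{
    /* 32 KiB of data on 512-byte sectors: 64 tuples -> 512 DIF bytes. */
    printf("%u\n", dif_total_bytes(32768, 512));   /* 33280 */
    /* Same data on 4 KiB sectors: only 8 tuples -> 64 DIF bytes.      */
    printf("%u\n", dif_total_bytes(32768, 4096));  /* 32832 */
    return 0;
}
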
/**
* qla24xx_start_scsi() - Send a SCSI command to the ISP
@@ -848,6 +1360,191 @@ queuing_error:
return QLA_FUNCTION_FAILED;
}
+
+/**
+ * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla24xx_dif_start_scsi(srb_t *sp)
+{
+ int nseg;
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ uint16_t cnt;
+ uint16_t req_cnt = 0;
+ uint16_t tot_dsds;
+ uint16_t tot_prot_dsds;
+ uint16_t fw_prot_opts = 0;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct cmd_type_crc_2 *cmd_pkt;
+ uint32_t status = 0;
+
+#define QDSS_GOT_Q_SPACE BIT_0
+
+ /* Only process protection in this routine */
+ if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL)
+ return qla24xx_start_scsi(sp);
+
+ /* Setup device pointers. */
+
+ qla25xx_set_que(sp, &rsp);
+ req = vha->req;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ handle++;
+ if (handle == MAX_OUTSTANDING_COMMANDS)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+
+ if (index == MAX_OUTSTANDING_COMMANDS)
+ goto queuing_error;
+
+ /* Compute number of required data segments */
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ else
+ sp->flags |= SRB_DMA_VALID;
+ } else
+ nseg = 0;
+
+ /* number of required data segments */
+ tot_dsds = nseg;
+
+ /* Compute number of required protection segments */
+ if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ else
+ sp->flags |= SRB_CRC_PROT_DMA_VALID;
+ } else {
+ nseg = 0;
+ }
+
+ req_cnt = 1;
+ /* Total Data and protection sg segment(s) */
+ tot_prot_dsds = nseg;
+ tot_dsds += nseg;
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
+
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+
+ status |= QDSS_GOT_Q_SPACE;
+
+ /* Build header part of command packet (excluding the OPCODE). */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ /* Fill-in common area */
+ cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+
+ int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* Total Data and protection segment(s) */
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Build IOCB segments and adjust for data protection segments */
+ if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
+ req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
+ QLA_SUCCESS)
+ goto queuing_error;
+
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where completion should happen */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+ cmd_pkt->timeout = __constant_cpu_to_le16(0);
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ /* Set chip new ring index. */
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_SUCCESS;
+
+queuing_error:
+ if (status & QDSS_GOT_Q_SPACE) {
+ req->outstanding_cmds[handle] = NULL;
+ req->cnt += req_cnt;
+ }
+ /* Cleanup will be performed by the caller (queuecommand) */
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ DEBUG18(qla_printk(KERN_INFO, ha,
+ "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
+ return QLA_FUNCTION_FAILED;
+}
+
+
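The free-slot check above is the usual producer/consumer ring arithmetic: read the hardware out pointer, and if the producer (ring_index) has not passed it the free space is out - in, otherwise length - (in - out); the command is only queued if req_cnt + 2 entries remain. A tiny model of that computation:

#include <stdint.h>
#include <stdio.h>

/* Free request-queue entries given the producer (in) and consumer (out)
 * indices, mirroring the wrap handling in qla24xx_dif_start_scsi(). */
static uint16_t ring_free(uint16_t length, uint16_t in, uint16_t out)
{
    return in < out ? out - in : length - (in - out);
}

int main(void)
{
    printf("%u\n", ring_free(2048, 10, 500));    /* 490 free, no wrap */
    printf("%u\n", ring_free(2048, 2000, 16));   /* 64 free, wrapped  */
    return 0;
}
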
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
struct scsi_cmnd *cmd = sp->cmd;
@@ -931,37 +1628,45 @@ qla2x00_start_iocbs(srb_t *sp)
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
- /* Adjust ring index. */
- req->ring_index++;
- if (req->ring_index == req->length) {
- req->ring_index = 0;
- req->ring_ptr = req->ring;
- } else
- req->ring_ptr++;
-
- /* Set chip new ring index. */
- if (ha->mqenable) {
- WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
- RD_REG_DWORD(&ioreg->hccr);
- } else if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ if (IS_QLA82XX(ha)) {
+ qla82xx_start_iocbs(sp);
} else {
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ /* Set chip new ring index. */
+ if (ha->mqenable) {
+ WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
+ RD_REG_DWORD(&ioreg->hccr);
+ } else if (IS_QLA82XX(ha)) {
+ qla82xx_start_iocbs(sp);
+ } else if (IS_FWI2_CAPABLE(ha)) {
+ WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ } else {
+ WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+ req->ring_index);
+ RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ }
}
}
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
- struct srb_logio *lio = sp->ctx;
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *lio = ctx->u.iocb_cmd;
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
- if (lio->flags & SRB_LOGIN_COND_PLOGI)
+ if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
- if (lio->flags & SRB_LOGIN_SKIP_PRLI)
+ if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -974,14 +1679,15 @@ static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
struct qla_hw_data *ha = sp->fcport->vha->hw;
- struct srb_logio *lio = sp->ctx;
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *lio = ctx->u.iocb_cmd;
uint16_t opts;
mbx->entry_type = MBX_IOCB_TYPE;
SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
- opts = lio->flags & SRB_LOGIN_COND_PLOGI ? BIT_0: 0;
- opts |= lio->flags & SRB_LOGIN_SKIP_PRLI ? BIT_1: 0;
+ opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
+ opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
if (HAS_EXTENDED_IDS(ha)) {
mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
mbx->mb10 = cpu_to_le16(opts);
@@ -1026,9 +1732,97 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
}
static void
+qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+ logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
+ logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ logio->vp_index = sp->fcport->vp_idx;
+}
+
+static void
+qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
+{
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+
+ mbx->entry_type = MBX_IOCB_TYPE;
+ SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
+ mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
+ if (HAS_EXTENDED_IDS(ha)) {
+ mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
+ mbx->mb10 = cpu_to_le16(BIT_0);
+ } else {
+ mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
+ }
+ mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
+ mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
+ mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
+ mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
+ mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+}
+
+static void
+qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
+{
+ uint32_t flags;
+ unsigned int lun;
+ struct fc_port *fcport = sp->fcport;
+ scsi_qla_host_t *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *iocb = ctx->u.iocb_cmd;
+ struct req_que *req = vha->req;
+
+ flags = iocb->u.tmf.flags;
+ lun = iocb->u.tmf.lun;
+
+ tsk->entry_type = TSK_MGMT_IOCB_TYPE;
+ tsk->entry_count = 1;
+ tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
+ tsk->nport_handle = cpu_to_le16(fcport->loop_id);
+ tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
+ tsk->control_flags = cpu_to_le32(flags);
+ tsk->port_id[0] = fcport->d_id.b.al_pa;
+ tsk->port_id[1] = fcport->d_id.b.area;
+ tsk->port_id[2] = fcport->d_id.b.domain;
+ tsk->vp_index = fcport->vp_idx;
+
+ if (flags == TCF_LUN_RESET) {
+ int_to_scsilun(lun, &tsk->lun);
+ host_to_fcp_swap((uint8_t *)&tsk->lun,
+ sizeof(tsk->lun));
+ }
+}
+
+static void
+qla24xx_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
+{
+ uint16_t lun;
+ uint8_t modif;
+ struct fc_port *fcport = sp->fcport;
+ scsi_qla_host_t *vha = fcport->vha;
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *iocb = ctx->u.iocb_cmd;
+ struct req_que *req = vha->req;
+
+ lun = iocb->u.marker.lun;
+ modif = iocb->u.marker.modif;
+ mrk->entry_type = MARKER_TYPE;
+ mrk->modifier = modif;
+ if (modif != MK_SYNC_ALL) {
+ mrk->nport_handle = cpu_to_le16(fcport->loop_id);
+ mrk->lun[1] = LSB(lun);
+ mrk->lun[2] = MSB(lun);
+ host_to_fcp_swap(mrk->lun, sizeof(mrk->lun));
+ mrk->vp_index = vha->vp_idx;
+ mrk->handle = MAKE_HANDLE(req->id, mrk->handle);
+ }
+}
+
+static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
- struct fc_bsg_job *bsg_job = ((struct srb_bsg*)sp->ctx)->bsg_job;
+ struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
@@ -1041,8 +1835,10 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->sof_type = EST_SOFI3;
els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
- els_iocb->opcode =(((struct srb_bsg*)sp->ctx)->ctx.type == SRB_ELS_CMD_RPT) ?
- bsg_job->request->rqst_data.r_els.els_code : bsg_job->request->rqst_data.h_els.command_code;
+ els_iocb->opcode =
+ (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
+ bsg_job->request->rqst_data.r_els.els_code :
+ bsg_job->request->rqst_data.h_els.command_code;
els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
els_iocb->port_id[1] = sp->fcport->d_id.b.area;
els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
@@ -1076,7 +1872,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
int index;
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
- struct fc_bsg_job *bsg_job = ((struct srb_bsg*)sp->ctx)->bsg_job;
+ struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
int loop_iterartion = 0;
int cont_iocb_prsnt = 0;
int entry_count = 1;
@@ -1157,12 +1953,12 @@ qla2x00_start_sp(srb_t *sp)
switch (ctx->type) {
case SRB_LOGIN_CMD:
IS_FWI2_CAPABLE(ha) ?
- qla24xx_login_iocb(sp, pkt):
+ qla24xx_login_iocb(sp, pkt) :
qla2x00_login_iocb(sp, pkt);
break;
case SRB_LOGOUT_CMD:
IS_FWI2_CAPABLE(ha) ?
- qla24xx_logout_iocb(sp, pkt):
+ qla24xx_logout_iocb(sp, pkt) :
qla2x00_logout_iocb(sp, pkt);
break;
case SRB_ELS_CMD_RPT:
@@ -1172,6 +1968,17 @@ qla2x00_start_sp(srb_t *sp)
case SRB_CT_CMD:
qla24xx_ct_iocb(sp, pkt);
break;
+ case SRB_ADISC_CMD:
+ IS_FWI2_CAPABLE(ha) ?
+ qla24xx_adisc_iocb(sp, pkt) :
+ qla2x00_adisc_iocb(sp, pkt);
+ break;
+ case SRB_TM_CMD:
+ qla24xx_tm_iocb(sp, pkt);
+ break;
+ case SRB_MARKER_CMD:
+ qla24xx_marker_iocb(sp, pkt);
+ break;
default:
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index db539b0..be3d8be 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
+#include <scsi/scsi_eh.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
@@ -326,7 +327,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
/* Setup to process RIO completion. */
handle_cnt = 0;
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
goto skip_rio;
switch (mb[0]) {
case MBA_SCSI_COMPLETION:
@@ -544,7 +545,7 @@ skip_rio:
if (IS_QLA2100(ha))
break;
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
"%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
else
@@ -845,7 +846,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
qla2x00_sp_compl(ha, sp);
} else {
DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
- " handle(%d)\n", vha->host_no, req->id, index));
+ " handle(0x%x)\n", vha->host_no, req->id, index));
qla_printk(KERN_WARNING, ha,
"Invalid ISP SCSI completion handle\n");
@@ -895,36 +896,26 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
{
const char func[] = "MBX-IOCB";
const char *type;
- struct qla_hw_data *ha = vha->hw;
fc_port_t *fcport;
srb_t *sp;
- struct srb_logio *lio;
- uint16_t data[2];
+ struct srb_iocb *lio;
+ struct srb_ctx *ctx;
+ uint16_t *data;
+ uint16_t status;
sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
if (!sp)
return;
- type = NULL;
- lio = sp->ctx;
- switch (lio->ctx.type) {
- case SRB_LOGIN_CMD:
- type = "login";
- break;
- case SRB_LOGOUT_CMD:
- type = "logout";
- break;
- default:
- qla_printk(KERN_WARNING, ha,
- "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
- lio->ctx.type);
- return;
- }
-
- del_timer(&lio->ctx.timer);
+ ctx = sp->ctx;
+ lio = ctx->u.iocb_cmd;
+ type = ctx->name;
fcport = sp->fcport;
+ data = lio->u.logio.data;
- data[0] = data[1] = 0;
+ data[0] = MBS_COMMAND_ERROR;
+ data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
+ QLA_LOGIO_LOGIN_RETRIED : 0;
if (mbx->entry_status) {
DEBUG2(printk(KERN_WARNING
"scsi(%ld:%x): Async-%s error entry - entry-status=%x "
@@ -935,23 +926,28 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(mbx->status_flags)));
DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx)));
- data[0] = MBS_COMMAND_ERROR;
- data[1] = lio->flags & SRB_LOGIN_RETRIED ?
- QLA_LOGIO_LOGIN_RETRIED: 0;
- goto done_post_logio_done_work;
+ goto logio_done;
}
- if (!mbx->status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
+ status = le16_to_cpu(mbx->status);
+ if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
+ le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
+ status = 0;
+ if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
DEBUG2(printk(KERN_DEBUG
"scsi(%ld:%x): Async-%s complete - mbx1=%x.\n",
fcport->vha->host_no, sp->handle, type,
le16_to_cpu(mbx->mb1)));
data[0] = MBS_COMMAND_COMPLETE;
- if (lio->ctx.type == SRB_LOGIN_CMD && le16_to_cpu(mbx->mb1) & BIT_1)
- fcport->flags |= FCF_FCP2_DEVICE;
-
- goto done_post_logio_done_work;
+ if (ctx->type == SRB_LOGIN_CMD) {
+ fcport->port_type = FCT_TARGET;
+ if (le16_to_cpu(mbx->mb1) & BIT_0)
+ fcport->port_type = FCT_INITIATOR;
+ if (le16_to_cpu(mbx->mb1) & BIT_1)
+ fcport->flags |= FCF_FCP2_DEVICE;
+ }
+ goto logio_done;
}
data[0] = le16_to_cpu(mbx->mb0);
@@ -963,25 +959,19 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
break;
default:
data[0] = MBS_COMMAND_ERROR;
- data[1] = lio->flags & SRB_LOGIN_RETRIED ?
- QLA_LOGIO_LOGIN_RETRIED: 0;
break;
}
DEBUG2(printk(KERN_WARNING
"scsi(%ld:%x): Async-%s failed - status=%x mb0=%x mb1=%x mb2=%x "
"mb6=%x mb7=%x.\n",
- fcport->vha->host_no, sp->handle, type, le16_to_cpu(mbx->status),
+ fcport->vha->host_no, sp->handle, type, status,
le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
le16_to_cpu(mbx->mb7)));
-done_post_logio_done_work:
- lio->ctx.type == SRB_LOGIN_CMD ?
- qla2x00_post_async_login_done_work(fcport->vha, fcport, data):
- qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
-
- lio->ctx.free(sp);
+logio_done:
+ lio->done(sp);
}
static void
@@ -992,7 +982,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
const char *type;
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
- struct srb_bsg *sp_bsg;
+ struct srb_ctx *sp_bsg;
struct fc_bsg_job *bsg_job;
uint16_t comp_status;
uint32_t fw_status[3];
@@ -1001,11 +991,11 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (!sp)
return;
- sp_bsg = (struct srb_bsg*)sp->ctx;
- bsg_job = sp_bsg->bsg_job;
+ sp_bsg = sp->ctx;
+ bsg_job = sp_bsg->u.bsg_job;
type = NULL;
- switch (sp_bsg->ctx.type) {
+ switch (sp_bsg->type) {
case SRB_ELS_CMD_RPT:
case SRB_ELS_CMD_HST:
type = "els";
@@ -1016,7 +1006,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
default:
qla_printk(KERN_WARNING, ha,
"%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
- sp_bsg->ctx.type);
+ sp_bsg->type);
return;
}
@@ -1070,8 +1060,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
dma_unmap_sg(&ha->pdev->dev,
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- if ((sp_bsg->ctx.type == SRB_ELS_CMD_HST) ||
- (sp_bsg->ctx.type == SRB_CT_CMD))
+ if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
+ (sp_bsg->type == SRB_CT_CMD))
kfree(sp->fcport);
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
@@ -1084,37 +1074,26 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
{
const char func[] = "LOGIO-IOCB";
const char *type;
- struct qla_hw_data *ha = vha->hw;
fc_port_t *fcport;
srb_t *sp;
- struct srb_logio *lio;
- uint16_t data[2];
+ struct srb_iocb *lio;
+ struct srb_ctx *ctx;
+ uint16_t *data;
uint32_t iop[2];
sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
if (!sp)
return;
- type = NULL;
- lio = sp->ctx;
- switch (lio->ctx.type) {
- case SRB_LOGIN_CMD:
- type = "login";
- break;
- case SRB_LOGOUT_CMD:
- type = "logout";
- break;
- default:
- qla_printk(KERN_WARNING, ha,
- "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
- lio->ctx.type);
- return;
- }
-
- del_timer(&lio->ctx.timer);
+ ctx = sp->ctx;
+ lio = ctx->u.iocb_cmd;
+ type = ctx->name;
fcport = sp->fcport;
+ data = lio->u.logio.data;
- data[0] = data[1] = 0;
+ data[0] = MBS_COMMAND_ERROR;
+ data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
+ QLA_LOGIO_LOGIN_RETRIED : 0;
if (logio->entry_status) {
DEBUG2(printk(KERN_WARNING
"scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n",
@@ -1122,10 +1101,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
logio->entry_status));
DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio)));
- data[0] = MBS_COMMAND_ERROR;
- data[1] = lio->flags & SRB_LOGIN_RETRIED ?
- QLA_LOGIO_LOGIN_RETRIED: 0;
- goto done_post_logio_done_work;
+ goto logio_done;
}
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
@@ -1135,8 +1111,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
le32_to_cpu(logio->io_parameter[0])));
data[0] = MBS_COMMAND_COMPLETE;
- if (lio->ctx.type == SRB_LOGOUT_CMD)
- goto done_post_logio_done_work;
+ if (ctx->type != SRB_LOGIN_CMD)
+ goto logio_done;
iop[0] = le32_to_cpu(logio->io_parameter[0]);
if (iop[0] & BIT_4) {
@@ -1151,7 +1127,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
if (logio->io_parameter[9] || logio->io_parameter[10])
fcport->supported_classes |= FC_COS_CLASS3;
- goto done_post_logio_done_work;
+ goto logio_done;
}
iop[0] = le32_to_cpu(logio->io_parameter[0]);
@@ -1172,8 +1148,6 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
/* Fall through. */
default:
data[0] = MBS_COMMAND_ERROR;
- data[1] = lio->flags & SRB_LOGIN_RETRIED ?
- QLA_LOGIO_LOGIN_RETRIED: 0;
break;
}
@@ -1184,12 +1158,101 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
le32_to_cpu(logio->io_parameter[0]),
le32_to_cpu(logio->io_parameter[1])));
-done_post_logio_done_work:
- lio->ctx.type == SRB_LOGIN_CMD ?
- qla2x00_post_async_login_done_work(fcport->vha, fcport, data):
- qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
+logio_done:
+ lio->done(sp);
+}
- lio->ctx.free(sp);
+static void
+qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct tsk_mgmt_entry *tsk)
+{
+ const char func[] = "TMF-IOCB";
+ const char *type;
+ fc_port_t *fcport;
+ srb_t *sp;
+ struct srb_iocb *iocb;
+ struct srb_ctx *ctx;
+ struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
+ int error = 1;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
+ if (!sp)
+ return;
+
+ ctx = sp->ctx;
+ iocb = ctx->u.iocb_cmd;
+ type = ctx->name;
+ fcport = sp->fcport;
+
+ if (sts->entry_status) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld:%x): Async-%s error - entry-status(%x).\n",
+ fcport->vha->host_no, sp->handle, type,
+ sts->entry_status));
+ } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld:%x): Async-%s error - completion status(%x).\n",
+ fcport->vha->host_no, sp->handle, type,
+ sts->comp_status));
+ } else if (!(le16_to_cpu(sts->scsi_status) &
+ SS_RESPONSE_INFO_LEN_VALID)) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld:%x): Async-%s error - no response info(%x).\n",
+ fcport->vha->host_no, sp->handle, type,
+ sts->scsi_status));
+ } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld:%x): Async-%s error - not enough response(%d).\n",
+ fcport->vha->host_no, sp->handle, type,
+ sts->rsp_data_len));
+ } else if (sts->data[3]) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld:%x): Async-%s error - response(%x).\n",
+ fcport->vha->host_no, sp->handle, type,
+ sts->data[3]));
+ } else {
+ error = 0;
+ }
+
+ if (error) {
+ iocb->u.tmf.data = error;
+ DEBUG2(qla2x00_dump_buffer((uint8_t *)sts, sizeof(*sts)));
+ }
+
+ iocb->done(sp);
+}
+
+static void
+qla24xx_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct mrk_entry_24xx *mrk)
+{
+ const char func[] = "MRK-IOCB";
+ const char *type;
+ fc_port_t *fcport;
+ srb_t *sp;
+ struct srb_iocb *iocb;
+ struct srb_ctx *ctx;
+ struct sts_entry_24xx *sts = (struct sts_entry_24xx *)mrk;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, mrk);
+ if (!sp)
+ return;
+
+ ctx = sp->ctx;
+ iocb = ctx->u.iocb_cmd;
+ type = ctx->name;
+ fcport = sp->fcport;
+
+ if (sts->entry_status) {
+ iocb->u.marker.data = 1;
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n",
+ fcport->vha->host_no, sp->handle, type,
+ sts->entry_status));
+ DEBUG2(qla2x00_dump_buffer((uint8_t *)mrk, sizeof(*sts)));
+ }
+
+ iocb->done(sp);
}
/**
@@ -1256,6 +1319,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
case MBX_IOCB_TYPE:
qla2x00_mbx_iocb_entry(vha, rsp->req,
(struct mbx_entry *)pkt);
+ break;
default:
/* Type Not Supported. */
DEBUG4(printk(KERN_WARNING
@@ -1301,6 +1365,78 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
}
+struct scsi_dif_tuple {
+ __be16 guard; /* Checksum */
+ __be16 app_tag; /* APPL identifier */
+ __be32 ref_tag; /* Target LBA or indirect LBA */
+};
+
+/*
+ * Checks the guard or meta-data for the type of error
+ * detected by the HBA. In case of errors, we set the
+ * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
+ * to indicate to the kernel that the HBA detected an error.
+ */
+static inline void
+qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
+{
+ struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_dif_tuple *ep =
+ (struct scsi_dif_tuple *)&sts24->data[20];
+ struct scsi_dif_tuple *ap =
+ (struct scsi_dif_tuple *)&sts24->data[12];
+ uint32_t e_ref_tag, a_ref_tag;
+ uint16_t e_app_tag, a_app_tag;
+ uint16_t e_guard, a_guard;
+
+ e_ref_tag = be32_to_cpu(ep->ref_tag);
+ a_ref_tag = be32_to_cpu(ap->ref_tag);
+ e_app_tag = be16_to_cpu(ep->app_tag);
+ a_app_tag = be16_to_cpu(ap->app_tag);
+ e_guard = be16_to_cpu(ep->guard);
+ a_guard = be16_to_cpu(ap->guard);
+
+ DEBUG18(printk(KERN_DEBUG
+ "%s(): iocb(s) %p Returned STATUS\n", __func__, sts24));
+
+ DEBUG18(printk(KERN_ERR "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
+ " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
+ " tag=0x%x, act guard=0x%x, exp guard=0x%x\n",
+ cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
+ a_app_tag, e_app_tag, a_guard, e_guard));
+
+
+ /* check guard */
+ if (e_guard != a_guard) {
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x1);
+ set_driver_byte(cmd, DRIVER_SENSE);
+ set_host_byte(cmd, DID_ABORT);
+ cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+ return;
+ }
+
+ /* check appl tag */
+ if (e_app_tag != a_app_tag) {
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x2);
+ set_driver_byte(cmd, DRIVER_SENSE);
+ set_host_byte(cmd, DID_ABORT);
+ cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+ return;
+ }
+
+ /* check ref tag */
+ if (e_ref_tag != a_ref_tag) {
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x3);
+ set_driver_byte(cmd, DRIVER_SENSE);
+ set_host_byte(cmd, DID_ABORT);
+ cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+ return;
+ }
+}
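/*
 * Editorial illustration (not part of the patch): the ASC/ASCQ pairs passed to
 * scsi_build_sense_buffer() above follow the standard T10 protection-information
 * sense codes -- ASC 0x10 with ASCQ 0x1 (guard check failed), 0x2 (application
 * tag check failed) and 0x3 (reference tag check failed). A minimal lookup,
 * purely illustrative and assuming nothing beyond standard C:
 */
static inline const char *dif_ascq_name(unsigned char ascq)
{
	switch (ascq) {
	case 0x1: return "LOGICAL BLOCK GUARD CHECK FAILED";
	case 0x2: return "LOGICAL BLOCK APPLICATION TAG CHECK FAILED";
	case 0x3: return "LOGICAL BLOCK REFERENCE TAG CHECK FAILED";
	default:  return "unknown";
	}
}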
+
/**
* qla2x00_status_entry() - Process a Status IOCB entry.
* @ha: SCSI driver HA context
@@ -1316,6 +1452,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
struct sts_entry_24xx *sts24;
uint16_t comp_status;
uint16_t scsi_status;
+ uint16_t ox_id;
uint8_t lscsi_status;
int32_t resid;
uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
@@ -1324,6 +1461,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
uint32_t handle;
uint16_t que;
struct req_que *req;
+ int logit = 1;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@@ -1337,6 +1475,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
handle = (uint32_t) LSW(sts->handle);
que = MSW(sts->handle);
req = ha->req_q_map[que];
+
/* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
qla2x00_process_completed_request(vha, req, handle);
@@ -1352,9 +1491,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
sp = NULL;
if (sp == NULL) {
- DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
- vha->host_no));
- qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
+ qla_printk(KERN_WARNING, ha,
+ "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no,
+ sts->handle);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@@ -1362,10 +1501,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
cp = sp->cmd;
if (cp == NULL) {
- DEBUG2(printk("scsi(%ld): Command already returned back to OS "
- "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp));
qla_printk(KERN_WARNING, ha,
- "Command is NULL: already returned to OS (sp=%p)\n", sp);
+ "scsi(%ld): Command already returned (0x%x/%p).\n",
+ vha->host_no, sts->handle, sp);
return;
}
@@ -1374,6 +1512,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
fcport = sp->fcport;
+ ox_id = 0;
sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
if (IS_FWI2_CAPABLE(ha)) {
if (scsi_status & SS_SENSE_LEN_VALID)
@@ -1387,6 +1526,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
rsp_info = sts24->data;
sense_data = sts24->data;
host_to_fcp_swap(sts24->data, sizeof(sts24->data));
+ ox_id = le16_to_cpu(sts24->ox_id);
} else {
if (scsi_status & SS_SENSE_LEN_VALID)
sense_len = le16_to_cpu(sts->req_sense_length);
@@ -1403,17 +1543,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
if (IS_FWI2_CAPABLE(ha))
sense_data += rsp_info_len;
if (rsp_info_len > 3 && rsp_info[3]) {
- DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
- "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
- "retrying command\n", vha->host_no,
- cp->device->channel, cp->device->id,
- cp->device->lun, rsp_info_len, rsp_info[0],
- rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
- rsp_info[5], rsp_info[6], rsp_info[7]));
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d): FCP I/O protocol failure "
+ "(0x%x/0x%x).\n", vha->host_no, cp->device->id,
+ cp->device->lun, rsp_info_len, rsp_info[3]));
cp->result = DID_BUS_BUSY << 16;
- qla2x00_sp_compl(ha, sp);
- return;
+ goto out;
}
}
@@ -1440,12 +1576,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d:%d): Mid-layer underflow "
- "detected (%x of %x bytes)...returning "
- "error status.\n", vha->host_no,
- cp->device->channel, cp->device->id,
- cp->device->lun, resid,
- scsi_bufflen(cp));
+ "scsi(%ld:%d:%d): Mid-layer underflow "
+ "detected (0x%x of 0x%x bytes).\n",
+ vha->host_no, cp->device->id,
+ cp->device->lun, resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16;
break;
@@ -1454,12 +1588,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
cp->result = DID_OK << 16 | lscsi_status;
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- DEBUG2(printk(KERN_INFO
- "scsi(%ld): QUEUE FULL status detected "
- "0x%x-0x%x.\n", vha->host_no, comp_status,
- scsi_status));
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
+ vha->host_no, cp->device->id, cp->device->lun));
break;
}
+ logit = 0;
if (lscsi_status != SS_CHECK_CONDITION)
break;
@@ -1471,23 +1605,14 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
break;
case CS_DATA_UNDERRUN:
- DEBUG2(printk(KERN_INFO
- "scsi(%ld:%d:%d) UNDERRUN status detected 0x%x-0x%x. "
- "resid=0x%x fw_resid=0x%x cdb=0x%x os_underflow=0x%x\n",
- vha->host_no, cp->device->id, cp->device->lun, comp_status,
- scsi_status, resid_len, fw_resid_len, cp->cmnd[0],
- cp->underflow));
-
/* Use F/W calculated residual length. */
resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
scsi_set_resid(cp, resid);
if (scsi_status & SS_RESIDUAL_UNDER) {
if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
- DEBUG2(printk(
- "scsi(%ld:%d:%d:%d) Dropped frame(s) "
- "detected (%x of %x bytes)...residual "
- "length mismatch...retrying command.\n",
- vha->host_no, cp->device->channel,
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d) Dropped frame(s) detected "
+ "(0x%x of 0x%x bytes).\n", vha->host_no,
cp->device->id, cp->device->lun, resid,
scsi_bufflen(cp)));
@@ -1499,21 +1624,18 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d:%d): Mid-layer underflow "
- "detected (%x of %x bytes)...returning "
- "error status.\n", vha->host_no,
- cp->device->channel, cp->device->id,
+ "scsi(%ld:%d:%d): Mid-layer underflow "
+ "detected (0x%x of 0x%x bytes).\n",
+ vha->host_no, cp->device->id,
cp->device->lun, resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16;
break;
}
} else if (!lscsi_status) {
- DEBUG2(printk(
- "scsi(%ld:%d:%d:%d) Dropped frame(s) detected "
- "(%x of %x bytes)...firmware reported underrun..."
- "retrying command.\n", vha->host_no,
- cp->device->channel, cp->device->id,
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
+ "of 0x%x bytes).\n", vha->host_no, cp->device->id,
cp->device->lun, resid, scsi_bufflen(cp)));
cp->result = DID_ERROR << 16;
@@ -1521,6 +1643,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
cp->result = DID_OK << 16 | lscsi_status;
+ logit = 0;
/*
* Check to see if SCSI Status is non zero. If so report SCSI
@@ -1528,10 +1651,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
*/
if (lscsi_status != 0) {
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- DEBUG2(printk(KERN_INFO
- "scsi(%ld): QUEUE FULL status detected "
- "0x%x-0x%x.\n", vha->host_no, comp_status,
- scsi_status));
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
+ vha->host_no, cp->device->id,
+ cp->device->lun));
+ logit = 1;
break;
}
if (lscsi_status != SS_CHECK_CONDITION)
@@ -1545,109 +1669,60 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
break;
- case CS_DATA_OVERRUN:
- DEBUG2(printk(KERN_INFO
- "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
- vha->host_no, cp->device->id, cp->device->lun, comp_status,
- scsi_status));
- DEBUG2(printk(KERN_INFO
- "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
- cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
- cp->cmnd[4], cp->cmnd[5]));
- DEBUG2(printk(KERN_INFO
- "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
- "status!\n",
- cp->serial_number, scsi_bufflen(cp), resid_len));
-
- cp->result = DID_ERROR << 16;
- break;
-
case CS_PORT_LOGGED_OUT:
case CS_PORT_CONFIG_CHG:
case CS_PORT_BUSY:
case CS_INCOMPLETE:
case CS_PORT_UNAVAILABLE:
- /*
- * If the port is in Target Down state, return all IOs for this
- * Target with DID_NO_CONNECT ELSE Queue the IOs in the
- * retry_queue.
- */
- DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
- "pid=%ld, compl status=0x%x, port state=0x%x\n",
- vha->host_no, cp->device->id, cp->device->lun,
- cp->serial_number, comp_status,
- atomic_read(&fcport->state)));
-
+ case CS_TIMEOUT:
/*
* We are going to have the fc class block the rport
* while we try to recover so instruct the mid layer
* to requeue until the class decides how to handle this.
*/
cp->result = DID_TRANSPORT_DISRUPTED << 16;
+
+ if (comp_status == CS_TIMEOUT) {
+ if (IS_FWI2_CAPABLE(ha))
+ break;
+ else if ((le16_to_cpu(sts->status_flags) &
+ SF_LOGOUT_SENT) == 0)
+ break;
+ }
+
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d) Port down status: port-state=0x%x\n",
+ vha->host_no, cp->device->id, cp->device->lun,
+ atomic_read(&fcport->state)));
+
if (atomic_read(&fcport->state) == FCS_ONLINE)
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
break;
case CS_RESET:
- DEBUG2(printk(KERN_INFO
- "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
- vha->host_no, comp_status, scsi_status));
-
- cp->result = DID_RESET << 16;
- break;
-
case CS_ABORTED:
- /*
- * hv2.19.12 - DID_ABORT does not retry the request if we
- * aborted this request then abort otherwise it must be a
- * reset.
- */
- DEBUG2(printk(KERN_INFO
- "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
- vha->host_no, comp_status, scsi_status));
-
cp->result = DID_RESET << 16;
break;
- case CS_TIMEOUT:
- /*
- * We are going to have the fc class block the rport
- * while we try to recover so instruct the mid layer
- * to requeue until the class decides how to handle this.
- */
- cp->result = DID_TRANSPORT_DISRUPTED << 16;
-
- if (IS_FWI2_CAPABLE(ha)) {
- DEBUG2(printk(KERN_INFO
- "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
- "0x%x-0x%x\n", vha->host_no, cp->device->channel,
- cp->device->id, cp->device->lun, comp_status,
- scsi_status));
- break;
- }
- DEBUG2(printk(KERN_INFO
- "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
- "sflags=%x.\n", vha->host_no, cp->device->channel,
- cp->device->id, cp->device->lun, comp_status, scsi_status,
- le16_to_cpu(sts->status_flags)));
-
- /* Check to see if logout occurred. */
- if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
- qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+ case CS_DIF_ERROR:
+ qla2x00_handle_dif_error(sp, sts24);
break;
-
default:
- DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
- "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
- qla_printk(KERN_INFO, ha,
- "Unknown status detected 0x%x-0x%x.\n",
- comp_status, scsi_status);
-
cp->result = DID_ERROR << 16;
break;
}
- /* Place command on done queue. */
+out:
+ if (logit)
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
+ "oxid=0x%x ser=0x%lx cdb=%02x%02x%02x len=0x%x "
+ "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
+ cp->device->id, cp->device->lun, comp_status, scsi_status,
+ cp->result, ox_id, cp->serial_number, cp->cmnd[0],
+ cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
+ resid_len, fw_resid_len));
+
if (rsp->status_srb == NULL)
qla2x00_sp_compl(ha, sp);
}
@@ -1806,6 +1881,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct rsp_que *rsp)
{
struct sts_entry_24xx *pkt;
+ struct qla_hw_data *ha = vha->hw;
if (!vha->flags.online)
return;
@@ -1846,6 +1922,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
qla24xx_logio_entry(vha, rsp->req,
(struct logio_entry_24xx *)pkt);
break;
+ case TSK_MGMT_IOCB_TYPE:
+ qla24xx_tm_iocb_entry(vha, rsp->req,
+ (struct tsk_mgmt_entry *)pkt);
+ break;
+ case MARKER_TYPE:
+ qla24xx_marker_iocb_entry(vha, rsp->req,
+ (struct mrk_entry_24xx *)pkt);
+ break;
case CT_IOCB_TYPE:
qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
@@ -1866,7 +1950,11 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
}
/* Adjust ring index */
- WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+ if (IS_QLA82XX(ha)) {
+ struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+ WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
+ } else
+ WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}
static void
@@ -2169,6 +2257,11 @@ static struct qla_init_msix_entry msix_entries[3] = {
{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};
+static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
+ { "qla2xxx (default)", qla82xx_msix_default },
+ { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
+};
+
static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
@@ -2195,7 +2288,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
struct qla_msix_entry *qentry;
entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
- GFP_KERNEL);
+ GFP_KERNEL);
if (!entries)
return -ENOMEM;
@@ -2240,8 +2333,15 @@ msix_failed:
/* Enable MSI-X vectors for the base queue */
for (i = 0; i < 2; i++) {
qentry = &ha->msix_entries[i];
- ret = request_irq(qentry->vector, msix_entries[i].handler,
- 0, msix_entries[i].name, rsp);
+ if (IS_QLA82XX(ha)) {
+ ret = request_irq(qentry->vector,
+ qla82xx_msix_entries[i].handler,
+ 0, qla82xx_msix_entries[i].name, rsp);
+ } else {
+ ret = request_irq(qentry->vector,
+ msix_entries[i].handler,
+ 0, msix_entries[i].name, rsp);
+ }
if (ret) {
qla_printk(KERN_WARNING, ha,
"MSI-X: Unable to register handler -- %x/%d.\n",
@@ -2272,7 +2372,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
- !IS_QLA8432(ha) && !IS_QLA8001(ha))
+ !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
goto skip_msi;
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -2302,7 +2402,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
goto clear_risc_ints;
}
qla_printk(KERN_WARNING, ha,
- "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
+ "MSI-X: Falling back-to MSI mode -- %d.\n", ret);
skip_msix:
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
@@ -2313,7 +2413,9 @@ skip_msix:
if (!ret) {
DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
ha->flags.msi_enabled = 1;
- }
+ } else
+ qla_printk(KERN_WARNING, ha,
+ "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
skip_msi:
ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
@@ -2331,7 +2433,7 @@ clear_risc_ints:
* FIXME: Noted that 8014s were being dropped during NK testing.
* Timing deltas during MSI-X/INTa transitions?
*/
- if (IS_QLA81XX(ha))
+ if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
goto fail;
spin_lock_irq(&ha->hardware_lock);
if (IS_FWI2_CAPABLE(ha)) {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 42eb7ff..f3650d0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -49,6 +49,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (ha->pdev->error_state > pci_channel_io_frozen)
return QLA_FUNCTION_TIMEOUT;
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
+ "%s(%ld): Device in failed state, "
+ "timeout MBX Exiting.\n",
+ __func__, base_vha->host_no));
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
reg = ha->iobase;
io_lock_on = base_vha->flags.init_done;
@@ -85,7 +93,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Load mailbox registers. */
- if (IS_FWI2_CAPABLE(ha))
+ if (IS_QLA82XX(ha))
+ optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
+ else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha))
optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
else
optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
@@ -133,7 +143,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- if (IS_FWI2_CAPABLE(ha))
+ if (IS_QLA82XX(ha)) {
+ if (RD_REG_DWORD(&reg->isp82.hint) &
+ HINT_MBX_INT_PENDING) {
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
+ DEBUG2_3_11(printk(KERN_INFO
+ "%s(%ld): Pending Mailbox timeout. "
+ "Exiting.\n", __func__, base_vha->host_no));
+ return QLA_FUNCTION_TIMEOUT;
+ }
+ WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+ } else if (IS_FWI2_CAPABLE(ha))
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@@ -147,7 +168,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
base_vha->host_no, command));
- if (IS_FWI2_CAPABLE(ha))
+ if (IS_QLA82XX(ha)) {
+ if (RD_REG_DWORD(&reg->isp82.hint) &
+ HINT_MBX_INT_PENDING) {
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
+ DEBUG2_3_11(printk(KERN_INFO
+ "%s(%ld): Pending Mailbox timeout. "
+ "Exiting.\n", __func__, base_vha->host_no));
+ return QLA_FUNCTION_TIMEOUT;
+ }
+ WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+ } else if (IS_FWI2_CAPABLE(ha))
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@@ -264,7 +296,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
clear_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
- if (qla2x00_abort_isp(base_vha)) {
+ if (ha->isp_ops->abort_isp(base_vha)) {
/* Failed. retry later. */
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
}
@@ -711,7 +743,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
* Context:
* Kernel context.
*/
-static int
+int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
dma_addr_t phys_addr, size_t size, uint32_t tov)
{
@@ -952,7 +984,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_0;
mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- if (IS_QLA81XX(vha->hw))
+ if (IS_QLA8XXX_TYPE(vha->hw))
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
@@ -978,7 +1010,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
vha->host_no));
- if (IS_QLA81XX(vha->hw)) {
+ if (IS_QLA8XXX_TYPE(vha->hw)) {
vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
vha->fcoe_fcf_idx = mcp->mb[10];
vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
@@ -1076,6 +1108,10 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
vha->host_no));
+ if (IS_QLA82XX(ha) && ql2xdbwr)
+ qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
+ (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
+
if (ha->flags.npiv_supported)
mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
else
@@ -1408,7 +1444,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
- if (IS_QLA81XX(vha->hw)) {
+ if (IS_QLA8XXX_TYPE(vha->hw)) {
/* Logout across all FCFs. */
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
mcp->mb[1] = BIT_1;
@@ -2428,12 +2464,22 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
int
qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
{
+ struct qla_hw_data *ha = fcport->vha->hw;
+
+ if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
+ return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
+
return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
}
int
qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
{
+ struct qla_hw_data *ha = fcport->vha->hw;
+
+ if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
+ return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
+
return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
}
@@ -2740,6 +2786,48 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint16_t addr,
}
int
+qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
+ uint16_t *port_speed, uint16_t *mb)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_IIDMA_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+
+ mcp->mb[0] = MBC_PORT_PARAMS;
+ mcp->mb[1] = loop_id;
+ mcp->mb[2] = mcp->mb[3] = 0;
+ mcp->mb[9] = vha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_3|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ /* Return mailbox statuses. */
+ if (mb != NULL) {
+ mb[0] = mcp->mb[0];
+ mb[1] = mcp->mb[1];
+ mb[3] = mcp->mb[3];
+ }
+
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
+ vha->host_no, rval));
+ } else {
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ if (port_speed)
+ *port_speed = mcp->mb[3];
+ }
+
+ return rval;
+}
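/*
 * Hedged usage sketch (editorial, not in the patch): how a caller might use
 * qla2x00_get_idma_speed() to query the negotiated iIDMA speed of a remote
 * port. The function name, fcport argument and logging below are assumptions
 * made only for this example.
 */
static void example_report_idma_speed(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	uint16_t speed, mb[4];

	if (qla2x00_get_idma_speed(vha, fcport->loop_id, &speed, mb) ==
	    QLA_SUCCESS)
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld): loop_id=%x iIDMA speed=%x\n",
		    vha->host_no, fcport->loop_id, speed));
}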
+
+int
qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
uint16_t port_speed, uint16_t *mb)
{
@@ -2755,7 +2843,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
mcp->mb[2] = BIT_0;
- if (IS_QLA81XX(vha->hw))
+ if (IS_QLA8XXX_TYPE(vha->hw))
mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
else
mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
@@ -3544,7 +3632,7 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA8XXX_TYPE(vha->hw))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
@@ -3582,7 +3670,7 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA8XXX_TYPE(vha->hw))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
@@ -3643,7 +3731,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
}
int
-qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
+qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
+ uint16_t *mresp)
{
int rval;
mbx_cmd_t mc;
@@ -3678,7 +3767,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *
mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
- if (IS_QLA81XX(vha->hw))
+ if (IS_QLA8XXX_TYPE(vha->hw))
mcp->out_mb |= MBX_2;
mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
@@ -3690,9 +3779,11 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *
if (rval != QLA_SUCCESS) {
DEBUG2(printk(KERN_WARNING
- "(%ld): failed=%x mb[0]=0x%x "
- "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x mb[19]=0x%x. \n", vha->host_no, rval,
- mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[18], mcp->mb[19]));
+ "(%ld): failed=%x mb[0]=0x%x "
+ "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x "
+ "mb[19]=0x%x.\n",
+ vha->host_no, rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
+ mcp->mb[3], mcp->mb[18], mcp->mb[19]));
} else {
DEBUG2(printk(KERN_WARNING
"scsi(%ld): done.\n", vha->host_no));
@@ -3706,7 +3797,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *
}
int
-qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
+qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
+ uint16_t *mresp)
{
int rval;
mbx_cmd_t mc;
@@ -3718,9 +3810,10 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mres
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha)) {
mcp->mb[1] |= BIT_15;
- mcp->mb[2] = IS_QLA81XX(ha) ? vha->fcoe_fcf_idx : 0;
+ mcp->mb[2] = vha->fcoe_fcf_idx;
+ }
mcp->mb[16] = LSW(mreq->rcv_dma);
mcp->mb[17] = MSW(mreq->rcv_dma);
mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
@@ -3735,13 +3828,13 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mres
mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
mcp->out_mb |= MBX_2;
mcp->in_mb = MBX_0;
- if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha))
+ if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
mcp->in_mb |= MBX_1;
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
mcp->in_mb |= MBX_3;
mcp->tov = MBX_TOV_SECONDS;
@@ -3764,8 +3857,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mres
return rval;
}
int
-qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic,
- uint16_t *cmd_status)
+qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic)
{
int rval;
mbx_cmd_t mc;
@@ -3782,8 +3874,6 @@ qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic,
mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
rval = qla2x00_mailbox_command(ha, mcp);
- /* Return mailbox statuses. */
- *cmd_status = mcp->mb[0];
if (rval != QLA_SUCCESS)
DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no,
rval));
@@ -3801,7 +3891,7 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
mbx_cmd_t *mcp = &mc;
if (!IS_FWI2_CAPABLE(vha->hw))
- return QLA_FUNCTION_FAILED;
+ return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
@@ -3836,7 +3926,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no));
+ DEBUG11(qla_printk(KERN_INFO, ha,
+ "%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_DATA_RATE;
mcp->mb[1] = 0;
@@ -3857,3 +3948,122 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
return rval;
}
+
+int
+qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
+ uint16_t *mb)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(printk(KERN_INFO
+ "%s(%ld): entered.\n", __func__, ha->host_no));
+
+ mcp->mb[0] = MBC_PORT_PARAMS;
+ mcp->mb[1] = loop_id;
+ if (ha->flags.fcp_prio_enabled)
+ mcp->mb[2] = BIT_1;
+ else
+ mcp->mb[2] = BIT_2;
+ mcp->mb[4] = priority & 0xf;
+ mcp->mb[9] = vha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (mb != NULL) {
+ mb[0] = mcp->mb[0];
+ mb[1] = mcp->mb[1];
+ mb[3] = mcp->mb[3];
+ mb[4] = mcp->mb[4];
+ }
+
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk(KERN_WARNING
+ "%s(%ld): failed=%x.\n", __func__,
+ vha->host_no, rval));
+ } else {
+ DEBUG11(printk(KERN_INFO
+ "%s(%ld): done.\n", __func__, vha->host_no));
+ }
+
+ return rval;
+}
+
+int
+qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_FWI2_CAPABLE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(qla_printk(KERN_INFO, ha,
+ "%s(%ld): entered.\n", __func__, vha->host_no));
+
+ memset(mcp, 0, sizeof(mbx_cmd_t));
+ mcp->mb[0] = MBC_TOGGLE_INTR;
+ mcp->mb[1] = 1;
+
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
+ "%s(%ld): failed=%x mb[0]=%x.\n", __func__,
+ vha->host_no, rval, mcp->mb[0]));
+ } else {
+ DEBUG11(qla_printk(KERN_INFO, ha,
+ "%s(%ld): done.\n", __func__, vha->host_no));
+ }
+
+ return rval;
+}
+
+int
+qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA82XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(qla_printk(KERN_INFO, ha,
+ "%s(%ld): entered.\n", __func__, vha->host_no));
+
+ memset(mcp, 0, sizeof(mbx_cmd_t));
+ mcp->mb[0] = MBC_TOGGLE_INTR;
+ mcp->mb[1] = 0;
+
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
+ "%s(%ld): failed=%x mb[0]=%x.\n", __func__,
+ vha->host_no, rval, mcp->mb[0]));
+ } else {
+ DEBUG11(qla_printk(KERN_INFO, ha,
+ "%s(%ld): done.\n", __func__, vha->host_no));
+ }
+
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
new file mode 100644
index 0000000..ff562de
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -0,0 +1,3636 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2008 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#define MASK(n) ((1ULL<<(n))-1)
+#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
+ ((addr >> 25) & 0x3ff))
+#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
+ ((addr >> 25) & 0x3ff))
+#define MS_WIN(addr) (addr & 0x0ffc0000)
+#define QLA82XX_PCI_MN_2M (0)
+#define QLA82XX_PCI_MS_2M (0x80000)
+#define QLA82XX_PCI_OCM0_2M (0xc0000)
+#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
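/*
 * Editorial sketch (not part of the patch): the windowing macros above simply
 * pack bits of an agent-side address into a window-register value plus an
 * offset within the mapped 2M region. A standalone, compilable illustration
 * of MN_WIN() and GET_MEM_OFFS_2M(), kept under #if 0 so it is never built:
 */
#if 0	/* example only */
#include <stdio.h>
#define MASK(n)			((1ULL << (n)) - 1)
#define MN_WIN(addr)		(((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
#define GET_MEM_OFFS_2M(addr)	(addr & MASK(18))

int main(void)
{
	unsigned long long addr = 0x00120000ULL;	/* sample DDR-side address */

	/* prints win=0x80000 off=0x20000 */
	printf("win=0x%llx off=0x%llx\n",
	    (unsigned long long)MN_WIN(addr),
	    (unsigned long long)GET_MEM_OFFS_2M(addr));
	return 0;
}
#endif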
+
+/* CRB window related */
+#define CRB_BLK(off) ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL)
+#define CRB_HI(off) ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
+ ((off) & 0xf0000))
+#define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL)
+#define CRB_INDIRECT_2M (0x1e0000UL)
+
+#define MAX_CRB_XFORM 60
+static unsigned long crb_addr_xform[MAX_CRB_XFORM];
+int qla82xx_crb_table_initialized;
+
+#define qla82xx_crb_addr_transform(name) \
+ (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
+ QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
+
+static void qla82xx_crb_addr_transform_setup(void)
+{
+ qla82xx_crb_addr_transform(XDMA);
+ qla82xx_crb_addr_transform(TIMR);
+ qla82xx_crb_addr_transform(SRE);
+ qla82xx_crb_addr_transform(SQN3);
+ qla82xx_crb_addr_transform(SQN2);
+ qla82xx_crb_addr_transform(SQN1);
+ qla82xx_crb_addr_transform(SQN0);
+ qla82xx_crb_addr_transform(SQS3);
+ qla82xx_crb_addr_transform(SQS2);
+ qla82xx_crb_addr_transform(SQS1);
+ qla82xx_crb_addr_transform(SQS0);
+ qla82xx_crb_addr_transform(RPMX7);
+ qla82xx_crb_addr_transform(RPMX6);
+ qla82xx_crb_addr_transform(RPMX5);
+ qla82xx_crb_addr_transform(RPMX4);
+ qla82xx_crb_addr_transform(RPMX3);
+ qla82xx_crb_addr_transform(RPMX2);
+ qla82xx_crb_addr_transform(RPMX1);
+ qla82xx_crb_addr_transform(RPMX0);
+ qla82xx_crb_addr_transform(ROMUSB);
+ qla82xx_crb_addr_transform(SN);
+ qla82xx_crb_addr_transform(QMN);
+ qla82xx_crb_addr_transform(QMS);
+ qla82xx_crb_addr_transform(PGNI);
+ qla82xx_crb_addr_transform(PGND);
+ qla82xx_crb_addr_transform(PGN3);
+ qla82xx_crb_addr_transform(PGN2);
+ qla82xx_crb_addr_transform(PGN1);
+ qla82xx_crb_addr_transform(PGN0);
+ qla82xx_crb_addr_transform(PGSI);
+ qla82xx_crb_addr_transform(PGSD);
+ qla82xx_crb_addr_transform(PGS3);
+ qla82xx_crb_addr_transform(PGS2);
+ qla82xx_crb_addr_transform(PGS1);
+ qla82xx_crb_addr_transform(PGS0);
+ qla82xx_crb_addr_transform(PS);
+ qla82xx_crb_addr_transform(PH);
+ qla82xx_crb_addr_transform(NIU);
+ qla82xx_crb_addr_transform(I2Q);
+ qla82xx_crb_addr_transform(EG);
+ qla82xx_crb_addr_transform(MN);
+ qla82xx_crb_addr_transform(MS);
+ qla82xx_crb_addr_transform(CAS2);
+ qla82xx_crb_addr_transform(CAS1);
+ qla82xx_crb_addr_transform(CAS0);
+ qla82xx_crb_addr_transform(CAM);
+ qla82xx_crb_addr_transform(C2C1);
+ qla82xx_crb_addr_transform(C2C0);
+ qla82xx_crb_addr_transform(SMB);
+ qla82xx_crb_addr_transform(OCM0);
+ /*
+ * Used only in P3; define it for P2 as well.
+ */
+ qla82xx_crb_addr_transform(I2C0);
+
+ qla82xx_crb_table_initialized = 1;
+}
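/*
 * Editorial note (not part of the patch): each qla82xx_crb_addr_transform(X)
 * invocation above expands to a table assignment of the form
 *
 *	crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_XDMA] =
 *	    QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA << 20;
 *
 * i.e. it records, per CRB block, the hub/agent id shifted into the top bits
 * that CRB_HI() later uses when forming a 2M-map window value.
 */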
+
+struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
+ {{{0, 0, 0, 0} } },
+ {{{1, 0x0100000, 0x0102000, 0x120000},
+ {1, 0x0110000, 0x0120000, 0x130000},
+ {1, 0x0120000, 0x0122000, 0x124000},
+ {1, 0x0130000, 0x0132000, 0x126000},
+ {1, 0x0140000, 0x0142000, 0x128000},
+ {1, 0x0150000, 0x0152000, 0x12a000},
+ {1, 0x0160000, 0x0170000, 0x110000},
+ {1, 0x0170000, 0x0172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x01e0000, 0x01e0800, 0x122000},
+ {0, 0x0000000, 0x0000000, 0x000000} } } ,
+ {{{1, 0x0200000, 0x0210000, 0x180000} } },
+ {{{0, 0, 0, 0} } },
+ {{{1, 0x0400000, 0x0401000, 0x169000} } },
+ {{{1, 0x0500000, 0x0510000, 0x140000} } },
+ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },
+ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },
+ {{{1, 0x0800000, 0x0802000, 0x170000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x08f0000, 0x08f2000, 0x172000} } },
+ {{{1, 0x0900000, 0x0902000, 0x174000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x09f0000, 0x09f2000, 0x176000} } },
+ {{{0, 0x0a00000, 0x0a02000, 0x178000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+ {{{0, 0x0b00000, 0x0b02000, 0x17c000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+ {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
+ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
+ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
+ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },
+ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },
+ {{{1, 0x1100000, 0x1101000, 0x160000} } },
+ {{{1, 0x1200000, 0x1201000, 0x161000} } },
+ {{{1, 0x1300000, 0x1301000, 0x162000} } },
+ {{{1, 0x1400000, 0x1401000, 0x163000} } },
+ {{{1, 0x1500000, 0x1501000, 0x165000} } },
+ {{{1, 0x1600000, 0x1601000, 0x166000} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },
+ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
+ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },
+ {{{0} } },
+ {{{1, 0x2100000, 0x2102000, 0x120000},
+ {1, 0x2110000, 0x2120000, 0x130000},
+ {1, 0x2120000, 0x2122000, 0x124000},
+ {1, 0x2130000, 0x2132000, 0x126000},
+ {1, 0x2140000, 0x2142000, 0x128000},
+ {1, 0x2150000, 0x2152000, 0x12a000},
+ {1, 0x2160000, 0x2170000, 0x110000},
+ {1, 0x2170000, 0x2172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x2200000, 0x2204000, 0x1b0000} } },
+ {{{0} } },
+ {{{0} } },
+ {{{0} } },
+ {{{0} } },
+ {{{0} } },
+ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },
+ {{{1, 0x2900000, 0x2901000, 0x16b000} } },
+ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
+ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
+ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
+ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
+ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
+ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
+ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },
+ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },
+ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },
+ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },
+ {{{0} } },
+ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },
+ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },
+ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },
+ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },
+ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },
+ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
+ {{{0} } },
+ {{{0} } },
+ {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
+ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },
+ {{{1, 0x3f00000, 0x3f01000, 0x168000} } }
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+unsigned qla82xx_crb_hub_agt[64] = {
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
+ 0,
+};
+
+/* Device states */
+char *qdev_state[] = {
+ "Unknown",
+ "Cold",
+ "Initializing",
+ "Ready",
+ "Need Reset",
+ "Need Quiescent",
+ "Failed",
+ "Quiescent",
+};
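/*
 * Editorial sketch (not in the patch): qdev_state[] is indexed by the IDC
 * device-state value read from firmware. A hedged helper showing how a caller
 * might translate that value safely; the function name is hypothetical and
 * the bounds check is the point of the example.
 */
static inline const char *example_qdev_state_str(uint32_t state)
{
	return state < ARRAY_SIZE(qdev_state) ? qdev_state[state] : "Unknown";
}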
+
+/*
+ * In: 'off' is offset from CRB space in 128M pci map
+ * Out: 'off' is 2M pci map addr
+ * side effect: lock crb window
+ */
+static void
+qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
+{
+ u32 win_read;
+
+ ha->crb_win = CRB_HI(*off);
+ writel(ha->crb_win,
+ (void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+
+ /* Read back value to make sure write has gone through before trying
+ * to use it.
+ */
+ win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ if (win_read != ha->crb_win) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "%s: Written crbwin (0x%x) != Read crbwin (0x%x), "
+ "off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
+ }
+ *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
+}
+
+static inline unsigned long
+qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
+{
+ /* See if we are currently pointing to the region we want to use next */
+ if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
+ /* No need to change window. PCIX and PCIE regs are
+ * present in both windows.
+ */
+ return off;
+ }
+
+ if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
+ /* We are in first CRB window */
+ if (ha->curr_window != 0)
+ WARN_ON(1);
+ return off;
+ }
+
+ if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
+ /* We are in second CRB window */
+ off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
+
+ if (ha->curr_window != 1)
+ return off;
+
+ /* We are in the QM or direct access
+ * register region - do nothing
+ */
+ if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
+ (off < QLA82XX_PCI_CAMQM_MAX))
+ return off;
+ }
+ /* strange address given */
+ qla_printk(KERN_WARNING, ha,
+ "%s: Warning: unm_nic_pci_set_crbwindow called with"
+ " an unknown address(%llx)\n", QLA2XXX_DRIVER_NAME, off);
+ return off;
+}
+
+int
+qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
+{
+ unsigned long flags = 0;
+ int rv;
+
+ rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
+
+ BUG_ON(rv == -1);
+
+ if (rv == 1) {
+ write_lock_irqsave(&ha->hw_lock, flags);
+ qla82xx_crb_win_lock(ha);
+ qla82xx_pci_set_crbwindow_2M(ha, &off);
+ }
+
+ writel(data, (void __iomem *)off);
+
+ if (rv == 1) {
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ }
+ return 0;
+}
+
+int
+qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
+{
+ unsigned long flags = 0;
+ int rv;
+ u32 data;
+
+ rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
+
+ BUG_ON(rv == -1);
+
+ if (rv == 1) {
+ write_lock_irqsave(&ha->hw_lock, flags);
+ qla82xx_crb_win_lock(ha);
+ qla82xx_pci_set_crbwindow_2M(ha, &off);
+ }
+ data = RD_REG_DWORD((void __iomem *)off);
+
+ if (rv == 1) {
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ }
+ return data;
+}
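/*
 * Editorial sketch (not part of the patch): qla82xx_rd_32()/qla82xx_wr_32()
 * take CRB offsets and handle window locking and switching internally, so a
 * caller can do a plain read-modify-write. The register offset and bit below
 * are placeholders for illustration only.
 */
static void example_crb_set_bit(struct qla_hw_data *ha, ulong crb_off, u32 bit)
{
	u32 val;

	val = qla82xx_rd_32(ha, crb_off);	/* read current register value */
	qla82xx_wr_32(ha, crb_off, val | bit);	/* write it back with bit set */
}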
+
+#define CRB_WIN_LOCK_TIMEOUT 100000000
+int qla82xx_crb_win_lock(struct qla_hw_data *ha)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore7 (CRB window lock) from PCI HW block */
+ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= CRB_WIN_LOCK_TIMEOUT)
+ return -1;
+ timeout++;
+ }
+ qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
+ return 0;
+}
+
+#define IDC_LOCK_TIMEOUT 100000000
+int qla82xx_idc_lock(struct qla_hw_data *ha)
+{
+ int i;
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore5 from PCI HW block */
+ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= IDC_LOCK_TIMEOUT)
+ return -1;
+
+ timeout++;
+
+ /* Yield CPU */
+ if (!in_interrupt())
+ schedule();
+ else {
+ for (i = 0; i < 20; i++)
+ cpu_relax();
+ }
+ }
+
+ return 0;
+}
+
+void qla82xx_idc_unlock(struct qla_hw_data *ha)
+{
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
+}
+
+int
+qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
+{
+ struct crb_128M_2M_sub_block_map *m;
+
+ if (*off >= QLA82XX_CRB_MAX)
+ return -1;
+
+ if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
+ *off = (*off - QLA82XX_PCI_CAMQM) +
+ QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
+ return 0;
+ }
+
+ if (*off < QLA82XX_PCI_CRBSPACE)
+ return -1;
+
+ *off -= QLA82XX_PCI_CRBSPACE;
+
+ /* Try direct map */
+ m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
+
+ if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
+ *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
+ return 0;
+ }
+ /* Not in direct map, use crb window */
+ return 1;
+}
+
+/* PCI Windowing for DDR regions. */
+#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
+ (((addr) <= (high)) && ((addr) >= (low)))
+/*
+ * check memory access boundary.
+ * used by test agent. support ddr access only for now
+ */
+static unsigned long
+qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
+ unsigned long long addr, int size)
+{
+ if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+ QLA82XX_ADDR_DDR_NET_MAX) ||
+ !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
+ QLA82XX_ADDR_DDR_NET_MAX) ||
+ ((size != 1) && (size != 2) && (size != 4) && (size != 8)))
+ return 0;
+ else
+ return 1;
+}
+
+int qla82xx_pci_set_window_warning_count;
+
+unsigned long
+qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
+{
+ int window;
+ u32 win_read;
+
+ if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+ QLA82XX_ADDR_DDR_NET_MAX)) {
+ /* DDR network side */
+ window = MN_WIN(addr);
+ ha->ddr_mn_window = window;
+ qla82xx_wr_32(ha,
+ ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
+ win_read = qla82xx_rd_32(ha,
+ ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
+ if ((win_read << 17) != window) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
+ __func__, window, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
+ } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
+ QLA82XX_ADDR_OCM0_MAX)) {
+ unsigned int temp1;
+ if ((addr & 0x00ff800) == 0xff800) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: QM access not handled.\n", __func__);
+ addr = -1UL;
+ }
+ window = OCM_WIN(addr);
+ ha->ddr_mn_window = window;
+ qla82xx_wr_32(ha,
+ ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
+ win_read = qla82xx_rd_32(ha,
+ ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
+ temp1 = ((window & 0x1FF) << 7) |
+ ((window & 0x0FFFE0000) >> 17);
+ if (win_read != temp1) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n",
+ __func__, temp1, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
+
+ } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
+ QLA82XX_P3_ADDR_QDR_NET_MAX)) {
+ /* QDR network side */
+ window = MS_WIN(addr);
+ ha->qdr_sn_window = window;
+ qla82xx_wr_32(ha,
+ ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
+ win_read = qla82xx_rd_32(ha,
+ ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
+ if (win_read != window) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n",
+ __func__, window, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
+ } else {
+ /*
+ * peg gdb frequently accesses memory that doesn't exist,
+ * this limits the chit chat so debugging isn't slowed down.
+ */
+ if ((qla82xx_pci_set_window_warning_count++ < 8) ||
+ (qla82xx_pci_set_window_warning_count%64 == 0)) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Warning:%s Unknown address range!\n", __func__,
+ QLA2XXX_DRIVER_NAME);
+ }
+ addr = -1UL;
+ }
+ return addr;
+}
+
+/* check if address is in the same windows as the previous access */
+static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
+ unsigned long long addr)
+{
+ int window;
+ unsigned long long qdr_max;
+
+ qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
+
+ /* DDR network side */
+ if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+ QLA82XX_ADDR_DDR_NET_MAX))
+ BUG();
+ else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
+ QLA82XX_ADDR_OCM0_MAX))
+ return 1;
+ else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
+ QLA82XX_ADDR_OCM1_MAX))
+ return 1;
+ else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
+ /* QDR network side */
+ window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
+ if (ha->qdr_sn_window == window)
+ return 1;
+ }
+ return 0;
+}
+
+static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
+ u64 off, void *data, int size)
+{
+ unsigned long flags;
+ void *addr = NULL;
+ int ret = 0;
+ u64 start;
+ uint8_t *mem_ptr = NULL;
+ unsigned long mem_base;
+ unsigned long mem_page;
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ /*
+ * If attempting to access unknown address or straddle hw windows,
+ * do not access.
+ */
+ start = qla82xx_pci_set_window(ha, off);
+ if ((start == -1UL) ||
+ (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ qla_printk(KERN_ERR, ha,
+ "%s out of bound pci memory access. "
+ "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
+ return -1;
+ }
+
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ mem_base = pci_resource_start(ha->pdev, 0);
+ mem_page = start & PAGE_MASK;
+ /* Map two pages whenever user tries to access addresses in two
+ * consecutive pages.
+ */
+ if (mem_page != ((start + size - 1) & PAGE_MASK))
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+ else
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+ if (mem_ptr == 0UL) {
+ *(u8 *)data = 0;
+ return -1;
+ }
+ addr = mem_ptr;
+ addr += start & (PAGE_SIZE - 1);
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ switch (size) {
+ case 1:
+ *(u8 *)data = readb(addr);
+ break;
+ case 2:
+ *(u16 *)data = readw(addr);
+ break;
+ case 4:
+ *(u32 *)data = readl(addr);
+ break;
+ case 8:
+ *(u64 *)data = readq(addr);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+static int
+qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
+ u64 off, void *data, int size)
+{
+ unsigned long flags;
+ void *addr = NULL;
+ int ret = 0;
+ u64 start;
+ uint8_t *mem_ptr = NULL;
+ unsigned long mem_base;
+ unsigned long mem_page;
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ /*
+ * If attempting to access unknown address or straddle hw windows,
+ * do not access.
+ */
+ start = qla82xx_pci_set_window(ha, off);
+ if ((start == -1UL) ||
+ (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ qla_printk(KERN_ERR, ha,
+ "%s out of bound pci memory access. "
+ "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
+ return -1;
+ }
+
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ mem_base = pci_resource_start(ha->pdev, 0);
+ mem_page = start & PAGE_MASK;
+ /* Map two pages whenever user tries to access addresses in two
+ * consecutive pages.
+ */
+ if (mem_page != ((start + size - 1) & PAGE_MASK))
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
+ else
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+ if (mem_ptr == 0UL)
+ return -1;
+
+ addr = mem_ptr;
+ addr += start & (PAGE_SIZE - 1);
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ switch (size) {
+ case 1:
+ writeb(*(u8 *)data, addr);
+ break;
+ case 2:
+ writew(*(u16 *)data, addr);
+ break;
+ case 4:
+ writel(*(u32 *)data, addr);
+ break;
+ case 8:
+ writeq(*(u64 *)data, addr);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+int
+qla82xx_wrmem(struct qla_hw_data *ha, u64 off, void *data, int size)
+{
+ int i, j, ret = 0, loop, sz[2], off0;
+ u32 temp;
+ u64 off8, mem_crb, tmpw, word[2] = {0, 0};
+#define MAX_CTL_CHECK 1000
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) {
+ mem_crb = QLA82XX_CRB_QDR_NET;
+ } else {
+ mem_crb = QLA82XX_CRB_DDR_NET;
+ if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla82xx_pci_mem_write_direct(ha, off,
+ data, size);
+ }
+
+ off8 = off & 0xfffffff8;
+ off0 = off & 0x7;
+ sz[0] = (size < (8 - off0)) ? size : (8 - off0);
+ sz[1] = size - sz[0];
+ loop = ((off0 + size - 1) >> 3) + 1;
+
+ if ((size != 8) || (off0 != 0)) {
+ for (i = 0; i < loop; i++) {
+ if (qla82xx_rdmem(ha, off8 + (i << 3), &word[i], 8))
+ return -1;
+ }
+ }
+
+ switch (size) {
+ case 1:
+ tmpw = *((u8 *)data);
+ break;
+ case 2:
+ tmpw = *((u16 *)data);
+ break;
+ case 4:
+ tmpw = *((u32 *)data);
+ break;
+ case 8:
+ default:
+ tmpw = *((u64 *)data);
+ break;
+ }
+
+ word[0] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
+ word[0] |= tmpw << (off0 * 8);
+
+ if (loop == 2) {
+ word[1] &= ~(~0ULL << (sz[1] * 8));
+ word[1] |= tmpw >> (sz[0] * 8);
+ }
+
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << 3);
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
+ temp = 0;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
+ temp = word[i] & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
+ temp = (word[i] >> 32) & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
+ temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Fail to write through agent\n",
+ QLA2XXX_DRIVER_NAME);
+ ret = -1;
+ break;
+ }
+ }
+ return ret;
+}
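/*
 * Editorial illustration (not part of the patch) of the partial-word merge
 * performed above: a sub-8-byte write is folded into the aligned 8-byte word
 * that the MIU test agent reads and writes. Standalone example, kept under
 * #if 0 so it is never built:
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t word = 0x1122334455667788ULL;	/* existing 8-byte word */
	uint64_t tmpw = 0xBBAAULL;		/* 2 bytes to be written */
	int off0 = 3, sz0 = 2;			/* byte offset 3, length 2 */

	word &= ~((~(~0ULL << (sz0 * 8))) << (off0 * 8));	/* clear bytes 3-4 */
	word |= tmpw << (off0 * 8);				/* merge new bytes */

	printf("0x%016llx\n", (unsigned long long)word);	/* 0x112233bbaa667788 */
	return 0;
}
#endif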
+
+int
+qla82xx_rdmem(struct qla_hw_data *ha, u64 off, void *data, int size)
+{
+ int i, j = 0, k, start, end, loop, sz[2], off0[2];
+ u32 temp;
+ u64 off8, val, mem_crb, word[2] = {0, 0};
+#define MAX_CTL_CHECK 1000
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+ mem_crb = QLA82XX_CRB_QDR_NET;
+ else {
+ mem_crb = QLA82XX_CRB_DDR_NET;
+ if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla82xx_pci_mem_read_direct(ha, off,
+ data, size);
+ }
+
+ off8 = off & 0xfffffff8;
+ off0[0] = off & 0x7;
+ off0[1] = 0;
+ sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
+ sz[1] = size - sz[0];
+ loop = ((off0[0] + size - 1) >> 3) + 1;
+
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << 3);
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
+ temp = 0;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
+ temp = MIU_TA_CTL_ENABLE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ qla_printk(KERN_INFO, ha,
+ "%s: Fail to read through agent\n",
+ QLA2XXX_DRIVER_NAME);
+ break;
+ }
+
+ start = off0[i] >> 2;
+ end = (off0[i] + sz[i] - 1) >> 2;
+ for (k = start; k <= end; k++) {
+ temp = qla82xx_rd_32(ha,
+ mem_crb + MIU_TEST_AGT_RDDATA(k));
+ word[i] |= ((u64)temp << (32 * k));
+ }
+ }
+
+ if (j >= MAX_CTL_CHECK)
+ return -1;
+
+ if (sz[0] == 8) {
+ val = word[0];
+ } else {
+ val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
+ ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
+ }
+
+ switch (size) {
+ case 1:
+ *(u8 *)data = val;
+ break;
+ case 2:
+ *(u16 *)data = val;
+ break;
+ case 4:
+ *(u32 *)data = val;
+ break;
+ case 8:
+ *(u64 *)data = val;
+ break;
+ }
+ return 0;
+}
+
+#define MTU_FUDGE_FACTOR 100
+unsigned long qla82xx_decode_crb_addr(unsigned long addr)
+{
+ int i;
+ unsigned long base_addr, offset, pci_base;
+
+ if (!qla82xx_crb_table_initialized)
+ qla82xx_crb_addr_transform_setup();
+
+ pci_base = ADDR_ERROR;
+ base_addr = addr & 0xfff00000;
+ offset = addr & 0x000fffff;
+
+ for (i = 0; i < MAX_CRB_XFORM; i++) {
+ if (crb_addr_xform[i] == base_addr) {
+ pci_base = i << 20;
+ break;
+ }
+ }
+ if (pci_base == ADDR_ERROR)
+ return pci_base;
+ return pci_base + offset;
+}
+
+static long rom_max_timeout = 100;
+static long qla82xx_rom_lock_timeout = 100;
+
+int
+qla82xx_rom_lock(struct qla_hw_data *ha)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore2 from PCI HW block */
+ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= qla82xx_rom_lock_timeout)
+ return -1;
+ timeout++;
+ }
+ qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
+ return 0;
+}
+
+int
+qla82xx_wait_rom_busy(struct qla_hw_data *ha)
+{
+ long timeout = 0;
+ long done = 0;
+
+ while (done == 0) {
+ done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
+ done &= 4;
+ timeout++;
+ if (timeout >= rom_max_timeout) {
+ DEBUG(qla_printk(KERN_INFO, ha,
+ "%s: Timeout reached waiting for rom busy",
+ QLA2XXX_DRIVER_NAME));
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int
+qla82xx_wait_rom_done(struct qla_hw_data *ha)
+{
+ long timeout = 0;
+ long done = 0;
+
+ while (done == 0) {
+ done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
+ done &= 2;
+ timeout++;
+ if (timeout >= rom_max_timeout) {
+ DEBUG(qla_printk(KERN_INFO, ha,
+ "%s: Timeout reached waiting for rom done",
+ QLA2XXX_DRIVER_NAME));
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int
+qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
+{
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ qla82xx_wait_rom_busy(ha);
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Error waiting for rom done\n",
+ QLA2XXX_DRIVER_NAME);
+ return -1;
+ }
+ /* Reset abyte_cnt and dummy_byte_cnt */
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ udelay(10);
+ cond_resched();
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+ *valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
+ return 0;
+}
+
+int
+qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
+{
+ int ret, loops = 0;
+
+ while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
+ udelay(100);
+ schedule();
+ loops++;
+ }
+ if (loops >= 50000) {
+ qla_printk(KERN_INFO, ha,
+ "%s: qla82xx_rom_lock failed\n",
+ QLA2XXX_DRIVER_NAME);
+ return -1;
+ }
+ ret = qla82xx_do_rom_fast_read(ha, addr, valp);
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+ return ret;
+}
+
+int
+qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
+{
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
+ qla82xx_wait_rom_busy(ha);
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "Error waiting for rom done\n");
+ return -1;
+ }
+ *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
+ return 0;
+}
+
+int
+qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
+{
+ long timeout = 0;
+ uint32_t done = 1;
+ uint32_t val;
+ int ret = 0;
+
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+ while ((done != 0) && (ret == 0)) {
+ ret = qla82xx_read_status_reg(ha, &val);
+ done = val & 1;
+ timeout++;
+ udelay(10);
+ cond_resched();
+ if (timeout >= 50000) {
+ qla_printk(KERN_WARNING, ha,
+ "Timeout reached waiting for write finish");
+ return -1;
+ }
+ }
+ return ret;
+}
+
+int
+qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
+{
+ uint32_t val;
+ qla82xx_wait_rom_busy(ha);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
+ qla82xx_wait_rom_busy(ha);
+ if (qla82xx_wait_rom_done(ha))
+ return -1;
+ if (qla82xx_read_status_reg(ha, &val) != 0)
+ return -1;
+ if ((val & 2) != 2)
+ return -1;
+ return 0;
+}
+
+int
+qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
+{
+ if (qla82xx_flash_set_write_enable(ha))
+ return -1;
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "Error waiting for rom done\n");
+ return -1;
+ }
+ return qla82xx_flash_wait_write_finish(ha);
+}
+
+int
+qla82xx_write_disable_flash(struct qla_hw_data *ha)
+{
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "Error waiting for rom done\n");
+ return -1;
+ }
+ return 0;
+}
+
+int
+ql82xx_rom_lock_d(struct qla_hw_data *ha)
+{
+ int loops = 0;
+ while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
+ udelay(100);
+ cond_resched();
+ loops++;
+ }
+ if (loops >= 50000) {
+ qla_printk(KERN_WARNING, ha, "ROM lock failed\n");
+ return -1;
+ }
+ return 0;
+}
+
+int
+qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
+ uint32_t data)
+{
+ int ret = 0;
+
+ ret = ql82xx_rom_lock_d(ha);
+ if (ret < 0) {
+ qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ return ret;
+ }
+
+ if (qla82xx_flash_set_write_enable(ha))
+ goto done_write;
+
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
+ qla82xx_wait_rom_busy(ha);
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "Error waiting for rom done\n");
+ ret = -1;
+ goto done_write;
+ }
+
+ ret = qla82xx_flash_wait_write_finish(ha);
+
+done_write:
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+ return ret;
+}
+
+/* This routine does CRB initialize sequence
+ * to put the ISP into operational state
+ */
+int qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
+{
+ int addr, val;
+ int i;
+ struct crb_addr_pair *buf;
+ unsigned long off;
+ unsigned offset, n;
+ struct qla_hw_data *ha = vha->hw;
+
+ struct crb_addr_pair {
+ long addr;
+ long data;
+ };
+
+ /* Halt all the individual PEGs and other blocks of the ISP */
+ qla82xx_rom_lock(ha);
+ if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+ /* don't reset CAM block on reset */
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+ else
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+
+ /* Read the signature value from the flash.
+ * Offset 0: Contains the signature (0xcafecafe)
+ * Offset 4: Offset and number of addr/value pairs
+ * present in the CRB initialization sequence
+ */
+ if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
+ qla82xx_rom_fast_read(ha, 4, &n) != 0) {
+ qla_printk(KERN_WARNING, ha,
+ "[ERROR] Reading crb_init area: n: %08x\n", n);
+ return -1;
+ }
+
+ /* Offset in flash = lower 16 bits
+ * Number of entries = upper 16 bits
+ */
+ offset = n & 0xffffU;
+ n = (n >> 16) & 0xffffU;
+
+ /* The number of addr/value pairs should not exceed 1024 entries */
+ if (n >= 1024) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
+ QLA2XXX_DRIVER_NAME, __func__, n);
+ return -1;
+ }
+
+ qla_printk(KERN_INFO, ha,
+ "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n);
+
+ buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: [ERROR] Unable to malloc memory.\n",
+ QLA2XXX_DRIVER_NAME);
+ return -1;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
+ qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
+ kfree(buf);
+ return -1;
+ }
+
+ buf[i].addr = addr;
+ buf[i].data = val;
+ }
+
+ for (i = 0; i < n; i++) {
+ /* Translate internal CRB initialization
+ * address to PCI bus address
+ */
+ off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
+ QLA82XX_PCI_CRBSPACE;
+ /* Not all CRB addr/value pairs need to be written;
+ * some of them are skipped
+ */
+
+ /* skipping cold reboot MAGIC */
+ if (off == QLA82XX_CAM_RAM(0x1fc))
+ continue;
+
+ /* do not reset PCI */
+ if (off == (ROMUSB_GLB + 0xbc))
+ continue;
+
+ /* skip core clock, so that firmware can increase the clock */
+ if (off == (ROMUSB_GLB + 0xc8))
+ continue;
+
+ /* skip the function enable register */
+ if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
+ continue;
+
+ if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
+ continue;
+
+ if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
+ continue;
+
+ if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
+ continue;
+
+ if (off == ADDR_ERROR) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: [ERROR] Unknown addr: 0x%08lx\n",
+ QLA2XXX_DRIVER_NAME, buf[i].addr);
+ continue;
+ }
+
+ if (off == (QLA82XX_CRB_PEG_NET_1 + 0x18)) {
+ if (!QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision))
+ buf[i].data = 0x1020;
+ }
+
+ qla82xx_wr_32(ha, off, buf[i].data);
+
+ /* The ISP requires a much longer delay to settle down;
+ * otherwise crb_window returns 0xffffffff
+ */
+ if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
+ msleep(1000);
+
+ /* The ISP requires a millisecond delay between
+ * successive CRB register updates
+ */
+ msleep(1);
+ }
+
+ kfree(buf);
+
+ /* Resetting the data and instruction cache */
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
+
+ /* Clear all protocol processing engines */
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
+ return 0;
+}
+
+int qla82xx_check_for_bad_spd(struct qla_hw_data *ha)
+{
+ u32 val = 0;
+ val = qla82xx_rd_32(ha, BOOT_LOADER_DIMM_STATUS);
+ val &= QLA82XX_BOOT_LOADER_MN_ISSUE;
+ if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) {
+ qla_printk(KERN_INFO, ha,
+ "Memory DIMM SPD not programmed. "
+ " Assumed valid.\n");
+ return 1;
+ } else if (val) {
+ qla_printk(KERN_INFO, ha,
+ "Memory DIMM type incorrect.Info:%08X.\n", val);
+ return 2;
+ }
+ return 0;
+}
+
+int
+qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
+{
+ int i;
+ long size = 0;
+ long flashaddr = BOOTLD_START, memaddr = BOOTLD_START;
+ u64 data;
+ u32 high, low;
+ size = (IMAGE_START - BOOTLD_START) / 8;
+
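+ /* size is the number of 8-byte words in the bootloader region
+ * (IMAGE_START - BOOTLD_START bytes); each iteration copies one such
+ * word from flash into adapter memory.
+ */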
+ for (i = 0; i < size; i++) {
+ if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
+ (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
+ return -1;
+ }
+ data = ((u64)high << 32) | low ;
+ qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
+ flashaddr += 8;
+ memaddr += 8;
+
+ if (i % 0x1000 == 0)
+ msleep(1);
+ }
+ udelay(100);
+ read_lock(&ha->hw_lock);
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
+ } else {
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001d);
+ }
+ read_unlock(&ha->hw_lock);
+ return 0;
+}
+
+int
+qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
+ u64 off, void *data, int size)
+{
+ int i, j = 0, k, start, end, loop, sz[2], off0[2];
+ int shift_amount;
+ uint32_t temp;
+ uint64_t off8, val, mem_crb, word[2] = {0, 0};
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+
+ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+ mem_crb = QLA82XX_CRB_QDR_NET;
+ else {
+ mem_crb = QLA82XX_CRB_DDR_NET;
+ if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla82xx_pci_mem_read_direct(ha,
+ off, data, size);
+ }
+
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ off8 = off & 0xfffffff0;
+ off0[0] = off & 0xf;
+ sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
+ shift_amount = 4;
+ } else {
+ off8 = off & 0xfffffff8;
+ off0[0] = off & 0x7;
+ sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
+ shift_amount = 4;
+ }
+ loop = ((off0[0] + size - 1) >> shift_amount) + 1;
+ off0[1] = 0;
+ sz[1] = size - sz[0];
+
+ /*
+ * don't lock here - write_wx gets the lock each time:
+ * write_lock_irqsave(&adapter->adapter_lock, flags);
+ * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
+ */
+
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << shift_amount);
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
+ temp = 0;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
+ temp = MIU_TA_CTL_ENABLE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&ha->pdev->dev,
+ "failed to read through agent\n");
+ break;
+ }
+
+ start = off0[i] >> 2;
+ end = (off0[i] + sz[i] - 1) >> 2;
+ for (k = start; k <= end; k++) {
+ temp = qla82xx_rd_32(ha,
+ mem_crb + MIU_TEST_AGT_RDDATA(k));
+ word[i] |= ((uint64_t)temp << (32 * (k & 1)));
+ }
+ }
+
+ /*
+ * netxen_nic_pci_change_crbwindow_128M(adapter, 1);
+ * write_unlock_irqrestore(&adapter->adapter_lock, flags);
+ */
+
+ if (j >= MAX_CTL_CHECK)
+ return -1;
+
+ if ((off0[0] & 7) == 0) {
+ val = word[0];
+ } else {
+ val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
+ ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
+ }
+
+ switch (size) {
+ case 1:
+ *(uint8_t *)data = val;
+ break;
+ case 2:
+ *(uint16_t *)data = val;
+ break;
+ case 4:
+ *(uint32_t *)data = val;
+ break;
+ case 8:
+ *(uint64_t *)data = val;
+ break;
+ }
+ return 0;
+}
+
+int
+qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
+ u64 off, void *data, int size)
+{
+ int i, j, ret = 0, loop, sz[2], off0;
+ int scale, shift_amount, p3p, startword;
+ uint32_t temp;
+ uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+ mem_crb = QLA82XX_CRB_QDR_NET;
+ else {
+ mem_crb = QLA82XX_CRB_DDR_NET;
+ if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla82xx_pci_mem_write_direct(ha,
+ off, data, size);
+ }
+
+ off0 = off & 0x7;
+ sz[0] = (size < (8 - off0)) ? size : (8 - off0);
+ sz[1] = size - sz[0];
+
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ off8 = off & 0xfffffff0;
+ loop = (((off & 0xf) + size - 1) >> 4) + 1;
+ shift_amount = 4;
+ scale = 2;
+ p3p = 1;
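+ /* For P3+ parts the 16-byte window holds two 64-bit words;
+ * startword (0 or 1) selects the word in which the write begins.
+ */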
+ startword = (off & 0xf)/8;
+ } else {
+ off8 = off & 0xfffffff8;
+ loop = ((off0 + size - 1) >> 3) + 1;
+ shift_amount = 3;
+ scale = 1;
+ p3p = 0;
+ startword = 0;
+ }
+
+ if (p3p || (size != 8) || (off0 != 0)) {
+ for (i = 0; i < loop; i++) {
+ if (qla82xx_pci_mem_read_2M(ha, off8 +
+ (i << shift_amount), &word[i * scale], 8))
+ return -1;
+ }
+ }
+
+ switch (size) {
+ case 1:
+ tmpw = *((uint8_t *)data);
+ break;
+ case 2:
+ tmpw = *((uint16_t *)data);
+ break;
+ case 4:
+ tmpw = *((uint32_t *)data);
+ break;
+ case 8:
+ default:
+ tmpw = *((uint64_t *)data);
+ break;
+ }
+
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ if (sz[0] == 8) {
+ word[startword] = tmpw;
+ } else {
+ word[startword] &=
+ ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
+ word[startword] |= tmpw << (off0 * 8);
+ }
+ if (sz[1] != 0) {
+ word[startword+1] &= ~(~0ULL << (sz[1] * 8));
+ word[startword+1] |= tmpw >> (sz[0] * 8);
+ }
+ } else {
+ word[startword] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
+ word[startword] |= tmpw << (off0 * 8);
+
+ if (loop == 2) {
+ word[1] &= ~(~0ULL << (sz[1] * 8));
+ word[1] |= tmpw >> (sz[0] * 8);
+ }
+ }
+
+ /*
+ * don't lock here - write_wx gets the lock each time:
+ * write_lock_irqsave(&adapter->adapter_lock, flags);
+ * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
+ */
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << shift_amount);
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
+ temp = 0;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
+ temp = word[i * scale] & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
+ temp = (word[i * scale] >> 32) & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ temp = word[i*scale + 1] & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb +
+ MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
+ temp = (word[i*scale + 1] >> 32) & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb +
+ MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
+ }
+
+ temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&ha->pdev->dev,
+ "failed to write through agent\n");
+ ret = -1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* PCI related functions */
+char *
+qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
+{
+ int pcie_reg;
+ struct qla_hw_data *ha = vha->hw;
+ char lwstr[6];
+ uint16_t lnk;
+
+ pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
+ pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
+ ha->link_width = (lnk >> 4) & 0x3f;
+
+ strcpy(str, "PCIe (");
+ strcat(str, "2.5Gb/s ");
+ snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
+ strcat(str, lwstr);
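+ /* str now reads, e.g., "PCIe (2.5Gb/s x4)" for a x4 link. */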
+ return str;
+}
+
+int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
+{
+ unsigned long val = 0;
+ u32 control;
+
+ switch (region) {
+ case 0:
+ val = 0;
+ break;
+ case 1:
+ pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
+ val = control + QLA82XX_MSIX_TBL_SPACE;
+ break;
+ }
+ return val;
+}
+
+int qla82xx_pci_region_len(struct pci_dev *pdev, int region)
+{
+ unsigned long val = 0;
+ u32 control;
+ switch (region) {
+ case 0:
+ pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
+ val = control;
+ break;
+ case 1:
+ val = pci_resource_len(pdev, 0) -
+ qla82xx_pci_region_offset(pdev, 1);
+ break;
+ }
+ return val;
+}
+
+int
+qla82xx_iospace_config(struct qla_hw_data *ha)
+{
+ uint32_t len = 0;
+
+ if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
+ qla_printk(KERN_WARNING, ha,
+ "Failed to reserve selected regions (%s)\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ /* Use MMIO operations for all accesses. */
+ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
+ qla_printk(KERN_ERR, ha,
+ "region #0 not an MMIO resource (%s), aborting\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ len = pci_resource_len(ha->pdev, 0);
+ ha->nx_pcibase =
+ (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
+ if (!ha->nx_pcibase) {
+ qla_printk(KERN_ERR, ha,
+ "cannot remap pcibase MMIO (%s), aborting\n",
+ pci_name(ha->pdev));
+ pci_release_regions(ha->pdev);
+ goto iospace_error_exit;
+ }
+
+ /* Mapping of IO base pointer */
+ ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
+ 0xbc000 + (ha->pdev->devfn << 11));
+
+ if (!ql2xdbwr) {
+ ha->nxdb_wr_ptr =
+ (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
+ (ha->pdev->devfn << 12)), 4);
+ if (!ha->nxdb_wr_ptr) {
+ qla_printk(KERN_ERR, ha,
+ "cannot remap MMIO (%s), aborting\n",
+ pci_name(ha->pdev));
+ pci_release_regions(ha->pdev);
+ goto iospace_error_exit;
+ }
+
+ /* Mapping of IO base pointer,
+ * door bell read and write pointer
+ */
+ ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
+ (ha->pdev->devfn * 8);
+ } else {
+ ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
+ QLA82XX_CAMRAM_DB1 :
+ QLA82XX_CAMRAM_DB2);
+ }
+
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+ ha->msix_count = ha->max_rsp_queues + 1;
+ return 0;
+
+iospace_error_exit:
+ return -ENOMEM;
+}
+
+/* GS related functions */
+
+/* Initialization related functions */
+
+/**
+ * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla82xx_pci_config(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int ret;
+
+ pci_set_master(ha->pdev);
+ ret = pci_set_mwi(ha->pdev);
+ ha->chip_revision = ha->pdev->revision;
+ return 0;
+}
+
+/**
+ * qla82xx_reset_chip() - Disable interrupts in preparation for an ISP82xx chip reset.
+ * @vha: HA context
+ *
+ */
+void
+qla82xx_reset_chip(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ ha->isp_ops->disable_intrs(ha);
+}
+
+void qla82xx_config_rings(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+ struct init_cb_81xx *icb;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+
+ /* Setup ring parameters in initialization control block. */
+ icb = (struct init_cb_81xx *)ha->init_cb;
+ icb->request_q_outpointer = __constant_cpu_to_le16(0);
+ icb->response_q_inpointer = __constant_cpu_to_le16(0);
+ icb->request_q_length = cpu_to_le16(req->length);
+ icb->response_q_length = cpu_to_le16(rsp->length);
+ icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
+ icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
+ icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
+ icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+
+ icb->version = 1;
+ icb->frame_payload_size = 2112;
+ icb->execution_throttle = 8;
+ icb->exchange_count = 128;
+ icb->login_retry_count = 8;
+
+ WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0);
+ WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0);
+ WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
+}
+
+void qla82xx_reset_adapter(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ vha->flags.online = 0;
+ qla2x00_try_to_stop_firmware(vha);
+ ha->isp_ops->disable_intrs(ha);
+}
+
+int qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
+{
+ u64 *ptr64;
+ u32 i, flashaddr, size;
+ __le64 data;
+
+ size = (IMAGE_START - BOOTLD_START) / 8;
+
+ ptr64 = (u64 *)&ha->hablob->fw->data[BOOTLD_START];
+ flashaddr = BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+ qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8);
+ flashaddr += 8;
+ }
+
+ size = *(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET];
+ size = (__force u32)cpu_to_le32(size) / 8;
+ ptr64 = (u64 *)&ha->hablob->fw->data[IMAGE_START];
+ flashaddr = FLASH_ADDR_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
+ return -EIO;
+ flashaddr += 8;
+ }
+
+ /* Write a magic value to CAMRAM register
+ * at a specified offset to indicate
+ * that all data is written and
+ * ready for firmware to initialize.
+ */
+ qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), 0x12345678);
+
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
+ } else
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001d);
+ return 0;
+}
+
+int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
+{
+ u32 val = 0;
+ int retries = 60;
+
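+ /* Poll CRB_CMDPEG_STATE every 500 ms; with 60 retries this gives the
+ * firmware roughly 30 seconds to report completion.
+ */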
+ do {
+ read_lock(&ha->hw_lock);
+ val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
+ read_unlock(&ha->hw_lock);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return QLA_SUCCESS;
+ case PHAN_INITIALIZE_FAILED:
+ break;
+ default:
+ break;
+ }
+ qla_printk(KERN_WARNING, ha,
+ "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n",
+ val, retries);
+
+ msleep(500);
+
+ } while (--retries);
+
+ qla_printk(KERN_INFO, ha,
+ "Cmd Peg initialization failed: 0x%x.\n", val);
+
+ qla82xx_check_for_bad_spd(ha);
+ val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
+ read_lock(&ha->hw_lock);
+ qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+ read_unlock(&ha->hw_lock);
+ return QLA_FUNCTION_FAILED;
+}
+
+int qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
+{
+ u32 val = 0;
+ int retries = 60;
+
+ do {
+ read_lock(&ha->hw_lock);
+ val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
+ read_unlock(&ha->hw_lock);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return QLA_SUCCESS;
+ case PHAN_INITIALIZE_FAILED:
+ break;
+ default:
+ break;
+ }
+
+ qla_printk(KERN_WARNING, ha,
+ "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n",
+ val, retries);
+
+ msleep(500);
+
+ } while (--retries);
+
+ qla_printk(KERN_INFO, ha,
+ "Rcv Peg initialization failed: 0x%x.\n", val);
+ read_lock(&ha->hw_lock);
+ qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
+ read_unlock(&ha->hw_lock);
+ return QLA_FUNCTION_FAILED;
+}
+
+/* ISR related functions */
+uint32_t qla82xx_isr_int_target_mask_enable[8] = {
+ ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
+ ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
+ ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
+ ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
+};
+
+uint32_t qla82xx_isr_int_target_status[8] = {
+ ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+ ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+ ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+ ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
+};
+
+static struct qla82xx_legacy_intr_set legacy_intr[] = \
+ QLA82XX_LEGACY_INTR_CONFIG;
+
+/*
+ * qla82xx_mbx_completion() - Process mailbox command completions.
+ * @vha: SCSI driver HA context
+ * @mb0: Mailbox0 register
+ */
+void
+qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
+{
+ uint16_t cnt;
+ uint16_t __iomem *wptr;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+ wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
+
+ /* Load return mailbox registers. */
+ ha->flags.mbox_int = 1;
+ ha->mailbox_out[0] = mb0;
+
+ for (cnt = 1; cnt < ha->mbx_count; cnt++) {
+ ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+ wptr++;
+ }
+
+ if (ha->mcp) {
+ DEBUG3_11(printk(KERN_INFO "%s(%ld): "
+ "Got mailbox completion. cmd=%x.\n",
+ __func__, vha->host_no, ha->mcp->mb[0]));
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "%s(%ld): MBX pointer ERROR!\n",
+ __func__, vha->host_no);
+ }
+}
+
+/*
+ * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
+ * @irq:
+ * @dev_id: SCSI driver HA context
+ * @regs:
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla82xx_intr_handler(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+ int status = 0, status1 = 0;
+ unsigned long flags;
+ unsigned long iter;
+ uint32_t stat;
+ uint16_t mb[4];
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
+
+ if (!ha->flags.msi_enabled) {
+ status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
+ if (!(status & ha->nx_legacy_intr.int_vec_bit))
+ return IRQ_NONE;
+
+ status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
+ if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
+ return IRQ_NONE;
+ }
+
+ /* clear the interrupt */
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
+
+ /* read twice to ensure write is flushed */
+ qla82xx_rd_32(ha, ISR_INT_VECTOR);
+ qla82xx_rd_32(ha, ISR_INT_VECTOR);
+
+ reg = &ha->iobase->isp82;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ for (iter = 1; iter--; ) {
+
+ if (RD_REG_DWORD(&reg->host_int)) {
+ stat = RD_REG_DWORD(&reg->host_status);
+ if ((stat & HSRX_RISC_INT) == 0)
+ break;
+
+ switch (stat & 0xff) {
+ case 0x1:
+ case 0x2:
+ case 0x10:
+ case 0x11:
+ qla82xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+ break;
+ case 0x12:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+ mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+ mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x13:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ DEBUG2(printk("scsi(%ld): "
+ " Unrecognized interrupt type (%d).\n",
+ vha->host_no, stat & 0xff));
+ break;
+ }
+ }
+ WRT_REG_DWORD(&reg->host_int, 0);
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (!ha->flags.msi_enabled)
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+
+#ifdef QL_DEBUG_LEVEL_17
+ if (!irq && ha->flags.eeh_busy)
+ qla_printk(KERN_WARNING, ha,
+ "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
+ status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
+#endif
+
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+ (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
+ return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla82xx_msix_default(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+ int status = 0;
+ unsigned long flags;
+ uint32_t stat;
+ uint16_t mb[4];
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
+
+ reg = &ha->iobase->isp82;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ do {
+ if (RD_REG_DWORD(&reg->host_int)) {
+ stat = RD_REG_DWORD(&reg->host_status);
+ if ((stat & HSRX_RISC_INT) == 0)
+ break;
+
+ switch (stat & 0xff) {
+ case 0x1:
+ case 0x2:
+ case 0x10:
+ case 0x11:
+ qla82xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+ break;
+ case 0x12:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+ mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+ mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x13:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ DEBUG2(printk("scsi(%ld): "
+ " Unrecognized interrupt type (%d).\n",
+ vha->host_no, stat & 0xff));
+ break;
+ }
+ }
+ WRT_REG_DWORD(&reg->host_int, 0);
+ } while (0);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+#ifdef QL_DEBUG_LEVEL_17
+ if (!irq && ha->flags.eeh_busy)
+ qla_printk(KERN_WARNING, ha,
+ "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
+ status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
+#endif
+
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+ (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
+ return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla82xx_msix_rsp_q(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ ha = rsp->hw;
+ reg = &ha->iobase->isp82;
+ spin_lock_irq(&ha->hardware_lock);
+ vha = pci_get_drvdata(ha->pdev);
+ qla24xx_process_response_queue(vha, rsp);
+ WRT_REG_DWORD(&reg->host_int, 0);
+ spin_unlock_irq(&ha->hardware_lock);
+ return IRQ_HANDLED;
+}
+
+void
+qla82xx_poll(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+ int status = 0;
+ uint32_t stat;
+ uint16_t mb[4];
+ unsigned long flags;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer\n", __func__);
+ return;
+ }
+ ha = rsp->hw;
+
+ reg = &ha->iobase->isp82;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+
+ if (RD_REG_DWORD(&reg->host_int)) {
+ stat = RD_REG_DWORD(&reg->host_status);
+ switch (stat & 0xff) {
+ case 0x1:
+ case 0x2:
+ case 0x10:
+ case 0x11:
+ qla82xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+ break;
+ case 0x12:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+ mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+ mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x13:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
+ "(%d).\n",
+ vha->host_no, stat & 0xff));
+ break;
+ }
+ }
+ WRT_REG_DWORD(&reg->host_int, 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qla82xx_enable_intrs(struct qla_hw_data *ha)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ qla82xx_mbx_intr_enable(vha);
+ spin_lock_irq(&ha->hardware_lock);
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+ spin_unlock_irq(&ha->hardware_lock);
+ ha->interrupts_on = 1;
+}
+
+void
+qla82xx_disable_intrs(struct qla_hw_data *ha)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ qla82xx_mbx_intr_disable(vha);
+ spin_lock_irq(&ha->hardware_lock);
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
+ spin_unlock_irq(&ha->hardware_lock);
+ ha->interrupts_on = 0;
+}
+
+void qla82xx_init_flags(struct qla_hw_data *ha)
+{
+ struct qla82xx_legacy_intr_set *nx_legacy_intr;
+
+ /* ISP 8021 initializations */
+ rwlock_init(&ha->hw_lock);
+ ha->qdr_sn_window = -1;
+ ha->ddr_mn_window = -1;
+ ha->curr_window = 255;
+ ha->portnum = PCI_FUNC(ha->pdev->devfn);
+ nx_legacy_intr = &legacy_intr[ha->portnum];
+ ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
+ ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
+ ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
+ ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
+}
+
+static inline void
+qla82xx_set_drv_active(scsi_qla_host_t *vha)
+{
+ uint32_t drv_active;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+
+ /* If reset value is all FF's, initialize DRV_ACTIVE */
+ if (drv_active == 0xffffffff) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, 0);
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ }
+ drv_active |= (1 << (ha->portnum * 4));
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
+}
+
+inline void
+qla82xx_clear_drv_active(struct qla_hw_data *ha)
+{
+ uint32_t drv_active;
+
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ drv_active &= ~(1 << (ha->portnum * 4));
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
+}
+
+static inline int
+qla82xx_need_reset(struct qla_hw_data *ha)
+{
+ uint32_t drv_state;
+ int rval;
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ rval = drv_state & (1 << (ha->portnum * 4));
+ return rval;
+}
+
+static inline void
+qla82xx_set_rst_ready(struct qla_hw_data *ha)
+{
+ uint32_t drv_state;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+
+ /* If reset value is all FF's, initialize DRV_STATE */
+ if (drv_state == 0xffffffff) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ }
+ drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
+ qla_printk(KERN_INFO, ha,
+ "%s(%ld):drv_state = 0x%x\n",
+ __func__, vha->host_no, drv_state);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
+}
+
+static inline void
+qla82xx_clear_rst_ready(struct qla_hw_data *ha)
+{
+ uint32_t drv_state;
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
+}
+
+static inline void
+qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
+{
+ uint32_t qsnt_state;
+
+ qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
+}
+
+int qla82xx_load_fw(scsi_qla_host_t *vha)
+{
+ int rst;
+ struct fw_blob *blob;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Put both the PEG CMD and RCV PEG to default state
+ * of 0 before resetting the hardware
+ */
+ qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
+ qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
+
+ if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
+ qla_printk(KERN_ERR, ha,
+ "%s: Error during CRB Initialization\n", __func__);
+ return QLA_FUNCTION_FAILED;
+ }
+ udelay(500);
+
+ /* Bring QM and CAMRAM out of reset */
+ rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
+ rst &= ~((1 << 28) | (1 << 24));
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
+
+ /*
+ * FW Load priority:
+ * 1) Operational firmware residing in flash.
+ * 2) Firmware via request-firmware interface (.bin file).
+ */
+ if (ql2xfwloadbin == 2)
+ goto try_blob_fw;
+
+ qla_printk(KERN_INFO, ha,
+ "Attempting to load firmware from flash\n");
+
+ if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
+ qla_printk(KERN_ERR, ha,
+ "Firmware loaded successfully from flash\n");
+ return QLA_SUCCESS;
+ }
+try_blob_fw:
+ qla_printk(KERN_INFO, ha,
+ "Attempting to load firmware from blob\n");
+
+ /* Load firmware blob. */
+ blob = ha->hablob = qla2x00_request_firmware(vha);
+ if (!blob) {
+ qla_printk(KERN_ERR, ha,
+ "Firmware image not present.\n");
+ goto fw_load_failed;
+ }
+
+ if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
+ qla_printk(KERN_ERR, ha,
+ "%s: Firmware loaded successfully "
+ " from binary blob\n", __func__);
+ return QLA_SUCCESS;
+ } else {
+ qla_printk(KERN_ERR, ha,
+ "Firmware load failed from binary blob\n");
+ blob->fw = NULL;
+ blob = NULL;
+ goto fw_load_failed;
+ }
+ return QLA_SUCCESS;
+
+fw_load_failed:
+ return QLA_FUNCTION_FAILED;
+}
+
+static int
+qla82xx_start_firmware(scsi_qla_host_t *vha)
+{
+ int pcie_cap;
+ uint16_t lnk;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* scrub dma mask expansion register */
+ qla82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);
+
+ /* Overwrite stale initialization register values */
+ qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
+ qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
+
+ if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
+ qla_printk(KERN_INFO, ha,
+ "%s: Error trying to start fw!\n", __func__);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Handshake with the card before we register the devices. */
+ if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
+ qla_printk(KERN_INFO, ha,
+ "%s: Error during card handshake!\n", __func__);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Negotiated Link width */
+ pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
+ pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
+ ha->link_width = (lnk >> 4) & 0x3f;
+
+ /* Synchronize with Receive peg */
+ return qla82xx_check_rcvpeg_state(ha);
+}
+
+static inline int
+qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
+ uint16_t tot_dsds)
+{
+ uint32_t *cur_dsd = NULL;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct scsi_cmnd *cmd;
+ struct scatterlist *cur_seg;
+ uint32_t *dsd_seg;
+ void *next_dsd;
+ uint8_t avail_dsds;
+ uint8_t first_iocb = 1;
+ uint32_t dsd_list_len;
+ struct dsd_dma *dsd_ptr;
+ struct ct6_dsd *ctx;
+
+ cmd = sp->cmd;
+
+ /* Update entry type to indicate Command Type 6 IOCB */
+ *((uint32_t *)(&cmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(COMMAND_TYPE_6);
+
+ /* No data transfer */
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return 0;
+ }
+
+ vha = sp->fcport->vha;
+ ha = vha->hw;
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_WRITE_DATA);
+ ha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_READ_DATA);
+ ha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ }
+
+ cur_seg = scsi_sglist(cmd);
+ ctx = sp->ctx;
+
+ while (tot_dsds) {
+ avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : tot_dsds;
+ tot_dsds -= avail_dsds;
+ dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
+
+ dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
+ struct dsd_dma, list);
+ next_dsd = dsd_ptr->dsd_addr;
+ list_del(&dsd_ptr->list);
+ ha->gbl_dsd_avail--;
+ list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
+ ctx->dsd_use_cnt++;
+ ha->gbl_dsd_inuse++;
+
+ if (first_iocb) {
+ first_iocb = 0;
+ dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+ *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *dsd_seg++ = dsd_list_len;
+ } else {
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = dsd_list_len;
+ }
+ cur_dsd = (uint32_t *)next_dsd;
+ while (avail_dsds) {
+ dma_addr_t sle_dma;
+
+ sle_dma = sg_dma_address(cur_seg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
+ cur_seg++;
+ avail_dsds--;
+ }
+ }
+
+ /* Null termination */
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
+ return 0;
+}
+
+/*
+ * qla82xx_calc_dsd_lists() - Determine the number of DSD lists required
+ * for Command Type 6.
+ *
+ * @dsds: number of data segment descriptors needed
+ *
+ * Returns the number of DSD lists needed to store @dsds.
+ */
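+/* Illustrative example (the value of QLA_DSDS_PER_IOCB is assumed here
+ * purely for the arithmetic): if QLA_DSDS_PER_IOCB were 37, then 74
+ * descriptors would need 2 DSD lists and 75 would need 3.
+ */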
+inline uint16_t
+qla82xx_calc_dsd_lists(uint16_t dsds)
+{
+ uint16_t dsd_lists = 0;
+
+ dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
+ if (dsds % QLA_DSDS_PER_IOCB)
+ dsd_lists++;
+ return dsd_lists;
+}
+
+/*
+ * qla82xx_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla82xx_start_scsi(srb_t *sp)
+{
+ int ret, nseg;
+ unsigned long flags;
+ struct scsi_cmnd *cmd;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct device_reg_82xx __iomem *reg;
+ uint32_t dbval;
+ uint32_t *fcp_dl;
+ uint8_t additional_cdb_len;
+ struct ct6_dsd *ctx;
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+
+ /* Setup device pointers. */
+ ret = 0;
+ reg = &ha->iobase->isp82;
+ cmd = sp->cmd;
+ req = vha->req;
+ rsp = ha->rsp_q_map[0];
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ dbval = 0x04 | (ha->portnum << 5);
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req,
+ rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ handle++;
+ if (handle == MAX_OUTSTANDING_COMMANDS)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == MAX_OUTSTANDING_COMMANDS)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else
+ nseg = 0;
+
+ tot_dsds = nseg;
+
+ if (tot_dsds > ql2xshiftctondsd) {
+ struct cmd_type_6 *cmd_pkt;
+ uint16_t more_dsd_lists = 0;
+ struct dsd_dma *dsd_ptr;
+ uint16_t i;
+
+ more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
+ if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN)
+ goto queuing_error;
+
+ if (more_dsd_lists <= ha->gbl_dsd_avail)
+ goto sufficient_dsds;
+ else
+ more_dsd_lists -= ha->gbl_dsd_avail;
+
+ for (i = 0; i < more_dsd_lists; i++) {
+ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+ if (!dsd_ptr)
+ goto queuing_error;
+
+ dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+ GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+ if (!dsd_ptr->dsd_addr) {
+ kfree(dsd_ptr);
+ goto queuing_error;
+ }
+ list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_avail++;
+ }
+
+sufficient_dsds:
+ req_cnt = 1;
+
+ ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+ if (!sp->ctx) {
+ DEBUG(printk(KERN_INFO
+ "%s(%ld): failed to allocate"
+ " ctx.\n", __func__, vha->host_no));
+ goto queuing_error;
+ }
+ memset(ctx, 0, sizeof(struct ct6_dsd));
+ ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
+ GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+ if (!ctx->fcp_cmnd) {
+ DEBUG2_3(printk("%s(%ld): failed to allocate"
+ " fcp_cmnd.\n", __func__, vha->host_no));
+ goto queuing_error_fcp_cmnd;
+ }
+
+ /* Initialize the DSD list and dma handle */
+ INIT_LIST_HEAD(&ctx->dsd_list);
+ ctx->dsd_use_cnt = 0;
+
+ if (cmd->cmd_len > 16) {
+ additional_cdb_len = cmd->cmd_len - 16;
+ if ((cmd->cmd_len % 4) != 0) {
+ /* SCSI commands bigger than 16 bytes must be
+ * a multiple of 4
+ */
+ goto queuing_error_fcp_cmnd;
+ }
+ ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_cdb_len = 0;
+ ctx->fcp_cmnd_len = 12 + 16 + 4;
+ }
+
+ cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+ /* Build IOCB segments */
+ if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
+ goto queuing_error_fcp_cmnd;
+
+ int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+
+ /* build FCP_CMND IU */
+ memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
+ ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+ memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+ fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
+ additional_cdb_len);
+ *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+ cmd_pkt->fcp_cmnd_dseg_address[0] =
+ cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
+ cmd_pkt->fcp_cmnd_dseg_address[1] =
+ cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
+
+ sp->flags |= SRB_FCP_CMND_DMA_VALID;
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where
+ * completion should happen
+ */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+ } else {
+ struct cmd_type_7 *cmd_pkt;
+ req_cnt = qla24xx_calc_iocbs(tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ &reg->req_q_out[0]);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+
+ cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+ int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
+ sizeof(cmd_pkt->lun));
+
+ /* Load SCSI command packet. */
+ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+ host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+ /* Build IOCB segments */
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where
+ * completion should happen.
+ */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+
+ }
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ /* write, read and verify logic */
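+ /* With ql2xdbwr set, the doorbell value is written to the CAMRAM
+ * doorbell register through the CRB window; otherwise it is written to
+ * the memory-mapped doorbell and read back via nxdb_rd_ptr until the
+ * hardware reflects it.
+ */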
+ dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+ if (ql2xdbwr)
+ qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+ else {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ }
+ }
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ if (sp->ctx) {
+ mempool_free(sp->ctx, ha->ctx_mempool);
+ sp->ctx = NULL;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
+uint32_t *
+qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+ uint32_t length)
+{
+ uint32_t i;
+ uint32_t val;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Dword reads to flash. */
+ for (i = 0; i < length/4; i++, faddr += 4) {
+ if (qla82xx_rom_fast_read(ha, faddr, &val)) {
+ qla_printk(KERN_WARNING, ha,
+ "Do ROM fast read failed\n");
+ goto done_read;
+ }
+ dwptr[i] = __constant_cpu_to_le32(val);
+ }
+done_read:
+ return dwptr;
+}
+
+int
+qla82xx_unprotect_flash(struct qla_hw_data *ha)
+{
+ int ret;
+ uint32_t val;
+
+ ret = ql82xx_rom_lock_d(ha);
+ if (ret < 0) {
+ qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ return ret;
+ }
+
+ ret = qla82xx_read_status_reg(ha, &val);
+ if (ret < 0)
+ goto done_unprotect;
+
+ val &= ~(0x7 << 2);
+ ret = qla82xx_write_status_reg(ha, val);
+ if (ret < 0) {
+ val |= (0x7 << 2);
+ qla82xx_write_status_reg(ha, val);
+ }
+
+ if (qla82xx_write_disable_flash(ha) != 0)
+ qla_printk(KERN_WARNING, ha, "Write disable failed\n");
+
+done_unprotect:
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+ return ret;
+}
+
+int
+qla82xx_protect_flash(struct qla_hw_data *ha)
+{
+ int ret;
+ uint32_t val;
+
+ ret = ql82xx_rom_lock_d(ha);
+ if (ret < 0) {
+ qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ return ret;
+ }
+
+ ret = qla82xx_read_status_reg(ha, &val);
+ if (ret < 0)
+ goto done_protect;
+
+ val |= (0x7 << 2);
+ /* LOCK all sectors */
+ ret = qla82xx_write_status_reg(ha, val);
+ if (ret < 0)
+ qla_printk(KERN_WARNING, ha, "Write status register failed\n");
+
+ if (qla82xx_write_disable_flash(ha) != 0)
+ qla_printk(KERN_WARNING, ha, "Write disable failed\n");
+done_protect:
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+ return ret;
+}
+
+int
+qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
+{
+ int ret = 0;
+
+ ret = ql82xx_rom_lock_d(ha);
+ if (ret < 0) {
+ qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ return ret;
+ }
+
+ qla82xx_flash_set_write_enable(ha);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
+
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "Error waiting for rom done\n");
+ ret = -1;
+ goto done;
+ }
+ ret = qla82xx_flash_wait_write_finish(ha);
+done:
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+ return ret;
+}
+
+/*
+ * Address and length are byte address
+ */
+uint8_t *
+qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ scsi_block_requests(vha->host);
+ qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
+ scsi_unblock_requests(vha->host);
+ return buf;
+}
+
+static int
+qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
+ uint32_t faddr, uint32_t dwords)
+{
+ int ret;
+ uint32_t liter;
+ uint32_t sec_mask, rest_addr;
+ dma_addr_t optrom_dma;
+ void *optrom = NULL;
+ int page_mode = 0;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret = -1;
+
+ /* Prepare burst-capable write on supported ISPs. */
+ if (page_mode && !(faddr & 0xfff) &&
+ dwords > OPTROM_BURST_DWORDS) {
+ optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+ &optrom_dma, GFP_KERNEL);
+ if (!optrom) {
+ qla_printk(KERN_DEBUG, ha,
+ "Unable to allocate memory for optrom "
+ "burst write (%x KB).\n",
+ OPTROM_BURST_SIZE / 1024);
+ }
+ }
+
+ rest_addr = ha->fdt_block_size - 1;
+ sec_mask = ~rest_addr;
+
+ ret = qla82xx_unprotect_flash(ha);
+ if (ret) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to unprotect flash for update.\n");
+ goto write_done;
+ }
+
+ for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
+ /* Are we at the beginning of a sector? */
+ if ((faddr & rest_addr) == 0) {
+
+ ret = qla82xx_erase_sector(ha, faddr);
+ if (ret) {
+ DEBUG9(qla_printk(KERN_ERR, ha,
+ "Unable to erase sector: "
+ "address=%x.\n", faddr));
+ break;
+ }
+ }
+
+ /* Go with burst-write. */
+ if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
+ /* Copy data to DMA'ble buffer. */
+ memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
+
+ ret = qla2x00_load_ram(vha, optrom_dma,
+ (ha->flash_data_off | faddr),
+ OPTROM_BURST_DWORDS);
+ if (ret != QLA_SUCCESS) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to burst-write optrom segment "
+ "(%x/%x/%llx).\n", ret,
+ (ha->flash_data_off | faddr),
+ (unsigned long long)optrom_dma);
+ qla_printk(KERN_WARNING, ha,
+ "Reverting to slow-write.\n");
+
+ dma_free_coherent(&ha->pdev->dev,
+ OPTROM_BURST_SIZE, optrom, optrom_dma);
+ optrom = NULL;
+ } else {
+ liter += OPTROM_BURST_DWORDS - 1;
+ faddr += OPTROM_BURST_DWORDS - 1;
+ dwptr += OPTROM_BURST_DWORDS - 1;
+ continue;
+ }
+ }
+
+ ret = qla82xx_write_flash_dword(ha, faddr,
+ cpu_to_le32(*dwptr));
+ if (ret) {
+ DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program"
+ "flash address=%x data=%x.\n", __func__,
+ ha->host_no, faddr, *dwptr));
+ break;
+ }
+ }
+
+ ret = qla82xx_protect_flash(ha);
+ if (ret)
+ qla_printk(KERN_WARNING, ha,
+ "Unable to protect flash after update.\n");
+write_done:
+ if (optrom)
+ dma_free_coherent(&ha->pdev->dev,
+ OPTROM_BURST_SIZE, optrom, optrom_dma);
+ return ret;
+}
+
+int
+qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ int rval;
+
+ /* Suspend HBA. */
+ scsi_block_requests(vha->host);
+ rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
+ length >> 2);
+ scsi_unblock_requests(vha->host);
+
+ /* Convert ISP82xx return status to generic */
+ if (rval)
+ rval = QLA_FUNCTION_FAILED;
+ else
+ rval = QLA_SUCCESS;
+ return rval;
+}
+
+void
+qla82xx_start_iocbs(srb_t *sp)
+{
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct device_reg_82xx __iomem *reg;
+ uint32_t dbval;
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ reg = &ha->iobase->isp82;
+ dbval = 0x04 | (ha->portnum << 5);
+
+ dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+ WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
+ wmb();
+ while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
+ wmb();
+ }
+}
+
+/*
+ * qla82xx_device_bootstrap
+ * Initialize device, set DEV_READY, start fw
+ *
+ * Note:
+ * IDC lock must be held upon entry
+ *
+ * Return:
+ * Success : 0
+ * Failed : 1
+ */
+static int
+qla82xx_device_bootstrap(scsi_qla_host_t *vha)
+{
+ int rval, i, timeout;
+ uint32_t old_count, count;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (qla82xx_need_reset(ha))
+ goto dev_initialize;
+
+ old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+
+ for (i = 0; i < 10; i++) {
+ timeout = msleep_interruptible(200);
+ if (timeout) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_FAILED);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ if (count != old_count)
+ goto dev_ready;
+ }
+
+dev_initialize:
+ /* set to DEV_INITIALIZING */
+ qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
+
+ /* The driver that sets the device state to initializing also sets the IDC version */
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
+
+ qla82xx_idc_unlock(ha);
+ rval = qla82xx_start_firmware(vha);
+ qla82xx_idc_lock(ha);
+
+ if (rval != QLA_SUCCESS) {
+ qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
+ qla82xx_clear_drv_active(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
+ return rval;
+ }
+
+dev_ready:
+ qla_printk(KERN_INFO, ha, "HW State: READY\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
+
+ return QLA_SUCCESS;
+}
+
+static void
+qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Disable the board */
+ qla_printk(KERN_INFO, ha, "Disabling the board\n");
+
+ /* Set DEV_FAILED flag to disable timer */
+ vha->device_flags |= DFLG_DEV_FAILED;
+ qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+ qla2x00_mark_all_devices_lost(vha, 0);
+ vha->flags.online = 0;
+ vha->flags.init_done = 0;
+}
+
+/*
+ * qla82xx_need_reset_handler
+ * Code to start reset sequence
+ *
+ * Note:
+ * IDC lock must be held upon entry
+ *
+ * Return:
+ *    None
+ */
+static void
+qla82xx_need_reset_handler(scsi_qla_host_t *vha)
+{
+ uint32_t dev_state, drv_state, drv_active;
+ unsigned long reset_timeout;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ if (vha->flags.online) {
+ qla82xx_idc_unlock(ha);
+ qla2x00_abort_isp_cleanup(vha);
+ ha->isp_ops->get_flash_version(vha, req->ring);
+ ha->isp_ops->nvram_config(vha);
+ qla82xx_idc_lock(ha);
+ }
+
+ qla82xx_set_rst_ready(ha);
+
+ /* wait for 10 seconds for reset ack from all functions */
+ reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+
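+	/* Wait until every active function acks the reset (DRV_STATE == DRV_ACTIVE). */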
+ while (drv_state != drv_active) {
+ if (time_after_eq(jiffies, reset_timeout)) {
+ qla_printk(KERN_INFO, ha,
+ "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME);
+ break;
+ }
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ }
+
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
+ dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+
+ /* Force to DEV_COLD unless someone else is starting a reset */
+ if (dev_state != QLA82XX_DEV_INITIALIZING) {
+ qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
+ }
+}
+
+static void
+qla82xx_check_fw_alive(scsi_qla_host_t *vha)
+{
+ uint32_t fw_heartbeat_counter, halt_status;
+ struct qla_hw_data *ha = vha->hw;
+
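+	/* Firmware bumps the PEG alive counter; a stalled counter means it is hung. */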
+ fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
+ vha->seconds_since_last_heartbeat++;
+ /* FW not alive after 2 seconds */
+ if (vha->seconds_since_last_heartbeat == 2) {
+ vha->seconds_since_last_heartbeat = 0;
+ halt_status = qla82xx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS1);
+ if (halt_status & HALT_STATUS_UNRECOVERABLE) {
+ set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "scsi(%ld): %s - detect abort needed\n",
+ vha->host_no, __func__);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ }
+ qla2xxx_wake_dpc(vha);
+ }
+ }
+ vha->fw_heartbeat_counter = fw_heartbeat_counter;
+}
+
+/*
+ * qla82xx_device_state_handler
+ * Main state handler
+ *
+ * Note:
+ * IDC lock must be held upon entry
+ *
+ * Return:
+ * Success : 0
+ * Failed : 1
+ */
+int
+qla82xx_device_state_handler(scsi_qla_host_t *vha)
+{
+ uint32_t dev_state;
+ int rval = QLA_SUCCESS;
+ unsigned long dev_init_timeout;
+ struct qla_hw_data *ha = vha->hw;
+
+ qla82xx_idc_lock(ha);
+ if (!vha->flags.init_done)
+ qla82xx_set_drv_active(vha);
+
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
+ dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+
+ /* wait for 30 seconds for device to go ready */
+ dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
+
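+	/* Poll the IDC device state; sleeps below drop the IDC lock so other functions can progress. */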
+ while (1) {
+
+ if (time_after_eq(jiffies, dev_init_timeout)) {
+ DEBUG(qla_printk(KERN_INFO, ha,
+ "%s: device init failed!\n",
+ QLA2XXX_DRIVER_NAME));
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ }
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ qla_printk(KERN_INFO, ha,
+ "2:Device state is 0x%x = %s\n", dev_state,
+ dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown");
+
+ switch (dev_state) {
+ case QLA82XX_DEV_READY:
+ goto exit;
+ case QLA82XX_DEV_COLD:
+ rval = qla82xx_device_bootstrap(vha);
+ goto exit;
+ case QLA82XX_DEV_INITIALIZING:
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ break;
+ case QLA82XX_DEV_NEED_RESET:
+ if (!ql2xdontresethba)
+ qla82xx_need_reset_handler(vha);
+ break;
+ case QLA82XX_DEV_NEED_QUIESCENT:
+ qla82xx_set_qsnt_ready(ha);
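+			/* fall through */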
+ case QLA82XX_DEV_QUIESCENT:
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ break;
+ case QLA82XX_DEV_FAILED:
+ qla82xx_dev_failed_handler(vha);
+ rval = QLA_FUNCTION_FAILED;
+ goto exit;
+ default:
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ }
+ }
+exit:
+ qla82xx_idc_unlock(ha);
+ return rval;
+}
+
+void qla82xx_watchdog(scsi_qla_host_t *vha)
+{
+ uint32_t dev_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+
+ /* don't poll if reset is going on */
+ if (!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))) {
+ if (dev_state == QLA82XX_DEV_NEED_RESET) {
+ qla_printk(KERN_WARNING, ha,
+ "%s(): Adapter reset needed!\n", __func__);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ qla82xx_check_fw_alive(vha);
+ }
+ }
+}
+
+int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+{
+ int rval;
+ rval = qla82xx_device_state_handler(vha);
+ return rval;
+}
+
+/*
+ * qla82xx_abort_isp
+ * Resets ISP and aborts all outstanding commands.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success
+ */
+int
+qla82xx_abort_isp(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t dev_state;
+
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ qla_printk(KERN_WARNING, ha,
+ "%s(%ld): Device in failed state, "
+ "Exiting.\n", __func__, vha->host_no);
+ return QLA_SUCCESS;
+ }
+
+ qla82xx_idc_lock(ha);
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ if (dev_state == QLA82XX_DEV_READY) {
+ qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_NEED_RESET);
+ } else
+ qla_printk(KERN_INFO, ha, "HW State: %s\n",
+ dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown");
+ qla82xx_idc_unlock(ha);
+
+ rval = qla82xx_device_state_handler(vha);
+
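+	/* Reset handshake complete; clear this function's reset-ready bit. */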
+ qla82xx_idc_lock(ha);
+ qla82xx_clear_rst_ready(ha);
+ qla82xx_idc_unlock(ha);
+
+ if (rval == QLA_SUCCESS)
+ qla82xx_restart_isp(vha);
+
+ if (rval) {
+ vha->flags.online = 1;
+ if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ if (ha->isp_abort_cnt == 0) {
+ qla_printk(KERN_WARNING, ha,
+ "ISP error recovery failed - "
+ "board disabled\n");
+ /*
+ * The next call disables the board
+ * completely.
+ */
+ ha->isp_ops->reset_adapter(vha);
+ vha->flags.online = 0;
+ clear_bit(ISP_ABORT_RETRY,
+ &vha->dpc_flags);
+ rval = QLA_SUCCESS;
+ } else { /* schedule another ISP abort */
+ ha->isp_abort_cnt--;
+ DEBUG(qla_printk(KERN_INFO, ha,
+ "qla%ld: ISP abort - retry remaining %d\n",
+ vha->host_no, ha->isp_abort_cnt));
+ rval = QLA_FUNCTION_FAILED;
+ }
+ } else {
+ ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
+ DEBUG(qla_printk(KERN_INFO, ha,
+ "(%ld): ISP error recovery - retrying (%d) "
+ "more times\n", vha->host_no, ha->isp_abort_cnt));
+ set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+ return rval;
+}
+
+/*
+ * qla82xx_fcoe_ctx_reset
+ *    Performs a quick reset and aborts all outstanding commands.
+ *    This only performs an FCoE context reset and avoids a full-blown
+ *    chip reset.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success
+ */
+int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
+{
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (vha->flags.online) {
+		/* Abort all outstanding commands so they can be requeued later */
+ qla2x00_abort_isp_cleanup(vha);
+ }
+
+ /* Stop currently executing firmware.
+ * This will destroy existing FCoE context at the F/W end.
+ */
+ qla2x00_try_to_stop_firmware(vha);
+
+ /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
+ rval = qla82xx_restart_isp(vha);
+
+ return rval;
+}
+
+/*
+ * qla2x00_wait_for_fcoe_ctx_reset
+ * Wait till the FCoE context is reset.
+ *
+ * Note:
+ * Does context switching here.
+ * Release SPIN_LOCK (if any) before calling this routine.
+ *
+ * Return:
+ * Success (fcoe_ctx reset is done) : 0
+ *    Failed (fcoe_ctx reset not completed within max loop timeout) : 1
+ */
+int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
+{
+ int status = QLA_FUNCTION_FAILED;
+ unsigned long wait_reset;
+
+ wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
+ while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+ && time_before(jiffies, wait_reset)) {
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+
+ if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
+ !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+ status = QLA_SUCCESS;
+ break;
+ }
+ }
+ DEBUG2(printk(KERN_INFO
+ "%s status=%d\n", __func__, status));
+
+ return status;
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
new file mode 100644
index 0000000..f8f99a5
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -0,0 +1,889 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2008 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_NX_H
+#define __QLA_NX_H
+
+/*
+ * Following are the states of the Phantom. Phantom will set them and
+ * Host will read to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+
+/*CRB_RELATED*/
+#define QLA82XX_CRB_BASE QLA82XX_CAM_RAM(0x200)
+#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
+
+#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
+#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
+#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
+#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
+
+#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
+#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E
+#define QLA82XX_HW_H2_CH_HUB_ADR 0x03
+#define QLA82XX_HW_H3_CH_HUB_ADR 0x01
+#define QLA82XX_HW_H4_CH_HUB_ADR 0x06
+#define QLA82XX_HW_H5_CH_HUB_ADR 0x07
+#define QLA82XX_HW_H6_CH_HUB_ADR 0x08
+
+/* Hub 0 */
+#define QLA82XX_HW_MN_CRB_AGT_ADR 0x15
+#define QLA82XX_HW_MS_CRB_AGT_ADR 0x25
+
+/* Hub 1 */
+#define QLA82XX_HW_PS_CRB_AGT_ADR 0x73
+#define QLA82XX_HW_QMS_CRB_AGT_ADR 0x00
+#define QLA82XX_HW_RPMX3_CRB_AGT_ADR 0x0b
+#define QLA82XX_HW_SQGS0_CRB_AGT_ADR 0x01
+#define QLA82XX_HW_SQGS1_CRB_AGT_ADR 0x02
+#define QLA82XX_HW_SQGS2_CRB_AGT_ADR 0x03
+#define QLA82XX_HW_SQGS3_CRB_AGT_ADR 0x04
+#define QLA82XX_HW_C2C0_CRB_AGT_ADR 0x58
+#define QLA82XX_HW_C2C1_CRB_AGT_ADR 0x59
+#define QLA82XX_HW_C2C2_CRB_AGT_ADR 0x5a
+#define QLA82XX_HW_RPMX2_CRB_AGT_ADR 0x0a
+#define QLA82XX_HW_RPMX4_CRB_AGT_ADR 0x0c
+#define QLA82XX_HW_RPMX7_CRB_AGT_ADR 0x0f
+#define QLA82XX_HW_RPMX9_CRB_AGT_ADR 0x12
+#define QLA82XX_HW_SMB_CRB_AGT_ADR 0x18
+
+/* Hub 2 */
+#define QLA82XX_HW_NIU_CRB_AGT_ADR 0x31
+#define QLA82XX_HW_I2C0_CRB_AGT_ADR 0x19
+#define QLA82XX_HW_I2C1_CRB_AGT_ADR 0x29
+
+#define QLA82XX_HW_SN_CRB_AGT_ADR 0x10
+#define QLA82XX_HW_I2Q_CRB_AGT_ADR 0x20
+#define QLA82XX_HW_LPC_CRB_AGT_ADR 0x22
+#define QLA82XX_HW_ROMUSB_CRB_AGT_ADR 0x21
+#define QLA82XX_HW_QM_CRB_AGT_ADR 0x66
+#define QLA82XX_HW_SQG0_CRB_AGT_ADR 0x60
+#define QLA82XX_HW_SQG1_CRB_AGT_ADR 0x61
+#define QLA82XX_HW_SQG2_CRB_AGT_ADR 0x62
+#define QLA82XX_HW_SQG3_CRB_AGT_ADR 0x63
+#define QLA82XX_HW_RPMX1_CRB_AGT_ADR 0x09
+#define QLA82XX_HW_RPMX5_CRB_AGT_ADR 0x0d
+#define QLA82XX_HW_RPMX6_CRB_AGT_ADR 0x0e
+#define QLA82XX_HW_RPMX8_CRB_AGT_ADR 0x11
+
+/* Hub 3 */
+#define QLA82XX_HW_PH_CRB_AGT_ADR 0x1A
+#define QLA82XX_HW_SRE_CRB_AGT_ADR 0x50
+#define QLA82XX_HW_EG_CRB_AGT_ADR 0x51
+#define QLA82XX_HW_RPMX0_CRB_AGT_ADR 0x08
+
+/* Hub 4 */
+#define QLA82XX_HW_PEGN0_CRB_AGT_ADR 0x40
+#define QLA82XX_HW_PEGN1_CRB_AGT_ADR 0x41
+#define QLA82XX_HW_PEGN2_CRB_AGT_ADR 0x42
+#define QLA82XX_HW_PEGN3_CRB_AGT_ADR 0x43
+#define QLA82XX_HW_PEGNI_CRB_AGT_ADR 0x44
+#define QLA82XX_HW_PEGND_CRB_AGT_ADR 0x45
+#define QLA82XX_HW_PEGNC_CRB_AGT_ADR 0x46
+#define QLA82XX_HW_PEGR0_CRB_AGT_ADR 0x47
+#define QLA82XX_HW_PEGR1_CRB_AGT_ADR 0x48
+#define QLA82XX_HW_PEGR2_CRB_AGT_ADR 0x49
+#define QLA82XX_HW_PEGR3_CRB_AGT_ADR 0x4a
+#define QLA82XX_HW_PEGN4_CRB_AGT_ADR 0x4b
+
+/* Hub 5 */
+#define QLA82XX_HW_PEGS0_CRB_AGT_ADR 0x40
+#define QLA82XX_HW_PEGS1_CRB_AGT_ADR 0x41
+#define QLA82XX_HW_PEGS2_CRB_AGT_ADR 0x42
+#define QLA82XX_HW_PEGS3_CRB_AGT_ADR 0x43
+#define QLA82XX_HW_PEGSI_CRB_AGT_ADR 0x44
+#define QLA82XX_HW_PEGSD_CRB_AGT_ADR 0x45
+#define QLA82XX_HW_PEGSC_CRB_AGT_ADR 0x46
+
+/* Hub 6 */
+#define QLA82XX_HW_CAS0_CRB_AGT_ADR 0x46
+#define QLA82XX_HW_CAS1_CRB_AGT_ADR 0x47
+#define QLA82XX_HW_CAS2_CRB_AGT_ADR 0x48
+#define QLA82XX_HW_CAS3_CRB_AGT_ADR 0x49
+#define QLA82XX_HW_NCM_CRB_AGT_ADR 0x16
+#define QLA82XX_HW_TMR_CRB_AGT_ADR 0x17
+#define QLA82XX_HW_XDMA_CRB_AGT_ADR 0x05
+#define QLA82XX_HW_OCM0_CRB_AGT_ADR 0x06
+#define QLA82XX_HW_OCM1_CRB_AGT_ADR 0x07
+
+/* This field defines PCI/X adr [25:20] of agents on the CRB */
+/* */
+#define QLA82XX_HW_PX_MAP_CRB_PH 0
+#define QLA82XX_HW_PX_MAP_CRB_PS 1
+#define QLA82XX_HW_PX_MAP_CRB_MN 2
+#define QLA82XX_HW_PX_MAP_CRB_MS 3
+#define QLA82XX_HW_PX_MAP_CRB_SRE 5
+#define QLA82XX_HW_PX_MAP_CRB_NIU 6
+#define QLA82XX_HW_PX_MAP_CRB_QMN 7
+#define QLA82XX_HW_PX_MAP_CRB_SQN0 8
+#define QLA82XX_HW_PX_MAP_CRB_SQN1 9
+#define QLA82XX_HW_PX_MAP_CRB_SQN2 10
+#define QLA82XX_HW_PX_MAP_CRB_SQN3 11
+#define QLA82XX_HW_PX_MAP_CRB_QMS 12
+#define QLA82XX_HW_PX_MAP_CRB_SQS0 13
+#define QLA82XX_HW_PX_MAP_CRB_SQS1 14
+#define QLA82XX_HW_PX_MAP_CRB_SQS2 15
+#define QLA82XX_HW_PX_MAP_CRB_SQS3 16
+#define QLA82XX_HW_PX_MAP_CRB_PGN0 17
+#define QLA82XX_HW_PX_MAP_CRB_PGN1 18
+#define QLA82XX_HW_PX_MAP_CRB_PGN2 19
+#define QLA82XX_HW_PX_MAP_CRB_PGN3 20
+#define QLA82XX_HW_PX_MAP_CRB_PGN4 QLA82XX_HW_PX_MAP_CRB_SQS2
+#define QLA82XX_HW_PX_MAP_CRB_PGND 21
+#define QLA82XX_HW_PX_MAP_CRB_PGNI 22
+#define QLA82XX_HW_PX_MAP_CRB_PGS0 23
+#define QLA82XX_HW_PX_MAP_CRB_PGS1 24
+#define QLA82XX_HW_PX_MAP_CRB_PGS2 25
+#define QLA82XX_HW_PX_MAP_CRB_PGS3 26
+#define QLA82XX_HW_PX_MAP_CRB_PGSD 27
+#define QLA82XX_HW_PX_MAP_CRB_PGSI 28
+#define QLA82XX_HW_PX_MAP_CRB_SN 29
+#define QLA82XX_HW_PX_MAP_CRB_EG 31
+#define QLA82XX_HW_PX_MAP_CRB_PH2 32
+#define QLA82XX_HW_PX_MAP_CRB_PS2 33
+#define QLA82XX_HW_PX_MAP_CRB_CAM 34
+#define QLA82XX_HW_PX_MAP_CRB_CAS0 35
+#define QLA82XX_HW_PX_MAP_CRB_CAS1 36
+#define QLA82XX_HW_PX_MAP_CRB_CAS2 37
+#define QLA82XX_HW_PX_MAP_CRB_C2C0 38
+#define QLA82XX_HW_PX_MAP_CRB_C2C1 39
+#define QLA82XX_HW_PX_MAP_CRB_TIMR 40
+#define QLA82XX_HW_PX_MAP_CRB_RPMX1 42
+#define QLA82XX_HW_PX_MAP_CRB_RPMX2 43
+#define QLA82XX_HW_PX_MAP_CRB_RPMX3 44
+#define QLA82XX_HW_PX_MAP_CRB_RPMX4 45
+#define QLA82XX_HW_PX_MAP_CRB_RPMX5 46
+#define QLA82XX_HW_PX_MAP_CRB_RPMX6 47
+#define QLA82XX_HW_PX_MAP_CRB_RPMX7 48
+#define QLA82XX_HW_PX_MAP_CRB_XDMA 49
+#define QLA82XX_HW_PX_MAP_CRB_I2Q 50
+#define QLA82XX_HW_PX_MAP_CRB_ROMUSB 51
+#define QLA82XX_HW_PX_MAP_CRB_CAS3 52
+#define QLA82XX_HW_PX_MAP_CRB_RPMX0 53
+#define QLA82XX_HW_PX_MAP_CRB_RPMX8 54
+#define QLA82XX_HW_PX_MAP_CRB_RPMX9 55
+#define QLA82XX_HW_PX_MAP_CRB_OCM0 56
+#define QLA82XX_HW_PX_MAP_CRB_OCM1 57
+#define QLA82XX_HW_PX_MAP_CRB_SMB 58
+#define QLA82XX_HW_PX_MAP_CRB_I2C0 59
+#define QLA82XX_HW_PX_MAP_CRB_I2C1 60
+#define QLA82XX_HW_PX_MAP_CRB_LPC 61
+#define QLA82XX_HW_PX_MAP_CRB_PGNC 62
+#define QLA82XX_HW_PX_MAP_CRB_PGR0 63
+#define QLA82XX_HW_PX_MAP_CRB_PGR1 4
+#define QLA82XX_HW_PX_MAP_CRB_PGR2 30
+#define QLA82XX_HW_PX_MAP_CRB_PGR3 41
+
+/* This field defines CRB adr [31:20] of the agents */
+/* */
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_MN ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_MN_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PH ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PH_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_MS ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_MS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_QMS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_C2C0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_C2C1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX4_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX7_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX9_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SMB ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SMB_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_NIU ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_NIU_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_I2C0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_I2C1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SRE ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SRE_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_EG ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_EG_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMN ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_QM_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX5_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX6_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX8_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGNI_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGND ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGND_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN4_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGNC_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGSI_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSD ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGSD_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSC ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGSC_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAM ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_NCM_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_TMR_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_XDMA_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SN ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SN_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_I2Q_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_ROMUSB_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_OCM0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM1 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_OCM1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_LPC ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_LPC_CRB_AGT_ADR)
+
+#define ROMUSB_GLB (QLA82XX_CRB_ROMUSB + 0x00000)
+#define QLA82XX_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define QLA82XX_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
+#define QLA82XX_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
+#define QLA82XX_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
+#define QLA82XX_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
+#define QLA82XX_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
+#define QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define QLA82XX_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
+
+#define ROMUSB_ROM (QLA82XX_CRB_ROMUSB + 0x10000)
+#define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define QLA82XX_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER 0x0d417340
+
+#define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */
+#define QLA82XX_PCI_CRB_WINDOW(A) \
+ (QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE)
+#define QLA82XX_CRB_C2C_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C0)
+#define QLA82XX_CRB_C2C_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C1)
+#define QLA82XX_CRB_C2C_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C2)
+#define QLA82XX_CRB_CAM \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAM)
+#define QLA82XX_CRB_CASPER \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS)
+#define QLA82XX_CRB_CASPER_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS0)
+#define QLA82XX_CRB_CASPER_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS1)
+#define QLA82XX_CRB_CASPER_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS2)
+#define QLA82XX_CRB_DDR_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MS)
+#define QLA82XX_CRB_DDR_NET \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MN)
+#define QLA82XX_CRB_EPG \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_EG)
+#define QLA82XX_CRB_I2Q \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2Q)
+#define QLA82XX_CRB_NIU \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_NIU)
+
+#define QLA82XX_CRB_PCIX_HOST \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH)
+#define QLA82XX_CRB_PCIX_HOST2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH2)
+#define QLA82XX_CRB_PCIX_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS)
+#define QLA82XX_CRB_PCIE \
+ QLA82XX_CRB_PCIX_MD
+
+/* window 1 pcie slot */
+#define QLA82XX_CRB_PCIE2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS2)
+#define QLA82XX_CRB_PEG_MD_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS0)
+#define QLA82XX_CRB_PEG_MD_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS1)
+#define QLA82XX_CRB_PEG_MD_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS2)
+#define QLA82XX_CRB_PEG_MD_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS3)
+#define QLA82XX_CRB_PEG_MD_D \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSD)
+#define QLA82XX_CRB_PEG_MD_I \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSI)
+#define QLA82XX_CRB_PEG_NET_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN0)
+#define QLA82XX_CRB_PEG_NET_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN1)
+#define QLA82XX_CRB_PEG_NET_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN2)
+#define QLA82XX_CRB_PEG_NET_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN3)
+#define QLA82XX_CRB_PEG_NET_4 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN4)
+#define QLA82XX_CRB_PEG_NET_D \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGND)
+#define QLA82XX_CRB_PEG_NET_I \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGNI)
+#define QLA82XX_CRB_PQM_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMS)
+#define QLA82XX_CRB_PQM_NET \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMN)
+#define QLA82XX_CRB_QDR_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SS)
+#define QLA82XX_CRB_QDR_NET \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SN)
+#define QLA82XX_CRB_ROMUSB \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_ROMUSB)
+#define QLA82XX_CRB_RPMX_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX0)
+#define QLA82XX_CRB_RPMX_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX1)
+#define QLA82XX_CRB_RPMX_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX2)
+#define QLA82XX_CRB_RPMX_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX3)
+#define QLA82XX_CRB_RPMX_4 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX4)
+#define QLA82XX_CRB_RPMX_5 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX5)
+#define QLA82XX_CRB_RPMX_6 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX6)
+#define QLA82XX_CRB_RPMX_7 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX7)
+#define QLA82XX_CRB_SQM_MD_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS0)
+#define QLA82XX_CRB_SQM_MD_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS1)
+#define QLA82XX_CRB_SQM_MD_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS2)
+#define QLA82XX_CRB_SQM_MD_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS3)
+#define QLA82XX_CRB_SQM_NET_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN0)
+#define QLA82XX_CRB_SQM_NET_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN1)
+#define QLA82XX_CRB_SQM_NET_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN2)
+#define QLA82XX_CRB_SQM_NET_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN3)
+#define QLA82XX_CRB_SRE \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SRE)
+#define QLA82XX_CRB_TIMER \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_TIMR)
+#define QLA82XX_CRB_XDMA \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_XDMA)
+#define QLA82XX_CRB_I2C0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C0)
+#define QLA82XX_CRB_I2C1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C1)
+#define QLA82XX_CRB_OCM0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_OCM0)
+#define QLA82XX_CRB_SMB \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SMB)
+#define QLA82XX_CRB_MAX \
+ QLA82XX_PCI_CRB_WINDOW(64)
+
+/*
+ * ====================== BASE ADDRESSES ON-CHIP ======================
+ * Base addresses of major components on-chip.
+ * ====================== BASE ADDRESSES ON-CHIP ======================
+ */
+#define QLA82XX_ADDR_DDR_NET (0x0000000000000000ULL)
+#define QLA82XX_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+
+/* Imbus address bit used to indicate a host address. This bit is
+ * eliminated by the pcie bar and bar select before presentation
+ * over pcie. */
+/* host memory via IMBUS */
+#define QLA82XX_P2_ADDR_PCIE (0x0000000800000000ULL)
+#define QLA82XX_P3_ADDR_PCIE (0x0000008000000000ULL)
+#define QLA82XX_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL)
+#define QLA82XX_ADDR_OCM0 (0x0000000200000000ULL)
+#define QLA82XX_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL)
+#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL)
+
+#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
+#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
+
+#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000
+#define QLA82XX_PCI_DIRECT_CRB (unsigned long)0x04400000
+#define QLA82XX_PCI_CAMQM (unsigned long)0x04800000
+#define QLA82XX_PCI_CAMQM_MAX (unsigned long)0x04ffffff
+#define QLA82XX_PCI_DDR_NET (unsigned long)0x00000000
+#define QLA82XX_PCI_QDR_NET (unsigned long)0x04000000
+#define QLA82XX_PCI_QDR_NET_MAX (unsigned long)0x043fffff
+
+/*
+ * Register offsets for MN
+ */
+#define MIU_CONTROL (0x000)
+#define MIU_TAG (0x004)
+#define MIU_TEST_AGT_CTRL (0x090)
+#define MIU_TEST_AGT_ADDR_LO (0x094)
+#define MIU_TEST_AGT_ADDR_HI (0x098)
+#define MIU_TEST_AGT_WRDATA_LO (0x0a0)
+#define MIU_TEST_AGT_WRDATA_HI (0x0a4)
+#define MIU_TEST_AGT_WRDATA(i) (0x0a0+(4*(i)))
+#define MIU_TEST_AGT_RDDATA_LO (0x0a8)
+#define MIU_TEST_AGT_RDDATA_HI (0x0ac)
+#define MIU_TEST_AGT_RDDATA(i) (0x0a8+(4*(i)))
+#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
+
+/* MIU_TEST_AGT_CTRL flags. work for SIU as well */
+#define MIU_TA_CTL_START 1
+#define MIU_TA_CTL_ENABLE 2
+#define MIU_TA_CTL_WRITE 4
+#define MIU_TA_CTL_BUSY 8
+
+/*CAM RAM */
+# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000)
+# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg))
+
+#define QLA82XX_PEG_TUNE_MN_SPD_ZEROED 0x80000000
+#define QLA82XX_BOOT_LOADER_MN_ISSUE 0xff00ffff
+#define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24))
+#define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8))
+#define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac))
+#define QLA82XX_PEG_ALIVE_COUNTER (QLA82XX_CAM_RAM(0xb0))
+
+#define QLA82XX_CAMRAM_DB1 (QLA82XX_CAM_RAM(0x1b8))
+#define QLA82XX_CAMRAM_DB2 (QLA82XX_CAM_RAM(0x1bc))
+
+#define HALT_STATUS_UNRECOVERABLE 0x80000000
+#define HALT_STATUS_RECOVERABLE 0x40000000
+
+/* Driver Coexistence Defines */
+#define QLA82XX_CRB_DRV_ACTIVE (QLA82XX_CAM_RAM(0x138))
+#define QLA82XX_CRB_DEV_STATE (QLA82XX_CAM_RAM(0x140))
+#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c))
+#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
+#define QLA82XX_CRB_DRV_STATE (QLA82XX_CAM_RAM(0x144))
+#define QLA82XX_CRB_DRV_SCRATCH (QLA82XX_CAM_RAM(0x148))
+
+/* Every driver should use these Device States */
+#define QLA82XX_DEV_COLD 1
+#define QLA82XX_DEV_INITIALIZING 2
+#define QLA82XX_DEV_READY 3
+#define QLA82XX_DEV_NEED_RESET 4
+#define QLA82XX_DEV_NEED_QUIESCENT 5
+#define QLA82XX_DEV_FAILED 6
+#define QLA82XX_DEV_QUIESCENT 7
+#define MAX_STATES 8 /* Increment if new state added */
+
+#define QLA82XX_IDC_VERSION 1
+#define QLA82XX_ROM_DEV_INIT_TIMEOUT 30
+#define QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT 10
+
+#define QLA82XX_ROM_LOCK_ID (QLA82XX_CAM_RAM(0x100))
+#define QLA82XX_CRB_WIN_LOCK_ID (QLA82XX_CAM_RAM(0x124))
+#define QLA82XX_FW_VERSION_MAJOR (QLA82XX_CAM_RAM(0x150))
+#define QLA82XX_FW_VERSION_MINOR (QLA82XX_CAM_RAM(0x154))
+#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158))
+#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg))
+
+#define PCIE_CHICKEN3 (0x120c8)
+#define PCIE_SETUP_FUNCTION (0x12040)
+#define PCIE_SETUP_FUNCTION2 (0x12048)
+
+#define QLA82XX_PCIX_PS_REG(reg) (QLA82XX_CRB_PCIX_MD + (reg))
+#define QLA82XX_PCIX_PS2_REG(reg) (QLA82XX_CRB_PCIE2 + (reg))
+
+#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */
+#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */
+#define PCIE_SEM5_LOCK (0x1c028) /* Coexistence lock */
+#define PCIE_SEM5_UNLOCK (0x1c02c) /* Coexistence unlock */
+#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */
+#define PCIE_SEM7_UNLOCK (0x1c03c) /* crbwin unlock*/
+
+/* Different driver states */
+#define QLA82XX_DRVST_NOT_RDY 0
+#define QLA82XX_DRVST_RST_RDY 1
+#define QLA82XX_DRVST_QSNT_RDY 2
+
+/*
+ * The PCI VendorID and DeviceID for our board.
+ */
+#define PCI_DEVICE_ID_QLOGIC_ISP8021 0x8021
+
+#define QLA82XX_MSIX_TBL_SPACE 8192
+#define QLA82XX_PCI_REG_MSIX_TBL 0x44
+#define QLA82XX_PCI_MSIX_CONTROL 0x40
+
+struct crb_128M_2M_sub_block_map {
+ unsigned valid;
+ unsigned start_128M;
+ unsigned end_128M;
+ unsigned start_2M;
+};
+
+struct crb_128M_2M_block_map {
+ struct crb_128M_2M_sub_block_map sub_block[16];
+};
+
+struct crb_addr_pair {
+ long addr;
+ long data;
+};
+
+#define ADDR_ERROR ((unsigned long) 0xffffffff)
+#define MAX_CTL_CHECK 1000
+
+/***************************************************************************
+ * PCI related defines.
+ **************************************************************************/
+
+/*
+ * Interrupt related defines.
+ */
+#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_STATUS_F1 (0x10160)
+#define PCIX_TARGET_STATUS_F2 (0x10164)
+#define PCIX_TARGET_STATUS_F3 (0x10168)
+#define PCIX_TARGET_STATUS_F4 (0x10360)
+#define PCIX_TARGET_STATUS_F5 (0x10364)
+#define PCIX_TARGET_STATUS_F6 (0x10368)
+#define PCIX_TARGET_STATUS_F7 (0x1036c)
+
+#define PCIX_TARGET_MASK (0x10128)
+#define PCIX_TARGET_MASK_F1 (0x10170)
+#define PCIX_TARGET_MASK_F2 (0x10174)
+#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F4 (0x10370)
+#define PCIX_TARGET_MASK_F5 (0x10374)
+#define PCIX_TARGET_MASK_F6 (0x10378)
+#define PCIX_TARGET_MASK_F7 (0x1037c)
+
+/*
+ * Message Signaled Interrupts
+ */
+#define PCIX_MSI_F0 (0x13000)
+#define PCIX_MSI_F1 (0x13004)
+#define PCIX_MSI_F2 (0x13008)
+#define PCIX_MSI_F3 (0x1300c)
+#define PCIX_MSI_F4 (0x13010)
+#define PCIX_MSI_F5 (0x13014)
+#define PCIX_MSI_F6 (0x13018)
+#define PCIX_MSI_F7 (0x1301c)
+#define PCIX_MSI_F(FUNC) (0x13000 + ((FUNC) * 4))
+#define PCIX_INT_VECTOR (0x10100)
+#define PCIX_INT_MASK (0x10104)
+
+/*
+ * Interrupt state machine and other bits.
+ */
+#define PCIE_MISCCFG_RC (0x1206c)
+
+#define ISR_INT_TARGET_STATUS \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_STATUS_F1 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_STATUS_F2 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_STATUS_F3 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_STATUS_F4 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_STATUS_F5 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_STATUS_F6 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_STATUS_F7 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+
+#define ISR_INT_TARGET_MASK \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_MASK_F1 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_MASK_F2 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_MASK_F3 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_MASK_F4 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_MASK_F5 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_MASK_F6 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_MASK_F7 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define ISR_INT_VECTOR \
+ (QLA82XX_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK \
+ (QLA82XX_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_STATE_REG \
+ (QLA82XX_PCIX_PS_REG(PCIE_MISCCFG_RC))
+
+#define ISR_MSI_INT_TRIGGER(FUNC) \
+ (QLA82XX_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+
+#define ISR_IS_LEGACY_INTR_IDLE(VAL) (((VAL) & 0x300) == 0)
+#define ISR_IS_LEGACY_INTR_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+struct qla82xx_legacy_intr_set {
+ uint32_t int_vec_bit;
+ uint32_t tgt_status_reg;
+ uint32_t tgt_mask_reg;
+ uint32_t pci_int_reg;
+};
+
+#define QLA82XX_LEGACY_INTR_CONFIG \
+{ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
+}
+
+#define BOOTLD_START 0x10000
+#define IMAGE_START 0x100000
+#define FLASH_ADDR_START 0x43000
+
+/* Magic number to let user know flash is programmed */
+#define QLA82XX_BDINFO_MAGIC 0x12345678
+#define FW_SIZE_OFFSET (0x3e840c)
+
+#define QLA82XX_IS_REVISION_P3PLUS(_rev_) ((_rev_) >= 0x50)
+#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
+#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
+
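+/* Fallbacks for platforms without native 64-bit MMIO accessors; the two 32-bit accesses are not atomic. */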
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(((u32) (val)), (addr));
+ writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
+
+/* Request and response queue size */
+#define REQUEST_ENTRY_CNT_82XX 128 /* Number of request entries. */
+#define RESPONSE_ENTRY_CNT_82XX 128 /* Number of response entries.*/
+
+/*
+ * ISP 8021 I/O Register Set structure definitions.
+ */
+struct device_reg_82xx {
+ uint32_t req_q_out[64]; /* Request Queue out-Pointer (64 * 4) */
+ uint32_t rsp_q_in[64]; /* Response Queue In-Pointer. */
+ uint32_t rsp_q_out[64]; /* Response Queue Out-Pointer. */
+
+ uint16_t mailbox_in[32]; /* Mail box In registers */
+ uint16_t unused_1[32];
+ uint32_t hint; /* Host interrupt register */
+#define HINT_MBX_INT_PENDING BIT_0
+ uint16_t unused_2[62];
+ uint16_t mailbox_out[32]; /* Mail box Out registers */
+ uint32_t unused_3[48];
+
+ uint32_t host_status; /* host status */
+#define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */
+#define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */
+ uint32_t host_int; /* Interrupt status. */
+#define ISRX_NX_RISC_INT BIT_0 /* RISC interrupt. */
+};
+
+struct fcp_cmnd {
+ struct scsi_lun lun;
+ uint8_t crn;
+ uint8_t task_attribute;
+ uint8_t task_managment;
+ uint8_t additional_cdb_len;
+ uint8_t cdb[260]; /* 256 for CDB len and 4 for FCP_DL */
+};
+
+struct dsd_dma {
+ struct list_head list;
+ dma_addr_t dsd_list_dma;
+ void *dsd_addr;
+};
+
+#define QLA_DSDS_PER_IOCB 37
+#define QLA_DSD_SIZE 12
+struct ct6_dsd {
+ uint16_t fcp_cmnd_len;
+ dma_addr_t fcp_cmnd_dma;
+ struct fcp_cmnd *fcp_cmnd;
+ int dsd_use_cnt;
+ struct list_head dsd_list;
+};
+
+#define MBC_TOGGLE_INTR 0x10
+
+/* Flash offset */
+#define FLT_REG_BOOTLOAD_82XX 0x72
+#define FLT_REG_BOOT_CODE_82XX 0x78
+#define FLT_REG_FW_82XX 0x74
+#define FLT_REG_GOLD_FW_82XX 0x75
+#define FLT_REG_VPD_82XX 0x81
+
+#define FA_VPD_SIZE_82XX 0x400
+
+#define FA_FLASH_LAYOUT_ADDR_82 0xFC400
+
+/******************************************************************************
+*
+* Definitions specific to M25P flash
+*
+*******************************************************************************
+* Instructions
+*/
+#define M25P_INSTR_WREN 0x06
+#define M25P_INSTR_WRDI 0x04
+#define M25P_INSTR_RDID 0x9f
+#define M25P_INSTR_RDSR 0x05
+#define M25P_INSTR_WRSR 0x01
+#define M25P_INSTR_READ 0x03
+#define M25P_INSTR_FAST_READ 0x0b
+#define M25P_INSTR_PP 0x02
+#define M25P_INSTR_SE 0xd8
+#define M25P_INSTR_BE 0xc7
+#define M25P_INSTR_DP 0xb9
+#define M25P_INSTR_RES 0xab
+
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 48c37e3..be1a8fc 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -24,11 +24,18 @@
*/
char qla2x00_version_str[40];
+static int apidev_major;
+
/*
* SRB allocation cache
*/
static struct kmem_cache *srb_cachep;
+/*
+ * CT6 CTX allocation cache
+ */
+static struct kmem_cache *ctx_cachep;
+
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xlogintimeout,
@@ -65,13 +72,19 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
"Option to enable extended error logging, "
"Default is 0 - no logging. 1 - log errors.");
+int ql2xshiftctondsd = 6;
+module_param(ql2xshiftctondsd, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xshiftctondsd,
+ "Set to control shifting of command type processing "
+ "based on total number of SG elements.");
+
static void qla2x00_free_device(scsi_qla_host_t *);
int ql2xfdmienable=1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfdmienable,
- "Enables FDMI registratons "
- "Default is 0 - no FDMI. 1 - perfom FDMI.");
+ "Enables FDMI registrations. "
+ "0 - no FDMI. Default is 1 - perform FDMI.");
#define MAX_Q_DEPTH 32
static int ql2xmaxqdepth = MAX_Q_DEPTH;
@@ -79,6 +92,19 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to report for target devices.");
+/* Do not change the value of this after module load */
+int ql2xenabledif = 1;
+module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xenabledif,
+		" Enable T10-CRC-DIF. "
+		" 0 - No DIF Support. Default is 1 - Enable it.");
+
+int ql2xenablehba_err_chk;
+module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xenablehba_err_chk,
+		" Enable T10-CRC-DIF Error isolation by HBA."
+ " Default is 0 - Error isolation disabled, 1 - Enable it");
+
int ql2xiidmaenable=1;
module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xiidmaenable,
@@ -114,6 +140,32 @@ MODULE_PARM_DESC(ql2xetsenable,
"Enables firmware ETS burst."
"Default is 0 - skip ETS enablement.");
+int ql2xdbwr;
+module_param(ql2xdbwr, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xdbwr,
+ "Option to specify scheme for request queue posting\n"
+ " 0 -- Regular doorbell.\n"
+ " 1 -- CAMRAM doorbell (faster).\n");
+
+int ql2xdontresethba;
+module_param(ql2xdontresethba, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xdontresethba,
+ "Option to specify reset behaviour\n"
+ " 0 (Default) -- Reset on failure.\n"
+ " 1 -- Do not reset on failure.\n");
+
+int ql2xtargetreset = 1;
+module_param(ql2xtargetreset, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xtargetreset,
+		"Enable target reset. "
+ "Default is 1 - use hw defaults.");
+
+
+int ql2xasynctmfenable;
+module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xasynctmfenable,
+		"Enables issue of TM IOCBs asynchronously via IOCB mechanism. "
+ "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
/*
* SCSI host template entry points
*/
@@ -183,6 +235,10 @@ qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
+ /* Currently used for 82XX only. */
+ if (vha->device_flags & DFLG_DEV_FAILED)
+ return;
+
mod_timer(&vha->timer, jiffies + interval * HZ);
}
@@ -500,6 +556,14 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
if (fcport->drport)
goto qc24_target_busy;
+ if (!vha->flags.difdix_supported &&
+ scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+ DEBUG2(qla_printk(KERN_ERR, ha,
+ "DIF Cap Not Reg, fail DIF capable cmd's:%x\n",
+ cmd->cmnd[0]));
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
@@ -618,6 +682,50 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
return (return_status);
}
+/*
+ * qla2x00_wait_for_reset_ready
+ * Wait till the HBA is online after going through
+ *    Wait till the HBA is online after going through
+ *    <= MAX_RETRIES_OF_ISP_ABORT, or until the HBA is
+ *    disabled (i.e. marked offline) or flash operations
+ *    are in progress.
+ * Input:
+ * ha - pointer to host adapter structure
+ *
+ * Note:
+ *    Does context switching. Release SPIN_LOCK
+ *    (if any) before calling this routine.
+ *
+ * Return:
+ * Success (Adapter is online/no flash ops) : 0
+ * Failed (Adapter is offline/disabled/flash ops in progress) : 1
+ */
+int
+qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
+{
+ int return_status;
+ unsigned long wait_online;
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
+ while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
+ test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+ ha->optrom_state != QLA_SWAITING ||
+ ha->dpc_active) && time_before(jiffies, wait_online))
+ msleep(1000);
+
+ if (base_vha->flags.online && ha->optrom_state == QLA_SWAITING)
+ return_status = QLA_SUCCESS;
+ else
+ return_status = QLA_FUNCTION_FAILED;
+
+ DEBUG2(printk("%s return_status=%d\n", __func__, return_status));
+
+ return return_status;
+}
+
int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
@@ -739,7 +847,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
if (sp == NULL)
continue;
- if (sp->ctx)
+ if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID) &&
+ !IS_PROT_IO(sp))
continue;
if (sp->cmd != cmd)
continue;
@@ -805,7 +914,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
sp = req->outstanding_cmds[cnt];
if (!sp)
continue;
- if (sp->ctx)
+ if ((sp->ctx) && !IS_PROT_IO(sp))
continue;
if (vha->vp_idx != sp->fcport->vha->vp_idx)
continue;
@@ -834,6 +943,24 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
return status;
}
+void qla82xx_wait_for_pending_commands(scsi_qla_host_t *vha)
+{
+ int cnt;
+ srb_t *sp;
+ struct req_que *req = vha->req;
+
+ DEBUG2(qla_printk(KERN_INFO, vha->hw,
+ "Waiting for pending commands\n"));
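+	/* Walk every outstanding command slot and wait for each to complete. */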
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
+ sp, WAIT_HOST) == QLA_SUCCESS) {
+ DEBUG2(qla_printk(KERN_INFO, vha->hw,
+ "Done wait for pending commands\n"));
+ }
+ }
+}
+
static char *reset_errors[] = {
"HBA not online",
"HBA not ready",
@@ -1004,7 +1131,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
- if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
+ if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
goto eh_host_reset_lock;
/*
@@ -1020,11 +1147,19 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (qla2x00_vp_abort_isp(vha))
goto eh_host_reset_lock;
} else {
+ if (IS_QLA82XX(vha->hw)) {
+ if (!qla82xx_fcoe_ctx_reset(vha)) {
+ /* Ctx reset success */
+ ret = SUCCESS;
+ goto eh_host_reset_lock;
+ }
+ /* fall thru if ctx reset failed */
+ }
if (ha->wq)
flush_workqueue(ha->wq);
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
- if (qla2x00_abort_isp(base_vha)) {
+ if (ha->isp_ops->abort_isp(base_vha)) {
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
/* failed. schedule dpc to try */
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
@@ -1064,7 +1199,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
struct fc_port *fcport;
struct qla_hw_data *ha = vha->hw;
- if (ha->flags.enable_target_reset) {
+ if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->port_type != FCT_TARGET)
continue;
@@ -1078,7 +1213,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
}
}
- if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
+ if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
DEBUG2_3(printk("%s(%ld): failed: "
@@ -1125,23 +1260,28 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
sp = req->outstanding_cmds[cnt];
if (sp) {
req->outstanding_cmds[cnt] = NULL;
- if (!sp->ctx) {
+ if (!sp->ctx ||
+ (sp->flags & SRB_FCP_CMND_DMA_VALID) ||
+ IS_PROT_IO(sp)) {
sp->cmd->result = res;
qla2x00_sp_compl(ha, sp);
} else {
ctx = sp->ctx;
- if (ctx->type == SRB_LOGIN_CMD || ctx->type == SRB_LOGOUT_CMD) {
- del_timer_sync(&ctx->timer);
- ctx->free(sp);
+ if (ctx->type == SRB_LOGIN_CMD ||
+ ctx->type == SRB_LOGOUT_CMD) {
+ ctx->u.iocb_cmd->free(sp);
} else {
- struct srb_bsg* sp_bsg = (struct srb_bsg*)sp->ctx;
- if (sp_bsg->bsg_job->request->msgcode == FC_BSG_HST_CT)
+ struct fc_bsg_job *bsg_job =
+ ctx->u.bsg_job;
+ if (bsg_job->request->msgcode
+ == FC_BSG_HST_CT)
kfree(sp->fcport);
- sp_bsg->bsg_job->req->errors = 0;
- sp_bsg->bsg_job->reply->result = res;
- sp_bsg->bsg_job->job_done(sp_bsg->bsg_job);
+ bsg_job->req->errors = 0;
+ bsg_job->reply->result = res;
+ bsg_job->job_done(bsg_job);
kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
+ mempool_free(sp,
+ ha->srb_mempool);
}
}
}
@@ -1379,6 +1519,7 @@ static struct isp_operations qla2100_isp_ops = {
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
};
static struct isp_operations qla2300_isp_ops = {
@@ -1414,6 +1555,7 @@ static struct isp_operations qla2300_isp_ops = {
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
};
static struct isp_operations qla24xx_isp_ops = {
@@ -1449,6 +1591,7 @@ static struct isp_operations qla24xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
};
static struct isp_operations qla25xx_isp_ops = {
@@ -1483,7 +1626,8 @@ static struct isp_operations qla25xx_isp_ops = {
.read_optrom = qla25xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
- .start_scsi = qla24xx_start_scsi,
+ .start_scsi = qla24xx_dif_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
};
static struct isp_operations qla81xx_isp_ops = {
@@ -1519,6 +1663,43 @@ static struct isp_operations qla81xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
+};
+
+static struct isp_operations qla82xx_isp_ops = {
+ .pci_config = qla82xx_pci_config,
+ .reset_chip = qla82xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla82xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla81xx_nvram_config,
+ .update_fw_options = qla24xx_update_fw_options,
+ .load_risc = qla82xx_load_risc,
+ .pci_info_str = qla82xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla82xx_intr_handler,
+ .enable_intrs = qla82xx_enable_intrs,
+ .disable_intrs = qla82xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = qla24xx_read_nvram_data,
+ .write_nvram = qla24xx_write_nvram_data,
+ .fw_dump = qla24xx_fw_dump,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = qla24xx_beacon_blink,
+ .read_optrom = qla82xx_read_optrom_data,
+ .write_optrom = qla82xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qla82xx_start_scsi,
+ .abort_isp = qla82xx_abort_isp,
};
static inline void
@@ -1607,10 +1788,22 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
+ case PCI_DEVICE_ID_QLOGIC_ISP8021:
+ ha->device_type |= DT_ISP8021;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ /* Initialize 82XX ISP flags */
+ qla82xx_init_flags(ha);
+ break;
}
- /* Get adapter physical port no from interrupt pin register. */
- pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+ if (IS_QLA82XX(ha))
+ ha->port_no = !(ha->portnum & 1);
+ else
+ /* Get adapter physical port no from interrupt pin register. */
+ pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+
if (ha->port_no & 1)
ha->flags.port0 = 1;
else
@@ -1624,6 +1817,9 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
uint16_t msix;
int cpus;
+ if (IS_QLA82XX(ha))
+ return qla82xx_iospace_config(ha);
+
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
qla_printk(KERN_WARNING, ha,
@@ -1767,7 +1963,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001) {
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
}
@@ -1897,6 +2094,19 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
ha->nvram_conf_off = ~0;
ha->nvram_data_off = ~0;
+ } else if (IS_QLA82XX(ha)) {
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_82XX;
+ rsp_length = RESPONSE_ENTRY_CNT_82XX;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_82XX;
+ ha->isp_ops = &qla82xx_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+ ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+ ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
}
mutex_init(&ha->vport_lock);
@@ -1969,6 +2179,7 @@ que_init:
" pointers\n");
goto probe_init_failed;
}
+
ha->rsp_q_map[0] = rsp;
ha->req_q_map[0] = req;
rsp->req = req;
@@ -1987,6 +2198,12 @@ que_init:
rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
}
+ if (IS_QLA82XX(ha)) {
+ req->req_q_out = &ha->iobase->isp82.req_q_out[0];
+ rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
+ rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
+ }
+
if (qla2x00_initialize_adapter(base_vha)) {
qla_printk(KERN_WARNING, ha,
"Failed to initialize adapter\n");
@@ -1995,6 +2212,14 @@ que_init:
"Adapter flags %x.\n",
base_vha->host_no, base_vha->device_flags));
+ if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_FAILED);
+ qla82xx_idc_unlock(ha);
+ qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
+ }
+
ret = -ENODEV;
goto probe_failed;
}
@@ -2033,6 +2258,24 @@ skip_dpc:
DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
base_vha->host_no, ha));
+ if (IS_QLA25XX(ha) && ql2xenabledif) {
+ if (ha->fw_attributes & BIT_4) {
+ base_vha->flags.difdix_supported = 1;
+ DEBUG18(qla_printk(KERN_INFO, ha,
+ "Registering for DIF/DIX type 1 and 3"
+ " protection.\n"));
+ scsi_host_set_prot(host,
+ SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION
+ | SHOST_DIX_TYPE1_PROTECTION
+ | SHOST_DIX_TYPE3_PROTECTION);
+ scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC);
+ } else
+ base_vha->flags.difdix_supported = 0;
+ }
+
+ ha->isp_ops->enable_intrs(ha);
+
ret = scsi_add_host(host, &pdev->dev);
if (ret)
goto probe_failed;
@@ -2040,8 +2283,6 @@ skip_dpc:
base_vha->flags.init_done = 1;
base_vha->flags.online = 1;
- ha->isp_ops->enable_intrs(ha);
-
scsi_scan_host(host);
qla2x00_alloc_sysfs_attr(base_vha);
@@ -2083,9 +2324,17 @@ probe_failed:
scsi_host_put(base_vha->host);
probe_hw_failed:
- if (ha->iobase)
- iounmap(ha->iobase);
-
+ if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_clear_drv_active(ha);
+ qla82xx_idc_unlock(ha);
+ iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+ if (!ql2xdbwr)
+ iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
+ } else {
+ if (ha->iobase)
+ iounmap(ha->iobase);
+ }
pci_release_selected_regions(ha->pdev, ha->bars);
kfree(ha);
ha = NULL;
@@ -2152,11 +2401,17 @@ qla2x00_remove_one(struct pci_dev *pdev)
scsi_host_put(base_vha->host);
- if (ha->iobase)
- iounmap(ha->iobase);
+ if (IS_QLA82XX(ha)) {
+ iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+ if (!ql2xdbwr)
+ iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
+ } else {
+ if (ha->iobase)
+ iounmap(ha->iobase);
- if (ha->mqiobase)
- iounmap(ha->mqiobase);
+ if (ha->mqiobase)
+ iounmap(ha->mqiobase);
+ }
pci_release_selected_regions(ha->pdev, ha->bars);
kfree(ha);
@@ -2205,8 +2460,10 @@ qla2x00_free_device(scsi_qla_host_t *vha)
vha->flags.online = 0;
/* turn-off interrupts on the card */
- if (ha->interrupts_on)
+ if (ha->interrupts_on) {
+ vha->flags.init_done = 0;
ha->isp_ops->disable_intrs(ha);
+ }
qla2x00_free_irqs(vha);
@@ -2351,10 +2608,25 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->srb_mempool)
goto fail_free_gid_list;
+ if (IS_QLA82XX(ha)) {
+ /* Allocate cache for CT6 Ctx. */
+ if (!ctx_cachep) {
+ ctx_cachep = kmem_cache_create("qla2xxx_ctx",
+ sizeof(struct ct6_dsd), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!ctx_cachep)
+ goto fail_free_gid_list;
+ }
+ ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
+ ctx_cachep);
+ if (!ha->ctx_mempool)
+ goto fail_free_srb_mempool;
+ }
+
/* Get memory for cached NVRAM */
ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
if (!ha->nvram)
- goto fail_free_srb_mempool;
+ goto fail_free_ctx_mempool;
snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
ha->pdev->device);
@@ -2363,6 +2635,24 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->s_dma_pool)
goto fail_free_nvram;
+ if (IS_QLA82XX(ha) || ql2xenabledif) {
+ ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ DSD_LIST_DMA_POOL_SIZE, 8, 0);
+ if (!ha->dl_dma_pool) {
+ qla_printk(KERN_WARNING, ha,
+ "Memory Allocation failed - dl_dma_pool\n");
+ goto fail_s_dma_pool;
+ }
+
+ ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ FCP_CMND_DMA_POOL_SIZE, 8, 0);
+ if (!ha->fcp_cmnd_dma_pool) {
+ qla_printk(KERN_WARNING, ha,
+ "Memory Allocation failed - fcp_cmnd_dma_pool\n");
+ goto fail_dl_dma_pool;
+ }
+ }
+
/* Allocate memory for SNS commands */
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
/* Get consistent memory allocated for SNS commands */
@@ -2429,16 +2719,28 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->npiv_info = NULL;
/* Get consistent memory allocated for EX-INIT-CB. */
- if (IS_QLA81XX(ha)) {
+ if (IS_QLA8XXX_TYPE(ha)) {
ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->ex_init_cb_dma);
if (!ha->ex_init_cb)
goto fail_ex_init_cb;
}
+ INIT_LIST_HEAD(&ha->gbl_dsd_list);
+
+ /* Get consistent memory allocated for Async Port-Database. */
+ if (!IS_FWI2_CAPABLE(ha)) {
+ ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+ &ha->async_pd_dma);
+ if (!ha->async_pd)
+ goto fail_async_pd;
+ }
+
INIT_LIST_HEAD(&ha->vp_list);
return 1;
+fail_async_pd:
+ dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
fail_ex_init_cb:
kfree(ha->npiv_info);
fail_npiv_info:
@@ -2465,11 +2767,24 @@ fail_free_ms_iocb:
ha->ms_iocb = NULL;
ha->ms_iocb_dma = 0;
fail_dma_pool:
+ if (IS_QLA82XX(ha) || ql2xenabledif) {
+ dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+ ha->fcp_cmnd_dma_pool = NULL;
+ }
+fail_dl_dma_pool:
+ if (IS_QLA82XX(ha) || ql2xenabledif) {
+ dma_pool_destroy(ha->dl_dma_pool);
+ ha->dl_dma_pool = NULL;
+ }
+fail_s_dma_pool:
dma_pool_destroy(ha->s_dma_pool);
ha->s_dma_pool = NULL;
fail_free_nvram:
kfree(ha->nvram);
ha->nvram = NULL;
+fail_free_ctx_mempool:
+ mempool_destroy(ha->ctx_mempool);
+ ha->ctx_mempool = NULL;
fail_free_srb_mempool:
mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
@@ -2538,7 +2853,11 @@ qla2x00_mem_free(struct qla_hw_data *ha)
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
if (ha->ex_init_cb)
- dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
+ dma_pool_free(ha->s_dma_pool,
+ ha->ex_init_cb, ha->ex_init_cb_dma);
+
+ if (ha->async_pd)
+ dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
if (ha->s_dma_pool)
dma_pool_destroy(ha->s_dma_pool);
@@ -2547,14 +2866,39 @@ qla2x00_mem_free(struct qla_hw_data *ha)
dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
ha->gid_list_dma);
+ if (IS_QLA82XX(ha)) {
+ if (!list_empty(&ha->gbl_dsd_list)) {
+ struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+ /* clean up allocated prev pool */
+ list_for_each_entry_safe(dsd_ptr,
+ tdsd_ptr, &ha->gbl_dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool,
+ dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
+ list_del(&dsd_ptr->list);
+ kfree(dsd_ptr);
+ }
+ }
+ }
+
+ if (ha->dl_dma_pool)
+ dma_pool_destroy(ha->dl_dma_pool);
+
+ if (ha->fcp_cmnd_dma_pool)
+ dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+
+ if (ha->ctx_mempool)
+ mempool_destroy(ha->ctx_mempool);
+
if (ha->init_cb)
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
- ha->init_cb, ha->init_cb_dma);
+ ha->init_cb, ha->init_cb_dma);
vfree(ha->optrom_buffer);
kfree(ha->nvram);
kfree(ha->npiv_info);
ha->srb_mempool = NULL;
+ ha->ctx_mempool = NULL;
ha->eft = NULL;
ha->eft_dma = 0;
ha->sns_cmd = NULL;
@@ -2567,8 +2911,12 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->init_cb_dma = 0;
ha->ex_init_cb = NULL;
ha->ex_init_cb_dma = 0;
+ ha->async_pd = NULL;
+ ha->async_pd_dma = 0;
ha->s_dma_pool = NULL;
+ ha->dl_dma_pool = NULL;
+ ha->fcp_cmnd_dma_pool = NULL;
ha->gid_list = NULL;
ha->gid_list_dma = 0;
@@ -2691,6 +3039,8 @@ qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
+qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
+qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
int
qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
@@ -2760,6 +3110,14 @@ qla2x00_do_work(struct scsi_qla_host *vha)
qla2x00_async_logout_done(vha, e->u.logio.fcport,
e->u.logio.data);
break;
+ case QLA_EVT_ASYNC_ADISC:
+ qla2x00_async_adisc(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
+ case QLA_EVT_ASYNC_ADISC_DONE:
+ qla2x00_async_adisc_done(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
case QLA_EVT_UEVENT:
qla2x00_uevent_emit(vha, e->u.uevent.code);
break;
@@ -2785,9 +3143,8 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
* If the port is not ONLINE then try to login
* to it if we haven't run out of retries.
*/
- if (atomic_read(&fcport->state) !=
- FCS_ONLINE && fcport->login_retry) {
-
+ if (atomic_read(&fcport->state) != FCS_ONLINE &&
+ fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
fcport->login_retry--;
if (fcport->flags & FCF_FABRIC_DEVICE) {
if (fcport->flags & FCF_FCP2_DEVICE)
@@ -2798,6 +3155,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
fcport->d_id.b.al_pa);
if (IS_ALOGIO_CAPABLE(ha)) {
+ fcport->flags |= FCF_ASYNC_SENT;
data[0] = 0;
data[1] = QLA_LOGIO_LOGIN_RETRIED;
status = qla2x00_post_async_login_work(
@@ -2896,6 +3254,45 @@ qla2x00_do_dpc(void *data)
qla2x00_do_work(base_vha);
+ if (IS_QLA82XX(ha)) {
+ if (test_and_clear_bit(ISP_UNRECOVERABLE,
+ &base_vha->dpc_flags)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_FAILED);
+ qla82xx_idc_unlock(ha);
+ qla_printk(KERN_INFO, ha,
+ "HW State: FAILED\n");
+ qla82xx_device_state_handler(base_vha);
+ continue;
+ }
+
+ if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
+ &base_vha->dpc_flags)) {
+
+ DEBUG(printk(KERN_INFO
+ "scsi(%ld): dpc: sched "
+ "qla82xx_fcoe_ctx_reset ha = %p\n",
+ base_vha->host_no, ha));
+ if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
+ &base_vha->dpc_flags))) {
+ if (qla82xx_fcoe_ctx_reset(base_vha)) {
+ /* FCoE-ctx reset failed.
+ * Escalate to chip-reset
+ */
+ set_bit(ISP_ABORT_NEEDED,
+ &base_vha->dpc_flags);
+ }
+ clear_bit(ABORT_ISP_ACTIVE,
+ &base_vha->dpc_flags);
+ }
+
+ DEBUG(printk("scsi(%ld): dpc:"
+ " qla82xx_fcoe_ctx_reset end\n",
+ base_vha->host_no));
+ }
+ }
+
if (test_and_clear_bit(ISP_ABORT_NEEDED,
&base_vha->dpc_flags)) {
@@ -2905,7 +3302,7 @@ qla2x00_do_dpc(void *data)
if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
- if (qla2x00_abort_isp(base_vha)) {
+ if (ha->isp_ops->abort_isp(base_vha)) {
/* failed. retry later */
set_bit(ISP_ABORT_NEEDED,
&base_vha->dpc_flags);
@@ -3038,11 +3435,31 @@ static void
qla2x00_sp_free_dma(srb_t *sp)
{
struct scsi_cmnd *cmd = sp->cmd;
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
if (sp->flags & SRB_DMA_VALID) {
scsi_dma_unmap(cmd);
sp->flags &= ~SRB_DMA_VALID;
}
+
+ if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+ dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+ /* List is assured to have elements */
+ qla2x00_clean_dsd_pool(ha, sp);
+ sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+ dma_pool_free(ha->dl_dma_pool, sp->ctx,
+ ((struct crc_context *)sp->ctx)->crc_ctx_dma);
+ sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+ }
+
CMD_SP(cmd) = NULL;
}
@@ -3053,8 +3470,18 @@ qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
qla2x00_sp_free_dma(sp);
- mempool_free(sp, ha->srb_mempool);
+ if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+ struct ct6_dsd *ctx = sp->ctx;
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
+ ctx->fcp_cmnd_dma);
+ list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
+ ha->gbl_dsd_avail += ctx->dsd_use_cnt;
+ mempool_free(sp->ctx, ha->ctx_mempool);
+ sp->ctx = NULL;
+ }
+ mempool_free(sp, ha->srb_mempool);
cmd->scsi_done(cmd);
}
@@ -3079,6 +3506,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
+ if (IS_QLA82XX(ha))
+ qla82xx_watchdog(vha);
+
/* Hardware read to raise pending EEH errors during mailbox waits. */
if (!pci_channel_offline(ha->pdev))
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
@@ -3143,7 +3573,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
sp = req->outstanding_cmds[index];
if (!sp)
continue;
- if (sp->ctx)
+ if (sp->ctx && !IS_PROT_IO(sp))
continue;
sfcp = sp->fcport;
if (!(sfcp->flags & FCF_FCP2_DEVICE))
@@ -3193,6 +3623,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
start_dpc ||
test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
+ test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
+ test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
qla2xxx_wake_dpc(vha);
@@ -3202,7 +3634,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* Firmware interface routines. */
-#define FW_BLOBS 7
+#define FW_BLOBS 8
#define FW_ISP21XX 0
#define FW_ISP22XX 1
#define FW_ISP2300 2
@@ -3210,6 +3642,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_ISP24XX 4
#define FW_ISP25XX 5
#define FW_ISP81XX 6
+#define FW_ISP82XX 7
#define FW_FILE_ISP21XX "ql2100_fw.bin"
#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -3218,6 +3651,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_FILE_ISP24XX "ql2400_fw.bin"
#define FW_FILE_ISP25XX "ql2500_fw.bin"
#define FW_FILE_ISP81XX "ql8100_fw.bin"
+#define FW_FILE_ISP82XX "ql8200_fw.bin"
static DEFINE_MUTEX(qla_fw_lock);
@@ -3229,6 +3663,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
{ .name = FW_FILE_ISP24XX, },
{ .name = FW_FILE_ISP25XX, },
{ .name = FW_FILE_ISP81XX, },
+ { .name = FW_FILE_ISP82XX, },
};
struct fw_blob *
@@ -3252,6 +3687,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
blob = &qla_fw_blobs[FW_ISP25XX];
} else if (IS_QLA81XX(ha)) {
blob = &qla_fw_blobs[FW_ISP81XX];
+ } else if (IS_QLA82XX(ha)) {
+ blob = &qla_fw_blobs[FW_ISP82XX];
}
mutex_lock(&qla_fw_lock);
@@ -3392,11 +3829,10 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
msleep(1000);
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
- if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
+ if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
ret = PCI_ERS_RESULT_RECOVERED;
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
- pci_cleanup_aer_uncorrect_error_status(pdev);
DEBUG17(qla_printk(KERN_WARNING, ha,
"slot_reset-return:ret=%x\n", ret));
@@ -3420,6 +3856,8 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
"from slot/link_reset");
}
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
ha->flags.eeh_busy = 0;
}
@@ -3445,6 +3883,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
@@ -3460,6 +3899,10 @@ static struct pci_driver qla2xxx_pci_driver = {
.err_handler = &qla2xxx_err_handler,
};
+static struct file_operations apidev_fops = {
+ .owner = THIS_MODULE,
+};
+
/**
* qla2x00_module_init - Module initialization.
**/
@@ -3488,6 +3931,13 @@ qla2x00_module_init(void)
kmem_cache_destroy(srb_cachep);
return -ENODEV;
}
+
+ apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
+ if (apidev_major < 0) {
+ printk(KERN_WARNING "qla2xxx: Unable to register char device "
+ "%s\n", QLA2XXX_APIDEV);
+ }
+
qla2xxx_transport_vport_template =
fc_attach_transport(&qla2xxx_transport_vport_functions);
if (!qla2xxx_transport_vport_template) {
@@ -3513,9 +3963,12 @@ qla2x00_module_init(void)
static void __exit
qla2x00_module_exit(void)
{
+ unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
pci_unregister_driver(&qla2xxx_pci_driver);
qla2x00_release_firmware();
kmem_cache_destroy(srb_cachep);
+ if (ctx_cachep)
+ kmem_cache_destroy(ctx_cachep);
fc_release_transport(qla2xxx_transport_template);
fc_release_transport(qla2xxx_transport_vport_template);
}
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 8b3de4e..de92504 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -423,9 +423,6 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
/* Flash Manipulation Routines */
/*****************************************************************************/
-#define OPTROM_BURST_SIZE 0x1000
-#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
-
static inline uint32_t
flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr)
{
@@ -565,6 +562,10 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
*start = FA_FLASH_LAYOUT_ADDR;
else if (IS_QLA81XX(ha))
*start = FA_FLASH_LAYOUT_ADDR_81;
+ else if (IS_QLA82XX(ha)) {
+ *start = FA_FLASH_LAYOUT_ADDR_82;
+ goto end;
+ }
/* Begin with first PCI expansion ROM header. */
buf = (uint8_t *)req->ring;
dcode = (uint32_t *)req->ring;
@@ -648,6 +649,12 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
const uint32_t def_npiv_conf1[] =
{ FA_NPIV_CONF1_ADDR_24, FA_NPIV_CONF1_ADDR,
FA_NPIV_CONF1_ADDR_81 };
+ const uint32_t fcp_prio_cfg0[] =
+ { FA_FCP_PRIO0_ADDR, FA_FCP_PRIO0_ADDR_25,
+ 0 };
+ const uint32_t fcp_prio_cfg1[] =
+ { FA_FCP_PRIO1_ADDR, FA_FCP_PRIO1_ADDR_25,
+ 0 };
uint32_t def;
uint16_t *wptr;
uint16_t cnt, chksum;
@@ -703,10 +710,14 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
break;
case FLT_REG_VPD_0:
ha->flt_region_vpd_nvram = start;
+ if (IS_QLA82XX(ha))
+ break;
if (ha->flags.port0)
ha->flt_region_vpd = start;
break;
case FLT_REG_VPD_1:
+ if (IS_QLA82XX(ha))
+ break;
if (!ha->flags.port0)
ha->flt_region_vpd = start;
break;
@@ -732,6 +743,29 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
case FLT_REG_GOLD_FW:
ha->flt_region_gold_fw = start;
break;
+ case FLT_REG_FCP_PRIO_0:
+ if (ha->flags.port0)
+ ha->flt_region_fcp_prio = start;
+ break;
+ case FLT_REG_FCP_PRIO_1:
+ if (!ha->flags.port0)
+ ha->flt_region_fcp_prio = start;
+ break;
+ case FLT_REG_BOOT_CODE_82XX:
+ ha->flt_region_boot = start;
+ break;
+ case FLT_REG_FW_82XX:
+ ha->flt_region_fw = start;
+ break;
+ case FLT_REG_GOLD_FW_82XX:
+ ha->flt_region_gold_fw = start;
+ break;
+ case FLT_REG_BOOTLOAD_82XX:
+ ha->flt_region_bootload = start;
+ break;
+ case FLT_REG_VPD_82XX:
+ ha->flt_region_vpd = start;
+ break;
}
}
goto done;
@@ -750,12 +784,14 @@ no_flash_data:
ha->flt_region_boot = def_boot[def];
ha->flt_region_vpd_nvram = def_vpd_nvram[def];
ha->flt_region_vpd = ha->flags.port0 ?
- def_vpd0[def]: def_vpd1[def];
+ def_vpd0[def] : def_vpd1[def];
ha->flt_region_nvram = ha->flags.port0 ?
- def_nvram0[def]: def_nvram1[def];
+ def_nvram0[def] : def_nvram1[def];
ha->flt_region_fdt = def_fdt[def];
ha->flt_region_npiv_conf = ha->flags.port0 ?
- def_npiv_conf0[def]: def_npiv_conf1[def];
+ def_npiv_conf0[def] : def_npiv_conf1[def];
+ ha->flt_region_fcp_prio = ha->flags.port0 ?
+ fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
done:
DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
"vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x "
@@ -775,7 +811,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
uint16_t *wptr;
struct qla_fdt_layout *fdt;
uint8_t man_id, flash_id;
- uint16_t mid, fid;
+ uint16_t mid = 0, fid = 0;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
@@ -816,6 +852,10 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
goto done;
no_flash_data:
loc = locations[0];
+ if (IS_QLA82XX(ha)) {
+ ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+ goto done;
+ }
qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
mid = man_id;
fid = flash_id;
@@ -853,6 +893,31 @@ done:
ha->fdt_block_size));
}
+static void
+qla2xxx_get_idc_param(scsi_qla_host_t *vha)
+{
+#define QLA82XX_IDC_PARAM_ADDR 0x003e885c
+ uint32_t *wptr;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ if (!IS_QLA82XX(ha))
+ return;
+
+ wptr = (uint32_t *)req->ring;
+ ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
+ QLA82XX_IDC_PARAM_ADDR , 8);
+
+ if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
+ ha->nx_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
+ ha->nx_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
+ } else {
+ ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
+ ha->nx_reset_timeout = le32_to_cpu(*wptr);
+ }
+ return;
+}
+
int
qla2xxx_get_flash_info(scsi_qla_host_t *vha)
{
@@ -860,7 +925,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
uint32_t flt_addr;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA81XX(ha))
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha))
return QLA_SUCCESS;
ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -869,6 +934,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
qla2xxx_get_flt_info(vha, flt_addr);
qla2xxx_get_fdt_info(vha);
+ qla2xxx_get_idc_param(vha);
return QLA_SUCCESS;
}
@@ -885,7 +951,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
struct qla_npiv_entry *entry;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA81XX(ha))
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha))
return;
ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
@@ -1178,6 +1244,9 @@ qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
uint32_t *dwptr;
struct qla_hw_data *ha = vha->hw;
+ if (IS_QLA82XX(ha))
+ return buf;
+
/* Dword reads to flash. */
dwptr = (uint32_t *)buf;
for (i = 0; i < bytes >> 2; i++, naddr++)
@@ -1233,6 +1302,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
ret = QLA_SUCCESS;
+ if (IS_QLA82XX(ha))
+ return ret;
+
/* Enable flash write. */
WRT_REG_DWORD(&reg->ctrl_status,
RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
@@ -1344,6 +1416,9 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ if (IS_QLA82XX(ha))
+ return;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Save the Original GPIOE. */
@@ -1525,6 +1600,9 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ if (IS_QLA82XX(ha))
+ return QLA_SUCCESS;
+
if (ha->beacon_blink_led == 0) {
/* Enable firmware for update */
ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
@@ -1567,6 +1645,9 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ if (IS_QLA82XX(ha))
+ return QLA_SUCCESS;
+
ha->beacon_blink_led = 0;
ha->beacon_color_state = QLA_LED_ALL_ON;
@@ -2576,6 +2657,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
int i;
struct qla_hw_data *ha = vha->hw;
+ if (IS_QLA82XX(ha))
+ return ret;
+
if (!mbuf)
return QLA_FUNCTION_FAILED;
@@ -2722,3 +2806,50 @@ qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)
return 0;
}
+
+int
+qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
+{
+ int len, max_len;
+ uint32_t fcp_prio_addr;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->fcp_prio_cfg) {
+ ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
+ if (!ha->fcp_prio_cfg) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to allocate memory for fcp priority data "
+ "(%x).\n", FCP_PRIO_CFG_SIZE);
+ return QLA_FUNCTION_FAILED;
+ }
+ }
+ memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
+
+ fcp_prio_addr = ha->flt_region_fcp_prio;
+
+ /* first read the fcp priority data header from flash */
+ ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
+ fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
+
+ if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 0))
+ goto fail;
+
+ /* read remaining FCP CMD config data from flash */
+ fcp_prio_addr += (FCP_PRIO_CFG_HDR_SIZE >> 2);
+ len = ha->fcp_prio_cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE;
+ max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE;
+
+ ha->isp_ops->read_optrom(vha, (uint8_t *)&ha->fcp_prio_cfg->entry[0],
+ fcp_prio_addr << 2, (len < max_len ? len : max_len));
+
+ /* revalidate the entire FCP priority config data, including entries */
+ if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 1))
+ goto fail;
+
+ ha->flags.fcp_prio_enabled = 1;
+ return QLA_SUCCESS;
+fail:
+ vfree(ha->fcp_prio_cfg);
+ ha->fcp_prio_cfg = NULL;
+ return QLA_FUNCTION_FAILED;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 81b5f29..4288026 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -114,6 +114,7 @@
*/
#define MAC_ADDR_LEN 6 /* in bytes */
#define IP_ADDR_LEN 4 /* in bytes */
+#define IPv6_ADDR_LEN 16 /* IPv6 address size */
#define DRIVER_NAME "qla4xxx"
#define MAX_LINKED_CMDS_PER_LUN 3
@@ -147,6 +148,8 @@
#define MAX_RESET_HA_RETRIES 2
+#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
+
/*
* SCSI Request Block structure (srb) that is placed
* on cmd->SCp location of every I/O [We have 22 bytes available]
@@ -169,7 +172,7 @@ struct srb {
struct scsi_cmnd *cmd; /* (4) SCSI command block */
dma_addr_t dma_handle; /* (4) for unmap of single transfers */
- atomic_t ref_count; /* reference count for this srb */
+ struct kref srb_ref; /* reference count for this srb */
uint32_t fw_ddb_index;
uint8_t err_id; /* error id */
#define SRB_ERR_PORT 1 /* Request failed because "port down" */
@@ -220,7 +223,7 @@ struct ddb_entry {
uint16_t os_target_id; /* Target ID */
uint16_t fw_ddb_index; /* DDB firmware index */
- uint8_t reserved[2];
+ uint16_t options;
uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
uint32_t CmdSn;
@@ -245,10 +248,18 @@ struct ddb_entry {
uint16_t port;
uint32_t tpgt;
- uint8_t ip_addr[ISCSI_IPADDR_SIZE];
+ uint8_t ip_addr[IP_ADDR_LEN];
uint8_t iscsi_name[ISCSI_NAME_SIZE]; /* 72 x48 */
uint8_t iscsi_alias[0x20];
uint8_t isid[6];
+ uint16_t iscsi_max_burst_len;
+ uint16_t iscsi_max_outsnd_r2t;
+ uint16_t iscsi_first_burst_len;
+ uint16_t iscsi_max_rcv_data_seg_len;
+ uint16_t iscsi_max_snd_data_seg_len;
+
+ struct in6_addr remote_ipv6_addr;
+ struct in6_addr link_local_ipv6_addr;
};
/*
@@ -301,6 +312,7 @@ struct scsi_qla_host {
#define DPC_ISNS_RESTART 7 /* 0x00000080 */
#define DPC_AEN 9 /* 0x00000200 */
#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
+#define DPC_LINK_CHANGED 18 /* 0x00040000 */
struct Scsi_Host *host; /* pointer to host data */
uint32_t tot_ddbs;
@@ -320,8 +332,7 @@ struct scsi_qla_host {
#define MIN_IOBASE_LEN 0x100
uint16_t req_q_count;
- uint8_t marker_needed;
- uint8_t rsvd1;
+ uint8_t rsvd1[2];
unsigned long host_no;
@@ -441,8 +452,35 @@ struct scsi_qla_host {
/* Saved srb for status continuation entry processing */
struct srb *status_srb;
+
+ /* IPv6 support info from InitFW */
+ uint8_t acb_version;
+ uint8_t ipv4_addr_state;
+ uint16_t ipv4_options;
+
+ uint32_t resvd2;
+ uint32_t ipv6_options;
+ uint32_t ipv6_addl_options;
+ uint8_t ipv6_link_local_state;
+ uint8_t ipv6_addr0_state;
+ uint8_t ipv6_addr1_state;
+ uint8_t ipv6_default_router_state;
+ struct in6_addr ipv6_link_local_addr;
+ struct in6_addr ipv6_addr0;
+ struct in6_addr ipv6_addr1;
+ struct in6_addr ipv6_default_router_addr;
};
+static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
+{
+ return ((ha->ipv4_options & IPOPT_IPv4_PROTOCOL_ENABLE) != 0);
+}
+
+static inline int is_ipv6_enabled(struct scsi_qla_host *ha)
+{
+ return ((ha->ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) != 0);
+}
+
static inline int is_qla4010(struct scsi_qla_host *ha)
{
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010;
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 9cd7a60..855226e 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -215,6 +215,7 @@ union external_hw_config_reg {
/* Mailbox command definitions */
#define MBOX_CMD_ABOUT_FW 0x0009
#define MBOX_CMD_PING 0x000B
+#define MBOX_CMD_ABORT_TASK 0x0015
#define MBOX_CMD_LUN_RESET 0x0016
#define MBOX_CMD_TARGET_WARM_RESET 0x0017
#define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E
@@ -258,13 +259,15 @@ union external_hw_config_reg {
/* Mailbox 1 */
#define FW_STATE_READY 0x0000
#define FW_STATE_CONFIG_WAIT 0x0001
-#define FW_STATE_WAIT_LOGIN 0x0002
+#define FW_STATE_WAIT_AUTOCONNECT 0x0002
#define FW_STATE_ERROR 0x0004
-#define FW_STATE_DHCP_IN_PROGRESS 0x0008
+#define FW_STATE_CONFIGURING_IP 0x0008
/* Mailbox 3 */
#define FW_ADDSTATE_OPTICAL_MEDIA 0x0001
-#define FW_ADDSTATE_DHCP_ENABLED 0x0002
+#define FW_ADDSTATE_DHCPv4_ENABLED 0x0002
+#define FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED 0x0004
+#define FW_ADDSTATE_DHCPv4_LEASE_EXPIRED 0x0008
#define FW_ADDSTATE_LINK_UP 0x0010
#define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020
#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B
@@ -320,6 +323,8 @@ union external_hw_config_reg {
/* Host Adapter Initialization Control Block (from host) */
struct addr_ctrl_blk {
uint8_t version; /* 00 */
+#define IFCB_VER_MIN 0x01
+#define IFCB_VER_MAX 0x02
uint8_t control; /* 01 */
uint16_t fw_options; /* 02-03 */
@@ -351,11 +356,16 @@ struct addr_ctrl_blk {
uint16_t iscsi_opts; /* 30-31 */
uint16_t ipv4_tcp_opts; /* 32-33 */
uint16_t ipv4_ip_opts; /* 34-35 */
+#define IPOPT_IPv4_PROTOCOL_ENABLE 0x8000
uint16_t iscsi_max_pdu_size; /* 36-37 */
uint8_t ipv4_tos; /* 38 */
uint8_t ipv4_ttl; /* 39 */
uint8_t acb_version; /* 3A */
+#define ACB_NOT_SUPPORTED 0x00
+#define ACB_SUPPORTED 0x02 /* Capable of ACB Version 2
+ Features */
+
uint8_t res2; /* 3B */
uint16_t def_timeout; /* 3C-3D */
uint16_t iscsi_fburst_len; /* 3E-3F */
@@ -397,16 +407,35 @@ struct addr_ctrl_blk {
uint32_t cookie; /* 200-203 */
uint16_t ipv6_port; /* 204-205 */
uint16_t ipv6_opts; /* 206-207 */
+#define IPV6_OPT_IPV6_PROTOCOL_ENABLE 0x8000
+
uint16_t ipv6_addtl_opts; /* 208-209 */
+#define IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE 0x0002 /* Pri ACB
+ Only */
+#define IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR 0x0001
+
uint16_t ipv6_tcp_opts; /* 20A-20B */
uint8_t ipv6_tcp_wsf; /* 20C */
uint16_t ipv6_flow_lbl; /* 20D-20F */
- uint8_t ipv6_gw_addr[16]; /* 210-21F */
+ uint8_t ipv6_dflt_rtr_addr[16]; /* 210-21F */
uint16_t ipv6_vlan_tag; /* 220-221 */
uint8_t ipv6_lnk_lcl_addr_state;/* 222 */
uint8_t ipv6_addr0_state; /* 223 */
uint8_t ipv6_addr1_state; /* 224 */
- uint8_t ipv6_gw_state; /* 225 */
+#define IP_ADDRSTATE_UNCONFIGURED 0
+#define IP_ADDRSTATE_INVALID 1
+#define IP_ADDRSTATE_ACQUIRING 2
+#define IP_ADDRSTATE_TENTATIVE 3
+#define IP_ADDRSTATE_DEPRICATED 4
+#define IP_ADDRSTATE_PREFERRED 5
+#define IP_ADDRSTATE_DISABLING 6
+
+ uint8_t ipv6_dflt_rtr_state; /* 225 */
+#define IPV6_RTRSTATE_UNKNOWN 0
+#define IPV6_RTRSTATE_MANUAL 1
+#define IPV6_RTRSTATE_ADVERTISED 3
+#define IPV6_RTRSTATE_STALE 4
+
uint8_t ipv6_traffic_class; /* 226 */
uint8_t ipv6_hop_limit; /* 227 */
uint8_t ipv6_if_id[8]; /* 228-22F */
@@ -424,7 +453,7 @@ struct addr_ctrl_blk {
struct init_fw_ctrl_blk {
struct addr_ctrl_blk pri;
- struct addr_ctrl_blk sec;
+/* struct addr_ctrl_blk sec;*/
};
/*************************************************************************/
@@ -433,6 +462,9 @@ struct dev_db_entry {
uint16_t options; /* 00-01 */
#define DDB_OPT_DISC_SESSION 0x10
#define DDB_OPT_TARGET 0x02 /* device is a target */
+#define DDB_OPT_IPV6_DEVICE 0x100
+#define DDB_OPT_IPV6_NULL_LINK_LOCAL 0x800 /* post connection */
+#define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL 0x800 /* pre connection */
uint16_t exec_throttle; /* 02-03 */
uint16_t exec_count; /* 04-05 */
@@ -468,7 +500,7 @@ struct dev_db_entry {
* pointer to a string so we
* don't have to reserve soooo
* much RAM */
- uint8_t ipv6_addr[0x10];/* 1A0-1AF */
+ uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */
uint8_t res5[0x10]; /* 1B0-1BF */
uint16_t ddb_link; /* 1C0-1C1 */
uint16_t chap_tbl_idx; /* 1C2-1C3 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 96ebfb0..c4636f6 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -25,6 +25,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen);
int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha);
int qla4xxx_relogin_device(struct scsi_qla_host * ha,
struct ddb_entry * ddb_entry);
+int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb);
int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
int lun);
int qla4xxx_reset_target(struct scsi_qla_host * ha,
@@ -65,13 +66,14 @@ void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
int qla4xxx_init_rings(struct scsi_qla_host * ha);
struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
uint32_t index);
-void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb);
+void qla4xxx_srb_compl(struct kref *ref);
int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha);
-int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
- uint32_t fw_ddb_index, uint32_t state);
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ uint32_t state, uint32_t conn_error);
void qla4xxx_dump_buffer(void *b, uint32_t size);
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod);
+int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err);
extern int ql4xextended_error_logging;
extern int ql4xdiscoverywait;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 92329a4..5510df8 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -189,6 +189,78 @@ static int qla4xxx_init_local_data(struct scsi_qla_host *ha)
return qla4xxx_get_firmware_status(ha);
}
+static uint8_t
+qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
+{
+ uint8_t ipv4_wait = 0;
+ uint8_t ipv6_wait = 0;
+ int8_t ip_address[IPv6_ADDR_LEN] = {0} ;
+
+ /* If both IPv4 & IPv6 are enabled, possibly only one
+ * IP address may be acquired, so check to see if we
+ * need to wait for another */
+ if (is_ipv4_enabled(ha) && is_ipv6_enabled(ha)) {
+ if (((ha->addl_fw_state & FW_ADDSTATE_DHCPv4_ENABLED) != 0) &&
+ ((ha->addl_fw_state &
+ FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED) == 0)) {
+ ipv4_wait = 1;
+ }
+ if (((ha->ipv6_addl_options &
+ IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) != 0) &&
+ ((ha->ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING) ||
+ (ha->ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING) ||
+ (ha->ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING))) {
+
+ ipv6_wait = 1;
+
+ if ((ha->ipv6_link_local_state ==
+ IP_ADDRSTATE_PREFERRED) ||
+ (ha->ipv6_addr0_state == IP_ADDRSTATE_PREFERRED) ||
+ (ha->ipv6_addr1_state == IP_ADDRSTATE_PREFERRED)) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
+ "Preferred IP configured."
+ " Don't wait!\n", ha->host_no,
+ __func__));
+ ipv6_wait = 0;
+ }
+ if (memcmp(&ha->ipv6_default_router_addr, ip_address,
+ IPv6_ADDR_LEN) == 0) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
+ "No Router configured. "
+ "Don't wait!\n", ha->host_no,
+ __func__));
+ ipv6_wait = 0;
+ }
+ if ((ha->ipv6_default_router_state ==
+ IPV6_RTRSTATE_MANUAL) &&
+ (ha->ipv6_link_local_state ==
+ IP_ADDRSTATE_TENTATIVE) &&
+ (memcmp(&ha->ipv6_link_local_addr,
+ &ha->ipv6_default_router_addr, 4) == 0)) {
+ DEBUG2(printk("scsi%ld: %s: LinkLocal Router & "
+ "IP configured. Don't wait!\n",
+ ha->host_no, __func__));
+ ipv6_wait = 0;
+ }
+ }
+ if (ipv4_wait || ipv6_wait) {
+ DEBUG2(printk("scsi%ld: %s: Wait for additional "
+ "IP(s) \"", ha->host_no, __func__));
+ if (ipv4_wait)
+ DEBUG2(printk("IPv4 "));
+ if (ha->ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING)
+ DEBUG2(printk("IPv6LinkLocal "));
+ if (ha->ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING)
+ DEBUG2(printk("IPv6Addr0 "));
+ if (ha->ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING)
+ DEBUG2(printk("IPv6Addr1 "));
+ DEBUG2(printk("\"\n"));
+ }
+ }
+
+ return ipv4_wait|ipv6_wait;
+}
+
static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
{
uint32_t timeout_count;
@@ -226,38 +298,80 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
continue;
}
+ if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:"
+ "AUTOCONNECT in progress\n",
+ ha->host_no, __func__));
+ }
+
+ if (ha->firmware_state & FW_STATE_CONFIGURING_IP) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:"
+ " CONFIGURING IP\n",
+ ha->host_no, __func__));
+ /*
+ * Check for link state after 15 secs and if link is
+ * still DOWN then the cable is unplugged. Ignore "DHCP
+ * in Progress/CONFIGURING IP" bit to check if firmware
+ * is in ready state or not after 15 secs.
+ * This is applicable for both 2.x & 3.x firmware
+ */
+ if (timeout_count <= (ADAPTER_INIT_TOV - 15)) {
+ if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s:"
+ " LINK UP (Cable plugged)\n",
+ ha->host_no, __func__));
+ } else if (ha->firmware_state &
+ (FW_STATE_CONFIGURING_IP |
+ FW_STATE_READY)) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
+ "LINK DOWN (Cable unplugged)\n",
+ ha->host_no, __func__));
+ ha->firmware_state = FW_STATE_READY;
+ }
+ }
+ }
+
if (ha->firmware_state == FW_STATE_READY) {
- DEBUG2(dev_info(&ha->pdev->dev, "Firmware Ready..\n"));
- /* The firmware is ready to process SCSI commands. */
- DEBUG2(dev_info(&ha->pdev->dev,
- "scsi%ld: %s: MEDIA TYPE - %s\n",
- ha->host_no,
- __func__, (ha->addl_fw_state &
- FW_ADDSTATE_OPTICAL_MEDIA)
- != 0 ? "OPTICAL" : "COPPER"));
- DEBUG2(dev_info(&ha->pdev->dev,
- "scsi%ld: %s: DHCP STATE Enabled "
- "%s\n",
- ha->host_no, __func__,
- (ha->addl_fw_state &
- FW_ADDSTATE_DHCP_ENABLED) != 0 ?
- "YES" : "NO"));
- DEBUG2(dev_info(&ha->pdev->dev,
- "scsi%ld: %s: LINK %s\n",
- ha->host_no, __func__,
- (ha->addl_fw_state &
- FW_ADDSTATE_LINK_UP) != 0 ?
- "UP" : "DOWN"));
- DEBUG2(dev_info(&ha->pdev->dev,
- "scsi%ld: %s: iSNS Service "
- "Started %s\n",
- ha->host_no, __func__,
- (ha->addl_fw_state &
- FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ?
- "YES" : "NO"));
-
- ready = 1;
- break;
+ /* If DHCP IP Addr is available, retrieve it now. */
+ if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR,
+ &ha->dpc_flags))
+ qla4xxx_get_dhcp_ip_address(ha);
+
+ if (!qla4xxx_wait_for_ip_config(ha) ||
+ timeout_count == 1) {
+ DEBUG2(dev_info(&ha->pdev->dev,
+ "Firmware Ready..\n"));
+ /* The firmware is ready to process SCSI
+ commands. */
+ DEBUG2(dev_info(&ha->pdev->dev,
+ "scsi%ld: %s: MEDIA TYPE"
+ " - %s\n", ha->host_no,
+ __func__, (ha->addl_fw_state &
+ FW_ADDSTATE_OPTICAL_MEDIA)
+ != 0 ? "OPTICAL" : "COPPER"));
+ DEBUG2(dev_info(&ha->pdev->dev,
+ "scsi%ld: %s: DHCPv4 STATE"
+ " Enabled %s\n", ha->host_no,
+ __func__, (ha->addl_fw_state &
+ FW_ADDSTATE_DHCPv4_ENABLED) != 0 ?
+ "YES" : "NO"));
+ DEBUG2(dev_info(&ha->pdev->dev,
+ "scsi%ld: %s: LINK %s\n",
+ ha->host_no, __func__,
+ (ha->addl_fw_state &
+ FW_ADDSTATE_LINK_UP) != 0 ?
+ "UP" : "DOWN"));
+ DEBUG2(dev_info(&ha->pdev->dev,
+ "scsi%ld: %s: iSNS Service "
+ "Started %s\n",
+ ha->host_no, __func__,
+ (ha->addl_fw_state &
+ FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ?
+ "YES" : "NO"));
+
+ ready = 1;
+ break;
+ }
}
DEBUG2(printk("scsi%ld: %s: waiting on fw, state=%x:%x - "
"seconds expired= %d\n", ha->host_no, __func__,
@@ -272,15 +386,19 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
msleep(1000);
} /* end of for */
- if (timeout_count == 0)
+ if (timeout_count <= 0)
DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n",
ha->host_no, __func__));
- if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS) {
- DEBUG2(printk("scsi%ld: %s: FW is reporting its waiting to"
- " grab an IP address from DHCP server\n",
- ha->host_no, __func__));
+ if (ha->firmware_state & FW_STATE_CONFIGURING_IP) {
+ DEBUG2(printk("scsi%ld: %s: FW initialized, but is reporting "
+ "it's waiting to configure an IP address\n",
+ ha->host_no, __func__));
ready = 1;
+ } else if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) {
+ DEBUG2(printk("scsi%ld: %s: FW initialized, but "
+ "auto-discovery still in process\n",
+ ha->host_no, __func__));
}
return ready;
@@ -387,6 +505,7 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
int status = QLA_ERROR;
+ uint32_t conn_err;
if (ddb_entry == NULL) {
DEBUG2(printk("scsi%ld: %s: ddb_entry is NULL\n", ha->host_no,
@@ -407,7 +526,7 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
fw_ddb_entry_dma, NULL, NULL,
- &ddb_entry->fw_ddb_device_state, NULL,
+ &ddb_entry->fw_ddb_device_state, &conn_err,
&ddb_entry->tcp_source_port_num,
&ddb_entry->connection_id) ==
QLA_ERROR) {
@@ -419,6 +538,7 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
}
status = QLA_SUCCESS;
+ ddb_entry->options = le16_to_cpu(fw_ddb_entry->options);
ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->tsid);
ddb_entry->task_mgmt_timeout =
le16_to_cpu(fw_ddb_entry->def_timeout);
@@ -442,11 +562,44 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0],
min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ip_addr)));
- DEBUG2(printk("scsi%ld: %s: ddb[%d] - State= %x status= %d.\n",
- ha->host_no, __func__, fw_ddb_index,
- ddb_entry->fw_ddb_device_state, status));
-
- exit_update_ddb:
+ ddb_entry->iscsi_max_burst_len = fw_ddb_entry->iscsi_max_burst_len;
+ ddb_entry->iscsi_max_outsnd_r2t = fw_ddb_entry->iscsi_max_outsnd_r2t;
+ ddb_entry->iscsi_first_burst_len = fw_ddb_entry->iscsi_first_burst_len;
+ ddb_entry->iscsi_max_rcv_data_seg_len =
+ fw_ddb_entry->iscsi_max_rcv_data_seg_len;
+ ddb_entry->iscsi_max_snd_data_seg_len =
+ fw_ddb_entry->iscsi_max_snd_data_seg_len;
+
+ if (ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
+ memcpy(&ddb_entry->remote_ipv6_addr,
+ fw_ddb_entry->ip_addr,
+ min(sizeof(ddb_entry->remote_ipv6_addr),
+ sizeof(fw_ddb_entry->ip_addr)));
+ memcpy(&ddb_entry->link_local_ipv6_addr,
+ fw_ddb_entry->link_local_ipv6_addr,
+ min(sizeof(ddb_entry->link_local_ipv6_addr),
+ sizeof(fw_ddb_entry->link_local_ipv6_addr)));
+
+ DEBUG2(dev_info(&ha->pdev->dev, "%s: DDB[%d] osIdx = %d "
+ "State %04x ConnErr %08x IP %pI6 "
+ ":%04d \"%s\"\n",
+ __func__, fw_ddb_index,
+ ddb_entry->os_target_id,
+ ddb_entry->fw_ddb_device_state,
+ conn_err, fw_ddb_entry->ip_addr,
+ le16_to_cpu(fw_ddb_entry->port),
+ fw_ddb_entry->iscsi_name));
+ } else
+ DEBUG2(dev_info(&ha->pdev->dev, "%s: DDB[%d] osIdx = %d "
+ "State %04x ConnErr %08x IP %pI4 "
+ ":%04d \"%s\"\n",
+ __func__, fw_ddb_index,
+ ddb_entry->os_target_id,
+ ddb_entry->fw_ddb_device_state,
+ conn_err, fw_ddb_entry->ip_addr,
+ le16_to_cpu(fw_ddb_entry->port),
+ fw_ddb_entry->iscsi_name));
+exit_update_ddb:
if (fw_ddb_entry)
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
fw_ddb_entry, fw_ddb_entry_dma);
@@ -492,6 +645,40 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
}
/**
+ * qla4_is_relogin_allowed - Are we allowed to login?
+ * @ha: Pointer to host adapter structure.
+ * @conn_err: Last connection error associated with the ddb
+ *
+ * This routine tests the given connection error to determine if
+ * we are allowed to login.
+ **/
+int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err)
+{
+ uint32_t err_code, login_rsp_sts_class;
+ int relogin = 1;
+
+ err_code = ((conn_err & 0x00ff0000) >> 16);
+ login_rsp_sts_class = ((conn_err & 0x0000ff00) >> 8);
+ if (err_code == 0x1c || err_code == 0x06) {
+ DEBUG2(dev_info(&ha->pdev->dev,
+ ": conn_err=0x%08x, send target completed"
+ " or access denied failure\n", conn_err));
+ relogin = 0;
+ }
+ if ((err_code == 0x08) && (login_rsp_sts_class == 0x02)) {
+ /* Login Response PDU returned an error.
+ Login Response Status in Error Code Detail
+ indicates login should not be retried.*/
+ DEBUG2(dev_info(&ha->pdev->dev,
+ ": conn_err=0x%08x, do not retry relogin\n",
+ conn_err));
+ relogin = 0;
+ }
+
+ return relogin;
+}
+
+/**
* qla4xxx_configure_ddbs - builds driver ddb list
* @ha: Pointer to host adapter structure.
*
@@ -505,18 +692,30 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
uint32_t fw_ddb_index = 0;
uint32_t next_fw_ddb_index = 0;
uint32_t ddb_state;
- uint32_t conn_err, err_code;
+ uint32_t conn_err;
struct ddb_entry *ddb_entry;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ uint32_t ipv6_device;
uint32_t new_tgt;
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(dev_info(&ha->pdev->dev, "%s: DMA alloc failed\n",
+ __func__));
+ return QLA_ERROR;
+ }
+
dev_info(&ha->pdev->dev, "Initializing DDBs ...\n");
for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES;
fw_ddb_index = next_fw_ddb_index) {
/* First, let's see if a device exists here */
- if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, NULL, 0, NULL,
- &next_fw_ddb_index, &ddb_state,
- &conn_err, NULL, NULL) ==
- QLA_ERROR) {
+ if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
+ 0, NULL, &next_fw_ddb_index,
+ &ddb_state, &conn_err,
+ NULL, NULL) ==
+ QLA_ERROR) {
DEBUG2(printk("scsi%ld: %s: get_ddb_entry, "
"fw_ddb_index %d failed", ha->host_no,
__func__, fw_ddb_index));
@@ -533,18 +732,19 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
/* Try and login to device */
DEBUG2(printk("scsi%ld: %s: Login to DDB[%d]\n",
ha->host_no, __func__, fw_ddb_index));
- err_code = ((conn_err & 0x00ff0000) >> 16);
- if (err_code == 0x1c || err_code == 0x06) {
- DEBUG2(printk("scsi%ld: %s send target "
- "completed "
- "or access denied failure\n",
- ha->host_no, __func__));
- } else {
+ ipv6_device = le16_to_cpu(fw_ddb_entry->options) &
+ DDB_OPT_IPV6_DEVICE;
+ if (qla4_is_relogin_allowed(ha, conn_err) &&
+ ((!ipv6_device &&
+ *((uint32_t *)fw_ddb_entry->ip_addr))
+ || ipv6_device)) {
qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0);
if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index,
- NULL, 0, NULL, &next_fw_ddb_index,
- &ddb_state, &conn_err, NULL, NULL)
- == QLA_ERROR) {
+ NULL, 0, NULL,
+ &next_fw_ddb_index,
+ &ddb_state, &conn_err,
+ NULL, NULL)
+ == QLA_ERROR) {
DEBUG2(printk("scsi%ld: %s:"
"get_ddb_entry %d failed\n",
ha->host_no,
@@ -599,7 +799,6 @@ next_one:
struct qla4_relog_scan {
int halt_wait;
uint32_t conn_err;
- uint32_t err_code;
uint32_t fw_ddb_index;
uint32_t next_fw_ddb_index;
uint32_t fw_ddb_device_state;
@@ -609,18 +808,7 @@ static int qla4_test_rdy(struct scsi_qla_host *ha, struct qla4_relog_scan *rs)
{
struct ddb_entry *ddb_entry;
- /*
- * Don't want to do a relogin if connection
- * error is 0x1c.
- */
- rs->err_code = ((rs->conn_err & 0x00ff0000) >> 16);
- if (rs->err_code == 0x1c || rs->err_code == 0x06) {
- DEBUG2(printk(
- "scsi%ld: %s send target"
- " completed or "
- "access denied failure\n",
- ha->host_no, __func__));
- } else {
+ if (qla4_is_relogin_allowed(ha, rs->conn_err)) {
/* We either have a device that is in
* the process of relogging in or a
* device that is waiting to be
@@ -908,7 +1096,7 @@ static void qla4x00_pci_config(struct scsi_qla_host *ha)
static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
{
int status = QLA_ERROR;
- uint32_t max_wait_time;
+ unsigned long max_wait_time;
unsigned long flags;
uint32_t mbox_status;
@@ -940,7 +1128,10 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Wait for firmware to come UP. */
- max_wait_time = FIRMWARE_UP_TOV * 4;
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: Wait up to %d seconds for "
+ "boot firmware to complete...\n",
+ ha->host_no, __func__, FIRMWARE_UP_TOV));
+ max_wait_time = jiffies + (FIRMWARE_UP_TOV * HZ);
do {
uint32_t ctrl_status;
@@ -954,16 +1145,15 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
if (mbox_status == MBOX_STS_COMMAND_COMPLETE)
break;
- DEBUG2(printk("scsi%ld: %s: Waiting for boot firmware to "
- "complete... ctrl_sts=0x%x, remaining=%d\n",
- ha->host_no, __func__, ctrl_status,
- max_wait_time));
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot "
+ "firmware to complete... ctrl_sts=0x%x\n",
+ ha->host_no, __func__, ctrl_status));
- msleep(250);
- } while ((max_wait_time--));
+ msleep_interruptible(250);
+ } while (!time_after_eq(jiffies, max_wait_time));
if (mbox_status == MBOX_STS_COMMAND_COMPLETE) {
- DEBUG(printk("scsi%ld: %s: Firmware has started\n",
+ DEBUG(printk(KERN_INFO "scsi%ld: %s: Firmware has started\n",
ha->host_no, __func__));
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1141,6 +1331,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
int status = QLA_ERROR;
int8_t ip_address[IP_ADDR_LEN] = {0} ;
+ clear_bit(AF_ONLINE, &ha->flags);
ha->eeprom_cmd_data = 0;
qla4x00_pci_config(ha);
@@ -1166,7 +1357,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
* the ddb_list and wait for DHCP lease acquired aen to come in
* followed by 0x8014 aen" to trigger the tgt discovery process.
*/
- if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS)
+ if (ha->firmware_state & FW_STATE_CONFIGURING_IP)
goto exit_init_online;
/* Skip device discovery if ip and subnet is zero */
@@ -1270,8 +1461,8 @@ static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
*
* This routine processes a Device Database Changed AEN Event.
**/
-int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
- uint32_t fw_ddb_index, uint32_t state)
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ uint32_t state, uint32_t conn_err)
{
struct ddb_entry * ddb_entry;
uint32_t old_fw_ddb_device_state;
@@ -1318,19 +1509,24 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
* the device came back.
*/
} else {
- /* Device went away, try to relogin. */
- /* Mark device missing */
- if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
+ /* Device went away, mark device missing */
+ if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) {
+ DEBUG2(dev_info(&ha->pdev->dev, "%s mark missing "
+ "ddb_entry 0x%p sess 0x%p conn 0x%p\n",
+ __func__, ddb_entry,
+ ddb_entry->sess, ddb_entry->conn));
qla4xxx_mark_device_missing(ha, ddb_entry);
+ }
+
/*
* Relogin if device state changed to a not active state.
- * However, do not relogin if this aen is a result of an IOCTL
- * logout (DF_NO_RELOGIN) or if this is a discovered device.
+ * However, do not relogin if a RELOGIN is in process, or
+ * we are not allowed to relogin to this DDB.
*/
if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED &&
!test_bit(DF_RELOGIN, &ddb_entry->flags) &&
!test_bit(DF_NO_RELOGIN, &ddb_entry->flags) &&
- !test_bit(DF_ISNS_DISCOVERED, &ddb_entry->flags)) {
+ qla4_is_relogin_allowed(ha, conn_err)) {
/*
* This triggers a relogin. After the relogin_timer
* expires, the relogin gets scheduled. We must wait a
@@ -1338,7 +1534,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
* with failed device_state or a logout response before
* we can issue another relogin.
*/
- /* Firmware padds this timeout: (time2wait +1).
+ /* Firmware pads this timeout: (time2wait +1).
+ /* Firmware pads this timeout: (time2wait +1).
* Driver retry to login should be longer than F/W.
* Otherwise F/W will fail
* set_ddb() mbx cmd with 0x4005 since it still
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index e0c3215..e66f3f2 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -299,7 +299,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
wmb();
- srb->cmd->host_scribble = (unsigned char *)srb;
+ srb->cmd->host_scribble = (unsigned char *)(unsigned long)index;
/* update counters */
srb->state = SRB_ACTIVE_STATE;
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index c196d55..596c303 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -97,7 +97,7 @@ qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
/* Place command on done queue. */
if (srb->req_sense_len == 0) {
- qla4xxx_srb_compl(ha, srb);
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
ha->status_srb = NULL;
}
}
@@ -329,7 +329,7 @@ status_entry_exit:
/* complete the request, if not waiting for status_continuation pkt */
srb->cc_stat = sts_entry->completionStatus;
if (ha->status_srb == NULL)
- qla4xxx_srb_compl(ha, srb);
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
}
/**
@@ -393,7 +393,7 @@ static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
/* RETRY normally by sending it back with
* DID_BUS_BUSY */
srb->cmd->result = DID_BUS_BUSY << 16;
- qla4xxx_srb_compl(ha, srb);
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
break;
case ET_CONTINUE:
@@ -498,15 +498,22 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
break;
case MBOX_ASTS_LINK_UP:
- DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
- ha->host_no, mbox_status));
set_bit(AF_LINK_UP, &ha->flags);
+ if (test_bit(AF_INIT_DONE, &ha->flags))
+ set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
+
+ DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x Adapter"
+ " LINK UP\n", ha->host_no,
+ mbox_status));
break;
case MBOX_ASTS_LINK_DOWN:
- DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
- ha->host_no, mbox_status));
clear_bit(AF_LINK_UP, &ha->flags);
+ set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
+
+ DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x Adapter"
+ " LINK DOWN\n", ha->host_no,
+ mbox_status));
break;
case MBOX_ASTS_HEARTBEAT:
@@ -831,7 +838,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
qla4xxx_reinitialize_ddb_list(ha);
} else if (mbox_sts[1] == 1) { /* Specific device. */
qla4xxx_process_ddb_changed(ha, mbox_sts[2],
- mbox_sts[3]);
+ mbox_sts[3], mbox_sts[4]);
}
break;
}
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index caeb7d1..75496fb 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -172,108 +172,207 @@ mbox_exit:
return status;
}
+uint8_t
+qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
+{
+ memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+ memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+ mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
+ mbox_cmd[1] = 0;
+ mbox_cmd[2] = LSDW(init_fw_cb_dma);
+ mbox_cmd[3] = MSDW(init_fw_cb_dma);
+ mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
+ mbox_cmd[5] = (IFCB_VER_MAX << 8) | IFCB_VER_MIN;
+
+ if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) !=
+ QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
+ "MBOX_CMD_INITIALIZE_FIRMWARE"
+ " failed w/ status %04X\n",
+ ha->host_no, __func__, mbox_sts[0]));
+ return QLA_ERROR;
+ }
+ return QLA_SUCCESS;
+}
+
+uint8_t
+qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
+{
+ memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+ memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+ mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
+ mbox_cmd[2] = LSDW(init_fw_cb_dma);
+ mbox_cmd[3] = MSDW(init_fw_cb_dma);
+ mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
+
+ if (qla4xxx_mailbox_command(ha, 5, 5, mbox_cmd, mbox_sts) !=
+ QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
+ "MBOX_CMD_GET_INIT_FW_CTRL_BLOCK"
+ " failed w/ status %04X\n",
+ ha->host_no, __func__, mbox_sts[0]));
+ return QLA_ERROR;
+ }
+ return QLA_SUCCESS;
+}
+
+void
+qla4xxx_update_local_ip(struct scsi_qla_host *ha,
+ struct addr_ctrl_blk *init_fw_cb)
+{
+ /* Save IPv4 Address Info */
+ memcpy(ha->ip_address, init_fw_cb->ipv4_addr,
+ min(sizeof(ha->ip_address), sizeof(init_fw_cb->ipv4_addr)));
+ memcpy(ha->subnet_mask, init_fw_cb->ipv4_subnet,
+ min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->ipv4_subnet)));
+ memcpy(ha->gateway, init_fw_cb->ipv4_gw_addr,
+ min(sizeof(ha->gateway), sizeof(init_fw_cb->ipv4_gw_addr)));
+
+ if (is_ipv6_enabled(ha)) {
+ /* Save IPv6 Address */
+ ha->ipv6_link_local_state = init_fw_cb->ipv6_lnk_lcl_addr_state;
+ ha->ipv6_addr0_state = init_fw_cb->ipv6_addr0_state;
+ ha->ipv6_addr1_state = init_fw_cb->ipv6_addr1_state;
+ ha->ipv6_default_router_state = init_fw_cb->ipv6_dflt_rtr_state;
+ ha->ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;
+ ha->ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80;
+
+ memcpy(&ha->ipv6_link_local_addr.in6_u.u6_addr8[8],
+ init_fw_cb->ipv6_if_id,
+ min(sizeof(ha->ipv6_link_local_addr)/2,
+ sizeof(init_fw_cb->ipv6_if_id)));
+ memcpy(&ha->ipv6_addr0, init_fw_cb->ipv6_addr0,
+ min(sizeof(ha->ipv6_addr0),
+ sizeof(init_fw_cb->ipv6_addr0)));
+ memcpy(&ha->ipv6_addr1, init_fw_cb->ipv6_addr1,
+ min(sizeof(ha->ipv6_addr1),
+ sizeof(init_fw_cb->ipv6_addr1)));
+ memcpy(&ha->ipv6_default_router_addr,
+ init_fw_cb->ipv6_dflt_rtr_addr,
+ min(sizeof(ha->ipv6_default_router_addr),
+ sizeof(init_fw_cb->ipv6_dflt_rtr_addr)));
+ }
+}
+
+uint8_t
+qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
+ uint32_t *mbox_cmd,
+ uint32_t *mbox_sts,
+ struct addr_ctrl_blk *init_fw_cb,
+ dma_addr_t init_fw_cb_dma)
+{
+ if (qla4xxx_get_ifcb(ha, mbox_cmd, mbox_sts, init_fw_cb_dma)
+ != QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
+ ha->host_no, __func__));
+ return QLA_ERROR;
+ }
+
+ DEBUG2(qla4xxx_dump_buffer(init_fw_cb, sizeof(struct addr_ctrl_blk)));
+
+ /* Save some info in adapter structure. */
+ ha->acb_version = init_fw_cb->acb_version;
+ ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options);
+ ha->tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);
+ ha->ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);
+ ha->ipv4_addr_state = le16_to_cpu(init_fw_cb->ipv4_addr_state);
+ ha->heartbeat_interval = init_fw_cb->hb_interval;
+ memcpy(ha->name_string, init_fw_cb->iscsi_name,
+ min(sizeof(ha->name_string),
+ sizeof(init_fw_cb->iscsi_name)));
+ /*memcpy(ha->alias, init_fw_cb->Alias,
+ min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
+
+	/* Save Command Line Parameter info */
+ ha->port_down_retry_count = le16_to_cpu(init_fw_cb->conn_ka_timeout);
+ ha->discovery_wait = ql4xdiscoverywait;
+
+ if (ha->acb_version == ACB_SUPPORTED) {
+ ha->ipv6_options = init_fw_cb->ipv6_opts;
+ ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts;
+ }
+ qla4xxx_update_local_ip(ha, init_fw_cb);
+
+ return QLA_SUCCESS;
+}
+
/**
* qla4xxx_initialize_fw_cb - initializes firmware control block.
* @ha: Pointer to host adapter structure.
**/
int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
{
- struct init_fw_ctrl_blk *init_fw_cb;
+ struct addr_ctrl_blk *init_fw_cb;
dma_addr_t init_fw_cb_dma;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct init_fw_ctrl_blk),
+ sizeof(struct addr_ctrl_blk),
&init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
ha->host_no, __func__));
return 10;
}
- memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
+ memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
/* Get Initialize Firmware Control Block. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
- mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
- mbox_cmd[2] = LSDW(init_fw_cb_dma);
- mbox_cmd[3] = MSDW(init_fw_cb_dma);
- mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
-
- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+ if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
dma_free_coherent(&ha->pdev->dev,
- sizeof(struct init_fw_ctrl_blk),
+ sizeof(struct addr_ctrl_blk),
init_fw_cb, init_fw_cb_dma);
- return status;
+ goto exit_init_fw_cb;
}
/* Initialize request and response queues. */
qla4xxx_init_rings(ha);
/* Fill in the request and response queue information. */
- init_fw_cb->pri.rqq_consumer_idx = cpu_to_le16(ha->request_out);
- init_fw_cb->pri.compq_producer_idx = cpu_to_le16(ha->response_in);
- init_fw_cb->pri.rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
- init_fw_cb->pri.compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
- init_fw_cb->pri.rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
- init_fw_cb->pri.rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
- init_fw_cb->pri.compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
- init_fw_cb->pri.compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
- init_fw_cb->pri.shdwreg_addr_lo =
- cpu_to_le32(LSDW(ha->shadow_regs_dma));
- init_fw_cb->pri.shdwreg_addr_hi =
- cpu_to_le32(MSDW(ha->shadow_regs_dma));
+ init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
+ init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
+ init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
+ init_fw_cb->compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
+ init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
+ init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
+ init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
+ init_fw_cb->compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
+ init_fw_cb->shdwreg_addr_lo = cpu_to_le32(LSDW(ha->shadow_regs_dma));
+ init_fw_cb->shdwreg_addr_hi = cpu_to_le32(MSDW(ha->shadow_regs_dma));
/* Set up required options. */
- init_fw_cb->pri.fw_options |=
+ init_fw_cb->fw_options |=
__constant_cpu_to_le16(FWOPT_SESSION_MODE |
FWOPT_INITIATOR_MODE);
- init_fw_cb->pri.fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
-
- /* Save some info in adapter structure. */
- ha->firmware_options = le16_to_cpu(init_fw_cb->pri.fw_options);
- ha->tcp_options = le16_to_cpu(init_fw_cb->pri.ipv4_tcp_opts);
- ha->heartbeat_interval = init_fw_cb->pri.hb_interval;
- memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
- min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
- memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
- min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
- memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
- min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
- memcpy(ha->name_string, init_fw_cb->pri.iscsi_name,
- min(sizeof(ha->name_string),
- sizeof(init_fw_cb->pri.iscsi_name)));
- /*memcpy(ha->alias, init_fw_cb->Alias,
- min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
-
- /* Save Command Line Paramater info */
- ha->port_down_retry_count = le16_to_cpu(init_fw_cb->pri.conn_ka_timeout);
- ha->discovery_wait = ql4xdiscoverywait;
+ init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
- /* Send Initialize Firmware Control Block. */
- memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_sts));
-
- mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
- mbox_cmd[1] = 0;
- mbox_cmd[2] = LSDW(init_fw_cb_dma);
- mbox_cmd[3] = MSDW(init_fw_cb_dma);
- mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
+ if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
+ != QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi%ld: %s: Failed to set init_fw_ctrl_blk\n",
+ ha->host_no, __func__));
+ goto exit_init_fw_cb;
+ }
- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) ==
- QLA_SUCCESS)
- status = QLA_SUCCESS;
- else {
- DEBUG2(printk("scsi%ld: %s: MBOX_CMD_INITIALIZE_FIRMWARE "
- "failed w/ status %04X\n", ha->host_no, __func__,
- mbox_sts[0]));
+ if (qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0],
+ init_fw_cb, init_fw_cb_dma) != QLA_SUCCESS) {
+ DEBUG2(printk("scsi%ld: %s: Failed to update local ifcb\n",
+ ha->host_no, __func__));
+ goto exit_init_fw_cb;
}
- dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
- init_fw_cb, init_fw_cb_dma);
+ status = QLA_SUCCESS;
+
+exit_init_fw_cb:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
+ init_fw_cb, init_fw_cb_dma);
return status;
}
@@ -284,13 +383,13 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
**/
int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
{
- struct init_fw_ctrl_blk *init_fw_cb;
+ struct addr_ctrl_blk *init_fw_cb;
dma_addr_t init_fw_cb_dma;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct init_fw_ctrl_blk),
+ sizeof(struct addr_ctrl_blk),
&init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
@@ -299,35 +398,21 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
}
/* Get Initialize Firmware Control Block. */
- memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_sts));
-
- memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
- mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
- mbox_cmd[2] = LSDW(init_fw_cb_dma);
- mbox_cmd[3] = MSDW(init_fw_cb_dma);
- mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
-
- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+ memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
+ if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
ha->host_no, __func__));
dma_free_coherent(&ha->pdev->dev,
- sizeof(struct init_fw_ctrl_blk),
+ sizeof(struct addr_ctrl_blk),
init_fw_cb, init_fw_cb_dma);
return QLA_ERROR;
}
/* Save IP Address. */
- memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
- min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
- memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
- min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
- memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
- min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
-
- dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
- init_fw_cb, init_fw_cb_dma);
+ qla4xxx_update_local_ip(ha, init_fw_cb);
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
+ init_fw_cb, init_fw_cb_dma);
return QLA_SUCCESS;
}
@@ -409,6 +494,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
uint16_t *connection_id)
{
int status = QLA_ERROR;
+ uint16_t options;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -441,14 +527,26 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
goto exit_get_fwddb;
}
if (fw_ddb_entry) {
- dev_info(&ha->pdev->dev, "DDB[%d] MB0 %04x Tot %d Next %d "
- "State %04x ConnErr %08x %d.%d.%d.%d:%04d \"%s\"\n",
- fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3],
- mbox_sts[4], mbox_sts[5], fw_ddb_entry->ip_addr[0],
- fw_ddb_entry->ip_addr[1], fw_ddb_entry->ip_addr[2],
- fw_ddb_entry->ip_addr[3],
- le16_to_cpu(fw_ddb_entry->port),
- fw_ddb_entry->iscsi_name);
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE) {
+ dev_info(&ha->pdev->dev, "%s: DDB[%d] MB0 %04x Tot %d "
+ "Next %d State %04x ConnErr %08x %pI6 "
+ ":%04d \"%s\"\n", __func__, fw_ddb_index,
+ mbox_sts[0], mbox_sts[2], mbox_sts[3],
+ mbox_sts[4], mbox_sts[5],
+ fw_ddb_entry->ip_addr,
+ le16_to_cpu(fw_ddb_entry->port),
+ fw_ddb_entry->iscsi_name);
+ } else {
+ dev_info(&ha->pdev->dev, "%s: DDB[%d] MB0 %04x Tot %d "
+ "Next %d State %04x ConnErr %08x %pI4 "
+ ":%04d \"%s\"\n", __func__, fw_ddb_index,
+ mbox_sts[0], mbox_sts[2], mbox_sts[3],
+ mbox_sts[4], mbox_sts[5],
+ fw_ddb_entry->ip_addr,
+ le16_to_cpu(fw_ddb_entry->port),
+ fw_ddb_entry->iscsi_name);
+ }
}
if (num_valid_ddb_entries)
*num_valid_ddb_entries = mbox_sts[2];
@@ -664,6 +762,59 @@ exit_get_event_log:
}
/**
+ * qla4xxx_abort_task - issues Abort Task
+ * @ha: Pointer to host adapter structure.
+ * @srb: Pointer to srb entry
+ *
+ * This routine issues an Abort Task request to the firmware for the
+ * command associated with the specified srb. The caller must ensure
+ * that the srb pointer is valid before calling this routine.
+ **/
+int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ struct scsi_cmnd *cmd = srb->cmd;
+ int status = QLA_SUCCESS;
+ unsigned long flags = 0;
+ uint32_t index;
+
+ /*
+ * Send abort task command to ISP, so that the ISP will return
+ * request with ABORT status
+ */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ index = (unsigned long)(unsigned char *)cmd->host_scribble;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* Firmware already posted completion on response queue */
+ if (index == MAX_SRBS)
+ return status;
+
+ mbox_cmd[0] = MBOX_CMD_ABORT_TASK;
+ mbox_cmd[1] = srb->fw_ddb_index;
+ mbox_cmd[2] = index;
+ /* Immediate Command Enable */
+ mbox_cmd[5] = 0x01;
+
+ qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) {
+ status = QLA_ERROR;
+
+ DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%d: abort task FAILED: "
+ "mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n",
+ ha->host_no, cmd->device->id, cmd->device->lun, mbox_sts[0],
+ mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4]));
+ }
+
+ return status;
+}
+
+/**
* qla4xxx_reset_lun - issues LUN Reset
* @ha: Pointer to host adapter structure.
* @db_entry: Pointer to device database entry
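
The ql4_mbx.c hunks above split the old monolithic qla4xxx_initialize_fw_cb() into qla4xxx_get_ifcb(), qla4xxx_set_ifcb() and qla4xxx_update_local_ifcb() helpers. The resulting get/modify/set round trip, reduced to its control flow, looks roughly like the sketch below; example_ifcb_roundtrip() is a hypothetical name and the sketch is illustrative only, not part of the diff.

/* Illustrative only: the get/modify/set IFCB flow introduced above.
 * example_ifcb_roundtrip() is a hypothetical name; error handling is
 * reduced to the minimum. */
static int example_ifcb_roundtrip(struct scsi_qla_host *ha)
{
	struct addr_ctrl_blk *ifcb;
	dma_addr_t ifcb_dma;
	uint32_t mbox_cmd[MBOX_REG_COUNT], mbox_sts[MBOX_REG_COUNT];
	int status = QLA_ERROR;

	ifcb = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ifcb),
				  &ifcb_dma, GFP_KERNEL);
	if (!ifcb)
		return QLA_ERROR;

	/* 1. Read the current init firmware control block from the HBA. */
	if (qla4xxx_get_ifcb(ha, mbox_cmd, mbox_sts, ifcb_dma) != QLA_SUCCESS)
		goto out;

	/* 2. Modify the fields of interest (queue addresses, fw_options, ...). */
	ifcb->fw_options |= cpu_to_le16(FWOPT_INITIATOR_MODE);

	/* 3. Write the block back, then re-read it and cache the values
	 *    in the adapter structure. */
	if (qla4xxx_set_ifcb(ha, mbox_cmd, mbox_sts, ifcb_dma) != QLA_SUCCESS)
		goto out;
	status = qla4xxx_update_local_ifcb(ha, mbox_cmd, mbox_sts,
					   ifcb, ifcb_dma);
out:
	dma_free_coherent(&ha->pdev->dev, sizeof(*ifcb), ifcb, ifcb_dma);
	return status;
}
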
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2ccad36..38b1d38 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -74,6 +74,7 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
*/
static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
void (*done) (struct scsi_cmnd *));
+static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
@@ -88,6 +89,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.proc_name = DRIVER_NAME,
.queuecommand = qla4xxx_queuecommand,
+ .eh_abort_handler = qla4xxx_eh_abort,
.eh_device_reset_handler = qla4xxx_eh_device_reset,
.eh_target_reset_handler = qla4xxx_eh_target_reset,
.eh_host_reset_handler = qla4xxx_eh_host_reset,
@@ -384,12 +386,12 @@ static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
if (!srb)
return srb;
- atomic_set(&srb->ref_count, 1);
+ kref_init(&srb->srb_ref);
srb->ha = ha;
srb->ddb = ddb_entry;
srb->cmd = cmd;
srb->flags = 0;
- cmd->SCp.ptr = (void *)srb;
+ CMD_SP(cmd) = (void *)srb;
cmd->scsi_done = done;
return srb;
@@ -403,12 +405,14 @@ static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
scsi_dma_unmap(cmd);
srb->flags &= ~SRB_DMA_VALID;
}
- cmd->SCp.ptr = NULL;
+ CMD_SP(cmd) = NULL;
}
-void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb)
+void qla4xxx_srb_compl(struct kref *ref)
{
+ struct srb *srb = container_of(ref, struct srb, srb_ref);
struct scsi_cmnd *cmd = srb->cmd;
+ struct scsi_qla_host *ha = srb->ha;
qla4xxx_srb_free_dma(ha, srb);
@@ -685,6 +689,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
+ test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
test_bit(DPC_AEN, &ha->dpc_flags)) &&
ha->dpc_thread) {
DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
@@ -886,11 +891,10 @@ static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha)
srb = qla4xxx_del_from_active_array(ha, i);
if (srb != NULL) {
srb->cmd->result = DID_RESET << 16;
- qla4xxx_srb_compl(ha, srb);
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
}
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
}
/**
@@ -1069,6 +1073,54 @@ static void qla4xxx_do_dpc(struct work_struct *work)
if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
qla4xxx_get_dhcp_ip_address(ha);
+ /* ---- link change? --- */
+ if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
+ if (!test_bit(AF_LINK_UP, &ha->flags)) {
+ /* ---- link down? --- */
+ list_for_each_entry_safe(ddb_entry, dtemp,
+ &ha->ddb_list, list) {
+ if (atomic_read(&ddb_entry->state) ==
+ DDB_STATE_ONLINE)
+ qla4xxx_mark_device_missing(ha,
+ ddb_entry);
+ }
+ } else {
+ /* ---- link up? --- *
+ * F/W will auto login to all devices ONLY ONCE after
+ * link up during driver initialization and runtime
+ * fatal error recovery. Therefore, the driver must
+ * manually relogin to devices when recovering from
+ * connection failures, logouts, expired KATO, etc. */
+
+ list_for_each_entry_safe(ddb_entry, dtemp,
+ &ha->ddb_list, list) {
+ if ((atomic_read(&ddb_entry->state) ==
+ DDB_STATE_MISSING) ||
+ (atomic_read(&ddb_entry->state) ==
+ DDB_STATE_DEAD)) {
+ if (ddb_entry->fw_ddb_device_state ==
+ DDB_DS_SESSION_ACTIVE) {
+ atomic_set(&ddb_entry->state,
+ DDB_STATE_ONLINE);
+ dev_info(&ha->pdev->dev,
+ "scsi%ld: %s: ddb[%d]"
+ " os[%d] marked"
+ " ONLINE\n",
+ ha->host_no, __func__,
+ ddb_entry->fw_ddb_index,
+ ddb_entry->os_target_id);
+
+ iscsi_unblock_session(
+ ddb_entry->sess);
+ } else
+ qla4xxx_relogin_device(
+ ha, ddb_entry);
+ }
+
+ }
+ }
+ }
+
/* ---- relogin device? --- */
if (adapter_up(ha) &&
test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
@@ -1430,12 +1482,14 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev)
struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index)
{
struct srb *srb = NULL;
- struct scsi_cmnd *cmd;
+ struct scsi_cmnd *cmd = NULL;
- if (!(cmd = scsi_host_find_tag(ha->host, index)))
+ cmd = scsi_host_find_tag(ha->host, index);
+ if (!cmd)
return srb;
- if (!(srb = (struct srb *)cmd->host_scribble))
+ srb = (struct srb *)CMD_SP(cmd);
+ if (!srb)
return srb;
/* update counters */
@@ -1443,14 +1497,15 @@ struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t in
ha->req_q_count += srb->iocb_cnt;
ha->iocb_cnt -= srb->iocb_cnt;
if (srb->cmd)
- srb->cmd->host_scribble = NULL;
+ srb->cmd->host_scribble =
+ (unsigned char *)(unsigned long) MAX_SRBS;
}
return srb;
}
/**
* qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
- * @ha: actual ha whose done queue will contain the comd returned by firmware.
+ * @ha: Pointer to host adapter structure.
* @cmd: Scsi Command to wait on.
*
* This routine waits for the command to be returned by the Firmware
@@ -1465,7 +1520,7 @@ static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
do {
/* Checking to see if its returned to OS */
- rp = (struct srb *) cmd->SCp.ptr;
+ rp = (struct srb *) CMD_SP(cmd);
if (rp == NULL) {
done++;
break;
@@ -1534,6 +1589,62 @@ static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
}
/**
+ * qla4xxx_eh_abort - callback for abort task.
+ * @cmd: Pointer to Linux's SCSI command structure
+ *
+ * This routine is called by the Linux OS to abort the specified
+ * command.
+ **/
+static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
+{
+ struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+ unsigned int id = cmd->device->id;
+ unsigned int lun = cmd->device->lun;
+ unsigned long serial = cmd->serial_number;
+ struct srb *srb = NULL;
+ int ret = SUCCESS;
+ int wait = 0;
+
+ dev_info(&ha->pdev->dev,
+ "scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n",
+ ha->host_no, id, lun, cmd, serial);
+
+ srb = (struct srb *) CMD_SP(cmd);
+
+ if (!srb)
+ return SUCCESS;
+
+ kref_get(&srb->srb_ref);
+
+ if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
+ DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
+ ha->host_no, id, lun));
+ ret = FAILED;
+ } else {
+ DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
+ ha->host_no, id, lun));
+ wait = 1;
+ }
+
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
+
+ /* Wait for command to complete */
+ if (wait) {
+ if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
+ DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
+ ha->host_no, id, lun));
+ ret = FAILED;
+ }
+ }
+
+ dev_info(&ha->pdev->dev,
+ "scsi%ld:%d:%d: Abort command - %s\n",
+	    ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
+
+ return ret;
+}
+
+/**
* qla4xxx_eh_device_reset - callback for target reset.
* @cmd: Pointer to Linux's SCSI command structure
*
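
The ql4_os.c changes above replace the driver's hand-rolled atomic ref_count on struct srb with a kref, using qla4xxx_srb_compl() as the release function: qla4xxx_get_new_srb() does kref_init(), the abort path takes an extra reference, and whoever drops the last reference completes the command. A minimal sketch of that pattern (illustrative; the wrapper function name is hypothetical):

/* Illustrative kref usage matching the conversion above; not part of
 * the diff. */
static void example_srb_hold_for_abort(struct srb *srb)
{
	/* Take an extra reference while the abort path uses the srb. */
	kref_get(&srb->srb_ref);

	/* ... issue MBOX_CMD_ABORT_TASK and wait for the firmware ... */

	/* Drop the extra reference.  The last kref_put() anywhere in the
	 * driver invokes qla4xxx_srb_compl(), which unmaps DMA and
	 * completes the SCSI command exactly once. */
	kref_put(&srb->srb_ref, qla4xxx_srb_compl);
}
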
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 6980cb2..28a6c49 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,5 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.01.00-k9"
-
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k1"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1c08f61..ad0ed21 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -67,6 +67,9 @@
#include "scsi_priv.h"
#include "scsi_logging.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/scsi.h>
+
static void scsi_done(struct scsi_cmnd *cmd);
/*
@@ -747,10 +750,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
cmd->result = (DID_NO_CONNECT << 16);
scsi_done(cmd);
} else {
+ trace_scsi_dispatch_cmd_start(cmd);
rtn = host->hostt->queuecommand(cmd, scsi_done);
}
spin_unlock_irqrestore(host->host_lock, flags);
if (rtn) {
+ trace_scsi_dispatch_cmd_error(cmd, rtn);
if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
rtn != SCSI_MLQUEUE_TARGET_BUSY)
rtn = SCSI_MLQUEUE_HOST_BUSY;
@@ -781,6 +786,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
*/
static void scsi_done(struct scsi_cmnd *cmd)
{
+ trace_scsi_dispatch_cmd_done(cmd);
blk_complete_request(cmd->request);
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 3a5bfd1..136329b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -12,7 +12,7 @@
* SAS disks.
*
*
- * For documentation see http://www.torque.net/sg/sdebug26.html
+ * For documentation see http://sg.danny.cz/sg/sdebug26.html
*
* D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
* dpg: work for devfs large number of disks [20010809]
@@ -58,8 +58,8 @@
#include "sd.h"
#include "scsi_logging.h"
-#define SCSI_DEBUG_VERSION "1.81"
-static const char * scsi_debug_version_date = "20070104";
+#define SCSI_DEBUG_VERSION "1.82"
+static const char * scsi_debug_version_date = "20100324";
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
@@ -108,6 +108,7 @@ static const char * scsi_debug_version_date = "20070104";
#define DEF_ATO 1
#define DEF_PHYSBLK_EXP 0
#define DEF_LOWEST_ALIGNED 0
+#define DEF_OPT_BLKS 64
#define DEF_UNMAP_MAX_BLOCKS 0
#define DEF_UNMAP_MAX_DESC 0
#define DEF_UNMAP_GRANULARITY 0
@@ -147,12 +148,18 @@ static const char * scsi_debug_version_date = "20070104";
#define SAM2_LUN_ADDRESS_METHOD 0
#define SAM2_WLUN_REPORT_LUNS 0xc101
+/* Can queue up to this number of commands. Typically commands that
+ * have a non-zero delay are queued. */
+#define SCSI_DEBUG_CANQUEUE 255
+
static int scsi_debug_add_host = DEF_NUM_HOST;
static int scsi_debug_delay = DEF_DELAY;
static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
static int scsi_debug_every_nth = DEF_EVERY_NTH;
static int scsi_debug_max_luns = DEF_MAX_LUNS;
+static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
static int scsi_debug_num_parts = DEF_NUM_PARTS;
+static int scsi_debug_no_uld = 0;
static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int scsi_debug_opts = DEF_OPTS;
static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
@@ -169,6 +176,7 @@ static int scsi_debug_guard = DEF_GUARD;
static int scsi_debug_ato = DEF_ATO;
static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
+static int scsi_debug_opt_blks = DEF_OPT_BLKS;
static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
@@ -192,7 +200,6 @@ static int sdebug_sectors_per; /* sectors per cylinder */
#define SDEBUG_SENSE_LEN 32
-#define SCSI_DEBUG_CANQUEUE 255
#define SCSI_DEBUG_MAX_CMD_LEN 32
struct sdebug_dev_info {
@@ -699,9 +706,13 @@ static int inquiry_evpd_b0(unsigned char * arr)
unsigned int gran;
memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
+
+ /* Optimal transfer length granularity */
gran = 1 << scsi_debug_physblk_exp;
arr[2] = (gran >> 8) & 0xff;
arr[3] = gran & 0xff;
+
+ /* Maximum Transfer Length */
if (sdebug_store_sectors > 0x400) {
arr[4] = (sdebug_store_sectors >> 24) & 0xff;
arr[5] = (sdebug_store_sectors >> 16) & 0xff;
@@ -709,6 +720,9 @@ static int inquiry_evpd_b0(unsigned char * arr)
arr[7] = sdebug_store_sectors & 0xff;
}
+ /* Optimal Transfer Length */
+ put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
+
if (scsi_debug_unmap_max_desc) {
unsigned int blocks;
@@ -717,15 +731,20 @@ static int inquiry_evpd_b0(unsigned char * arr)
else
blocks = 0xffffffff;
+ /* Maximum Unmap LBA Count */
put_unaligned_be32(blocks, &arr[16]);
+
+ /* Maximum Unmap Block Descriptor Count */
put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
}
+ /* Unmap Granularity Alignment */
if (scsi_debug_unmap_alignment) {
put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
arr[28] |= 0x80; /* UGAVALID */
}
+ /* Optimal Unmap Granularity */
if (scsi_debug_unmap_granularity) {
put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
return 0x3c; /* Mandatory page length for thin provisioning */
@@ -2266,7 +2285,7 @@ static void timer_intr_handler(unsigned long indx)
struct sdebug_queued_cmd * sqcp;
unsigned long iflags;
- if (indx >= SCSI_DEBUG_CANQUEUE) {
+ if (indx >= scsi_debug_max_queue) {
printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
"large\n");
return;
@@ -2380,6 +2399,8 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
sdp->host->cmd_per_lun);
blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
+ if (scsi_debug_no_uld)
+ sdp->no_uld_attach = 1;
return 0;
}
@@ -2406,7 +2427,7 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
struct sdebug_queued_cmd *sqcp;
spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+ for (k = 0; k < scsi_debug_max_queue; ++k) {
sqcp = &queued_arr[k];
if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
del_timer_sync(&sqcp->cmnd_timer);
@@ -2416,7 +2437,7 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
}
}
spin_unlock_irqrestore(&queued_arr_lock, iflags);
- return (k < SCSI_DEBUG_CANQUEUE) ? 1 : 0;
+ return (k < scsi_debug_max_queue) ? 1 : 0;
}
/* Deletes (stops) timers of all queued commands */
@@ -2427,7 +2448,7 @@ static void stop_all_queued(void)
struct sdebug_queued_cmd *sqcp;
spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+ for (k = 0; k < scsi_debug_max_queue; ++k) {
sqcp = &queued_arr[k];
if (sqcp->in_use && sqcp->a_cmnd) {
del_timer_sync(&sqcp->cmnd_timer);
@@ -2533,7 +2554,7 @@ static void __init init_all_queued(void)
struct sdebug_queued_cmd * sqcp;
spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+ for (k = 0; k < scsi_debug_max_queue; ++k) {
sqcp = &queued_arr[k];
init_timer(&sqcp->cmnd_timer);
sqcp->in_use = 0;
@@ -2625,12 +2646,12 @@ static int schedule_resp(struct scsi_cmnd * cmnd,
struct sdebug_queued_cmd * sqcp = NULL;
spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+ for (k = 0; k < scsi_debug_max_queue; ++k) {
sqcp = &queued_arr[k];
if (! sqcp->in_use)
break;
}
- if (k >= SCSI_DEBUG_CANQUEUE) {
+ if (k >= scsi_debug_max_queue) {
spin_unlock_irqrestore(&queued_arr_lock, iflags);
printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
return 1; /* report busy to mid level */
@@ -2662,7 +2683,9 @@ module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
+module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
+module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
@@ -2677,6 +2700,7 @@ module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
+module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
@@ -2695,7 +2719,9 @@ MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
+MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
+MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
@@ -2705,6 +2731,7 @@ MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)")
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
+MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
@@ -2970,6 +2997,31 @@ static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
sdebug_max_luns_store);
+static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
+}
+static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
+ const char * buf, size_t count)
+{
+ int n;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
+ (n <= SCSI_DEBUG_CANQUEUE)) {
+ scsi_debug_max_queue = n;
+ return count;
+ }
+ return -EINVAL;
+}
+DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
+ sdebug_max_queue_store);
+
+static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
+}
+DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
+
static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
@@ -3107,7 +3159,9 @@ static int do_create_driverfs_files(void)
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
@@ -3139,7 +3193,9 @@ static void do_remove_driverfs_files(void)
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
+ driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
+ driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
@@ -3830,12 +3886,13 @@ static int sdebug_driver_probe(struct device * dev)
sdbg_host = to_sdebug_host(dev);
- hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
- if (NULL == hpnt) {
- printk(KERN_ERR "%s: scsi_register failed\n", __func__);
- error = -ENODEV;
+ sdebug_driver_template.can_queue = scsi_debug_max_queue;
+ hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
+ if (NULL == hpnt) {
+ printk(KERN_ERR "%s: scsi_register failed\n", __func__);
+ error = -ENODEV;
return error;
- }
+ }
sdbg_host->shost = hpnt;
*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 7ad53fa..a5d630f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -39,6 +39,8 @@
#include "scsi_logging.h"
#include "scsi_transport_api.h"
+#include <trace/events/scsi.h>
+
#define SENSE_TIMEOUT (10*HZ)
/*
@@ -52,6 +54,7 @@
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
if (shost->host_busy == shost->host_failed) {
+ trace_scsi_eh_wakeup(shost);
wake_up_process(shost->ehandler);
SCSI_LOG_ERROR_RECOVERY(5,
printk("Waking error handler thread\n"));
@@ -127,6 +130,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
struct scsi_cmnd *scmd = req->special;
enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
+ trace_scsi_dispatch_cmd_timeout(scmd);
scsi_log_completion(scmd, TIMEOUT_ERROR);
if (scmd->device->host->transportt->eh_timed_out)
@@ -970,9 +974,10 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
"0x%p\n", current->comm,
scmd));
rtn = scsi_try_to_abort_cmd(scmd);
- if (rtn == SUCCESS) {
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
if (!scsi_device_online(scmd->device) ||
+ rtn == FAST_IO_FAIL ||
!scsi_eh_tur(scmd)) {
scsi_eh_finish_cmd(scmd, done_q);
}
@@ -1099,8 +1104,9 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
" 0x%p\n", current->comm,
sdev));
rtn = scsi_try_bus_device_reset(bdr_scmd);
- if (rtn == SUCCESS) {
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
if (!scsi_device_online(sdev) ||
+ rtn == FAST_IO_FAIL ||
!scsi_eh_tur(bdr_scmd)) {
list_for_each_entry_safe(scmd, next,
work_q, eh_entry) {
@@ -1163,10 +1169,11 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
"to target %d\n",
current->comm, id));
rtn = scsi_try_target_reset(tgtr_scmd);
- if (rtn == SUCCESS) {
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if (id == scmd_id(scmd))
if (!scsi_device_online(scmd->device) ||
+ rtn == FAST_IO_FAIL ||
!scsi_eh_tur(tgtr_scmd))
scsi_eh_finish_cmd(scmd,
done_q);
@@ -1222,10 +1229,11 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
" %d\n", current->comm,
channel));
rtn = scsi_try_bus_reset(chan_scmd);
- if (rtn == SUCCESS) {
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if (channel == scmd_channel(scmd))
if (!scsi_device_online(scmd->device) ||
+ rtn == FAST_IO_FAIL ||
!scsi_eh_tur(scmd))
scsi_eh_finish_cmd(scmd,
done_q);
@@ -1259,9 +1267,10 @@ static int scsi_eh_host_reset(struct list_head *work_q,
, current->comm));
rtn = scsi_try_host_reset(scmd);
- if (rtn == SUCCESS) {
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if (!scsi_device_online(scmd->device) ||
+ rtn == FAST_IO_FAIL ||
(!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
!scsi_eh_tur(scmd))
scsi_eh_finish_cmd(scmd, done_q);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 38518b0..c992ecf 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -459,8 +459,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
found_target->reap_ref++;
spin_unlock_irqrestore(shost->host_lock, flags);
if (found_target->state != STARGET_DEL) {
- put_device(parent);
- kfree(starget);
+ put_device(dev);
return found_target;
}
/* Unfortunately, we found a dying target; need to
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 429c9b7..c23ab97 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -474,7 +474,7 @@ static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);
/*
- * sdev_rd_attr: create a function and attribute variable for a
+ * sdev_rw_attr: create a function and attribute variable for a
* read/write field.
*/
#define sdev_rw_attr(field, format_string) \
@@ -486,7 +486,7 @@ sdev_store_##field (struct device *dev, struct device_attribute *attr, \
{ \
struct scsi_device *sdev; \
sdev = to_scsi_device(dev); \
- snscanf (buf, 20, format_string, &sdev->field); \
+ sscanf (buf, format_string, &sdev->field); \
return count; \
} \
static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
@@ -853,9 +853,6 @@ static int scsi_target_add(struct scsi_target *starget)
error = device_add(&starget->dev);
if (error) {
dev_err(&starget->dev, "target device_add failed, error %d\n", error);
- get_device(&starget->dev);
- scsi_target_reap(starget);
- put_device(&starget->dev);
return error;
}
transport_add_device(&starget->dev);
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
new file mode 100644
index 0000000..b587289
--- /dev/null
+++ b/drivers/scsi/scsi_trace.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2010 FUJITSU LIMITED
+ * Copyright (C) 2010 Tomohiro Kusumi <kusumi.tomohiro@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/kernel.h>
+#include <linux/trace_seq.h>
+#include <trace/events/scsi.h>
+
+#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
+#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9])
+
+static const char *
+scsi_trace_misc(struct trace_seq *, unsigned char *, int);
+
+static const char *
+scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ sector_t lba = 0, txlen = 0;
+
+ lba |= ((cdb[1] & 0x1F) << 16);
+ lba |= (cdb[2] << 8);
+ lba |= cdb[3];
+ txlen = cdb[4];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu",
+ (unsigned long long)lba, (unsigned long long)txlen);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ sector_t lba = 0, txlen = 0;
+
+ lba |= (cdb[2] << 24);
+ lba |= (cdb[3] << 16);
+ lba |= (cdb[4] << 8);
+ lba |= cdb[5];
+ txlen |= (cdb[7] << 8);
+ txlen |= cdb[8];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
+ (unsigned long long)lba, (unsigned long long)txlen,
+ cdb[1] >> 5);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ sector_t lba = 0, txlen = 0;
+
+ lba |= (cdb[2] << 24);
+ lba |= (cdb[3] << 16);
+ lba |= (cdb[4] << 8);
+ lba |= cdb[5];
+ txlen |= (cdb[6] << 24);
+ txlen |= (cdb[7] << 16);
+ txlen |= (cdb[8] << 8);
+ txlen |= cdb[9];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
+ (unsigned long long)lba, (unsigned long long)txlen,
+ cdb[1] >> 5);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ sector_t lba = 0, txlen = 0;
+
+ lba |= ((u64)cdb[2] << 56);
+ lba |= ((u64)cdb[3] << 48);
+ lba |= ((u64)cdb[4] << 40);
+ lba |= ((u64)cdb[5] << 32);
+ lba |= (cdb[6] << 24);
+ lba |= (cdb[7] << 16);
+ lba |= (cdb[8] << 8);
+ lba |= cdb[9];
+ txlen |= (cdb[10] << 24);
+ txlen |= (cdb[11] << 16);
+ txlen |= (cdb[12] << 8);
+ txlen |= cdb[13];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
+ (unsigned long long)lba, (unsigned long long)txlen,
+ cdb[1] >> 5);
+
+ if (cdb[0] == WRITE_SAME_16)
+ trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
+
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len, *cmd;
+ sector_t lba = 0, txlen = 0;
+ u32 ei_lbrt = 0;
+
+ switch (SERVICE_ACTION32(cdb)) {
+ case READ_32:
+ cmd = "READ";
+ break;
+ case VERIFY_32:
+ cmd = "VERIFY";
+ break;
+ case WRITE_32:
+ cmd = "WRITE";
+ break;
+ case WRITE_SAME_32:
+ cmd = "WRITE_SAME";
+ break;
+ default:
+ trace_seq_printf(p, "UNKNOWN");
+ goto out;
+ }
+
+ lba |= ((u64)cdb[12] << 56);
+ lba |= ((u64)cdb[13] << 48);
+ lba |= ((u64)cdb[14] << 40);
+ lba |= ((u64)cdb[15] << 32);
+ lba |= (cdb[16] << 24);
+ lba |= (cdb[17] << 16);
+ lba |= (cdb[18] << 8);
+ lba |= cdb[19];
+ ei_lbrt |= (cdb[20] << 24);
+ ei_lbrt |= (cdb[21] << 16);
+ ei_lbrt |= (cdb[22] << 8);
+ ei_lbrt |= cdb[23];
+ txlen |= (cdb[28] << 24);
+ txlen |= (cdb[29] << 16);
+ txlen |= (cdb[30] << 8);
+ txlen |= cdb[31];
+
+ trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u",
+ cmd, (unsigned long long)lba,
+ (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt);
+
+ if (SERVICE_ACTION32(cdb) == WRITE_SAME_32)
+ trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
+
+out:
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ unsigned int regions = cdb[7] << 8 | cdb[8];
+
+ trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len, *cmd;
+ sector_t lba = 0;
+ u32 alloc_len = 0;
+
+ switch (SERVICE_ACTION16(cdb)) {
+ case SAI_READ_CAPACITY_16:
+ cmd = "READ_CAPACITY_16";
+ break;
+ case SAI_GET_LBA_STATUS:
+ cmd = "GET_LBA_STATUS";
+ break;
+ default:
+ trace_seq_printf(p, "UNKNOWN");
+ goto out;
+ }
+
+ lba |= ((u64)cdb[2] << 56);
+ lba |= ((u64)cdb[3] << 48);
+ lba |= ((u64)cdb[4] << 40);
+ lba |= ((u64)cdb[5] << 32);
+ lba |= (cdb[6] << 24);
+ lba |= (cdb[7] << 16);
+ lba |= (cdb[8] << 8);
+ lba |= cdb[9];
+ alloc_len |= (cdb[10] << 24);
+ alloc_len |= (cdb[11] << 16);
+ alloc_len |= (cdb[12] << 8);
+ alloc_len |= cdb[13];
+
+ trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd,
+ (unsigned long long)lba, alloc_len);
+
+out:
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ switch (SERVICE_ACTION32(cdb)) {
+ case READ_32:
+ case VERIFY_32:
+ case WRITE_32:
+ case WRITE_SAME_32:
+ return scsi_trace_rw32(p, cdb, len);
+ default:
+ return scsi_trace_misc(p, cdb, len);
+ }
+}
+
+static const char *
+scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+
+ trace_seq_printf(p, "-");
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *
+scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ switch (cdb[0]) {
+ case READ_6:
+ case WRITE_6:
+ return scsi_trace_rw6(p, cdb, len);
+ case READ_10:
+ case VERIFY:
+ case WRITE_10:
+ case WRITE_SAME:
+ return scsi_trace_rw10(p, cdb, len);
+ case READ_12:
+ case VERIFY_12:
+ case WRITE_12:
+ return scsi_trace_rw12(p, cdb, len);
+ case READ_16:
+ case VERIFY_16:
+ case WRITE_16:
+ case WRITE_SAME_16:
+ return scsi_trace_rw16(p, cdb, len);
+ case UNMAP:
+ return scsi_trace_unmap(p, cdb, len);
+ case SERVICE_ACTION_IN:
+ return scsi_trace_service_action_in(p, cdb, len);
+ case VARIABLE_LENGTH_CMD:
+ return scsi_trace_varlen(p, cdb, len);
+ default:
+ return scsi_trace_misc(p, cdb, len);
+ }
+}
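
The decoders in the new scsi_trace.c above simply reassemble big-endian CDB fields with shifts and ORs. As a worked example (illustrative, not part of the patch): a READ(10) CDB of 28 00 00 00 12 34 00 00 08 00 makes scsi_trace_rw10() report lba=4660 (0x1234), txlen=8 and protect=0. The same arithmetic as a stand-alone user-space sketch, with a hypothetical helper name:

/* Stand-alone sketch of the rw10 field extraction above; decode_rw10()
 * is a hypothetical name and this is not part of the patch. */
#include <stdio.h>

static void decode_rw10(const unsigned char *cdb)
{
	unsigned long long lba, txlen;

	lba = ((unsigned long long)cdb[2] << 24) | (cdb[3] << 16) |
	      (cdb[4] << 8) | cdb[5];
	txlen = (cdb[7] << 8) | cdb[8];

	printf("lba=%llu txlen=%llu protect=%u\n",
	       lba, txlen, (unsigned int)(cdb[1] >> 5));
}

int main(void)
{
	/* READ(10), LBA 0x1234, transfer length 8 blocks */
	const unsigned char cdb[10] = {
		0x28, 0x00, 0x00, 0x00, 0x12, 0x34, 0x00, 0x00, 0x08, 0x00
	};

	decode_rw10(cdb);	/* prints: lba=4660 txlen=8 protect=0 */
	return 0;
}
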
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 6cfffc8..0681378 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -834,7 +834,7 @@ static ssize_t
store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int val;
+ unsigned long val;
struct fc_rport *rport = transport_class_to_rport(dev);
struct Scsi_Host *shost = rport_to_shost(rport);
struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -848,6 +848,12 @@ store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
return -EINVAL;
/*
+ * Check for overflow; dev_loss_tmo is u32
+ */
+ if (val > UINT_MAX)
+ return -EINVAL;
+
+ /*
* If fast_io_fail is off we have to cap
* dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT
*/
@@ -2865,7 +2871,7 @@ void
fc_remote_port_delete(struct fc_rport *rport)
{
struct Scsi_Host *shost = rport_to_shost(rport);
- int timeout = rport->dev_loss_tmo;
+ unsigned long timeout = rport->dev_loss_tmo;
unsigned long flags;
/*
@@ -3191,23 +3197,33 @@ fc_scsi_scan_rport(struct work_struct *work)
*
* This routine can be called from a FC LLD scsi_eh callback. It
* blocks the scsi_eh thread until the fc_rport leaves the
- * FC_PORTSTATE_BLOCKED. This is necessary to avoid the scsi_eh
- * failing recovery actions for blocked rports which would lead to
- * offlined SCSI devices.
+ * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is
+ * necessary to avoid the scsi_eh failing recovery actions for blocked
+ * rports which would lead to offlined SCSI devices.
+ *
+ * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED.
+ *	    FAST_IO_FAIL if the fast_io_fail_tmo fired; this should be
+ * passed back to scsi_eh.
*/
-void fc_block_scsi_eh(struct scsi_cmnd *cmnd)
+int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
- while (rport->port_state == FC_PORTSTATE_BLOCKED) {
+ while (rport->port_state == FC_PORTSTATE_BLOCKED &&
+ !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) {
spin_unlock_irqrestore(shost->host_lock, flags);
msleep(1000);
spin_lock_irqsave(shost->host_lock, flags);
}
spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
+ return FAST_IO_FAIL;
+
+ return 0;
}
EXPORT_SYMBOL(fc_block_scsi_eh);
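
With fc_block_scsi_eh() now returning an int, an FC LLD error-handler callback is expected to hand FAST_IO_FAIL straight back to scsi_eh instead of attempting recovery on a dead rport. A minimal caller sketch (the handler name is hypothetical and not taken from any in-tree driver):

/* Hypothetical LLD device-reset handler showing how the new return
 * value of fc_block_scsi_eh() would be consumed. */
static int example_lld_eh_device_reset(struct scsi_cmnd *cmnd)
{
	int ret;

	/* Block until the rport leaves FC_PORTSTATE_BLOCKED or the
	 * fast_io_fail timer fires. */
	ret = fc_block_scsi_eh(cmnd);
	if (ret == FAST_IO_FAIL)
		return ret;	/* pass FAST_IO_FAIL straight back to scsi_eh */

	/* ... issue the actual device reset to the hardware here ... */

	return SUCCESS;
}
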
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index de6c603..829cc37 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1434,6 +1434,8 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
#error RC16_LEN must not be more than SD_BUF_SIZE
#endif
+#define READ_CAPACITY_RETRIES_ON_RESET 10
+
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
@@ -1441,7 +1443,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
int the_result;
- int retries = 3;
+ int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
unsigned int alignment;
unsigned long long lba;
unsigned sector_size;
@@ -1470,6 +1472,13 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
* Invalid Field in CDB, just retry
* silently with RC10 */
return -EINVAL;
+ if (sense_valid &&
+ sshdr.sense_key == UNIT_ATTENTION &&
+ sshdr.asc == 0x29 && sshdr.ascq == 0x00)
+ /* Device reset might occur several times,
+ * give it one more chance */
+ if (--reset_retries > 0)
+ continue;
}
retries--;
@@ -1528,7 +1537,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
int the_result;
- int retries = 3;
+ int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
sector_t lba;
unsigned sector_size;
@@ -1544,8 +1553,16 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
- if (the_result)
+ if (the_result) {
sense_valid = scsi_sense_valid(&sshdr);
+ if (sense_valid &&
+ sshdr.sense_key == UNIT_ATTENTION &&
+ sshdr.asc == 0x29 && sshdr.ascq == 0x00)
+ /* Device reset might occur several times,
+ * give it one more chance */
+ if (--reset_retries > 0)
+ continue;
+ }
retries--;
} while (the_result && retries);
@@ -1574,6 +1591,8 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
static int sd_try_rc16_first(struct scsi_device *sdp)
{
+ if (sdp->host->max_cmd_len < 16)
+ return 0;
if (sdp->scsi_level > SCSI_SPC_2)
return 1;
if (scsi_device_protection(sdp))
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 5fda881..b701bf2 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -2224,14 +2224,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off
}
-void
-wd33c93_release(void)
-{
-}
-
EXPORT_SYMBOL(wd33c93_host_reset);
EXPORT_SYMBOL(wd33c93_init);
-EXPORT_SYMBOL(wd33c93_release);
EXPORT_SYMBOL(wd33c93_abort);
EXPORT_SYMBOL(wd33c93_queuecommand);
EXPORT_SYMBOL(wd33c93_intr);
diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h
index 00123f2..1ed5f3b 100644
--- a/drivers/scsi/wd33c93.h
+++ b/drivers/scsi/wd33c93.h
@@ -348,6 +348,5 @@ int wd33c93_queuecommand (struct scsi_cmnd *cmd,
void wd33c93_intr (struct Scsi_Host *instance);
int wd33c93_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
int wd33c93_host_reset (struct scsi_cmnd *);
-void wd33c93_release(void);
#endif /* WD33C93_H */
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 302836a..8b23165 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -1423,8 +1423,8 @@ config SERIAL_SC26XX_CONSOLE
Support for Console on SC2681/SC2692 serial ports.
config SERIAL_BFIN_SPORT
- tristate "Blackfin SPORT emulate UART (EXPERIMENTAL)"
- depends on BLACKFIN && EXPERIMENTAL
+ tristate "Blackfin SPORT emulate UART"
+ depends on BLACKFIN
select SERIAL_CORE
help
Enable SPORT emulate UART on Blackfin series.
@@ -1439,28 +1439,52 @@ config SERIAL_BFIN_SPORT_CONSOLE
config SERIAL_BFIN_SPORT0_UART
bool "Enable UART over SPORT0"
- depends on SERIAL_BFIN_SPORT && !(BF542 || BF542M || BF544 || BF544M)
+ depends on SERIAL_BFIN_SPORT && !(BF542 || BF544)
help
Enable UART over SPORT0
+config SERIAL_BFIN_SPORT0_UART_CTSRTS
+ bool "Enable UART over SPORT0 hardware flow control"
+ depends on SERIAL_BFIN_SPORT0_UART
+ help
+ Enable hardware flow control in the driver.
+
config SERIAL_BFIN_SPORT1_UART
bool "Enable UART over SPORT1"
depends on SERIAL_BFIN_SPORT
help
Enable UART over SPORT1
+config SERIAL_BFIN_SPORT1_UART_CTSRTS
+ bool "Enable UART over SPORT1 hardware flow control"
+ depends on SERIAL_BFIN_SPORT1_UART
+ help
+ Enable hardware flow control in the driver.
+
config SERIAL_BFIN_SPORT2_UART
bool "Enable UART over SPORT2"
depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
help
Enable UART over SPORT2
+config SERIAL_BFIN_SPORT2_UART_CTSRTS
+ bool "Enable UART over SPORT2 hardware flow control"
+ depends on SERIAL_BFIN_SPORT2_UART
+ help
+ Enable hardware flow control in the driver.
+
config SERIAL_BFIN_SPORT3_UART
bool "Enable UART over SPORT3"
depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
help
Enable UART over SPORT3
+config SERIAL_BFIN_SPORT3_UART_CTSRTS
+ bool "Enable UART over SPORT3 hardware flow control"
+ depends on SERIAL_BFIN_SPORT3_UART
+ help
+ Enable hardware flow control in the driver.
+
config SERIAL_TIMBERDALE
tristate "Support for timberdale UART"
select SERIAL_CORE
@@ -1499,4 +1523,56 @@ config SERIAL_GRLIB_GAISLER_APBUART_CONSOLE
help
Support for running a console on the GRLIB APBUART
+config SERIAL_ALTERA_JTAGUART
+ tristate "Altera JTAG UART support"
+ select SERIAL_CORE
+ help
+ This driver supports the Altera JTAG UART port.
+
+config SERIAL_ALTERA_JTAGUART_CONSOLE
+ bool "Altera JTAG UART console support"
+ depends on SERIAL_ALTERA_JTAGUART=y
+ select SERIAL_CORE_CONSOLE
+ help
+	  Enable an Altera JTAG UART port to be the system console.
+
+config SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS
+ bool "Bypass output when no connection"
+ depends on SERIAL_ALTERA_JTAGUART_CONSOLE
+ select SERIAL_CORE_CONSOLE
+ help
+ Bypass console output and keep going even if there is no
+ JTAG terminal connection with the host.
+
+config SERIAL_ALTERA_UART
+ tristate "Altera UART support"
+ select SERIAL_CORE
+ help
+ This driver supports the Altera softcore UART port.
+
+config SERIAL_ALTERA_UART_MAXPORTS
+ int "Maximum number of Altera UART ports"
+ depends on SERIAL_ALTERA_UART
+ default 4
+ help
+	  This setting lets you define the maximum number of Altera
+ UART ports. The usual default varies from board to board, and
+ this setting is a way of catering for that.
+
+config SERIAL_ALTERA_UART_BAUDRATE
+ int "Default baudrate for Altera UART ports"
+ depends on SERIAL_ALTERA_UART
+ default 115200
+ help
+	  This setting lets you define the default baudrate for the Altera
+	  UART ports. The usual default varies from board to board,
+ and this setting is a way of catering for that.
+
+config SERIAL_ALTERA_UART_CONSOLE
+ bool "Altera UART console support"
+ depends on SERIAL_ALTERA_UART=y
+ select SERIAL_CORE_CONSOLE
+ help
+	  Enable an Altera UART port to be the system console.
+
endmenu
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 328f107..208a855 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -82,3 +82,5 @@ obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
+obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o
+obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
diff --git a/drivers/serial/altera_jtaguart.c b/drivers/serial/altera_jtaguart.c
new file mode 100644
index 0000000..f9b49b5
--- /dev/null
+++ b/drivers/serial/altera_jtaguart.c
@@ -0,0 +1,504 @@
+/*
+ * altera_jtaguart.c -- Altera JTAG UART driver
+ *
+ * Based on mcf.c -- Freescale ColdFire UART driver
+ *
+ * (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
+ * (C) Copyright 2008, Thomas Chou <thomas@wytron.com.tw>
+ * (C) Copyright 2010, Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/altera_jtaguart.h>
+
+#define DRV_NAME "altera_jtaguart"
+
+/*
+ * Altera JTAG UART register definitions according to the Altera JTAG UART
+ * datasheet: http://www.altera.com/literature/hb/nios2/n2cpu_nii51009.pdf
+ */
+
+#define ALTERA_JTAGUART_SIZE 8
+
+#define ALTERA_JTAGUART_DATA_REG 0
+
+#define ALTERA_JTAGUART_DATA_DATA_MSK 0x000000FF
+#define ALTERA_JTAGUART_DATA_RVALID_MSK 0x00008000
+#define ALTERA_JTAGUART_DATA_RAVAIL_MSK 0xFFFF0000
+#define ALTERA_JTAGUART_DATA_RAVAIL_OFF 16
+
+#define ALTERA_JTAGUART_CONTROL_REG 4
+
+#define ALTERA_JTAGUART_CONTROL_RE_MSK 0x00000001
+#define ALTERA_JTAGUART_CONTROL_WE_MSK 0x00000002
+#define ALTERA_JTAGUART_CONTROL_RI_MSK 0x00000100
+#define ALTERA_JTAGUART_CONTROL_RI_OFF 8
+#define ALTERA_JTAGUART_CONTROL_WI_MSK 0x00000200
+#define ALTERA_JTAGUART_CONTROL_AC_MSK 0x00000400
+#define ALTERA_JTAGUART_CONTROL_WSPACE_MSK 0xFFFF0000
+#define ALTERA_JTAGUART_CONTROL_WSPACE_OFF 16
+
+/*
+ * Local per-uart structure.
+ */
+struct altera_jtaguart {
+ struct uart_port port;
+ unsigned int sigs; /* Local copy of line sigs */
+ unsigned long imr; /* Local IMR mirror */
+};
+
+static unsigned int altera_jtaguart_tx_empty(struct uart_port *port)
+{
+ return (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
+ ALTERA_JTAGUART_CONTROL_WSPACE_MSK) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int altera_jtaguart_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+}
+
+static void altera_jtaguart_set_mctrl(struct uart_port *port, unsigned int sigs)
+{
+}
+
+static void altera_jtaguart_start_tx(struct uart_port *port)
+{
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+
+ pp->imr |= ALTERA_JTAGUART_CONTROL_WE_MSK;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+}
+
+static void altera_jtaguart_stop_tx(struct uart_port *port)
+{
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+
+ pp->imr &= ~ALTERA_JTAGUART_CONTROL_WE_MSK;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+}
+
+static void altera_jtaguart_stop_rx(struct uart_port *port)
+{
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+
+ pp->imr &= ~ALTERA_JTAGUART_CONTROL_RE_MSK;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+}
+
+static void altera_jtaguart_break_ctl(struct uart_port *port, int break_state)
+{
+}
+
+static void altera_jtaguart_enable_ms(struct uart_port *port)
+{
+}
+
+static void altera_jtaguart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ /* Just copy the old termios settings back */
+ if (old)
+ tty_termios_copy_hw(termios, old);
+}
+
+static void altera_jtaguart_rx_chars(struct altera_jtaguart *pp)
+{
+ struct uart_port *port = &pp->port;
+ unsigned char ch, flag;
+ unsigned long status;
+
+ while ((status = readl(port->membase + ALTERA_JTAGUART_DATA_REG)) &
+ ALTERA_JTAGUART_DATA_RVALID_MSK) {
+ ch = status & ALTERA_JTAGUART_DATA_DATA_MSK;
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+ uart_insert_char(port, 0, 0, ch, flag);
+ }
+
+ tty_flip_buffer_push(port->state->port.tty);
+}
+
+static void altera_jtaguart_tx_chars(struct altera_jtaguart *pp)
+{
+ struct uart_port *port = &pp->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned int pending, count;
+
+ if (port->x_char) {
+ /* Send special char - probably flow control */
+ writel(port->x_char, port->membase + ALTERA_JTAGUART_DATA_REG);
+ port->x_char = 0;
+ port->icount.tx++;
+ return;
+ }
+
+ pending = uart_circ_chars_pending(xmit);
+ if (pending > 0) {
+ count = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
+ ALTERA_JTAGUART_CONTROL_WSPACE_MSK) >>
+ ALTERA_JTAGUART_CONTROL_WSPACE_OFF;
+ if (count > pending)
+ count = pending;
+ if (count > 0) {
+ pending -= count;
+ while (count--) {
+ writel(xmit->buf[xmit->tail],
+ port->membase + ALTERA_JTAGUART_DATA_REG);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+ if (pending < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+ }
+ }
+
+ if (pending == 0) {
+ pp->imr &= ~ALTERA_JTAGUART_CONTROL_WE_MSK;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+ }
+}
+
+static irqreturn_t altera_jtaguart_interrupt(int irq, void *data)
+{
+ struct uart_port *port = data;
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+ unsigned int isr;
+
+ isr = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) >>
+ ALTERA_JTAGUART_CONTROL_RI_OFF) & pp->imr;
+
+ spin_lock(&port->lock);
+
+ if (isr & ALTERA_JTAGUART_CONTROL_RE_MSK)
+ altera_jtaguart_rx_chars(pp);
+ if (isr & ALTERA_JTAGUART_CONTROL_WE_MSK)
+ altera_jtaguart_tx_chars(pp);
+
+ spin_unlock(&port->lock);
+
+ return IRQ_RETVAL(isr);
+}
+
+static void altera_jtaguart_config_port(struct uart_port *port, int flags)
+{
+ port->type = PORT_ALTERA_JTAGUART;
+
+ /* Clear mask, so no surprise interrupts. */
+ writel(0, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+}
+
+static int altera_jtaguart_startup(struct uart_port *port)
+{
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+ unsigned long flags;
+ int ret;
+
+ ret = request_irq(port->irq, altera_jtaguart_interrupt, IRQF_DISABLED,
+ DRV_NAME, port);
+ if (ret) {
+ pr_err(DRV_NAME ": unable to attach Altera JTAG UART %d "
+ "interrupt vector=%d\n", port->line, port->irq);
+ return ret;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Enable RX interrupts now */
+ pp->imr = ALTERA_JTAGUART_CONTROL_RE_MSK;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return 0;
+}
+
+static void altera_jtaguart_shutdown(struct uart_port *port)
+{
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Disable all interrupts now */
+ pp->imr = 0;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ free_irq(port->irq, port);
+}
+
+static const char *altera_jtaguart_type(struct uart_port *port)
+{
+ return (port->type == PORT_ALTERA_JTAGUART) ? "Altera JTAG UART" : NULL;
+}
+
+static int altera_jtaguart_request_port(struct uart_port *port)
+{
+ /* UARTs always present */
+ return 0;
+}
+
+static void altera_jtaguart_release_port(struct uart_port *port)
+{
+ /* Nothing to release... */
+}
+
+static int altera_jtaguart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_ALTERA_JTAGUART)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Define the basic serial functions we support.
+ */
+static struct uart_ops altera_jtaguart_ops = {
+ .tx_empty = altera_jtaguart_tx_empty,
+ .get_mctrl = altera_jtaguart_get_mctrl,
+ .set_mctrl = altera_jtaguart_set_mctrl,
+ .start_tx = altera_jtaguart_start_tx,
+ .stop_tx = altera_jtaguart_stop_tx,
+ .stop_rx = altera_jtaguart_stop_rx,
+ .enable_ms = altera_jtaguart_enable_ms,
+ .break_ctl = altera_jtaguart_break_ctl,
+ .startup = altera_jtaguart_startup,
+ .shutdown = altera_jtaguart_shutdown,
+ .set_termios = altera_jtaguart_set_termios,
+ .type = altera_jtaguart_type,
+ .request_port = altera_jtaguart_request_port,
+ .release_port = altera_jtaguart_release_port,
+ .config_port = altera_jtaguart_config_port,
+ .verify_port = altera_jtaguart_verify_port,
+};
+
+#define ALTERA_JTAGUART_MAXPORTS 1
+static struct altera_jtaguart altera_jtaguart_ports[ALTERA_JTAGUART_MAXPORTS];
+
+#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE)
+
+int __init early_altera_jtaguart_setup(struct altera_jtaguart_platform_uart
+ *platp)
+{
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < ALTERA_JTAGUART_MAXPORTS && platp[i].mapbase; i++) {
+ port = &altera_jtaguart_ports[i].port;
+
+ port->line = i;
+ port->type = PORT_ALTERA_JTAGUART;
+ port->mapbase = platp[i].mapbase;
+ port->membase = ioremap(port->mapbase, ALTERA_JTAGUART_SIZE);
+ port->iotype = SERIAL_IO_MEM;
+ port->irq = platp[i].irq;
+ port->flags = ASYNC_BOOT_AUTOCONF;
+ port->ops = &altera_jtaguart_ops;
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS)
+static void altera_jtaguart_console_putc(struct console *co, const char c)
+{
+ struct uart_port *port = &(altera_jtaguart_ports + co->index)->port;
+ unsigned long status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ while (((status = readl(port->membase + ALTERA_JTAGUART_CONTROL_REG)) &
+ ALTERA_JTAGUART_CONTROL_WSPACE_MSK) == 0) {
+ if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ return; /* no connection activity */
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ cpu_relax();
+ spin_lock_irqsave(&port->lock, flags);
+ }
+ writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+#else
+static void altera_jtaguart_console_putc(struct console *co, const char c)
+{
+ struct uart_port *port = &(altera_jtaguart_ports + co->index)->port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ while ((readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
+ ALTERA_JTAGUART_CONTROL_WSPACE_MSK) == 0) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ cpu_relax();
+ spin_lock_irqsave(&port->lock, flags);
+ }
+ writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+#endif
+
+static void altera_jtaguart_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ for (; count; count--, s++) {
+ altera_jtaguart_console_putc(co, *s);
+ if (*s == '\n')
+ altera_jtaguart_console_putc(co, '\r');
+ }
+}
+
+static int __init altera_jtaguart_console_setup(struct console *co,
+ char *options)
+{
+ struct uart_port *port;
+
+ if (co->index < 0 || co->index >= ALTERA_JTAGUART_MAXPORTS)
+ return -EINVAL;
+ port = &altera_jtaguart_ports[co->index].port;
+ if (port->membase == 0)
+ return -ENODEV;
+ return 0;
+}
+
+static struct uart_driver altera_jtaguart_driver;
+
+static struct console altera_jtaguart_console = {
+ .name = "ttyJ",
+ .write = altera_jtaguart_console_write,
+ .device = uart_console_device,
+ .setup = altera_jtaguart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &altera_jtaguart_driver,
+};
+
+static int __init altera_jtaguart_console_init(void)
+{
+ register_console(&altera_jtaguart_console);
+ return 0;
+}
+
+console_initcall(altera_jtaguart_console_init);
+
+#define ALTERA_JTAGUART_CONSOLE (&altera_jtaguart_console)
+
+#else
+
+#define ALTERA_JTAGUART_CONSOLE NULL
+
+#endif /* CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE */
+
+static struct uart_driver altera_jtaguart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "altera_jtaguart",
+ .dev_name = "ttyJ",
+ .major = ALTERA_JTAGUART_MAJOR,
+ .minor = ALTERA_JTAGUART_MINOR,
+ .nr = ALTERA_JTAGUART_MAXPORTS,
+ .cons = ALTERA_JTAGUART_CONSOLE,
+};
+
+static int __devinit altera_jtaguart_probe(struct platform_device *pdev)
+{
+ struct altera_jtaguart_platform_uart *platp = pdev->dev.platform_data;
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < ALTERA_JTAGUART_MAXPORTS && platp[i].mapbase; i++) {
+ port = &altera_jtaguart_ports[i].port;
+
+ port->line = i;
+ port->type = PORT_ALTERA_JTAGUART;
+ port->mapbase = platp[i].mapbase;
+ port->membase = ioremap(port->mapbase, ALTERA_JTAGUART_SIZE);
+ port->iotype = SERIAL_IO_MEM;
+ port->irq = platp[i].irq;
+ port->ops = &altera_jtaguart_ops;
+ port->flags = ASYNC_BOOT_AUTOCONF;
+
+ uart_add_one_port(&altera_jtaguart_driver, port);
+ }
+
+ return 0;
+}
+
+static int __devexit altera_jtaguart_remove(struct platform_device *pdev)
+{
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < ALTERA_JTAGUART_MAXPORTS; i++) {
+ port = &altera_jtaguart_ports[i].port;
+ if (port)
+ uart_remove_one_port(&altera_jtaguart_driver, port);
+ }
+
+ return 0;
+}
+
+static struct platform_driver altera_jtaguart_platform_driver = {
+ .probe = altera_jtaguart_probe,
+ .remove = __devexit_p(altera_jtaguart_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init altera_jtaguart_init(void)
+{
+ int rc;
+
+ rc = uart_register_driver(&altera_jtaguart_driver);
+ if (rc)
+ return rc;
+ rc = platform_driver_register(&altera_jtaguart_platform_driver);
+ if (rc) {
+ uart_unregister_driver(&altera_jtaguart_driver);
+ return rc;
+ }
+ return 0;
+}
+
+static void __exit altera_jtaguart_exit(void)
+{
+ platform_driver_unregister(&altera_jtaguart_platform_driver);
+ uart_unregister_driver(&altera_jtaguart_driver);
+}
+
+module_init(altera_jtaguart_init);
+module_exit(altera_jtaguart_exit);
+
+MODULE_DESCRIPTION("Altera JTAG UART driver");
+MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
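
The driver above binds to a platform device named "altera_jtaguart" and walks an array of platform data entries until it reaches a zero mapbase. A minimal board-file sketch along those lines is shown here; the exact layout of struct altera_jtaguart_platform_uart comes from <linux/altera_jtaguart.h>, so the field names and the example base address and IRQ below are assumptions taken from how the probe routine dereferences platp[i], not a canonical definition.

/* Hypothetical board support sketch -- not part of this patch. */
#include <linux/platform_device.h>
#include <linux/altera_jtaguart.h>

static struct altera_jtaguart_platform_uart board_jtaguart_platp[] = {
	{
		.mapbase = 0x80004c40,	/* example CSR base, board specific */
		.irq     = 8,		/* example IRQ, board specific */
	},
	{ /* zero mapbase terminates the list, as the probe loop expects */ },
};

static struct platform_device board_jtaguart_device = {
	.name = "altera_jtaguart",	/* must match DRV_NAME in the driver */
	.id   = -1,
	.dev  = {
		.platform_data = board_jtaguart_platp,
	},
};

static int __init board_add_jtaguart(void)
{
	return platform_device_register(&board_jtaguart_device);
}
arch_initcall(board_add_jtaguart);
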
diff --git a/drivers/serial/altera_uart.c b/drivers/serial/altera_uart.c
new file mode 100644
index 0000000..bcee156
--- /dev/null
+++ b/drivers/serial/altera_uart.c
@@ -0,0 +1,570 @@
+/*
+ * altera_uart.c -- Altera UART driver
+ *
+ * Based on mcf.c -- Freescale ColdFire UART driver
+ *
+ * (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
+ * (C) Copyright 2008, Thomas Chou <thomas@wytron.com.tw>
+ * (C) Copyright 2010, Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/altera_uart.h>
+
+#define DRV_NAME "altera_uart"
+
+/*
+ * Altera UART register definitions according to the Nios UART datasheet:
+ * http://www.altera.com/literature/ds/ds_nios_uart.pdf
+ */
+
+#define ALTERA_UART_SIZE 32
+
+#define ALTERA_UART_RXDATA_REG 0
+#define ALTERA_UART_TXDATA_REG 4
+#define ALTERA_UART_STATUS_REG 8
+#define ALTERA_UART_CONTROL_REG 12
+#define ALTERA_UART_DIVISOR_REG 16
+#define ALTERA_UART_EOP_REG 20
+
+#define ALTERA_UART_STATUS_PE_MSK 0x0001 /* parity error */
+#define ALTERA_UART_STATUS_FE_MSK 0x0002 /* framing error */
+#define ALTERA_UART_STATUS_BRK_MSK 0x0004 /* break */
+#define ALTERA_UART_STATUS_ROE_MSK 0x0008 /* RX overrun error */
+#define ALTERA_UART_STATUS_TOE_MSK 0x0010 /* TX overrun error */
+#define ALTERA_UART_STATUS_TMT_MSK 0x0020 /* TX shift register state */
+#define ALTERA_UART_STATUS_TRDY_MSK 0x0040 /* TX ready */
+#define ALTERA_UART_STATUS_RRDY_MSK 0x0080 /* RX ready */
+#define ALTERA_UART_STATUS_E_MSK 0x0100 /* exception condition */
+#define ALTERA_UART_STATUS_DCTS_MSK 0x0400 /* CTS logic-level change */
+#define ALTERA_UART_STATUS_CTS_MSK 0x0800 /* CTS logic state */
+#define ALTERA_UART_STATUS_EOP_MSK 0x1000 /* EOP written/read */
+
+ /* Enable interrupt on... */
+#define ALTERA_UART_CONTROL_PE_MSK 0x0001 /* ...parity error */
+#define ALTERA_UART_CONTROL_FE_MSK 0x0002 /* ...framing error */
+#define ALTERA_UART_CONTROL_BRK_MSK 0x0004 /* ...break */
+#define ALTERA_UART_CONTROL_ROE_MSK 0x0008 /* ...RX overrun */
+#define ALTERA_UART_CONTROL_TOE_MSK 0x0010 /* ...TX overrun */
+#define ALTERA_UART_CONTROL_TMT_MSK 0x0020 /* ...TX shift register empty */
+#define ALTERA_UART_CONTROL_TRDY_MSK 0x0040 /* ...TX ready */
+#define ALTERA_UART_CONTROL_RRDY_MSK 0x0080 /* ...RX ready */
+#define ALTERA_UART_CONTROL_E_MSK 0x0100 /* ...exception */
+
+#define ALTERA_UART_CONTROL_TRBK_MSK 0x0200 /* TX break */
+#define ALTERA_UART_CONTROL_DCTS_MSK 0x0400 /* Interrupt on CTS change */
+#define ALTERA_UART_CONTROL_RTS_MSK 0x0800 /* RTS signal */
+#define ALTERA_UART_CONTROL_EOP_MSK 0x1000 /* Interrupt on EOP */
+
+/*
+ * Local per-uart structure.
+ */
+struct altera_uart {
+ struct uart_port port;
+ unsigned int sigs; /* Local copy of line sigs */
+ unsigned short imr; /* Local IMR mirror */
+};
+
+static unsigned int altera_uart_tx_empty(struct uart_port *port)
+{
+ return (readl(port->membase + ALTERA_UART_STATUS_REG) &
+ ALTERA_UART_STATUS_TMT_MSK) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int altera_uart_get_mctrl(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+ unsigned int sigs;
+
+ spin_lock_irqsave(&port->lock, flags);
+ sigs =
+ (readl(port->membase + ALTERA_UART_STATUS_REG) &
+ ALTERA_UART_STATUS_CTS_MSK) ? TIOCM_CTS : 0;
+ sigs |= (pp->sigs & TIOCM_RTS);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return sigs;
+}
+
+static void altera_uart_set_mctrl(struct uart_port *port, unsigned int sigs)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ pp->sigs = sigs;
+ if (sigs & TIOCM_RTS)
+ pp->imr |= ALTERA_UART_CONTROL_RTS_MSK;
+ else
+ pp->imr &= ~ALTERA_UART_CONTROL_RTS_MSK;
+ writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_start_tx(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ pp->imr |= ALTERA_UART_CONTROL_TRDY_MSK;
+ writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_stop_tx(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK;
+ writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_stop_rx(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ pp->imr &= ~ALTERA_UART_CONTROL_RRDY_MSK;
+ writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_break_ctl(struct uart_port *port, int break_state)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (break_state == -1)
+ pp->imr |= ALTERA_UART_CONTROL_TRBK_MSK;
+ else
+ pp->imr &= ~ALTERA_UART_CONTROL_TRBK_MSK;
+ writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_enable_ms(struct uart_port *port)
+{
+}
+
+static void altera_uart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned long flags;
+ unsigned int baud, baudclk;
+
+ baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
+ baudclk = port->uartclk / baud;
+
+ if (old)
+ tty_termios_copy_hw(termios, old);
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ spin_lock_irqsave(&port->lock, flags);
+ writel(baudclk, port->membase + ALTERA_UART_DIVISOR_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_rx_chars(struct altera_uart *pp)
+{
+ struct uart_port *port = &pp->port;
+ unsigned char ch, flag;
+ unsigned short status;
+
+ while ((status = readl(port->membase + ALTERA_UART_STATUS_REG)) &
+ ALTERA_UART_STATUS_RRDY_MSK) {
+ ch = readl(port->membase + ALTERA_UART_RXDATA_REG);
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ if (status & ALTERA_UART_STATUS_E_MSK) {
+ writel(status, port->membase + ALTERA_UART_STATUS_REG);
+
+ if (status & ALTERA_UART_STATUS_BRK_MSK) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (status & ALTERA_UART_STATUS_PE_MSK) {
+ port->icount.parity++;
+ } else if (status & ALTERA_UART_STATUS_ROE_MSK) {
+ port->icount.overrun++;
+ } else if (status & ALTERA_UART_STATUS_FE_MSK) {
+ port->icount.frame++;
+ }
+
+ status &= port->read_status_mask;
+
+ if (status & ALTERA_UART_STATUS_BRK_MSK)
+ flag = TTY_BREAK;
+ else if (status & ALTERA_UART_STATUS_PE_MSK)
+ flag = TTY_PARITY;
+ else if (status & ALTERA_UART_STATUS_FE_MSK)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+ uart_insert_char(port, status, ALTERA_UART_STATUS_ROE_MSK, ch,
+ flag);
+ }
+
+ tty_flip_buffer_push(port->state->port.tty);
+}
+
+static void altera_uart_tx_chars(struct altera_uart *pp)
+{
+ struct uart_port *port = &pp->port;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (port->x_char) {
+ /* Send special char - probably flow control */
+ writel(port->x_char, port->membase + ALTERA_UART_TXDATA_REG);
+ port->x_char = 0;
+ port->icount.tx++;
+ return;
+ }
+
+ while (readl(port->membase + ALTERA_UART_STATUS_REG) &
+ ALTERA_UART_STATUS_TRDY_MSK) {
+ if (xmit->head == xmit->tail)
+ break;
+ writel(xmit->buf[xmit->tail],
+ port->membase + ALTERA_UART_TXDATA_REG);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (xmit->head == xmit->tail) {
+ pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK;
+ writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+ }
+}
+
+static irqreturn_t altera_uart_interrupt(int irq, void *data)
+{
+ struct uart_port *port = data;
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned int isr;
+
+ isr = readl(port->membase + ALTERA_UART_STATUS_REG) & pp->imr;
+ if (isr & ALTERA_UART_STATUS_RRDY_MSK)
+ altera_uart_rx_chars(pp);
+ if (isr & ALTERA_UART_STATUS_TRDY_MSK)
+ altera_uart_tx_chars(pp);
+ return IRQ_RETVAL(isr);
+}
+
+static void altera_uart_config_port(struct uart_port *port, int flags)
+{
+ port->type = PORT_ALTERA_UART;
+
+ /* Clear mask, so no surprise interrupts. */
+ writel(0, port->membase + ALTERA_UART_CONTROL_REG);
+ /* Clear status register */
+ writel(0, port->membase + ALTERA_UART_STATUS_REG);
+}
+
+static int altera_uart_startup(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+ int ret;
+
+ ret = request_irq(port->irq, altera_uart_interrupt, IRQF_DISABLED,
+ DRV_NAME, port);
+ if (ret) {
+ pr_err(DRV_NAME ": unable to attach Altera UART %d "
+ "interrupt vector=%d\n", port->line, port->irq);
+ return ret;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Enable RX interrupts now */
+ pp->imr = ALTERA_UART_CONTROL_RRDY_MSK;
+ writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return 0;
+}
+
+static void altera_uart_shutdown(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Disable all interrupts now */
+ pp->imr = 0;
+ writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ free_irq(port->irq, port);
+}
+
+static const char *altera_uart_type(struct uart_port *port)
+{
+ return (port->type == PORT_ALTERA_UART) ? "Altera UART" : NULL;
+}
+
+static int altera_uart_request_port(struct uart_port *port)
+{
+ /* UARTs always present */
+ return 0;
+}
+
+static void altera_uart_release_port(struct uart_port *port)
+{
+ /* Nothing to release... */
+}
+
+static int altera_uart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if ((ser->type != PORT_UNKNOWN) && (ser->type != PORT_ALTERA_UART))
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Define the basic serial functions we support.
+ */
+static struct uart_ops altera_uart_ops = {
+ .tx_empty = altera_uart_tx_empty,
+ .get_mctrl = altera_uart_get_mctrl,
+ .set_mctrl = altera_uart_set_mctrl,
+ .start_tx = altera_uart_start_tx,
+ .stop_tx = altera_uart_stop_tx,
+ .stop_rx = altera_uart_stop_rx,
+ .enable_ms = altera_uart_enable_ms,
+ .break_ctl = altera_uart_break_ctl,
+ .startup = altera_uart_startup,
+ .shutdown = altera_uart_shutdown,
+ .set_termios = altera_uart_set_termios,
+ .type = altera_uart_type,
+ .request_port = altera_uart_request_port,
+ .release_port = altera_uart_release_port,
+ .config_port = altera_uart_config_port,
+ .verify_port = altera_uart_verify_port,
+};
+
+static struct altera_uart altera_uart_ports[CONFIG_SERIAL_ALTERA_UART_MAXPORTS];
+
+#if defined(CONFIG_SERIAL_ALTERA_UART_CONSOLE)
+
+int __init early_altera_uart_setup(struct altera_uart_platform_uart *platp)
+{
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < CONFIG_SERIAL_ALTERA_UART_MAXPORTS && platp[i].mapbase; i++) {
+ port = &altera_uart_ports[i].port;
+
+ port->line = i;
+ port->type = PORT_ALTERA_UART;
+ port->mapbase = platp[i].mapbase;
+ port->membase = ioremap(port->mapbase, ALTERA_UART_SIZE);
+ port->iotype = SERIAL_IO_MEM;
+ port->irq = platp[i].irq;
+ port->uartclk = platp[i].uartclk;
+ port->flags = ASYNC_BOOT_AUTOCONF;
+ port->ops = &altera_uart_ops;
+ }
+
+ return 0;
+}
+
+static void altera_uart_console_putc(struct console *co, const char c)
+{
+ struct uart_port *port = &(altera_uart_ports + co->index)->port;
+ int i;
+
+ for (i = 0; i < 0x10000; i++) {
+ if (readl(port->membase + ALTERA_UART_STATUS_REG) &
+ ALTERA_UART_STATUS_TRDY_MSK)
+ break;
+ }
+ writel(c, port->membase + ALTERA_UART_TXDATA_REG);
+ for (i = 0; i < 0x10000; i++) {
+ if (readl(port->membase + ALTERA_UART_STATUS_REG) &
+ ALTERA_UART_STATUS_TRDY_MSK)
+ break;
+ }
+}
+
+static void altera_uart_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ for (; count; count--, s++) {
+ altera_uart_console_putc(co, *s);
+ if (*s == '\n')
+ altera_uart_console_putc(co, '\r');
+ }
+}
+
+static int __init altera_uart_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = CONFIG_SERIAL_ALTERA_UART_BAUDRATE;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index < 0 || co->index >= CONFIG_SERIAL_ALTERA_UART_MAXPORTS)
+ return -EINVAL;
+ port = &altera_uart_ports[co->index].port;
+ if (port->membase == 0)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver altera_uart_driver;
+
+static struct console altera_uart_console = {
+ .name = "ttyS",
+ .write = altera_uart_console_write,
+ .device = uart_console_device,
+ .setup = altera_uart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &altera_uart_driver,
+};
+
+static int __init altera_uart_console_init(void)
+{
+ register_console(&altera_uart_console);
+ return 0;
+}
+
+console_initcall(altera_uart_console_init);
+
+#define ALTERA_UART_CONSOLE (&altera_uart_console)
+
+#else
+
+#define ALTERA_UART_CONSOLE NULL
+
+#endif /* CONFIG_SERIAL_ALTERA_UART_CONSOLE */
+
+/*
+ * Define the altera_uart UART driver structure.
+ */
+static struct uart_driver altera_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = DRV_NAME,
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+ .minor = 64,
+ .nr = CONFIG_SERIAL_ALTERA_UART_MAXPORTS,
+ .cons = ALTERA_UART_CONSOLE,
+};
+
+static int __devinit altera_uart_probe(struct platform_device *pdev)
+{
+ struct altera_uart_platform_uart *platp = pdev->dev.platform_data;
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < CONFIG_SERIAL_ALTERA_UART_MAXPORTS && platp[i].mapbase; i++) {
+ port = &altera_uart_ports[i].port;
+
+ port->line = i;
+ port->type = PORT_ALTERA_UART;
+ port->mapbase = platp[i].mapbase;
+ port->membase = ioremap(port->mapbase, ALTERA_UART_SIZE);
+ port->iotype = SERIAL_IO_MEM;
+ port->irq = platp[i].irq;
+ port->uartclk = platp[i].uartclk;
+ port->ops = &altera_uart_ops;
+ port->flags = ASYNC_BOOT_AUTOCONF;
+
+ uart_add_one_port(&altera_uart_driver, port);
+ }
+
+ return 0;
+}
+
+static int altera_uart_remove(struct platform_device *pdev)
+{
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < CONFIG_SERIAL_ALTERA_UART_MAXPORTS; i++) {
+ port = &altera_uart_ports[i].port;
+ if (port)
+ uart_remove_one_port(&altera_uart_driver, port);
+ }
+
+ return 0;
+}
+
+static struct platform_driver altera_uart_platform_driver = {
+ .probe = altera_uart_probe,
+ .remove = __devexit_p(altera_uart_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = NULL,
+ },
+};
+
+static int __init altera_uart_init(void)
+{
+ int rc;
+
+ rc = uart_register_driver(&altera_uart_driver);
+ if (rc)
+ return rc;
+ rc = platform_driver_register(&altera_uart_platform_driver);
+ if (rc) {
+ uart_unregister_driver(&altera_uart_driver);
+ return rc;
+ }
+ return 0;
+}
+
+static void __exit altera_uart_exit(void)
+{
+ platform_driver_unregister(&altera_uart_platform_driver);
+ uart_unregister_driver(&altera_uart_driver);
+}
+
+module_init(altera_uart_init);
+module_exit(altera_uart_exit);
+
+MODULE_DESCRIPTION("Altera UART driver");
+MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
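
As with the JTAG UART, this driver expects a platform device named "altera_uart" carrying a zero-terminated array of platform data; here the probe loop additionally reads a uartclk field that set_termios divides by the baud rate to program the divisor register. A minimal board-side sketch follows; the struct field names and the example base address, IRQ, and clock are assumptions inferred from the probe code, not taken from <linux/altera_uart.h> itself.

/* Hypothetical board support sketch -- not part of this patch. */
#include <linux/platform_device.h>
#include <linux/altera_uart.h>

static struct altera_uart_platform_uart board_uart_platp[] = {
	{
		.mapbase = 0x80004c80,		/* example register base */
		.irq     = 9,			/* example IRQ */
		.uartclk = 50000000,		/* divisor = uartclk / baud */
	},
	{ /* zero mapbase ends the list */ },
};

static struct platform_device board_uart_device = {
	.name = "altera_uart",			/* matches DRV_NAME in the driver */
	.id   = -1,
	.dev  = { .platform_data = board_uart_platp },
};

static int __init board_add_uart(void)
{
	return platform_device_register(&board_uart_device);
}
arch_initcall(board_add_uart);
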
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index c88f8ad..e57fb3d 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -34,32 +34,12 @@
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
+#include <asm/bfin_sport.h>
#include <asm/delay.h>
#include <asm/portmux.h>
#include "bfin_sport_uart.h"
-#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
-unsigned short bfin_uart_pin_req_sport0[] =
- {P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, \
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0};
-#endif
-#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
-unsigned short bfin_uart_pin_req_sport1[] =
- {P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, \
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0};
-#endif
-#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
-unsigned short bfin_uart_pin_req_sport2[] =
- {P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS, \
- P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0};
-#endif
-#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
-unsigned short bfin_uart_pin_req_sport3[] =
- {P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS, \
- P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0};
-#endif
-
struct sport_uart_port {
struct uart_port port;
int err_irq;
@@ -69,9 +49,13 @@ struct sport_uart_port {
unsigned short txmask2;
unsigned char stopb;
/* unsigned char parib; */
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ int cts_pin;
+ int rts_pin;
+#endif
};
-static void sport_uart_tx_chars(struct sport_uart_port *up);
+static int sport_uart_tx_chars(struct sport_uart_port *up);
static void sport_stop_tx(struct uart_port *port);
static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
@@ -219,6 +203,59 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+static unsigned int sport_get_mctrl(struct uart_port *port)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+ if (up->cts_pin < 0)
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+
+ /* The CTS pin is active-low. */
+ if (SPORT_UART_GET_CTS(up))
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+ else
+ return TIOCM_DSR | TIOCM_CAR;
+}
+
+static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+ if (up->rts_pin < 0)
+ return;
+
+ /* The RTS pin is active-low. */
+ if (mctrl & TIOCM_RTS)
+ SPORT_UART_ENABLE_RTS(up);
+ else
+ SPORT_UART_DISABLE_RTS(up);
+}
+
+/*
+ * Handle any change of modem status signal.
+ */
+static irqreturn_t sport_mctrl_cts_int(int irq, void *dev_id)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)dev_id;
+ unsigned int status;
+
+ status = sport_get_mctrl(&up->port);
+ uart_handle_cts_change(&up->port, status & TIOCM_CTS);
+
+ return IRQ_HANDLED;
+}
+#else
+static unsigned int sport_get_mctrl(struct uart_port *port)
+{
+ pr_debug("%s enter\n", __func__);
+ return TIOCM_CTS | TIOCM_CD | TIOCM_DSR;
+}
+
+static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ pr_debug("%s enter\n", __func__);
+}
+#endif
+
/* Reqeust IRQ, Setup clock */
static int sport_startup(struct uart_port *port)
{
@@ -247,6 +284,21 @@ static int sport_startup(struct uart_port *port)
goto fail2;
}
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ if (up->cts_pin >= 0) {
+ if (request_irq(gpio_to_irq(up->cts_pin),
+ sport_mctrl_cts_int,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_DISABLED, "BFIN_SPORT_UART_CTS", up)) {
+ up->cts_pin = -1;
+ dev_info(port->dev, "Unable to attach BlackFin UART "
+ "over SPORT CTS interrupt. Disabling it.\n");
+ }
+ }
+ if (up->rts_pin >= 0)
+ gpio_direction_output(up->rts_pin, 0);
+#endif
+
return 0;
fail2:
free_irq(up->port.irq+1, up);
@@ -256,23 +308,35 @@ static int sport_startup(struct uart_port *port)
return ret;
}
-static void sport_uart_tx_chars(struct sport_uart_port *up)
+/*
+ * sport_uart_tx_chars
+ *
+ * ret 1 means need to enable sport.
+ * ret 0 means do nothing.
+ */
+static int sport_uart_tx_chars(struct sport_uart_port *up)
{
struct circ_buf *xmit = &up->port.state->xmit;
if (SPORT_GET_STAT(up) & TXF)
- return;
+ return 0;
if (up->port.x_char) {
tx_one_byte(up, up->port.x_char);
up->port.icount.tx++;
up->port.x_char = 0;
- return;
+ return 1;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
- sport_stop_tx(&up->port);
- return;
+ /* The waiting loop to stop SPORT TX from TX interrupt is
+ * too long. This may block SPORT RX interrupts and cause
+ * RX FIFO overflow. So, stop SPORT TX only after the last
+ * char in the TX FIFO has moved into the shift register.
+ */
+ if (SPORT_GET_STAT(up) & TXHRE)
+ sport_stop_tx(&up->port);
+ return 0;
}
while(!(SPORT_GET_STAT(up) & TXF) && !uart_circ_empty(xmit)) {
@@ -283,6 +347,8 @@ static void sport_uart_tx_chars(struct sport_uart_port *up)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&up->port);
+
+ return 1;
}
static unsigned int sport_tx_empty(struct uart_port *port)
@@ -298,23 +364,15 @@ static unsigned int sport_tx_empty(struct uart_port *port)
return 0;
}
-static unsigned int sport_get_mctrl(struct uart_port *port)
-{
- pr_debug("%s enter\n", __func__);
- return (TIOCM_CTS | TIOCM_CD | TIOCM_DSR);
-}
-
-static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- pr_debug("%s enter\n", __func__);
-}
-
static void sport_stop_tx(struct uart_port *port)
{
struct sport_uart_port *up = (struct sport_uart_port *)port;
pr_debug("%s enter\n", __func__);
+ if (!(SPORT_GET_TCR1(up) & TSPEN))
+ return;
+
/* Although the hold register is empty, last byte is still in shift
* register and not sent out yet. So, put a dummy data into TX FIFO.
* Then, sport tx stops when last byte is shift out and the dummy
@@ -337,11 +395,12 @@ static void sport_start_tx(struct uart_port *port)
pr_debug("%s enter\n", __func__);
/* Write data into SPORT FIFO before enable SPROT to transmit */
- sport_uart_tx_chars(up);
+ if (sport_uart_tx_chars(up)) {
+ /* Enable transmit, then an interrupt will be generated */
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
+ SSYNC();
+ }
- /* Enable transmit, then an interrupt will generated */
- SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
- SSYNC();
pr_debug("%s exit\n", __func__);
}
@@ -379,6 +438,10 @@ static void sport_shutdown(struct uart_port *port)
free_irq(up->port.irq, up);
free_irq(up->port.irq+1, up);
free_irq(up->err_irq, up);
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ if (up->cts_pin >= 0)
+ free_irq(gpio_to_irq(up->cts_pin), up);
+#endif
}
static const char *sport_type(struct uart_port *port)
@@ -448,27 +511,14 @@ static void sport_set_termios(struct uart_port *port,
/* up->parib = 1; */
}
- port->read_status_mask = OE;
- if (termios->c_iflag & INPCK)
- port->read_status_mask |= (FE | PE);
- if (termios->c_iflag & (BRKINT | PARMRK))
- port->read_status_mask |= BI;
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ port->read_status_mask = 0;
/*
* Characters to ignore
*/
port->ignore_status_mask = 0;
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= FE | PE;
- if (termios->c_iflag & IGNBRK) {
- port->ignore_status_mask |= BI;
- /*
- * If we're ignoring parity and break indicators,
- * ignore overruns too (for real raw support).
- */
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= OE;
- }
/* RX extract mask */
up->rxmask = 0x01 | (((up->csize + up->stopb) * 2 - 1) << 0x8);
@@ -488,8 +538,6 @@ static void sport_set_termios(struct uart_port *port,
/* uart baud rate */
port->uartclk = uart_get_baud_rate(port, termios, old, 0, get_sclk()/16);
- spin_lock_irqsave(&up->port.lock, flags);
-
/* Disable UART */
SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
@@ -542,6 +590,8 @@ struct uart_ops sport_uart_ops = {
static struct sport_uart_port *bfin_sport_uart_ports[BFIN_SPORT_UART_MAX_PORTS];
#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+#define CLASS_BFIN_SPORT_CONSOLE "bfin-sport-console"
+
static int __init
sport_uart_console_setup(struct console *co, char *options)
{
@@ -549,7 +599,11 @@ sport_uart_console_setup(struct console *co, char *options)
int baud = 57600;
int bits = 8;
int parity = 'n';
+# ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ int flow = 'r';
+# else
int flow = 'n';
+# endif
/* Check whether an invalid uart number has been specified */
if (co->index < 0 || co->index >= BFIN_SPORT_UART_MAX_PORTS)
@@ -690,11 +744,11 @@ static int __devinit sport_uart_probe(struct platform_device *pdev)
if (bfin_sport_uart_ports[pdev->id] == NULL) {
bfin_sport_uart_ports[pdev->id] =
- kmalloc(sizeof(struct sport_uart_port), GFP_KERNEL);
+ kzalloc(sizeof(struct sport_uart_port), GFP_KERNEL);
sport = bfin_sport_uart_ports[pdev->id];
if (!sport) {
dev_err(&pdev->dev,
- "Fail to kmalloc sport_uart_port\n");
+ "Fail to malloc sport_uart_port\n");
return -ENOMEM;
}
@@ -720,13 +774,13 @@ static int __devinit sport_uart_probe(struct platform_device *pdev)
goto out_error_free_peripherals;
}
- sport->port.membase = ioremap(res->start,
- res->end - res->start);
+ sport->port.membase = ioremap(res->start, resource_size(res));
if (!sport->port.membase) {
dev_err(&pdev->dev, "Cannot map sport IO\n");
ret = -ENXIO;
goto out_error_free_peripherals;
}
+ sport->port.mapbase = res->start;
sport->port.irq = platform_get_irq(pdev, 0);
if (sport->port.irq < 0) {
@@ -741,6 +795,22 @@ static int __devinit sport_uart_probe(struct platform_device *pdev)
ret = -ENOENT;
goto out_error_unmap;
}
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (res == NULL)
+ sport->cts_pin = -1;
+ else
+ sport->cts_pin = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+ if (res == NULL)
+ sport->rts_pin = -1;
+ else
+ sport->rts_pin = res->start;
+
+ if (sport->rts_pin >= 0)
+ gpio_request(sport->rts_pin, DRV_NAME);
+#endif
}
#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
@@ -779,6 +849,10 @@ static int __devexit sport_uart_remove(struct platform_device *pdev)
if (sport) {
uart_remove_one_port(&sport_uart_reg, &sport->port);
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+ if (sport->rts_pin >= 0)
+ gpio_free(sport->rts_pin);
+#endif
iounmap(sport->port.membase);
peripheral_free_list(
(unsigned short *)pdev->dev.platform_data);
@@ -802,7 +876,7 @@ static struct platform_driver sport_uart_driver = {
#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
static __initdata struct early_platform_driver early_sport_uart_driver = {
- .class_str = DRV_NAME,
+ .class_str = CLASS_BFIN_SPORT_CONSOLE,
.pdrv = &sport_uart_driver,
.requested_id = EARLY_PLATFORM_ID_UNSET,
};
@@ -811,7 +885,8 @@ static int __init sport_uart_rs_console_init(void)
{
early_platform_driver_register(&early_sport_uart_driver, DRV_NAME);
- early_platform_driver_probe(DRV_NAME, BFIN_SPORT_UART_MAX_PORTS, 0);
+ early_platform_driver_probe(CLASS_BFIN_SPORT_CONSOLE,
+ BFIN_SPORT_UART_MAX_PORTS, 0);
register_console(&sport_uart_console);
@@ -824,7 +899,7 @@ static int __init sport_uart_init(void)
{
int ret;
- pr_info("Serial: Blackfin uart over sport driver\n");
+ pr_info("Blackfin uart over sport driver\n");
ret = uart_register_driver(&sport_uart_reg);
if (ret) {
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/serial/bfin_sport_uart.h
index abe0361..9ce253e 100644
--- a/drivers/serial/bfin_sport_uart.h
+++ b/drivers/serial/bfin_sport_uart.h
@@ -37,7 +37,21 @@
#define SPORT_GET_TFSDIV(sport) bfin_read16(((sport)->port.membase + OFFSET_TFSDIV))
#define SPORT_GET_TX(sport) bfin_read16(((sport)->port.membase + OFFSET_TX))
#define SPORT_GET_RX(sport) bfin_read16(((sport)->port.membase + OFFSET_RX))
-#define SPORT_GET_RX32(sport) bfin_read32(((sport)->port.membase + OFFSET_RX))
+/*
+ * If another interrupt fires while doing a 32-bit read from RX FIFO,
+ * a fake RX underflow error will be generated. So disable interrupts
+ * to prevent interruption while reading the FIFO.
+ */
+#define SPORT_GET_RX32(sport) \
+({ \
+ unsigned int __ret; \
+ if (ANOMALY_05000473) \
+ local_irq_disable(); \
+ __ret = bfin_read32((sport)->port.membase + OFFSET_RX); \
+ if (ANOMALY_05000473) \
+ local_irq_enable(); \
+ __ret; \
+})
#define SPORT_GET_RCR1(sport) bfin_read16(((sport)->port.membase + OFFSET_RCR1))
#define SPORT_GET_RCR2(sport) bfin_read16(((sport)->port.membase + OFFSET_RCR2))
#define SPORT_GET_RCLKDIV(sport) bfin_read16(((sport)->port.membase + OFFSET_RCLKDIV))
@@ -58,4 +72,15 @@
#define SPORT_TX_FIFO_SIZE 8
+#define SPORT_UART_GET_CTS(x) gpio_get_value(x->cts_pin)
+#define SPORT_UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
+#define SPORT_UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
+
+#if defined(CONFIG_SERIAL_BFIN_SPORT0_UART_CTSRTS) \
+ || defined(CONFIG_SERIAL_BFIN_SPORT1_UART_CTSRTS) \
+ || defined(CONFIG_SERIAL_BFIN_SPORT2_UART_CTSRTS) \
+ || defined(CONFIG_SERIAL_BFIN_SPORT3_UART_CTSRTS)
+# define CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+#endif
+
#endif /* _BFIN_SPORT_UART_H */
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
index 786ba85..67ca642 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/serial/timbuart.c
@@ -68,12 +68,22 @@ static void timbuart_start_tx(struct uart_port *port)
tasklet_schedule(&uart->tasklet);
}
+static unsigned int timbuart_tx_empty(struct uart_port *port)
+{
+ u32 isr = ioread32(port->membase + TIMBUART_ISR);
+
+ return (isr & TXBE) ? TIOCSER_TEMT : 0;
+}
+
static void timbuart_flush_buffer(struct uart_port *port)
{
- u8 ctl = ioread8(port->membase + TIMBUART_CTRL) | TIMBUART_CTRL_FLSHTX;
+ if (!timbuart_tx_empty(port)) {
+ u8 ctl = ioread8(port->membase + TIMBUART_CTRL) |
+ TIMBUART_CTRL_FLSHTX;
- iowrite8(ctl, port->membase + TIMBUART_CTRL);
- iowrite32(TXBF, port->membase + TIMBUART_ISR);
+ iowrite8(ctl, port->membase + TIMBUART_CTRL);
+ iowrite32(TXBF, port->membase + TIMBUART_ISR);
+ }
}
static void timbuart_rx_chars(struct uart_port *port)
@@ -195,13 +205,6 @@ void timbuart_tasklet(unsigned long arg)
dev_dbg(uart->port.dev, "%s leaving\n", __func__);
}
-static unsigned int timbuart_tx_empty(struct uart_port *port)
-{
- u32 isr = ioread32(port->membase + TIMBUART_ISR);
-
- return (isr & TXBE) ? TIOCSER_TEMT : 0;
-}
-
static unsigned int timbuart_get_mctrl(struct uart_port *port)
{
u8 cts = ioread8(port->membase + TIMBUART_CTRL);
@@ -220,7 +223,7 @@ static void timbuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
if (mctrl & TIOCM_RTS)
iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
else
- iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
+ iowrite8(0, port->membase + TIMBUART_CTRL);
}
static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier)
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index f0a6c61..e6639a9 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -86,7 +86,7 @@ static int ulite_receive(struct uart_port *port, int stat)
/* stats */
if (stat & ULITE_STATUS_RXVALID) {
port->icount.rx++;
- ch = readb(port->membase + ULITE_RX);
+ ch = ioread32be(port->membase + ULITE_RX);
if (stat & ULITE_STATUS_PARITY)
port->icount.parity++;
@@ -131,7 +131,7 @@ static int ulite_transmit(struct uart_port *port, int stat)
return 0;
if (port->x_char) {
- writeb(port->x_char, port->membase + ULITE_TX);
+ iowrite32be(port->x_char, port->membase + ULITE_TX);
port->x_char = 0;
port->icount.tx++;
return 1;
@@ -140,7 +140,7 @@ static int ulite_transmit(struct uart_port *port, int stat)
if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return 0;
- writeb(xmit->buf[xmit->tail], port->membase + ULITE_TX);
+ iowrite32be(xmit->buf[xmit->tail], port->membase + ULITE_TX);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
port->icount.tx++;
@@ -157,7 +157,7 @@ static irqreturn_t ulite_isr(int irq, void *dev_id)
int busy, n = 0;
do {
- int stat = readb(port->membase + ULITE_STATUS);
+ int stat = ioread32be(port->membase + ULITE_STATUS);
busy = ulite_receive(port, stat);
busy |= ulite_transmit(port, stat);
n++;
@@ -178,7 +178,7 @@ static unsigned int ulite_tx_empty(struct uart_port *port)
unsigned int ret;
spin_lock_irqsave(&port->lock, flags);
- ret = readb(port->membase + ULITE_STATUS);
+ ret = ioread32be(port->membase + ULITE_STATUS);
spin_unlock_irqrestore(&port->lock, flags);
return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
@@ -201,7 +201,7 @@ static void ulite_stop_tx(struct uart_port *port)
static void ulite_start_tx(struct uart_port *port)
{
- ulite_transmit(port, readb(port->membase + ULITE_STATUS));
+ ulite_transmit(port, ioread32be(port->membase + ULITE_STATUS));
}
static void ulite_stop_rx(struct uart_port *port)
@@ -230,17 +230,17 @@ static int ulite_startup(struct uart_port *port)
if (ret)
return ret;
- writeb(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX,
+ iowrite32be(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX,
port->membase + ULITE_CONTROL);
- writeb(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+ iowrite32be(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
return 0;
}
static void ulite_shutdown(struct uart_port *port)
{
- writeb(0, port->membase + ULITE_CONTROL);
- readb(port->membase + ULITE_CONTROL); /* dummy */
+ iowrite32be(0, port->membase + ULITE_CONTROL);
+ ioread32be(port->membase + ULITE_CONTROL); /* dummy */
free_irq(port->irq, port);
}
@@ -352,7 +352,7 @@ static void ulite_console_wait_tx(struct uart_port *port)
/* Spin waiting for TX fifo to have space available */
for (i = 0; i < 100000; i++) {
- val = readb(port->membase + ULITE_STATUS);
+ val = ioread32be(port->membase + ULITE_STATUS);
if ((val & ULITE_STATUS_TXFULL) == 0)
break;
cpu_relax();
@@ -362,7 +362,7 @@ static void ulite_console_wait_tx(struct uart_port *port)
static void ulite_console_putchar(struct uart_port *port, int ch)
{
ulite_console_wait_tx(port);
- writeb(ch, port->membase + ULITE_TX);
+ iowrite32be(ch, port->membase + ULITE_TX);
}
static void ulite_console_write(struct console *co, const char *s,
@@ -379,8 +379,8 @@ static void ulite_console_write(struct console *co, const char *s,
spin_lock_irqsave(&port->lock, flags);
/* save and disable interrupt */
- ier = readb(port->membase + ULITE_STATUS) & ULITE_STATUS_IE;
- writeb(0, port->membase + ULITE_CONTROL);
+ ier = ioread32be(port->membase + ULITE_STATUS) & ULITE_STATUS_IE;
+ iowrite32be(0, port->membase + ULITE_CONTROL);
uart_console_write(port, s, count, ulite_console_putchar);
@@ -388,7 +388,7 @@ static void ulite_console_write(struct console *co, const char *s,
/* restore interrupt state */
if (ier)
- writeb(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+ iowrite32be(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
@@ -601,7 +601,7 @@ ulite_of_probe(struct of_device *op, const struct of_device_id *match)
id = of_get_property(op->node, "port-number", NULL);
- return ulite_assign(&op->dev, id ? *id : -1, res.start+3, irq);
+ return ulite_assign(&op->dev, id ? *id : -1, res.start, irq);
}
static int __devexit ulite_of_remove(struct of_device *op)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a191fa2..f950b63 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -180,10 +180,10 @@ config SPI_OMAP_UWIRE
This hooks up to the MicroWire controller on OMAP1 chips.
config SPI_OMAP24XX
- tristate "McSPI driver for OMAP24xx/OMAP34xx"
- depends on ARCH_OMAP2 || ARCH_OMAP3
+ tristate "McSPI driver for OMAP"
+ depends on ARCH_OMAP2PLUS
help
- SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI
+ SPI master controller for OMAP24XX and later Multichannel SPI
(McSPI) modules.
config SPI_OMAP_100K
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 9681536..59ae76b 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -233,6 +233,8 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
{
if (!cc->dev)
return; /* We don't have a ChipCommon */
+ if (cc->dev->id.revision >= 11)
+ cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
ssb_pmu_init(cc);
chipco_powercontrol_init(cc);
ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
@@ -370,6 +372,7 @@ u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value);
}
+EXPORT_SYMBOL(ssb_chipco_gpio_control);
u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index bc9bdb2..51275aa 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -834,6 +834,9 @@ int ssb_bus_pcibus_register(struct ssb_bus *bus,
if (!err) {
ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on "
"PCI device %s\n", dev_name(&host_pci->dev));
+ } else {
+ ssb_printk(KERN_ERR PFX "Failed to register PCI version"
+ " of SSB with error %d\n", err);
}
return err;
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index a8dbb06..989e275 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -168,7 +168,7 @@ err_pci:
}
/* Get the word-offset for a SSB_SPROM_XXX define. */
-#define SPOFF(offset) (((offset) - SSB_SPROM_BASE) / sizeof(u16))
+#define SPOFF(offset) ((offset) / sizeof(u16))
/* Helper to extract some _offset, which is one of the SSB_SPROM_XXX defines. */
#define SPEX16(_outvar, _offset, _mask, _shift) \
out->_outvar = ((in[SPOFF(_offset)] & (_mask)) >> (_shift))
@@ -254,7 +254,7 @@ static int sprom_do_read(struct ssb_bus *bus, u16 *sprom)
int i;
for (i = 0; i < bus->sprom_size; i++)
- sprom[i] = ioread16(bus->mmio + SSB_SPROM_BASE + (i * 2));
+ sprom[i] = ioread16(bus->mmio + bus->sprom_offset + (i * 2));
return 0;
}
@@ -285,7 +285,7 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
ssb_printk("75%%");
else if (i % 2)
ssb_printk(".");
- writew(sprom[i], bus->mmio + SSB_SPROM_BASE + (i * 2));
+ writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2));
mmiowb();
msleep(20);
}
@@ -621,6 +621,14 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
int err = -ENOMEM;
u16 *buf;
+ if (!ssb_is_sprom_available(bus)) {
+ ssb_printk(KERN_ERR PFX "No SPROM available!\n");
+ return -ENODEV;
+ }
+
+ bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ?
+ SSB_SPROM_BASE1 : SSB_SPROM_BASE31;
+
buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
if (!buf)
goto out;
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index f2f920f..007bc3a 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -176,3 +176,17 @@ const struct ssb_sprom *ssb_get_fallback_sprom(void)
{
return fallback_sprom;
}
+
+/* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */
+bool ssb_is_sprom_available(struct ssb_bus *bus)
+{
+ /* status register only exists on chipcommon rev >= 11 and we need to
+ check for >= 31 only */
+ /* this routine differs from specs as we do not access SPROM directly
+ on PCMCIA */
+ if (bus->bustype == SSB_BUSTYPE_PCI &&
+ bus->chipco.dev->id.revision >= 31)
+ return bus->chipco.capabilities & SSB_CHIPCO_CAP_SPROM;
+
+ return true;
+}
diff --git a/drivers/staging/arlan/arlan-main.c b/drivers/staging/arlan/arlan-main.c
index 88fdd53..8028452 100644
--- a/drivers/staging/arlan/arlan-main.c
+++ b/drivers/staging/arlan/arlan-main.c
@@ -1458,7 +1458,7 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
!netdev_mc_empty(dev))
{
char hw_dst_addr[6];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
memcpy_fromio(hw_dst_addr, arlan->ultimateDestAddress, 6);
@@ -1469,12 +1469,13 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
printk(KERN_ERR "%s mcast 0x0100 \n", dev->name);
else if (hw_dst_addr[1] == 0x40)
printk(KERN_ERR "%s m/bcast 0x0140 \n", dev->name);
- netdev_for_each_mc_entry(dmi, dev) {
+ netdev_for_each_mc_entry(ha, dev) {
if (arlan_debug & ARLAN_DEBUG_HEADER_DUMP)
printk(KERN_ERR "%s mcl %pM\n",
- dev->name, dmi->dmi_addr);
+ dev->name,
+ ha->addr);
for (i = 0; i < 6; i++)
- if (dmi->dmi_addr[i] != hw_dst_addr[i])
+ if (ha->addr[i] != hw_dst_addr[i])
break;
if (i == 6)
break;
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index 1786db2..8b274b7 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -48,6 +48,7 @@ Devices: [Quatech] DAQP-208 (daqp), DAQP-308
*/
#include "../comedidev.h"
+#include <linux/semaphore.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
diff --git a/drivers/staging/et131x/et131x_netdev.c b/drivers/staging/et131x/et131x_netdev.c
index ab047f2..abc82c3 100644
--- a/drivers/staging/et131x/et131x_netdev.c
+++ b/drivers/staging/et131x/et131x_netdev.c
@@ -404,7 +404,7 @@ void et131x_multicast(struct net_device *netdev)
struct et131x_adapter *adapter = netdev_priv(netdev);
uint32_t PacketFilter = 0;
unsigned long flags;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
spin_lock_irqsave(&adapter->Lock, flags);
@@ -449,10 +449,10 @@ void et131x_multicast(struct net_device *netdev)
/* Set values in the private adapter struct */
i = 0;
- netdev_for_each_mc_addr(mclist, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
if (i == NIC_MAX_MCAST_LIST)
break;
- memcpy(adapter->MCList[i++], mclist->dmi_addr, ETH_ALEN);
+ memcpy(adapter->MCList[i++], ha->addr, ETH_ALEN);
}
adapter->MCAddressCount = i;
diff --git a/drivers/staging/rt2860/iface/rtmp_usb.h b/drivers/staging/rt2860/iface/rtmp_usb.h
index 6bb384a..33479cc 100644
--- a/drivers/staging/rt2860/iface/rtmp_usb.h
+++ b/drivers/staging/rt2860/iface/rtmp_usb.h
@@ -81,8 +81,8 @@ extern u8 EpToQueue[6];
#define RT28XX_PUT_DEVICE usb_put_dev
#define RTUSB_ALLOC_URB(iso) usb_alloc_urb(iso, GFP_ATOMIC)
#define RTUSB_SUBMIT_URB(pUrb) usb_submit_urb(pUrb, GFP_ATOMIC)
-#define RTUSB_URB_ALLOC_BUFFER(pUsb_Dev, BufSize, pDma_addr) usb_buffer_alloc(pUsb_Dev, BufSize, GFP_ATOMIC, pDma_addr)
-#define RTUSB_URB_FREE_BUFFER(pUsb_Dev, BufSize, pTransferBuf, Dma_addr) usb_buffer_free(pUsb_Dev, BufSize, pTransferBuf, Dma_addr)
+#define RTUSB_URB_ALLOC_BUFFER(pUsb_Dev, BufSize, pDma_addr) usb_alloc_coherent(pUsb_Dev, BufSize, GFP_ATOMIC, pDma_addr)
+#define RTUSB_URB_FREE_BUFFER(pUsb_Dev, BufSize, pTransferBuf, Dma_addr) usb_free_coherent(pUsb_Dev, BufSize, pTransferBuf, Dma_addr)
#define RTUSB_FREE_URB(pUrb) usb_free_urb(pUrb)
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 7daeced..bebf0fd 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -1367,12 +1367,12 @@ static void slic_mcast_set_list(struct net_device *dev)
struct adapter *adapter = netdev_priv(dev);
int status = STATUS_SUCCESS;
char *addresses;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
ASSERT(adapter);
- netdev_for_each_mc_addr(mc_list, dev) {
- addresses = (char *) &mc_list->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addresses = (char *) &ha->addr;
status = slic_mcast_add_list(adapter, addresses);
if (status != STATUS_SUCCESS)
break;
diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c
index a78ade0..577f2bf 100644
--- a/drivers/staging/udlfb/udlfb.c
+++ b/drivers/staging/udlfb/udlfb.c
@@ -1063,7 +1063,8 @@ static ssize_t metrics_misc_show(struct device *fbdev,
atomic_read(&dev->lost_pixels) ? "yes" : "no");
}
-static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *a,
+static ssize_t edid_show(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *a,
char *buf, loff_t off, size_t count) {
struct device *fbdev = container_of(kobj, struct device, kobj);
struct fb_info *fb_info = dev_get_drvdata(fbdev);
@@ -1508,8 +1509,8 @@ static void dlfb_free_urb_list(struct dlfb_data *dev)
urb = unode->urb;
/* Free each separately allocated piece */
- usb_buffer_free(urb->dev, dev->urbs.size,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, dev->urbs.size,
+ urb->transfer_buffer, urb->transfer_dma);
usb_free_urb(urb);
kfree(node);
}
@@ -1543,8 +1544,8 @@ static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size)
}
unode->urb = urb;
- buf = usb_buffer_alloc(dev->udev, MAX_TRANSFER, GFP_KERNEL,
- &urb->transfer_dma);
+ buf = usb_alloc_coherent(dev->udev, MAX_TRANSFER, GFP_KERNEL,
+ &urb->transfer_dma);
if (!buf) {
kfree(unode);
usb_free_urb(urb);
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index bc26740..da30658 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -21,7 +21,7 @@
#include "usbip_common.h"
#include "stub.h"
-#include "../../usb/core/hcd.h"
+#include <linux/usb/hcd.h>
static int is_clear_halt_cmd(struct urb *urb)
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index e3fa421..5240816 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -562,7 +562,7 @@ EXPORT_SYMBOL_GPL(sockfd_to_socket);
/* there may be more cases to tweak the flags. */
static unsigned int tweak_transfer_flags(unsigned int flags)
{
- flags &= ~(URB_NO_TRANSFER_DMA_MAP|URB_NO_SETUP_DMA_MAP);
+ flags &= ~URB_NO_TRANSFER_DMA_MAP;
return flags;
}
diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
index 5e37517..41a1fe5 100644
--- a/drivers/staging/usbip/vhci.h
+++ b/drivers/staging/usbip/vhci.h
@@ -18,7 +18,7 @@
*/
#include <linux/platform_device.h>
-#include "../../usb/core/hcd.h"
+#include <linux/usb/hcd.h>
struct vhci_device {
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index e40a2e9..18f4dfe 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -3080,7 +3080,7 @@ static void device_set_multi(struct net_device *dev) {
PSMgmtObject pMgmt = pDevice->pMgmt;
u32 mc_filter[2];
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byRxMode));
@@ -3100,8 +3100,8 @@ static void device_set_multi(struct net_device *dev) {
}
else {
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
}
MACvSelectPage1(pDevice->PortOffset);
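
The vt6655 conversion above keeps the usual 64-bit multicast hash: the top six bits of the Ethernet CRC select one of 64 filter bits. A sketch of that computation with the new list walker (register programming left out) is:

	#include <linux/netdevice.h>
	#include <linux/crc32.h>
	#include <linux/if_ether.h>
	#include <linux/string.h>

	/* Build a 64-bit multicast hash filter from the device's mc list. */
	static void my_build_mc_filter(struct net_device *dev, u32 mc_filter[2])
	{
		struct netdev_hw_addr *ha;

		memset(mc_filter, 0, 2 * sizeof(u32));
		netdev_for_each_mc_addr(ha, dev) {
			/* top 6 bits of the CRC pick one of 64 filter bits */
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
	}
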
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index a8e1adb..49270db 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1596,7 +1596,7 @@ static void device_set_multi(struct net_device *dev) {
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
u32 mc_filter[2];
int ii;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
BYTE pbyData[8] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
BYTE byTmpMode = 0;
int rc;
@@ -1632,8 +1632,8 @@ static void device_set_multi(struct net_device *dev) {
}
else {
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
}
for (ii = 0; ii < 4; ii++) {
diff --git a/drivers/staging/wavelan/wavelan.c b/drivers/staging/wavelan/wavelan.c
index 54ca631..f44ef35 100644
--- a/drivers/staging/wavelan/wavelan.c
+++ b/drivers/staging/wavelan/wavelan.c
@@ -3419,7 +3419,7 @@ static void wv_82586_config(struct net_device * dev)
ac_cfg_t cfg; /* Configure action */
ac_ias_t ias; /* IA-setup action */
ac_mcs_t mcs; /* Multicast setup */
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
#ifdef DEBUG_CONFIG_TRACE
printk(KERN_DEBUG "%s: ->wv_82586_config()\n", dev->name);
@@ -3531,16 +3531,16 @@ static void wv_82586_config(struct net_device * dev)
/* Any address to set? */
if (lp->mc_count) {
- netdev_for_each_mc_addr(dmi, dev)
- outsw(PIOP1(ioaddr), (u16 *) dmi->dmi_addr,
+ netdev_for_each_mc_addr(ha, dev)
+ outsw(PIOP1(ioaddr), (u16 *) ha->addr,
WAVELAN_ADDR_SIZE >> 1);
#ifdef DEBUG_CONFIG_INFO
printk(KERN_DEBUG
"%s: wv_82586_config(): set %d multicast addresses:\n",
dev->name, lp->mc_count);
- netdev_for_each_mc_addr(dmi, dev)
- printk(KERN_DEBUG " %pM\n", dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ printk(KERN_DEBUG " %pM\n", ha->addr);
#endif
}
diff --git a/drivers/staging/wavelan/wavelan_cs.c b/drivers/staging/wavelan/wavelan_cs.c
index 37fa855..e3bb40b 100644
--- a/drivers/staging/wavelan/wavelan_cs.c
+++ b/drivers/staging/wavelan/wavelan_cs.c
@@ -3591,20 +3591,20 @@ wv_82593_config(struct net_device * dev)
/* If roaming is enabled, join the "Beacon Request" multicast group... */
/* But only if it's not in there already! */
if(do_roaming)
- dev_mc_add(dev,WAVELAN_BEACON_ADDRESS, WAVELAN_ADDR_SIZE, 1);
+ dev_mc_add(dev, WAVELAN_BEACON_ADDRESS);
#endif /* WAVELAN_ROAMING */
/* If any multicast address to set */
if(lp->mc_count)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int addrs_len = WAVELAN_ADDR_SIZE * lp->mc_count;
#ifdef DEBUG_CONFIG_INFO
printk(KERN_DEBUG "%s: wv_hw_config(): set %d multicast addresses:\n",
dev->name, lp->mc_count);
- netdev_for_each_mc_addr(dmi, dev)
- printk(KERN_DEBUG " %pM\n", dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ printk(KERN_DEBUG " %pM\n", ha->addr);
#endif
/* Initialize adapter's ethernet multicast addresses */
@@ -3612,8 +3612,8 @@ wv_82593_config(struct net_device * dev)
outb(((TX_BASE >> 8) & PIORH_MASK) | PIORH_SEL_TX, PIORH(base));
outb(addrs_len & 0xff, PIOP(base)); /* byte count lsb */
outb((addrs_len >> 8), PIOP(base)); /* byte count msb */
- netdev_for_each_mc_addr(dmi, dev)
- outsb(PIOP(base), dmi->dmi_addr, dmi->dmi_addrlen);
+ netdev_for_each_mc_addr(ha, dev)
+ outsb(PIOP(base), ha->addr, dev->addr_len);
/* reset transmit DMA pointer */
hacr_write_slow(base, HACR_PWR_STAT | HACR_TX_DMA_RESET);
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 3482eec..5d9499b 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -92,10 +92,10 @@ static int wbsoft_get_stats(struct ieee80211_hw *hw,
return 0;
}
-static u64 wbsoft_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
- struct dev_addr_list *mc_list)
+static u64 wbsoft_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
{
- return mc_count;
+ return netdev_hw_addr_list_count(mc_list);
}
static void wbsoft_configure_filter(struct ieee80211_hw *dev,
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index 1db73eb..ca8c8b1 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -1050,7 +1050,7 @@ void wl_multicast( struct net_device *dev )
//;?seems reasonable that even an AP-only driver could afford this small additional footprint
int x;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
/*------------------------------------------------------------------------*/
@@ -1073,9 +1073,9 @@ void wl_multicast( struct net_device *dev )
DBG_PRINT( " mc_count: %d\n", netdev_mc_count(dev));
- netdev_for_each_mc_addr(mclist, dev)
- DBG_PRINT( " %s (%d)\n", DbgHwAddr(mclist->dmi_addr),
- mclist->dmi_addrlen );
+ netdev_for_each_mc_addr(ha, dev)
+ DBG_PRINT(" %s (%d)\n", DbgHwAddr(ha->addr),
+ dev->addr_len);
}
#endif /* DBG */
@@ -1120,9 +1120,9 @@ void wl_multicast( struct net_device *dev )
lp->ltvRecord.typ = CFG_GROUP_ADDR;
x = 0;
- netdev_for_each_mc_addr(mclist, dev)
+ netdev_for_each_mc_addr(ha, dev)
memcpy(&(lp->ltvRecord.u.u8[x++ * ETH_ALEN]),
- mclist->dmi_addr, ETH_ALEN);
+ ha->addr, ETH_ALEN);
DBG_PRINT( "Setting multicast list\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
} else {
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 25f01b5..e213d3f 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -94,19 +94,19 @@
} while (0)
#define uea_enters(usb_dev) \
- uea_vdbg(usb_dev, "entering %s\n", __func__)
+ uea_vdbg(usb_dev, "entering %s\n" , __func__)
#define uea_leaves(usb_dev) \
- uea_vdbg(usb_dev, "leaving %s\n", __func__)
+ uea_vdbg(usb_dev, "leaving %s\n" , __func__)
-#define uea_err(usb_dev, format,args...) \
- dev_err(&(usb_dev)->dev ,"[UEAGLE-ATM] " format , ##args)
+#define uea_err(usb_dev, format, args...) \
+ dev_err(&(usb_dev)->dev , "[UEAGLE-ATM] " format , ##args)
-#define uea_warn(usb_dev, format,args...) \
- dev_warn(&(usb_dev)->dev ,"[Ueagle-atm] " format, ##args)
+#define uea_warn(usb_dev, format, args...) \
+ dev_warn(&(usb_dev)->dev , "[Ueagle-atm] " format, ##args)
-#define uea_info(usb_dev, format,args...) \
- dev_info(&(usb_dev)->dev ,"[ueagle-atm] " format, ##args)
+#define uea_info(usb_dev, format, args...) \
+ dev_info(&(usb_dev)->dev , "[ueagle-atm] " format, ##args)
struct intr_pkt;
@@ -289,7 +289,7 @@ enum {
#define IS_ISDN(x) \
((x)->annex & ANNEXB)
-#define INS_TO_USBDEV(ins) ins->usb_dev
+#define INS_TO_USBDEV(ins) (ins->usb_dev)
#define GET_STATUS(data) \
((data >> 8) & 0xf)
@@ -304,7 +304,7 @@ enum {
* The FW_GET_BYTE() macro is provided only for consistency.
*/
-#define FW_GET_BYTE(p) *((__u8 *) (p))
+#define FW_GET_BYTE(p) (*((__u8 *) (p)))
#define FW_DIR "ueagle-atm/"
#define UEA_FW_NAME_MAX 30
@@ -315,7 +315,7 @@ enum {
#define ACK_TIMEOUT msecs_to_jiffies(3000)
-#define UEA_INTR_IFACE_NO 0
+#define UEA_INTR_IFACE_NO 0
#define UEA_US_IFACE_NO 1
#define UEA_DS_IFACE_NO 2
@@ -326,9 +326,9 @@ enum {
#define UEA_INTR_PIPE 0x04
#define UEA_ISO_DATA_PIPE 0x08
-#define UEA_E1_SET_BLOCK 0x0001
+#define UEA_E1_SET_BLOCK 0x0001
#define UEA_E4_SET_BLOCK 0x002c
-#define UEA_SET_MODE 0x0003
+#define UEA_SET_MODE 0x0003
#define UEA_SET_2183_DATA 0x0004
#define UEA_SET_TIMEOUT 0x0011
@@ -366,7 +366,7 @@ struct l1_code {
u8 string_header[E4_L1_STRING_HEADER];
u8 page_number_to_block_index[E4_MAX_PAGE_NUMBER];
struct block_index page_header[E4_NO_SWAPPAGE_HEADERS];
- u8 code [0];
+ u8 code[0];
} __attribute__ ((packed));
/* structures describing a block within a DSP page */
@@ -428,7 +428,8 @@ struct block_info_e4 {
#define E4_MODEMREADY 0x1
#define E1_MAKEFUNCTION(t, s) (((t) & 0xf) << 4 | ((s) & 0xf))
-#define E4_MAKEFUNCTION(t, st, s) (((t) & 0xf) << 8 | ((st) & 0xf) << 4 | ((s) & 0xf))
+#define E4_MAKEFUNCTION(t, st, s) (((t) & 0xf) << 8 | \
+ ((st) & 0xf) << 4 | ((s) & 0xf))
#define E1_MAKESA(a, b, c, d) \
(((c) & 0xff) << 24 | \
@@ -473,7 +474,7 @@ struct cmv_e4 {
__be16 wFunction;
__be16 wOffset;
__be16 wAddress;
- __be32 dwData [6];
+ __be32 dwData[6];
} __attribute__ ((packed));
/* structures representing swap information */
@@ -534,11 +535,13 @@ struct intr_pkt {
static struct usb_driver uea_driver;
static DEFINE_MUTEX(uea_mutex);
-static const char *chip_name[] = {"ADI930", "Eagle I", "Eagle II", "Eagle III", "Eagle IV"};
+static const char *chip_name[] = {"ADI930", "Eagle I", "Eagle II", "Eagle III",
+ "Eagle IV"};
static int modem_index;
static unsigned int debug;
-static unsigned int altsetting[NB_MODEM] = {[0 ... (NB_MODEM - 1)] = FASTEST_ISO_INTF};
+static unsigned int altsetting[NB_MODEM] = {
+ [0 ... (NB_MODEM - 1)] = FASTEST_ISO_INTF};
static int sync_wait[NB_MODEM];
static char *cmv_file[NB_MODEM];
static int annex[NB_MODEM];
@@ -555,7 +558,7 @@ MODULE_PARM_DESC(cmv_file,
"file name with configuration and management variables");
module_param_array(annex, uint, NULL, 0644);
MODULE_PARM_DESC(annex,
- "manually set annex a/b (0=auto, 1=annex a, 2=annex b)");
+ "manually set annex a/b (0=auto, 1=annex a, 2=annex b)");
#define uea_wait(sc, cond, timeo) \
({ \
@@ -602,7 +605,8 @@ static int uea_send_modem_cmd(struct usb_device *usb,
return (ret == size) ? 0 : -EIO;
}
-static void uea_upload_pre_firmware(const struct firmware *fw_entry, void *context)
+static void uea_upload_pre_firmware(const struct firmware *fw_entry,
+ void *context)
{
struct usb_device *usb = context;
const u8 *pfw;
@@ -707,7 +711,8 @@ static int uea_load_firmware(struct usb_device *usb, unsigned int ver)
}
ret = request_firmware_nowait(THIS_MODULE, 1, fw_name, &usb->dev,
- GFP_KERNEL, usb, uea_upload_pre_firmware);
+ GFP_KERNEL, usb,
+ uea_upload_pre_firmware);
if (ret)
uea_err(usb, "firmware %s is not available\n", fw_name);
else
@@ -876,7 +881,7 @@ static int request_dsp(struct uea_softc *sc)
if (ret < 0) {
uea_err(INS_TO_USBDEV(sc),
"requesting firmware %s failed with error %d\n",
- dsp_name, ret);
+ dsp_name, ret);
return ret;
}
@@ -994,14 +999,17 @@ static void __uea_load_page_e4(struct uea_softc *sc, u8 pageno, int boot)
blockidx = &p->page_header[blockno];
blocksize = E4_PAGE_BYTES(blockidx->PageSize);
- blockoffset = sc->dsp_firm->data + le32_to_cpu(blockidx->PageOffset);
+ blockoffset = sc->dsp_firm->data + le32_to_cpu(
+ blockidx->PageOffset);
bi.dwSize = cpu_to_be32(blocksize);
bi.dwAddress = cpu_to_be32(le32_to_cpu(blockidx->PageAddress));
uea_dbg(INS_TO_USBDEV(sc),
- "sending block %u for DSP page %u size %u address %x\n",
- blockno, pageno, blocksize, le32_to_cpu(blockidx->PageAddress));
+ "sending block %u for DSP page "
+ "%u size %u address %x\n",
+ blockno, pageno, blocksize,
+ le32_to_cpu(blockidx->PageAddress));
/* send block info through the IDMA pipe */
if (uea_idma_write(sc, &bi, E4_BLOCK_INFO_SIZE))
@@ -1042,7 +1050,8 @@ static void uea_load_page_e4(struct work_struct *work)
p = (struct l1_code *) sc->dsp_firm->data;
if (pageno >= le16_to_cpu(p->page_header[0].PageNumber)) {
- uea_err(INS_TO_USBDEV(sc), "invalid DSP page %u requested\n", pageno);
+ uea_err(INS_TO_USBDEV(sc), "invalid DSP "
+ "page %u requested\n", pageno);
return;
}
@@ -1059,7 +1068,7 @@ static void uea_load_page_e4(struct work_struct *work)
__uea_load_page_e4(sc, i, 1);
}
- uea_dbg(INS_TO_USBDEV(sc),"sending start bi\n");
+ uea_dbg(INS_TO_USBDEV(sc) , "sending start bi\n");
bi.wHdr = cpu_to_be16(UEA_BIHDR);
bi.bBootPage = 0;
@@ -1139,8 +1148,10 @@ static int uea_cmv_e1(struct uea_softc *sc,
uea_enters(INS_TO_USBDEV(sc));
uea_vdbg(INS_TO_USBDEV(sc), "Function : %d-%d, Address : %c%c%c%c, "
"offset : 0x%04x, data : 0x%08x\n",
- E1_FUNCTION_TYPE(function), E1_FUNCTION_SUBTYPE(function),
- E1_GETSA1(address), E1_GETSA2(address), E1_GETSA3(address),
+ E1_FUNCTION_TYPE(function),
+ E1_FUNCTION_SUBTYPE(function),
+ E1_GETSA1(address), E1_GETSA2(address),
+ E1_GETSA3(address),
E1_GETSA4(address), offset, data);
/* we send a request, but we expect a reply */
@@ -1157,7 +1168,8 @@ static int uea_cmv_e1(struct uea_softc *sc,
cmv.wOffsetAddress = cpu_to_le16(offset);
put_unaligned_le32(data >> 16 | data << 16, &cmv.dwData);
- ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv);
+ ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START,
+ sizeof(cmv), &cmv);
if (ret < 0)
return ret;
ret = wait_cmv_ack(sc);
@@ -1191,7 +1203,8 @@ static int uea_cmv_e4(struct uea_softc *sc,
cmv.wOffset = cpu_to_be16(offset);
cmv.dwData[0] = cpu_to_be32(data);
- ret = uea_request(sc, UEA_E4_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv);
+ ret = uea_request(sc, UEA_E4_SET_BLOCK, UEA_MPTX_START,
+ sizeof(cmv), &cmv);
if (ret < 0)
return ret;
ret = wait_cmv_ack(sc);
@@ -1208,7 +1221,7 @@ static inline int uea_read_cmv_e1(struct uea_softc *sc,
uea_err(INS_TO_USBDEV(sc),
"reading cmv failed with error %d\n", ret);
else
- *data = sc->data;
+ *data = sc->data;
return ret;
}
@@ -1216,13 +1229,14 @@ static inline int uea_read_cmv_e1(struct uea_softc *sc,
static inline int uea_read_cmv_e4(struct uea_softc *sc,
u8 size, u16 group, u16 address, u16 offset, u32 *data)
{
- int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS, E4_REQUESTREAD, size),
+ int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS,
+ E4_REQUESTREAD, size),
group, address, offset, 0);
if (ret < 0)
uea_err(INS_TO_USBDEV(sc),
"reading cmv failed with error %d\n", ret);
else {
- *data = sc->data;
+ *data = sc->data;
/* size is in 16-bit word quantities */
if (size > 2)
*(data + 1) = sc->data1;
@@ -1245,7 +1259,8 @@ static inline int uea_write_cmv_e1(struct uea_softc *sc,
static inline int uea_write_cmv_e4(struct uea_softc *sc,
u8 size, u16 group, u16 address, u16 offset, u32 data)
{
- int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS, E4_REQUESTWRITE, size),
+ int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS,
+ E4_REQUESTWRITE, size),
group, address, offset, data);
if (ret < 0)
uea_err(INS_TO_USBDEV(sc),
@@ -1442,27 +1457,29 @@ static int uea_stat_e4(struct uea_softc *sc)
return ret;
switch (sc->stats.phy.state) {
- case 0x0: /* not yet synchronized */
- case 0x1:
- case 0x3:
- case 0x4:
- uea_dbg(INS_TO_USBDEV(sc), "modem not yet synchronized\n");
- return 0;
- case 0x5: /* initialization */
- case 0x6:
- case 0x9:
- case 0xa:
- uea_dbg(INS_TO_USBDEV(sc), "modem initializing\n");
- return 0;
- case 0x2: /* fail ... */
- uea_info(INS_TO_USBDEV(sc), "modem synchronization failed"
- " (may be try other cmv/dsp)\n");
- return -EAGAIN;
- case 0x7: /* operational */
- break;
- default:
- uea_warn(INS_TO_USBDEV(sc), "unknown state: %x\n", sc->stats.phy.state);
- return 0;
+ case 0x0: /* not yet synchronized */
+ case 0x1:
+ case 0x3:
+ case 0x4:
+ uea_dbg(INS_TO_USBDEV(sc), "modem not yet "
+ "synchronized\n");
+ return 0;
+ case 0x5: /* initialization */
+ case 0x6:
+ case 0x9:
+ case 0xa:
+ uea_dbg(INS_TO_USBDEV(sc), "modem initializing\n");
+ return 0;
+ case 0x2: /* fail ... */
+ uea_info(INS_TO_USBDEV(sc), "modem synchronization "
+ "failed (may be try other cmv/dsp)\n");
+ return -EAGAIN;
+ case 0x7: /* operational */
+ break;
+ default:
+ uea_warn(INS_TO_USBDEV(sc), "unknown state: %x\n",
+ sc->stats.phy.state);
+ return 0;
}
if (data != 7) {
@@ -1502,9 +1519,9 @@ static int uea_stat_e4(struct uea_softc *sc)
if (sc->stats.phy.flags) {
uea_dbg(INS_TO_USBDEV(sc), "Stat flag = 0x%x\n",
sc->stats.phy.flags);
- if (sc->stats.phy.flags & 1) //delineation LOSS
+ if (sc->stats.phy.flags & 1) /* delineation LOSS */
return -EAGAIN;
- if (sc->stats.phy.flags & 0x4000) //Reset Flag
+ if (sc->stats.phy.flags & 0x4000) /* Reset Flag */
return -EAGAIN;
return 0;
}
@@ -1618,7 +1635,8 @@ static int request_cmvs(struct uea_softc *sc,
if (ret < 0) {
/* if caller can handle old version, try to provide it */
if (*ver == 1) {
- uea_warn(INS_TO_USBDEV(sc), "requesting firmware %s failed, "
+ uea_warn(INS_TO_USBDEV(sc), "requesting "
+ "firmware %s failed, "
"try to get older cmvs\n", cmv_name);
return request_cmvs_old(sc, cmvs, fw);
}
@@ -1632,8 +1650,8 @@ static int request_cmvs(struct uea_softc *sc,
data = (u8 *) (*fw)->data;
if (size < 4 || strncmp(data, "cmv2", 4) != 0) {
if (*ver == 1) {
- uea_warn(INS_TO_USBDEV(sc), "firmware %s is corrupted, "
- "try to get older cmvs\n", cmv_name);
+ uea_warn(INS_TO_USBDEV(sc), "firmware %s is corrupted,"
+ " try to get older cmvs\n", cmv_name);
release_firmware(*fw);
return request_cmvs_old(sc, cmvs, fw);
}
@@ -1670,7 +1688,7 @@ static int uea_send_cmvs_e1(struct uea_softc *sc)
int i, ret, len;
void *cmvs_ptr;
const struct firmware *cmvs_fw;
- int ver = 1; // we can handle v1 cmv firmware version;
+ int ver = 1; /* we can handle v1 cmv firmware version; */
/* Enter in R-IDLE (cmv) until instructed otherwise */
ret = uea_write_cmv_e1(sc, E1_SA_CNTL, 0, 1);
@@ -1685,7 +1703,7 @@ static int uea_send_cmvs_e1(struct uea_softc *sc)
sc->stats.phy.firmid);
/* get options */
- ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver);
+ ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver);
if (ret < 0)
return ret;
@@ -1697,9 +1715,10 @@ static int uea_send_cmvs_e1(struct uea_softc *sc)
"please update your firmware\n");
for (i = 0; i < len; i++) {
- ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v1[i].address),
- get_unaligned_le16(&cmvs_v1[i].offset),
- get_unaligned_le32(&cmvs_v1[i].data));
+ ret = uea_write_cmv_e1(sc,
+ get_unaligned_le32(&cmvs_v1[i].address),
+ get_unaligned_le16(&cmvs_v1[i].offset),
+ get_unaligned_le32(&cmvs_v1[i].data));
if (ret < 0)
goto out;
}
@@ -1707,9 +1726,10 @@ static int uea_send_cmvs_e1(struct uea_softc *sc)
struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr;
for (i = 0; i < len; i++) {
- ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v2[i].address),
- (u16) get_unaligned_le32(&cmvs_v2[i].offset),
- get_unaligned_le32(&cmvs_v2[i].data));
+ ret = uea_write_cmv_e1(sc,
+ get_unaligned_le32(&cmvs_v2[i].address),
+ (u16) get_unaligned_le32(&cmvs_v2[i].offset),
+ get_unaligned_le32(&cmvs_v2[i].data));
if (ret < 0)
goto out;
}
@@ -1722,7 +1742,8 @@ static int uea_send_cmvs_e1(struct uea_softc *sc)
/* Enter in R-ACT-REQ */
ret = uea_write_cmv_e1(sc, E1_SA_CNTL, 0, 2);
uea_vdbg(INS_TO_USBDEV(sc), "Entering in R-ACT-REQ state\n");
- uea_info(INS_TO_USBDEV(sc), "modem started, waiting synchronization...\n");
+ uea_info(INS_TO_USBDEV(sc), "modem started, waiting "
+ "synchronization...\n");
out:
release_firmware(cmvs_fw);
return ret;
@@ -1733,7 +1754,7 @@ static int uea_send_cmvs_e4(struct uea_softc *sc)
int i, ret, len;
void *cmvs_ptr;
const struct firmware *cmvs_fw;
- int ver = 2; // we can only handle v2 cmv firmware version;
+ int ver = 2; /* we can only handle v2 cmv firmware version; */
/* Enter in R-IDLE (cmv) until instructed otherwise */
ret = uea_write_cmv_e4(sc, 1, E4_SA_CNTL, 0, 0, 1);
@@ -1750,7 +1771,7 @@ static int uea_send_cmvs_e4(struct uea_softc *sc)
/* get options */
- ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver);
+ ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver);
if (ret < 0)
return ret;
@@ -1760,10 +1781,10 @@ static int uea_send_cmvs_e4(struct uea_softc *sc)
for (i = 0; i < len; i++) {
ret = uea_write_cmv_e4(sc, 1,
- get_unaligned_le32(&cmvs_v2[i].group),
- get_unaligned_le32(&cmvs_v2[i].address),
- get_unaligned_le32(&cmvs_v2[i].offset),
- get_unaligned_le32(&cmvs_v2[i].data));
+ get_unaligned_le32(&cmvs_v2[i].group),
+ get_unaligned_le32(&cmvs_v2[i].address),
+ get_unaligned_le32(&cmvs_v2[i].offset),
+ get_unaligned_le32(&cmvs_v2[i].data));
if (ret < 0)
goto out;
}
@@ -1776,7 +1797,8 @@ static int uea_send_cmvs_e4(struct uea_softc *sc)
/* Enter in R-ACT-REQ */
ret = uea_write_cmv_e4(sc, 1, E4_SA_CNTL, 0, 0, 2);
uea_vdbg(INS_TO_USBDEV(sc), "Entering in R-ACT-REQ state\n");
- uea_info(INS_TO_USBDEV(sc), "modem started, waiting synchronization...\n");
+ uea_info(INS_TO_USBDEV(sc), "modem started, waiting "
+ "synchronization...\n");
out:
release_firmware(cmvs_fw);
return ret;
@@ -1812,7 +1834,7 @@ static int uea_start_reset(struct uea_softc *sc)
uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_ON, 0, NULL);
uea_request(sc, UEA_SET_MODE, UEA_BOOT_IDMA, 0, NULL);
- /* enter reset mode */
+ /* enter reset mode */
uea_request(sc, UEA_SET_MODE, UEA_START_RESET, 0, NULL);
/* original driver use 200ms, but windows driver use 100ms */
@@ -1824,7 +1846,7 @@ static int uea_start_reset(struct uea_softc *sc)
uea_request(sc, UEA_SET_MODE, UEA_END_RESET, 0, NULL);
if (UEA_CHIP_VERSION(sc) != EAGLE_IV) {
- /* clear tx and rx mailboxes */
+ /* clear tx and rx mailboxes */
uea_request(sc, UEA_SET_2183_DATA, UEA_MPTX_MAILBOX, 2, &zero);
uea_request(sc, UEA_SET_2183_DATA, UEA_MPRX_MAILBOX, 2, &zero);
uea_request(sc, UEA_SET_2183_DATA, UEA_SWAP_MAILBOX, 2, &zero);
@@ -1835,9 +1857,11 @@ static int uea_start_reset(struct uea_softc *sc)
return ret;
if (UEA_CHIP_VERSION(sc) == EAGLE_IV)
- sc->cmv_dsc.e4.function = E4_MAKEFUNCTION(E4_ADSLDIRECTIVE, E4_MODEMREADY, 1);
+ sc->cmv_dsc.e4.function = E4_MAKEFUNCTION(E4_ADSLDIRECTIVE,
+ E4_MODEMREADY, 1);
else
- sc->cmv_dsc.e1.function = E1_MAKEFUNCTION(E1_ADSLDIRECTIVE, E1_MODEMREADY);
+ sc->cmv_dsc.e1.function = E1_MAKEFUNCTION(E1_ADSLDIRECTIVE,
+ E1_MODEMREADY);
/* demask interrupt */
sc->booting = 0;
@@ -1937,7 +1961,8 @@ static int load_XILINX_firmware(struct uea_softc *sc)
value = 0;
ret = uea_send_modem_cmd(sc->usb_dev, 0xe, 1, &value);
if (ret < 0)
- uea_err(sc->usb_dev, "elsa de-assert failed with error %d\n", ret);
+ uea_err(sc->usb_dev, "elsa de-assert failed with error"
+ " %d\n", ret);
err1:
release_firmware(fw_entry);
@@ -1966,13 +1991,15 @@ static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr)
if (UEA_CHIP_VERSION(sc) == ADI930
&& cmv->bFunction == E1_MAKEFUNCTION(2, 2)) {
cmv->wIndex = cpu_to_le16(dsc->idx);
- put_unaligned_le32(dsc->address, &cmv->dwSymbolicAddress);
+ put_unaligned_le32(dsc->address,
+ &cmv->dwSymbolicAddress);
cmv->wOffsetAddress = cpu_to_le16(dsc->offset);
} else
goto bad2;
}
- if (cmv->bFunction == E1_MAKEFUNCTION(E1_ADSLDIRECTIVE, E1_MODEMREADY)) {
+ if (cmv->bFunction == E1_MAKEFUNCTION(E1_ADSLDIRECTIVE,
+ E1_MODEMREADY)) {
wake_up_cmv_ack(sc);
uea_leaves(INS_TO_USBDEV(sc));
return;
@@ -2021,7 +2048,8 @@ static void uea_dispatch_cmv_e4(struct uea_softc *sc, struct intr_pkt *intr)
if (be16_to_cpu(cmv->wFunction) != dsc->function)
goto bad2;
- if (be16_to_cpu(cmv->wFunction) == E4_MAKEFUNCTION(E4_ADSLDIRECTIVE, E4_MODEMREADY, 1)) {
+ if (be16_to_cpu(cmv->wFunction) == E4_MAKEFUNCTION(E4_ADSLDIRECTIVE,
+ E4_MODEMREADY, 1)) {
wake_up_cmv_ack(sc);
uea_leaves(INS_TO_USBDEV(sc));
return;
@@ -2048,14 +2076,16 @@ bad2:
return;
}
-static void uea_schedule_load_page_e1(struct uea_softc *sc, struct intr_pkt *intr)
+static void uea_schedule_load_page_e1(struct uea_softc *sc,
+ struct intr_pkt *intr)
{
sc->pageno = intr->e1_bSwapPageNo;
sc->ovl = intr->e1_bOvl >> 4 | intr->e1_bOvl << 4;
queue_work(sc->work_q, &sc->task);
}
-static void uea_schedule_load_page_e4(struct uea_softc *sc, struct intr_pkt *intr)
+static void uea_schedule_load_page_e4(struct uea_softc *sc,
+ struct intr_pkt *intr)
{
sc->pageno = intr->e4_bSwapPageNo;
queue_work(sc->work_q, &sc->task);
@@ -2263,8 +2293,8 @@ out:
static DEVICE_ATTR(stat_status, S_IWUGO | S_IRUGO, read_status, reboot);
-static ssize_t read_human_status(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t read_human_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int ret = -ENODEV;
int modem_state;
@@ -2289,7 +2319,7 @@ static ssize_t read_human_status(struct device *dev, struct device_attribute *at
case 0xa:
modem_state = 1;
break;
- case 0x7: /* operational */
+ case 0x7: /* operational */
modem_state = 2;
break;
case 0x2: /* fail ... */
@@ -2324,7 +2354,8 @@ out:
return ret;
}
-static DEVICE_ATTR(stat_human_status, S_IWUGO | S_IRUGO, read_human_status, NULL);
+static DEVICE_ATTR(stat_human_status, S_IWUGO | S_IRUGO,
+ read_human_status, NULL);
static ssize_t read_delin(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2358,25 +2389,25 @@ out:
static DEVICE_ATTR(stat_delin, S_IWUGO | S_IRUGO, read_delin, NULL);
-#define UEA_ATTR(name, reset) \
+#define UEA_ATTR(name, reset) \
\
-static ssize_t read_##name(struct device *dev, \
+static ssize_t read_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
-{ \
- int ret = -ENODEV; \
- struct uea_softc *sc; \
- \
- mutex_lock(&uea_mutex); \
+{ \
+ int ret = -ENODEV; \
+ struct uea_softc *sc; \
+ \
+ mutex_lock(&uea_mutex); \
sc = dev_to_uea(dev); \
- if (!sc) \
- goto out; \
+ if (!sc) \
+ goto out; \
ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.name); \
if (reset) \
sc->stats.phy.name = 0; \
-out: \
- mutex_unlock(&uea_mutex); \
- return ret; \
-} \
+out: \
+ mutex_unlock(&uea_mutex); \
+ return ret; \
+} \
\
static DEVICE_ATTR(stat_##name, S_IRUGO, read_##name, NULL)
@@ -2527,12 +2558,14 @@ static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf,
else if (sc->driver_info & AUTO_ANNEX_B)
sc->annex = ANNEXB;
else
- sc->annex = (le16_to_cpu(sc->usb_dev->descriptor.bcdDevice) & 0x80)?ANNEXB:ANNEXA;
+ sc->annex = (le16_to_cpu
+ (sc->usb_dev->descriptor.bcdDevice) & 0x80) ? ANNEXB : ANNEXA;
alt = altsetting[sc->modem_index];
/* ADI930 don't support iso */
if (UEA_CHIP_VERSION(id) != ADI930 && alt > 0) {
- if (alt <= 8 && usb_set_interface(usb, UEA_DS_IFACE_NO, alt) == 0) {
+ if (alt <= 8 &&
+ usb_set_interface(usb, UEA_DS_IFACE_NO, alt) == 0) {
uea_dbg(usb, "set alternate %u for 2 interface\n", alt);
uea_info(usb, "using iso mode\n");
usbatm->flags |= UDSL_USE_ISOC | UDSL_IGNORE_EILSEQ;
@@ -2621,40 +2654,74 @@ static void uea_disconnect(struct usb_interface *intf)
* List of supported VID/PID
*/
static const struct usb_device_id uea_ids[] = {
- {USB_DEVICE(ANALOG_VID, ADI930_PID_PREFIRM), .driver_info = ADI930 | PREFIRM},
- {USB_DEVICE(ANALOG_VID, ADI930_PID_PSTFIRM), .driver_info = ADI930 | PSTFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PREFIRM), .driver_info = EAGLE_III | PREFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PSTFIRM), .driver_info = EAGLE_III | PSTFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PREFIRM), .driver_info = EAGLE_IV | PREFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PSTFIRM), .driver_info = EAGLE_IV | PSTFIRM},
- {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM},
- {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
- {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM},
- {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
- {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM},
- {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_A},
- {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM},
- {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_B},
- {USB_DEVICE(ELSA_VID, ELSA_PID_PREFIRM), .driver_info = ADI930 | PREFIRM},
- {USB_DEVICE(ELSA_VID, ELSA_PID_PSTFIRM), .driver_info = ADI930 | PSTFIRM},
- {USB_DEVICE(ELSA_VID, ELSA_PID_A_PREFIRM), .driver_info = ADI930 | PREFIRM},
- {USB_DEVICE(ELSA_VID, ELSA_PID_A_PSTFIRM), .driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_A},
- {USB_DEVICE(ELSA_VID, ELSA_PID_B_PREFIRM), .driver_info = ADI930 | PREFIRM},
- {USB_DEVICE(ELSA_VID, ELSA_PID_B_PSTFIRM), .driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_B},
- {USB_DEVICE(USR_VID, MILLER_A_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM},
- {USB_DEVICE(USR_VID, MILLER_A_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
- {USB_DEVICE(USR_VID, MILLER_B_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM},
- {USB_DEVICE(USR_VID, MILLER_B_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
- {USB_DEVICE(USR_VID, HEINEKEN_A_PID_PREFIRM),.driver_info = EAGLE_I | PREFIRM},
- {USB_DEVICE(USR_VID, HEINEKEN_A_PID_PSTFIRM),.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
- {USB_DEVICE(USR_VID, HEINEKEN_B_PID_PREFIRM),.driver_info = EAGLE_I | PREFIRM},
- {USB_DEVICE(USR_VID, HEINEKEN_B_PID_PSTFIRM),.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
+ {USB_DEVICE(ANALOG_VID, ADI930_PID_PREFIRM),
+ .driver_info = ADI930 | PREFIRM},
+ {USB_DEVICE(ANALOG_VID, ADI930_PID_PSTFIRM),
+ .driver_info = ADI930 | PSTFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PREFIRM),
+ .driver_info = EAGLE_I | PREFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PSTFIRM),
+ .driver_info = EAGLE_I | PSTFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PREFIRM),
+ .driver_info = EAGLE_II | PREFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PSTFIRM),
+ .driver_info = EAGLE_II | PSTFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PREFIRM),
+ .driver_info = EAGLE_II | PREFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PSTFIRM),
+ .driver_info = EAGLE_II | PSTFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PREFIRM),
+ .driver_info = EAGLE_III | PREFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PSTFIRM),
+ .driver_info = EAGLE_III | PSTFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PREFIRM),
+ .driver_info = EAGLE_IV | PREFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PSTFIRM),
+ .driver_info = EAGLE_IV | PSTFIRM},
+ {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PREFIRM),
+ .driver_info = EAGLE_I | PREFIRM},
+ {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PSTFIRM),
+ .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
+ {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PREFIRM),
+ .driver_info = EAGLE_I | PREFIRM},
+ {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PSTFIRM),
+ .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
+ {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PREFIRM),
+ .driver_info = EAGLE_II | PREFIRM},
+ {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PSTFIRM),
+ .driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_A},
+ {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PREFIRM),
+ .driver_info = EAGLE_II | PREFIRM},
+ {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PSTFIRM),
+ .driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_B},
+ {USB_DEVICE(ELSA_VID, ELSA_PID_PREFIRM),
+ .driver_info = ADI930 | PREFIRM},
+ {USB_DEVICE(ELSA_VID, ELSA_PID_PSTFIRM),
+ .driver_info = ADI930 | PSTFIRM},
+ {USB_DEVICE(ELSA_VID, ELSA_PID_A_PREFIRM),
+ .driver_info = ADI930 | PREFIRM},
+ {USB_DEVICE(ELSA_VID, ELSA_PID_A_PSTFIRM),
+ .driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_A},
+ {USB_DEVICE(ELSA_VID, ELSA_PID_B_PREFIRM),
+ .driver_info = ADI930 | PREFIRM},
+ {USB_DEVICE(ELSA_VID, ELSA_PID_B_PSTFIRM),
+ .driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_B},
+ {USB_DEVICE(USR_VID, MILLER_A_PID_PREFIRM),
+ .driver_info = EAGLE_I | PREFIRM},
+ {USB_DEVICE(USR_VID, MILLER_A_PID_PSTFIRM),
+ .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
+ {USB_DEVICE(USR_VID, MILLER_B_PID_PREFIRM),
+ .driver_info = EAGLE_I | PREFIRM},
+ {USB_DEVICE(USR_VID, MILLER_B_PID_PSTFIRM),
+ .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
+ {USB_DEVICE(USR_VID, HEINEKEN_A_PID_PREFIRM),
+ .driver_info = EAGLE_I | PREFIRM},
+ {USB_DEVICE(USR_VID, HEINEKEN_A_PID_PSTFIRM),
+ .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
+ {USB_DEVICE(USR_VID, HEINEKEN_B_PID_PREFIRM),
+ .driver_info = EAGLE_I | PREFIRM},
+ {USB_DEVICE(USR_VID, HEINEKEN_B_PID_PSTFIRM),
+ .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
{}
};
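
The reflowed uea_ids[] table above keeps one USB_DEVICE() entry per line with .driver_info on the continuation line. A minimal device table in that style, with hypothetical vendor and product IDs, looks like:

	#include <linux/usb.h>

	#define MY_VID	0x1234	/* hypothetical vendor ID */
	#define MY_PID	0x5678	/* hypothetical product ID */

	static const struct usb_device_id my_ids[] = {
		{USB_DEVICE(MY_VID, MY_PID),
		 .driver_info = 0},
		{}			/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, my_ids);
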
diff --git a/drivers/usb/c67x00/c67x00-hcd.h b/drivers/usb/c67x00/c67x00-hcd.h
index e8c6d94..74e4462 100644
--- a/drivers/usb/c67x00/c67x00-hcd.h
+++ b/drivers/usb/c67x00/c67x00-hcd.h
@@ -28,7 +28,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/usb.h>
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "c67x00.h"
/*
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 5e1a253..0c2f14f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -892,7 +892,7 @@ static void acm_write_buffers_free(struct acm *acm)
struct usb_device *usb_dev = interface_to_usbdev(acm->control);
for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++)
- usb_buffer_free(usb_dev, acm->writesize, wb->buf, wb->dmah);
+ usb_free_coherent(usb_dev, acm->writesize, wb->buf, wb->dmah);
}
static void acm_read_buffers_free(struct acm *acm)
@@ -901,8 +901,8 @@ static void acm_read_buffers_free(struct acm *acm)
int i, n = acm->rx_buflimit;
for (i = 0; i < n; i++)
- usb_buffer_free(usb_dev, acm->readsize,
- acm->rb[i].base, acm->rb[i].dma);
+ usb_free_coherent(usb_dev, acm->readsize,
+ acm->rb[i].base, acm->rb[i].dma);
}
/* Little helper: write buffers allocate */
@@ -912,13 +912,13 @@ static int acm_write_buffers_alloc(struct acm *acm)
struct acm_wb *wb;
for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) {
- wb->buf = usb_buffer_alloc(acm->dev, acm->writesize, GFP_KERNEL,
+ wb->buf = usb_alloc_coherent(acm->dev, acm->writesize, GFP_KERNEL,
&wb->dmah);
if (!wb->buf) {
while (i != 0) {
--i;
--wb;
- usb_buffer_free(acm->dev, acm->writesize,
+ usb_free_coherent(acm->dev, acm->writesize,
wb->buf, wb->dmah);
}
return -ENOMEM;
@@ -1177,7 +1177,7 @@ made_compressed_probe:
tty_port_init(&acm->port);
acm->port.ops = &acm_port_ops;
- buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
+ buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
if (!buf) {
dev_dbg(&intf->dev, "out of memory (ctrl buffer alloc)\n");
goto alloc_fail2;
@@ -1210,11 +1210,11 @@ made_compressed_probe:
for (i = 0; i < num_rx_buf; i++) {
struct acm_rb *rb = &(acm->rb[i]);
- rb->base = usb_buffer_alloc(acm->dev, readsize,
+ rb->base = usb_alloc_coherent(acm->dev, readsize,
GFP_KERNEL, &rb->dma);
if (!rb->base) {
dev_dbg(&intf->dev,
- "out of memory (read bufs usb_buffer_alloc)\n");
+ "out of memory (read bufs usb_alloc_coherent)\n");
goto alloc_fail7;
}
}
@@ -1306,7 +1306,7 @@ alloc_fail7:
alloc_fail5:
acm_write_buffers_free(acm);
alloc_fail4:
- usb_buffer_free(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
+ usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
alloc_fail2:
kfree(acm);
alloc_fail:
@@ -1356,8 +1356,8 @@ static void acm_disconnect(struct usb_interface *intf)
stop_data_traffic(acm);
acm_write_buffers_free(acm);
- usb_buffer_free(usb_dev, acm->ctrlsize, acm->ctrl_buffer,
- acm->ctrl_dma);
+ usb_free_coherent(usb_dev, acm->ctrlsize, acm->ctrl_buffer,
+ acm->ctrl_dma);
acm_read_buffers_free(acm);
if (!acm->combined_interfaces)
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 4a8e87e..5eeb570 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -124,8 +124,8 @@ struct acm {
unsigned char clocal; /* termios CLOCAL */
unsigned int ctrl_caps; /* control capabilities from the class specific header */
unsigned int susp_count; /* number of suspended interfaces */
- int combined_interfaces:1; /* control and data collapsed */
- int is_int_ep:1; /* interrupt endpoints contrary to spec used */
+ unsigned int combined_interfaces:1; /* control and data collapsed */
+ unsigned int is_int_ep:1; /* interrupt endpoints contrary to spec used */
u8 bInterval;
struct acm_wb *delayed_wb; /* write queued for a device about to be woken */
};
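
The cdc-acm.h hunk above switches two 1-bit flags from int to unsigned int: a signed 1-bit bitfield can only represent 0 and -1, so comparisons against 1 misbehave. A tiny stand-alone illustration (the exact value stored in the signed field is implementation-defined, -1 on common ABIs):

	#include <stdio.h>

	struct flags {
		int		s:1;	/* signed: values are 0 and -1 */
		unsigned int	u:1;	/* unsigned: values are 0 and 1 */
	};

	int main(void)
	{
		struct flags f = { .s = 1, .u = 1 };

		printf("%d %u\n", f.s, f.u);	/* typically prints "-1 1" */
		return 0;
	}
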
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 189141c..094c76b 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -276,14 +276,14 @@ static void free_urbs(struct wdm_device *desc)
static void cleanup(struct wdm_device *desc)
{
- usb_buffer_free(interface_to_usbdev(desc->intf),
- desc->wMaxPacketSize,
- desc->sbuf,
- desc->validity->transfer_dma);
- usb_buffer_free(interface_to_usbdev(desc->intf),
- desc->wMaxCommand,
- desc->inbuf,
- desc->response->transfer_dma);
+ usb_free_coherent(interface_to_usbdev(desc->intf),
+ desc->wMaxPacketSize,
+ desc->sbuf,
+ desc->validity->transfer_dma);
+ usb_free_coherent(interface_to_usbdev(desc->intf),
+ desc->wMaxCommand,
+ desc->inbuf,
+ desc->response->transfer_dma);
kfree(desc->orq);
kfree(desc->irq);
kfree(desc->ubuf);
@@ -705,17 +705,17 @@ next_desc:
if (!desc->ubuf)
goto err;
- desc->sbuf = usb_buffer_alloc(interface_to_usbdev(intf),
+ desc->sbuf = usb_alloc_coherent(interface_to_usbdev(intf),
desc->wMaxPacketSize,
GFP_KERNEL,
&desc->validity->transfer_dma);
if (!desc->sbuf)
goto err;
- desc->inbuf = usb_buffer_alloc(interface_to_usbdev(intf),
- desc->bMaxPacketSize0,
- GFP_KERNEL,
- &desc->response->transfer_dma);
+ desc->inbuf = usb_alloc_coherent(interface_to_usbdev(intf),
+ desc->bMaxPacketSize0,
+ GFP_KERNEL,
+ &desc->response->transfer_dma);
if (!desc->inbuf)
goto err2;
@@ -742,15 +742,15 @@ out:
return rv;
err3:
usb_set_intfdata(intf, NULL);
- usb_buffer_free(interface_to_usbdev(desc->intf),
- desc->bMaxPacketSize0,
+ usb_free_coherent(interface_to_usbdev(desc->intf),
+ desc->bMaxPacketSize0,
desc->inbuf,
desc->response->transfer_dma);
err2:
- usb_buffer_free(interface_to_usbdev(desc->intf),
- desc->wMaxPacketSize,
- desc->sbuf,
- desc->validity->transfer_dma);
+ usb_free_coherent(interface_to_usbdev(desc->intf),
+ desc->wMaxPacketSize,
+ desc->sbuf,
+ desc->validity->transfer_dma);
err:
free_urbs(desc);
kfree(desc->ubuf);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 93b5f85..2250095 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -27,7 +27,7 @@
* v0.11 - add proto_bias option (Pete Zaitcev)
* v0.12 - add hpoj.sourceforge.net ioctls (David Paschal)
* v0.13 - alloc space for statusbuf (<status> not on stack);
- * use usb_buffer_alloc() for read buf & write buf;
+ * use usb_alloc_coherent() for read buf & write buf;
* none - Maintained in Linux kernel after v0.13
*/
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 3ba2fff..2c69654 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -14,7 +14,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/usb.h>
-#include "hcd.h"
+#include <linux/usb/hcd.h>
/*
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 0d3af6a..83126b0 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -1,12 +1,14 @@
#include <linux/usb.h>
#include <linux/usb/ch9.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/quirks.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <asm/byteorder.h>
#include "usb.h"
-#include "hcd.h"
+
#define USB_MAXALTSETTING 128 /* Hard limit */
#define USB_MAXENDPOINTS 30 /* Hard limit */
@@ -19,32 +21,6 @@ static inline const char *plural(int n)
return (n == 1 ? "" : "s");
}
-/* FIXME: this is a kludge */
-static int find_next_descriptor_more(unsigned char *buffer, int size,
- int dt1, int dt2, int dt3, int *num_skipped)
-{
- struct usb_descriptor_header *h;
- int n = 0;
- unsigned char *buffer0 = buffer;
-
- /* Find the next descriptor of type dt1 or dt2 or dt3 */
- while (size > 0) {
- h = (struct usb_descriptor_header *) buffer;
- if (h->bDescriptorType == dt1 || h->bDescriptorType == dt2 ||
- h->bDescriptorType == dt3)
- break;
- buffer += h->bLength;
- size -= h->bLength;
- ++n;
- }
-
- /* Store the number of descriptors skipped and return the
- * number of bytes skipped */
- if (num_skipped)
- *num_skipped = n;
- return buffer - buffer0;
-}
-
static int find_next_descriptor(unsigned char *buffer, int size,
int dt1, int dt2, int *num_skipped)
{
@@ -69,47 +45,41 @@ static int find_next_descriptor(unsigned char *buffer, int size,
return buffer - buffer0;
}
-static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
int inum, int asnum, struct usb_host_endpoint *ep,
- int num_ep, unsigned char *buffer, int size)
+ unsigned char *buffer, int size)
{
- unsigned char *buffer_start = buffer;
- struct usb_ss_ep_comp_descriptor *desc;
- int retval;
- int num_skipped;
+ struct usb_ss_ep_comp_descriptor *desc;
int max_tx;
- int i;
+ /* The SuperSpeed endpoint companion descriptor is supposed to
+ * be the first thing immediately following the endpoint descriptor.
+ */
desc = (struct usb_ss_ep_comp_descriptor *) buffer;
- if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
+ if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
+ size < USB_DT_SS_EP_COMP_SIZE) {
dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
" interface %d altsetting %d ep %d: "
"using minimum values\n",
cfgno, inum, asnum, ep->desc.bEndpointAddress);
- /*
- * The next descriptor is for an Endpoint or Interface,
- * no extra descriptors to copy into the companion structure,
- * and we didn't eat up any of the buffer.
+
+ /* Fill in some default values.
+ * Leave bmAttributes as zero, which will mean no streams for
+ * bulk, and isoc won't support multiple bursts of packets.
+ * With bursts of only one packet, and a Mult of 1, the max
+ * amount of data moved per endpoint service interval is one
+ * packet.
*/
- return 0;
+ ep->ss_ep_comp.bLength = USB_DT_SS_EP_COMP_SIZE;
+ ep->ss_ep_comp.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
+ if (usb_endpoint_xfer_isoc(&ep->desc) ||
+ usb_endpoint_xfer_int(&ep->desc))
+ ep->ss_ep_comp.wBytesPerInterval =
+ ep->desc.wMaxPacketSize;
+ return;
}
- memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE);
- desc = &ep->ss_ep_comp->desc;
- buffer += desc->bLength;
- size -= desc->bLength;
- /* Eat up the other descriptors we don't care about */
- ep->ss_ep_comp->extra = buffer;
- i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
- USB_DT_INTERFACE, &num_skipped);
- ep->ss_ep_comp->extralen = i;
- buffer += i;
- size -= i;
- retval = buffer - buffer_start;
- if (num_skipped > 0)
- dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
- num_skipped, plural(num_skipped),
- "SuperSpeed endpoint companion");
+ memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE);
/* Check the various values */
if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) {
@@ -117,47 +87,48 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
"config %d interface %d altsetting %d ep %d: "
"setting to zero\n", desc->bMaxBurst,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
- desc->bMaxBurst = 0;
- }
- if (desc->bMaxBurst > 15) {
+ ep->ss_ep_comp.bMaxBurst = 0;
+ } else if (desc->bMaxBurst > 15) {
dev_warn(ddev, "Endpoint with bMaxBurst = %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to 15\n", desc->bMaxBurst,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
- desc->bMaxBurst = 15;
+ ep->ss_ep_comp.bMaxBurst = 15;
}
- if ((usb_endpoint_xfer_control(&ep->desc) || usb_endpoint_xfer_int(&ep->desc))
- && desc->bmAttributes != 0) {
+
+ if ((usb_endpoint_xfer_control(&ep->desc) ||
+ usb_endpoint_xfer_int(&ep->desc)) &&
+ desc->bmAttributes != 0) {
dev_warn(ddev, "%s endpoint with bmAttributes = %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to zero\n",
usb_endpoint_xfer_control(&ep->desc) ? "Control" : "Bulk",
desc->bmAttributes,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
- desc->bmAttributes = 0;
- }
- if (usb_endpoint_xfer_bulk(&ep->desc) && desc->bmAttributes > 16) {
+ ep->ss_ep_comp.bmAttributes = 0;
+ } else if (usb_endpoint_xfer_bulk(&ep->desc) &&
+ desc->bmAttributes > 16) {
dev_warn(ddev, "Bulk endpoint with more than 65536 streams in "
"config %d interface %d altsetting %d ep %d: "
"setting to max\n",
cfgno, inum, asnum, ep->desc.bEndpointAddress);
- desc->bmAttributes = 16;
- }
- if (usb_endpoint_xfer_isoc(&ep->desc) && desc->bmAttributes > 2) {
+ ep->ss_ep_comp.bmAttributes = 16;
+ } else if (usb_endpoint_xfer_isoc(&ep->desc) &&
+ desc->bmAttributes > 2) {
dev_warn(ddev, "Isoc endpoint has Mult of %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to 3\n", desc->bmAttributes + 1,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
- desc->bmAttributes = 2;
+ ep->ss_ep_comp.bmAttributes = 2;
}
- if (usb_endpoint_xfer_isoc(&ep->desc)) {
+
+ if (usb_endpoint_xfer_isoc(&ep->desc))
max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1) *
(desc->bmAttributes + 1);
- } else if (usb_endpoint_xfer_int(&ep->desc)) {
+ else if (usb_endpoint_xfer_int(&ep->desc))
max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1);
- } else {
- goto valid;
- }
+ else
+ max_tx = 999999;
if (desc->wBytesPerInterval > max_tx) {
dev_warn(ddev, "%s endpoint with wBytesPerInterval of %d in "
"config %d interface %d altsetting %d ep %d: "
@@ -166,10 +137,8 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
desc->wBytesPerInterval,
cfgno, inum, asnum, ep->desc.bEndpointAddress,
max_tx);
- desc->wBytesPerInterval = max_tx;
+ ep->ss_ep_comp.wBytesPerInterval = max_tx;
}
-valid:
- return retval;
}
static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
@@ -291,61 +260,19 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
cfgno, inum, asnum, d->bEndpointAddress,
maxp);
}
- /* Allocate room for and parse any SS endpoint companion descriptors */
- if (to_usb_device(ddev)->speed == USB_SPEED_SUPER) {
- endpoint->extra = buffer;
- i = find_next_descriptor_more(buffer, size, USB_DT_SS_ENDPOINT_COMP,
- USB_DT_ENDPOINT, USB_DT_INTERFACE, &n);
- endpoint->extralen = i;
- buffer += i;
- size -= i;
-
- /* Allocate space for the SS endpoint companion descriptor */
- endpoint->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
- GFP_KERNEL);
- if (!endpoint->ss_ep_comp)
- return -ENOMEM;
- /* Fill in some default values (may be overwritten later) */
- endpoint->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
- endpoint->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
- endpoint->ss_ep_comp->desc.bMaxBurst = 0;
- /*
- * Leave bmAttributes as zero, which will mean no streams for
- * bulk, and isoc won't support multiple bursts of packets.
- * With bursts of only one packet, and a Mult of 1, the max
- * amount of data moved per endpoint service interval is one
- * packet.
- */
- if (usb_endpoint_xfer_isoc(&endpoint->desc) ||
- usb_endpoint_xfer_int(&endpoint->desc))
- endpoint->ss_ep_comp->desc.wBytesPerInterval =
- endpoint->desc.wMaxPacketSize;
-
- if (size > 0) {
- retval = usb_parse_ss_endpoint_companion(ddev, cfgno,
- inum, asnum, endpoint, num_ep, buffer,
- size);
- if (retval >= 0) {
- buffer += retval;
- retval = buffer - buffer0;
- }
- } else {
- dev_warn(ddev, "config %d interface %d altsetting %d "
- "endpoint 0x%X has no "
- "SuperSpeed companion descriptor\n",
- cfgno, inum, asnum, d->bEndpointAddress);
- retval = buffer - buffer0;
- }
- } else {
- /* Skip over any Class Specific or Vendor Specific descriptors;
- * find the next endpoint or interface descriptor */
- endpoint->extra = buffer;
- i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
- USB_DT_INTERFACE, &n);
- endpoint->extralen = i;
- retval = buffer - buffer0 + i;
- }
+ /* Parse a possible SuperSpeed endpoint companion descriptor */
+ if (to_usb_device(ddev)->speed == USB_SPEED_SUPER)
+ usb_parse_ss_endpoint_companion(ddev, cfgno,
+ inum, asnum, endpoint, buffer, size);
+
+ /* Skip over any Class Specific or Vendor Specific descriptors;
+ * find the next endpoint or interface descriptor */
+ endpoint->extra = buffer;
+ i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
+ USB_DT_INTERFACE, &n);
+ endpoint->extralen = i;
+ retval = buffer - buffer0 + i;
if (n > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
n, plural(n), "endpoint");
@@ -478,9 +405,10 @@ skip_to_next_interface_descriptor:
return buffer - buffer0 + i;
}
-static int usb_parse_configuration(struct device *ddev, int cfgidx,
+static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
struct usb_host_config *config, unsigned char *buffer, int size)
{
+ struct device *ddev = &dev->dev;
unsigned char *buffer0 = buffer;
int cfgno;
int nintf, nintf_orig;
@@ -549,6 +477,16 @@ static int usb_parse_configuration(struct device *ddev, int cfgidx,
}
inum = d->bInterfaceNumber;
+
+ if ((dev->quirks & USB_QUIRK_HONOR_BNUMINTERFACES) &&
+ n >= nintf_orig) {
+ dev_warn(ddev, "config %d has more interface "
+ "descriptors, than it declares in "
+ "bNumInterfaces, ignoring interface "
+ "number: %d\n", cfgno, inum);
+ continue;
+ }
+
if (inum >= nintf_orig)
dev_warn(ddev, "config %d has an invalid "
"interface number: %d but max is %d\n",
@@ -722,7 +660,6 @@ int usb_get_configuration(struct usb_device *dev)
int ncfg = dev->descriptor.bNumConfigurations;
int result = 0;
unsigned int cfgno, length;
- unsigned char *buffer;
unsigned char *bigbuffer;
struct usb_config_descriptor *desc;
@@ -751,17 +688,16 @@ int usb_get_configuration(struct usb_device *dev)
if (!dev->rawdescriptors)
goto err2;
- buffer = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL);
- if (!buffer)
+ desc = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL);
+ if (!desc)
goto err2;
- desc = (struct usb_config_descriptor *)buffer;
result = 0;
for (; cfgno < ncfg; cfgno++) {
/* We grab just the first descriptor so we know how long
* the whole configuration is */
result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
- buffer, USB_DT_CONFIG_SIZE);
+ desc, USB_DT_CONFIG_SIZE);
if (result < 0) {
dev_err(ddev, "unable to read config index %d "
"descriptor/%s: %d\n", cfgno, "start", result);
@@ -800,7 +736,7 @@ int usb_get_configuration(struct usb_device *dev)
dev->rawdescriptors[cfgno] = bigbuffer;
- result = usb_parse_configuration(&dev->dev, cfgno,
+ result = usb_parse_configuration(dev, cfgno,
&dev->config[cfgno], bigbuffer, length);
if (result < 0) {
++cfgno;
@@ -810,7 +746,7 @@ int usb_get_configuration(struct usb_device *dev)
result = 0;
err:
- kfree(buffer);
+ kfree(desc);
out_not_authorized:
dev->descriptor.bNumConfigurations = cfgno;
err2:
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 19bc03a..3449742 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -1,7 +1,8 @@
/*
* devices.c
* (C) Copyright 1999 Randy Dunlap.
- * (C) Copyright 1999,2000 Thomas Sailer <sailer@ife.ee.ethz.ch>. (proc file per device)
+ * (C) Copyright 1999,2000 Thomas Sailer <sailer@ife.ee.ethz.ch>.
+ * (proc file per device)
* (C) Copyright 1999 Deti Fliegl (new USB architecture)
*
* This program is free software; you can redistribute it and/or modify
@@ -55,11 +56,11 @@
#include <linux/usb.h>
#include <linux/smp_lock.h>
#include <linux/usbdevice_fs.h>
+#include <linux/usb/hcd.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "usb.h"
-#include "hcd.h"
/* Define ALLOW_SERIAL_NUMBER if you want to see the serial number of devices */
#define ALLOW_SERIAL_NUMBER
@@ -138,8 +139,8 @@ struct class_info {
char *class_name;
};
-static const struct class_info clas_info[] =
-{ /* max. 5 chars. per name string */
+static const struct class_info clas_info[] = {
+ /* max. 5 chars. per name string */
{USB_CLASS_PER_INTERFACE, ">ifc"},
{USB_CLASS_AUDIO, "audio"},
{USB_CLASS_COMM, "comm."},
@@ -191,8 +192,10 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
if (speed == USB_SPEED_HIGH) {
switch (le16_to_cpu(desc->wMaxPacketSize) & (0x03 << 11)) {
- case 1 << 11: bandwidth = 2; break;
- case 2 << 11: bandwidth = 3; break;
+ case 1 << 11:
+ bandwidth = 2; break;
+ case 2 << 11:
+ bandwidth = 3; break;
}
}
@@ -200,7 +203,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
switch (usb_endpoint_type(desc)) {
case USB_ENDPOINT_XFER_CONTROL:
type = "Ctrl";
- if (speed == USB_SPEED_HIGH) /* uframes per NAK */
+ if (speed == USB_SPEED_HIGH) /* uframes per NAK */
interval = desc->bInterval;
else
interval = 0;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 3466fdc..c2f62a3 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -43,6 +43,7 @@
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
+#include <linux/usb/hcd.h> /* for usbcore internals */
#include <linux/cdev.h>
#include <linux/notifier.h>
#include <linux/security.h>
@@ -50,9 +51,7 @@
#include <asm/byteorder.h>
#include <linux/moduleparam.h>
-#include "hcd.h" /* for usbcore internals */
#include "usb.h"
-#include "hub.h"
#define USB_MAXBUS 64
#define USB_DEVICE_MAX USB_MAXBUS * 128
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 2f3dc4c..ded550e 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -26,8 +26,9 @@
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>
+#include <linux/usb/hcd.h>
#include <linux/pm_runtime.h>
-#include "hcd.h"
+
#include "usb.h"
@@ -333,7 +334,8 @@ static int usb_probe_interface(struct device *dev)
usb_cancel_queued_reset(intf);
/* Unbound interfaces are always runtime-PM-disabled and -suspended */
- pm_runtime_disable(dev);
+ if (driver->supports_autosuspend)
+ pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
usb_autosuspend_device(udev);
@@ -388,7 +390,8 @@ static int usb_unbind_interface(struct device *dev)
intf->needs_remote_wakeup = 0;
/* Unbound interfaces are always runtime-PM-disabled and -suspended */
- pm_runtime_disable(dev);
+ if (driver->supports_autosuspend)
+ pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
/* Undo any residual pm_autopm_get_interface_* calls */
@@ -437,14 +440,17 @@ int usb_driver_claim_interface(struct usb_driver *driver,
iface->condition = USB_INTERFACE_BOUND;
- /* Claimed interfaces are initially inactive (suspended). They are
- * runtime-PM-enabled only if the driver has autosuspend support.
- * They are sensitive to their children's power states.
+ /* Claimed interfaces are initially inactive (suspended) and
+ * runtime-PM-enabled, but only if the driver has autosuspend
+ * support. Otherwise they are marked active, to prevent the
+ * device from being autosuspended, but left disabled. In either
+ * case they are sensitive to their children's power states.
*/
- pm_runtime_set_suspended(dev);
pm_suspend_ignore_children(dev, false);
if (driver->supports_autosuspend)
pm_runtime_enable(dev);
+ else
+ pm_runtime_set_active(dev);
/* if interface was already added, bind now; else let
* the future device_add() bind it, bypassing probe()
@@ -1355,13 +1361,9 @@ int usb_resume(struct device *dev, pm_message_t msg)
*
* The caller must hold @udev's device lock.
*/
-int usb_enable_autosuspend(struct usb_device *udev)
+void usb_enable_autosuspend(struct usb_device *udev)
{
- if (udev->autosuspend_disabled) {
- udev->autosuspend_disabled = 0;
- usb_autosuspend_device(udev);
- }
- return 0;
+ pm_runtime_allow(&udev->dev);
}
EXPORT_SYMBOL_GPL(usb_enable_autosuspend);
@@ -1374,16 +1376,9 @@ EXPORT_SYMBOL_GPL(usb_enable_autosuspend);
*
* The caller must hold @udev's device lock.
*/
-int usb_disable_autosuspend(struct usb_device *udev)
+void usb_disable_autosuspend(struct usb_device *udev)
{
- int rc = 0;
-
- if (!udev->autosuspend_disabled) {
- rc = usb_autoresume_device(udev);
- if (rc == 0)
- udev->autosuspend_disabled = 1;
- }
- return rc;
+ pm_runtime_forbid(&udev->dev);
}
EXPORT_SYMBOL_GPL(usb_disable_autosuspend);
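
With the change above, usb_enable_autosuspend() and usb_disable_autosuspend() become void wrappers around pm_runtime_allow()/pm_runtime_forbid(). A sketch of a driver probe that keeps its device out of autosuspend under the new interface (hypothetical driver, error handling omitted) is:

	#include <linux/usb.h>

	/* Keep the device awake while this hypothetical interface is bound. */
	static int my_probe(struct usb_interface *intf,
			    const struct usb_device_id *id)
	{
		struct usb_device *udev = interface_to_usbdev(intf);

		/* now void: simply forwards to pm_runtime_forbid() */
		usb_disable_autosuspend(udev);
		return 0;
	}
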
@@ -1485,9 +1480,6 @@ int usb_autoresume_device(struct usb_device *udev)
* 0, a delayed autosuspend request for @intf's device is attempted. The
* attempt may fail (see autosuspend_check()).
*
- * If the driver has set @intf->needs_remote_wakeup then autosuspend will
- * take place only if the device's remote-wakeup facility is enabled.
- *
* This routine can run only in process context.
*/
void usb_autopm_put_interface(struct usb_interface *intf)
@@ -1530,7 +1522,7 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
atomic_dec(&intf->pm_usage_cnt);
pm_runtime_put_noidle(&intf->dev);
- if (!udev->autosuspend_disabled) {
+ if (udev->dev.power.runtime_auto) {
/* Optimization: Don't schedule a delayed autosuspend if
* the timer is already running and the expiration time
* wouldn't change.
@@ -1672,14 +1664,14 @@ EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
/* Internal routine to check whether we may autosuspend a device. */
static int autosuspend_check(struct usb_device *udev)
{
- int i;
+ int w, i;
struct usb_interface *intf;
unsigned long suspend_time, j;
/* Fail if autosuspend is disabled, or any interfaces are in use, or
* any interface drivers require remote wakeup but it isn't available.
*/
- udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
+ w = 0;
if (udev->actconfig) {
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
intf = udev->actconfig->interface[i];
@@ -1693,12 +1685,7 @@ static int autosuspend_check(struct usb_device *udev)
continue;
if (atomic_read(&intf->dev.power.usage_count) > 0)
return -EBUSY;
- if (intf->needs_remote_wakeup &&
- !udev->do_remote_wakeup) {
- dev_dbg(&udev->dev, "remote wakeup needed "
- "for autosuspend\n");
- return -EOPNOTSUPP;
- }
+ w |= intf->needs_remote_wakeup;
/* Don't allow autosuspend if the device will need
* a reset-resume and any of its interface drivers
@@ -1714,6 +1701,11 @@ static int autosuspend_check(struct usb_device *udev)
}
}
}
+ if (w && !device_can_wakeup(&udev->dev)) {
+ dev_dbg(&udev->dev, "remote wakeup needed for autosuspend\n");
+ return -EOPNOTSUPP;
+ }
+ udev->do_remote_wakeup = w;
/* If everything is okay but the device hasn't been idle for long
* enough, queue a delayed autosuspend request.
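For reference, a minimal caller-side sketch (not part of this patch) of the reworked autosuspend interface: usb_enable_autosuspend()/usb_disable_autosuspend() become void wrappers around pm_runtime_allow()/pm_runtime_forbid(), while per-interface usage counting still goes through the autopm calls. The driver function below is hypothetical.

#include <linux/usb.h>

/* Hypothetical example, not from the patch. */
static int example_do_io(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	int rc;

	rc = usb_autopm_get_interface(intf);	/* resume and pin the device */
	if (rc)
		return rc;

	/* ... submit and reap URBs here ... */

	usb_autopm_put_interface(intf);		/* allow idling again */

	/* The on/off switch is now a void wrapper around
	 * pm_runtime_forbid()/pm_runtime_allow(); per the kernel-doc the
	 * caller must hold udev's device lock. */
	usb_lock_device(udev);
	usb_disable_autosuspend(udev);		/* keep the device awake */
	usb_enable_autosuspend(udev);		/* ... and let it idle again */
	usb_unlock_device(udev);
	return 0;
}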
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 2c95153..9a34ccb 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -18,8 +18,8 @@
*/
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include "usb.h"
-#include "hcd.h"
static inline const char *plural(int n)
{
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 1528653..1cf2d1e 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -21,6 +21,7 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -33,7 +34,6 @@
#endif
#include "usb.h"
-#include "hcd.h"
/* PCI-based HCs are common, but plenty of non-PCI HCs are used too */
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 2f8cedd..12742f1 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -38,14 +38,12 @@
#include <asm/unaligned.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
-#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include "usb.h"
-#include "hcd.h"
-#include "hub.h"
/*-------------------------------------------------------------------------*/
@@ -1261,6 +1259,51 @@ static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle,
*dma_handle = 0;
}
+static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ enum dma_data_direction dir;
+
+ if (urb->transfer_flags & URB_SETUP_MAP_SINGLE)
+ dma_unmap_single(hcd->self.controller,
+ urb->setup_dma,
+ sizeof(struct usb_ctrlrequest),
+ DMA_TO_DEVICE);
+ else if (urb->transfer_flags & URB_SETUP_MAP_LOCAL)
+ hcd_free_coherent(urb->dev->bus,
+ &urb->setup_dma,
+ (void **) &urb->setup_packet,
+ sizeof(struct usb_ctrlrequest),
+ DMA_TO_DEVICE);
+
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ if (urb->transfer_flags & URB_DMA_MAP_SG)
+ dma_unmap_sg(hcd->self.controller,
+ urb->sg,
+ urb->num_sgs,
+ dir);
+ else if (urb->transfer_flags & URB_DMA_MAP_PAGE)
+ dma_unmap_page(hcd->self.controller,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ dir);
+ else if (urb->transfer_flags & URB_DMA_MAP_SINGLE)
+ dma_unmap_single(hcd->self.controller,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ dir);
+ else if (urb->transfer_flags & URB_MAP_LOCAL)
+ hcd_free_coherent(urb->dev->bus,
+ &urb->transfer_dma,
+ &urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ dir);
+
+ /* Make it safe to call this routine more than once */
+ urb->transfer_flags &= ~(URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
+ URB_DMA_MAP_SG | URB_DMA_MAP_PAGE |
+ URB_DMA_MAP_SINGLE | URB_MAP_LOCAL);
+}
+
static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
@@ -1272,11 +1315,8 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
* unless it uses pio or talks to another transport,
* or uses the provided scatter gather list for bulk.
*/
- if (is_root_hub(urb->dev))
- return 0;
- if (usb_endpoint_xfer_control(&urb->ep->desc)
- && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
+ if (usb_endpoint_xfer_control(&urb->ep->desc)) {
if (hcd->self.uses_dma) {
urb->setup_dma = dma_map_single(
hcd->self.controller,
@@ -1286,27 +1326,64 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
if (dma_mapping_error(hcd->self.controller,
urb->setup_dma))
return -EAGAIN;
- } else if (hcd->driver->flags & HCD_LOCAL_MEM)
+ urb->transfer_flags |= URB_SETUP_MAP_SINGLE;
+ } else if (hcd->driver->flags & HCD_LOCAL_MEM) {
ret = hcd_alloc_coherent(
urb->dev->bus, mem_flags,
&urb->setup_dma,
(void **)&urb->setup_packet,
sizeof(struct usb_ctrlrequest),
DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+ urb->transfer_flags |= URB_SETUP_MAP_LOCAL;
+ }
}
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- if (ret == 0 && urb->transfer_buffer_length != 0
+ if (urb->transfer_buffer_length != 0
&& !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
if (hcd->self.uses_dma) {
- urb->transfer_dma = dma_map_single (
- hcd->self.controller,
- urb->transfer_buffer,
- urb->transfer_buffer_length,
- dir);
- if (dma_mapping_error(hcd->self.controller,
+ if (urb->num_sgs) {
+ int n = dma_map_sg(
+ hcd->self.controller,
+ urb->sg,
+ urb->num_sgs,
+ dir);
+ if (n <= 0)
+ ret = -EAGAIN;
+ else
+ urb->transfer_flags |= URB_DMA_MAP_SG;
+ if (n != urb->num_sgs) {
+ urb->num_sgs = n;
+ urb->transfer_flags |=
+ URB_DMA_SG_COMBINED;
+ }
+ } else if (urb->sg) {
+ struct scatterlist *sg = urb->sg;
+ urb->transfer_dma = dma_map_page(
+ hcd->self.controller,
+ sg_page(sg),
+ sg->offset,
+ urb->transfer_buffer_length,
+ dir);
+ if (dma_mapping_error(hcd->self.controller,
urb->transfer_dma))
- return -EAGAIN;
+ ret = -EAGAIN;
+ else
+ urb->transfer_flags |= URB_DMA_MAP_PAGE;
+ } else {
+ urb->transfer_dma = dma_map_single(
+ hcd->self.controller,
+ urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ dir);
+ if (dma_mapping_error(hcd->self.controller,
+ urb->transfer_dma))
+ ret = -EAGAIN;
+ else
+ urb->transfer_flags |= URB_DMA_MAP_SINGLE;
+ }
} else if (hcd->driver->flags & HCD_LOCAL_MEM) {
ret = hcd_alloc_coherent(
urb->dev->bus, mem_flags,
@@ -1314,55 +1391,16 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
&urb->transfer_buffer,
urb->transfer_buffer_length,
dir);
-
- if (ret && usb_endpoint_xfer_control(&urb->ep->desc)
- && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP))
- hcd_free_coherent(urb->dev->bus,
- &urb->setup_dma,
- (void **)&urb->setup_packet,
- sizeof(struct usb_ctrlrequest),
- DMA_TO_DEVICE);
+ if (ret == 0)
+ urb->transfer_flags |= URB_MAP_LOCAL;
}
+ if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE |
+ URB_SETUP_MAP_LOCAL)))
+ unmap_urb_for_dma(hcd, urb);
}
return ret;
}
-static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
-{
- enum dma_data_direction dir;
-
- if (is_root_hub(urb->dev))
- return;
-
- if (usb_endpoint_xfer_control(&urb->ep->desc)
- && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
- if (hcd->self.uses_dma)
- dma_unmap_single(hcd->self.controller, urb->setup_dma,
- sizeof(struct usb_ctrlrequest),
- DMA_TO_DEVICE);
- else if (hcd->driver->flags & HCD_LOCAL_MEM)
- hcd_free_coherent(urb->dev->bus, &urb->setup_dma,
- (void **)&urb->setup_packet,
- sizeof(struct usb_ctrlrequest),
- DMA_TO_DEVICE);
- }
-
- dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- if (urb->transfer_buffer_length != 0
- && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
- if (hcd->self.uses_dma)
- dma_unmap_single(hcd->self.controller,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- dir);
- else if (hcd->driver->flags & HCD_LOCAL_MEM)
- hcd_free_coherent(urb->dev->bus, &urb->transfer_dma,
- &urb->transfer_buffer,
- urb->transfer_buffer_length,
- dir);
- }
-}
-
/*-------------------------------------------------------------------------*/
/* may be called in any context with a valid urb->dev usecount
@@ -1391,21 +1429,20 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
* URBs must be submitted in process context with interrupts
* enabled.
*/
- status = map_urb_for_dma(hcd, urb, mem_flags);
- if (unlikely(status)) {
- usbmon_urb_submit_error(&hcd->self, urb, status);
- goto error;
- }
- if (is_root_hub(urb->dev))
+ if (is_root_hub(urb->dev)) {
status = rh_urb_enqueue(hcd, urb);
- else
- status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
+ } else {
+ status = map_urb_for_dma(hcd, urb, mem_flags);
+ if (likely(status == 0)) {
+ status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
+ if (unlikely(status))
+ unmap_urb_for_dma(hcd, urb);
+ }
+ }
if (unlikely(status)) {
usbmon_urb_submit_error(&hcd->self, urb, status);
- unmap_urb_for_dma(hcd, urb);
- error:
urb->hcpriv = NULL;
INIT_LIST_HEAD(&urb->urb_list);
atomic_dec(&urb->use_count);
@@ -1775,6 +1812,75 @@ void usb_hcd_reset_endpoint(struct usb_device *udev,
}
}
+/**
+ * usb_alloc_streams - allocate bulk endpoint stream IDs.
+ * @interface: alternate setting that includes all endpoints.
+ * @eps: array of endpoints that need streams.
+ * @num_eps: number of endpoints in the array.
+ * @num_streams: number of streams to allocate.
+ * @mem_flags: flags hcd should use to allocate memory.
+ *
+ * Sets up a group of bulk endpoints to have num_streams stream IDs available.
+ * Drivers may queue multiple transfers to different stream IDs, which may
+ * complete in a different order than they were queued.
+ */
+int usb_alloc_streams(struct usb_interface *interface,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ unsigned int num_streams, gfp_t mem_flags)
+{
+ struct usb_hcd *hcd;
+ struct usb_device *dev;
+ int i;
+
+ dev = interface_to_usbdev(interface);
+ hcd = bus_to_hcd(dev->bus);
+ if (!hcd->driver->alloc_streams || !hcd->driver->free_streams)
+ return -EINVAL;
+ if (dev->speed != USB_SPEED_SUPER)
+ return -EINVAL;
+
+ /* Streams only apply to bulk endpoints. */
+ for (i = 0; i < num_eps; i++)
+ if (!usb_endpoint_xfer_bulk(&eps[i]->desc))
+ return -EINVAL;
+
+ return hcd->driver->alloc_streams(hcd, dev, eps, num_eps,
+ num_streams, mem_flags);
+}
+EXPORT_SYMBOL_GPL(usb_alloc_streams);
+
+/**
+ * usb_free_streams - free bulk endpoint stream IDs.
+ * @interface: alternate setting that includes all endpoints.
+ * @eps: array of endpoints to remove streams from.
+ * @num_eps: number of endpoints in the array.
+ * @mem_flags: flags hcd should use to allocate memory.
+ *
+ * Reverts a group of bulk endpoints back to not using stream IDs.
+ * Can fail if we are given bad arguments, or HCD is broken.
+ */
+void usb_free_streams(struct usb_interface *interface,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ gfp_t mem_flags)
+{
+ struct usb_hcd *hcd;
+ struct usb_device *dev;
+ int i;
+
+ dev = interface_to_usbdev(interface);
+ hcd = bus_to_hcd(dev->bus);
+ if (dev->speed != USB_SPEED_SUPER)
+ return;
+
+ /* Streams only apply to bulk endpoints. */
+ for (i = 0; i < num_eps; i++)
+ if (!usb_endpoint_xfer_bulk(&eps[i]->desc))
+ return;
+
+ hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
+}
+EXPORT_SYMBOL_GPL(usb_free_streams);
+
/* Protect against drivers that try to unlink URBs after the device
* is gone, by waiting until all unlinks for @udev are finished.
* Since we don't currently track URBs by device, simply wait until
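For reference, a minimal sketch (not part of this patch) of how a SuperSpeed driver might use the new usb_alloc_streams()/usb_free_streams() exports on a pair of bulk endpoints; the endpoint pointers and the count of 16 streams are arbitrary examples.

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/usb.h>

/* Hypothetical example, not from the patch. */
static int example_setup_streams(struct usb_interface *intf,
				 struct usb_host_endpoint *bulk_in,
				 struct usb_host_endpoint *bulk_out)
{
	struct usb_host_endpoint *eps[] = { bulk_in, bulk_out };
	int ret;

	/* Fails with -EINVAL unless the HCD implements alloc_streams and
	 * the device is SuperSpeed; 16 streams is an arbitrary choice. */
	ret = usb_alloc_streams(intf, eps, ARRAY_SIZE(eps), 16, GFP_KERNEL);
	if (ret < 0)
		return ret;

	/* ... queue bulk URBs tagged with different stream IDs ... */

	usb_free_streams(intf, eps, ARRAY_SIZE(eps), GFP_KERNEL);
	return 0;
}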
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
deleted file mode 100644
index a3cdb09..0000000
--- a/drivers/usb/core/hcd.h
+++ /dev/null
@@ -1,578 +0,0 @@
-/*
- * Copyright (c) 2001-2002 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __USB_CORE_HCD_H
-#define __USB_CORE_HCD_H
-
-#ifdef __KERNEL__
-
-#include <linux/rwsem.h>
-
-#define MAX_TOPO_LEVEL 6
-
-/* This file contains declarations of usbcore internals that are mostly
- * used or exposed by Host Controller Drivers.
- */
-
-/*
- * USB Packet IDs (PIDs)
- */
-#define USB_PID_EXT 0xf0 /* USB 2.0 LPM ECN */
-#define USB_PID_OUT 0xe1
-#define USB_PID_ACK 0xd2
-#define USB_PID_DATA0 0xc3
-#define USB_PID_PING 0xb4 /* USB 2.0 */
-#define USB_PID_SOF 0xa5
-#define USB_PID_NYET 0x96 /* USB 2.0 */
-#define USB_PID_DATA2 0x87 /* USB 2.0 */
-#define USB_PID_SPLIT 0x78 /* USB 2.0 */
-#define USB_PID_IN 0x69
-#define USB_PID_NAK 0x5a
-#define USB_PID_DATA1 0x4b
-#define USB_PID_PREAMBLE 0x3c /* Token mode */
-#define USB_PID_ERR 0x3c /* USB 2.0: handshake mode */
-#define USB_PID_SETUP 0x2d
-#define USB_PID_STALL 0x1e
-#define USB_PID_MDATA 0x0f /* USB 2.0 */
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * USB Host Controller Driver (usb_hcd) framework
- *
- * Since "struct usb_bus" is so thin, you can't share much code in it.
- * This framework is a layer over that, and should be more sharable.
- *
- * @authorized_default: Specifies if new devices are authorized to
- * connect by default or they require explicit
- * user space authorization; this bit is settable
- * through /sys/class/usb_host/X/authorized_default.
- * For the rest is RO, so we don't lock to r/w it.
- */
-
-/*-------------------------------------------------------------------------*/
-
-struct usb_hcd {
-
- /*
- * housekeeping
- */
- struct usb_bus self; /* hcd is-a bus */
- struct kref kref; /* reference counter */
-
- const char *product_desc; /* product/vendor string */
- char irq_descr[24]; /* driver + bus # */
-
- struct timer_list rh_timer; /* drives root-hub polling */
- struct urb *status_urb; /* the current status urb */
-#ifdef CONFIG_USB_SUSPEND
- struct work_struct wakeup_work; /* for remote wakeup */
-#endif
-
- /*
- * hardware info/state
- */
- const struct hc_driver *driver; /* hw-specific hooks */
-
- /* Flags that need to be manipulated atomically */
- unsigned long flags;
-#define HCD_FLAG_HW_ACCESSIBLE 0x00000001
-#define HCD_FLAG_SAW_IRQ 0x00000002
-
- unsigned rh_registered:1;/* is root hub registered? */
-
- /* The next flag is a stopgap, to be removed when all the HCDs
- * support the new root-hub polling mechanism. */
- unsigned uses_new_polling:1;
- unsigned poll_rh:1; /* poll for rh status? */
- unsigned poll_pending:1; /* status has changed? */
- unsigned wireless:1; /* Wireless USB HCD */
- unsigned authorized_default:1;
- unsigned has_tt:1; /* Integrated TT in root hub */
-
- int irq; /* irq allocated */
- void __iomem *regs; /* device memory/io */
- u64 rsrc_start; /* memory/io resource start */
- u64 rsrc_len; /* memory/io resource length */
- unsigned power_budget; /* in mA, 0 = no limit */
-
- /* bandwidth_mutex should be taken before adding or removing
- * any new bus bandwidth constraints:
- * 1. Before adding a configuration for a new device.
- * 2. Before removing the configuration to put the device into
- * the addressed state.
- * 3. Before selecting a different configuration.
- * 4. Before selecting an alternate interface setting.
- *
- * bandwidth_mutex should be dropped after a successful control message
- * to the device, or resetting the bandwidth after a failed attempt.
- */
- struct mutex bandwidth_mutex;
-
-
-#define HCD_BUFFER_POOLS 4
- struct dma_pool *pool [HCD_BUFFER_POOLS];
-
- int state;
-# define __ACTIVE 0x01
-# define __SUSPEND 0x04
-# define __TRANSIENT 0x80
-
-# define HC_STATE_HALT 0
-# define HC_STATE_RUNNING (__ACTIVE)
-# define HC_STATE_QUIESCING (__SUSPEND|__TRANSIENT|__ACTIVE)
-# define HC_STATE_RESUMING (__SUSPEND|__TRANSIENT)
-# define HC_STATE_SUSPENDED (__SUSPEND)
-
-#define HC_IS_RUNNING(state) ((state) & __ACTIVE)
-#define HC_IS_SUSPENDED(state) ((state) & __SUSPEND)
-
- /* more shared queuing code would be good; it should support
- * smarter scheduling, handle transaction translators, etc;
- * input size of periodic table to an interrupt scheduler.
- * (ohci 32, uhci 1024, ehci 256/512/1024).
- */
-
- /* The HC driver's private data is stored at the end of
- * this structure.
- */
- unsigned long hcd_priv[0]
- __attribute__ ((aligned(sizeof(unsigned long))));
-};
-
-/* 2.4 does this a bit differently ... */
-static inline struct usb_bus *hcd_to_bus(struct usb_hcd *hcd)
-{
- return &hcd->self;
-}
-
-static inline struct usb_hcd *bus_to_hcd(struct usb_bus *bus)
-{
- return container_of(bus, struct usb_hcd, self);
-}
-
-struct hcd_timeout { /* timeouts we allocate */
- struct list_head timeout_list;
- struct timer_list timer;
-};
-
-/*-------------------------------------------------------------------------*/
-
-
-struct hc_driver {
- const char *description; /* "ehci-hcd" etc */
- const char *product_desc; /* product/vendor string */
- size_t hcd_priv_size; /* size of private data */
-
- /* irq handler */
- irqreturn_t (*irq) (struct usb_hcd *hcd);
-
- int flags;
-#define HCD_MEMORY 0x0001 /* HC regs use memory (else I/O) */
-#define HCD_LOCAL_MEM 0x0002 /* HC needs local memory */
-#define HCD_USB11 0x0010 /* USB 1.1 */
-#define HCD_USB2 0x0020 /* USB 2.0 */
-#define HCD_USB3 0x0040 /* USB 3.0 */
-#define HCD_MASK 0x0070
-
- /* called to init HCD and root hub */
- int (*reset) (struct usb_hcd *hcd);
- int (*start) (struct usb_hcd *hcd);
-
- /* NOTE: these suspend/resume calls relate to the HC as
- * a whole, not just the root hub; they're for PCI bus glue.
- */
- /* called after suspending the hub, before entering D3 etc */
- int (*pci_suspend)(struct usb_hcd *hcd);
-
- /* called after entering D0 (etc), before resuming the hub */
- int (*pci_resume)(struct usb_hcd *hcd, bool hibernated);
-
- /* cleanly make HCD stop writing memory and doing I/O */
- void (*stop) (struct usb_hcd *hcd);
-
- /* shutdown HCD */
- void (*shutdown) (struct usb_hcd *hcd);
-
- /* return current frame number */
- int (*get_frame_number) (struct usb_hcd *hcd);
-
- /* manage i/o requests, device state */
- int (*urb_enqueue)(struct usb_hcd *hcd,
- struct urb *urb, gfp_t mem_flags);
- int (*urb_dequeue)(struct usb_hcd *hcd,
- struct urb *urb, int status);
-
- /* hw synch, freeing endpoint resources that urb_dequeue can't */
- void (*endpoint_disable)(struct usb_hcd *hcd,
- struct usb_host_endpoint *ep);
-
- /* (optional) reset any endpoint state such as sequence number
- and current window */
- void (*endpoint_reset)(struct usb_hcd *hcd,
- struct usb_host_endpoint *ep);
-
- /* root hub support */
- int (*hub_status_data) (struct usb_hcd *hcd, char *buf);
- int (*hub_control) (struct usb_hcd *hcd,
- u16 typeReq, u16 wValue, u16 wIndex,
- char *buf, u16 wLength);
- int (*bus_suspend)(struct usb_hcd *);
- int (*bus_resume)(struct usb_hcd *);
- int (*start_port_reset)(struct usb_hcd *, unsigned port_num);
-
- /* force handover of high-speed port to full-speed companion */
- void (*relinquish_port)(struct usb_hcd *, int);
- /* has a port been handed over to a companion? */
- int (*port_handed_over)(struct usb_hcd *, int);
-
- /* CLEAR_TT_BUFFER completion callback */
- void (*clear_tt_buffer_complete)(struct usb_hcd *,
- struct usb_host_endpoint *);
-
- /* xHCI specific functions */
- /* Called by usb_alloc_dev to alloc HC device structures */
- int (*alloc_dev)(struct usb_hcd *, struct usb_device *);
- /* Called by usb_disconnect to free HC device structures */
- void (*free_dev)(struct usb_hcd *, struct usb_device *);
-
- /* Bandwidth computation functions */
- /* Note that add_endpoint() can only be called once per endpoint before
- * check_bandwidth() or reset_bandwidth() must be called.
- * drop_endpoint() can only be called once per endpoint also.
- * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
- * add the endpoint to the schedule with possibly new parameters denoted by a
- * different endpoint descriptor in usb_host_endpoint.
- * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
- * not allowed.
- */
- /* Allocate endpoint resources and add them to a new schedule */
- int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *);
- /* Drop an endpoint from a new schedule */
- int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *);
- /* Check that a new hardware configuration, set using
- * endpoint_enable and endpoint_disable, does not exceed bus
- * bandwidth. This must be called before any set configuration
- * or set interface requests are sent to the device.
- */
- int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
- /* Reset the device schedule to the last known good schedule,
- * which was set from a previous successful call to
- * check_bandwidth(). This reverts any add_endpoint() and
- * drop_endpoint() calls since that last successful call.
- * Used for when a check_bandwidth() call fails due to resource
- * or bandwidth constraints.
- */
- void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
- /* Returns the hardware-chosen device address */
- int (*address_device)(struct usb_hcd *, struct usb_device *udev);
- /* Notifies the HCD after a hub descriptor is fetched.
- * Will block.
- */
- int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev,
- struct usb_tt *tt, gfp_t mem_flags);
- int (*reset_device)(struct usb_hcd *, struct usb_device *);
-};
-
-extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
-extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
- int status);
-extern void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb);
-
-extern int usb_hcd_submit_urb(struct urb *urb, gfp_t mem_flags);
-extern int usb_hcd_unlink_urb(struct urb *urb, int status);
-extern void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb,
- int status);
-extern void usb_hcd_flush_endpoint(struct usb_device *udev,
- struct usb_host_endpoint *ep);
-extern void usb_hcd_disable_endpoint(struct usb_device *udev,
- struct usb_host_endpoint *ep);
-extern void usb_hcd_reset_endpoint(struct usb_device *udev,
- struct usb_host_endpoint *ep);
-extern void usb_hcd_synchronize_unlinks(struct usb_device *udev);
-extern int usb_hcd_alloc_bandwidth(struct usb_device *udev,
- struct usb_host_config *new_config,
- struct usb_host_interface *old_alt,
- struct usb_host_interface *new_alt);
-extern int usb_hcd_get_frame_number(struct usb_device *udev);
-
-extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
- struct device *dev, const char *bus_name);
-extern struct usb_hcd *usb_get_hcd(struct usb_hcd *hcd);
-extern void usb_put_hcd(struct usb_hcd *hcd);
-extern int usb_add_hcd(struct usb_hcd *hcd,
- unsigned int irqnum, unsigned long irqflags);
-extern void usb_remove_hcd(struct usb_hcd *hcd);
-
-struct platform_device;
-extern void usb_hcd_platform_shutdown(struct platform_device *dev);
-
-#ifdef CONFIG_PCI
-struct pci_dev;
-struct pci_device_id;
-extern int usb_hcd_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id);
-extern void usb_hcd_pci_remove(struct pci_dev *dev);
-extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
-
-#ifdef CONFIG_PM_SLEEP
-extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
-#endif
-#endif /* CONFIG_PCI */
-
-/* pci-ish (pdev null is ok) buffer alloc/mapping support */
-int hcd_buffer_create(struct usb_hcd *hcd);
-void hcd_buffer_destroy(struct usb_hcd *hcd);
-
-void *hcd_buffer_alloc(struct usb_bus *bus, size_t size,
- gfp_t mem_flags, dma_addr_t *dma);
-void hcd_buffer_free(struct usb_bus *bus, size_t size,
- void *addr, dma_addr_t dma);
-
-/* generic bus glue, needed for host controllers that don't use PCI */
-extern irqreturn_t usb_hcd_irq(int irq, void *__hcd);
-
-extern void usb_hc_died(struct usb_hcd *hcd);
-extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd);
-
-/* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */
-#define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1)
-#define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep)))
-#define usb_settoggle(dev, ep, out, bit) \
- ((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | \
- ((bit) << (ep)))
-
-/* -------------------------------------------------------------------------- */
-
-/* Enumeration is only for the hub driver, or HCD virtual root hubs */
-extern struct usb_device *usb_alloc_dev(struct usb_device *parent,
- struct usb_bus *, unsigned port);
-extern int usb_new_device(struct usb_device *dev);
-extern void usb_disconnect(struct usb_device **);
-
-extern int usb_get_configuration(struct usb_device *dev);
-extern void usb_destroy_configuration(struct usb_device *dev);
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * HCD Root Hub support
- */
-
-#include "hub.h"
-
-/* (shifted) direction/type/recipient from the USB 2.0 spec, table 9.2 */
-#define DeviceRequest \
- ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8)
-#define DeviceOutRequest \
- ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8)
-
-#define InterfaceRequest \
- ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
-
-#define EndpointRequest \
- ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
-#define EndpointOutRequest \
- ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
-
-/* class requests from the USB 2.0 hub spec, table 11-15 */
-/* GetBusState and SetHubDescriptor are optional, omitted */
-#define ClearHubFeature (0x2000 | USB_REQ_CLEAR_FEATURE)
-#define ClearPortFeature (0x2300 | USB_REQ_CLEAR_FEATURE)
-#define GetHubDescriptor (0xa000 | USB_REQ_GET_DESCRIPTOR)
-#define GetHubStatus (0xa000 | USB_REQ_GET_STATUS)
-#define GetPortStatus (0xa300 | USB_REQ_GET_STATUS)
-#define SetHubFeature (0x2000 | USB_REQ_SET_FEATURE)
-#define SetPortFeature (0x2300 | USB_REQ_SET_FEATURE)
-
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * Generic bandwidth allocation constants/support
- */
-#define FRAME_TIME_USECS 1000L
-#define BitTime(bytecount) (7 * 8 * bytecount / 6) /* with integer truncation */
- /* Trying not to use worst-case bit-stuffing
- * of (7/6 * 8 * bytecount) = 9.33 * bytecount */
- /* bytecount = data payload byte count */
-
-#define NS_TO_US(ns) ((ns + 500L) / 1000L)
- /* convert & round nanoseconds to microseconds */
-
-
-/*
- * Full/low speed bandwidth allocation constants/support.
- */
-#define BW_HOST_DELAY 1000L /* nanoseconds */
-#define BW_HUB_LS_SETUP 333L /* nanoseconds */
- /* 4 full-speed bit times (est.) */
-
-#define FRAME_TIME_BITS 12000L /* frame = 1 millisecond */
-#define FRAME_TIME_MAX_BITS_ALLOC (90L * FRAME_TIME_BITS / 100L)
-#define FRAME_TIME_MAX_USECS_ALLOC (90L * FRAME_TIME_USECS / 100L)
-
-/*
- * Ceiling [nano/micro]seconds (typical) for that many bytes at high speed
- * ISO is a bit less, no ACK ... from USB 2.0 spec, 5.11.3 (and needed
- * to preallocate bandwidth)
- */
-#define USB2_HOST_DELAY 5 /* nsec, guess */
-#define HS_NSECS(bytes) (((55 * 8 * 2083) \
- + (2083UL * (3 + BitTime(bytes))))/1000 \
- + USB2_HOST_DELAY)
-#define HS_NSECS_ISO(bytes) (((38 * 8 * 2083) \
- + (2083UL * (3 + BitTime(bytes))))/1000 \
- + USB2_HOST_DELAY)
-#define HS_USECS(bytes) NS_TO_US (HS_NSECS(bytes))
-#define HS_USECS_ISO(bytes) NS_TO_US (HS_NSECS_ISO(bytes))
-
-extern long usb_calc_bus_time(int speed, int is_input,
- int isoc, int bytecount);
-
-/*-------------------------------------------------------------------------*/
-
-extern void usb_set_device_state(struct usb_device *udev,
- enum usb_device_state new_state);
-
-/*-------------------------------------------------------------------------*/
-
-/* exported only within usbcore */
-
-extern struct list_head usb_bus_list;
-extern struct mutex usb_bus_list_lock;
-extern wait_queue_head_t usb_kill_urb_queue;
-
-extern int usb_find_interface_driver(struct usb_device *dev,
- struct usb_interface *interface);
-
-#define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN))
-
-#ifdef CONFIG_PM
-extern void usb_root_hub_lost_power(struct usb_device *rhdev);
-extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg);
-extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg);
-#endif /* CONFIG_PM */
-
-#ifdef CONFIG_USB_SUSPEND
-extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
-#else
-static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd)
-{
- return;
-}
-#endif /* CONFIG_USB_SUSPEND */
-
-
-/*
- * USB device fs stuff
- */
-
-#ifdef CONFIG_USB_DEVICEFS
-
-/*
- * these are expected to be called from the USB core/hub thread
- * with the kernel lock held
- */
-extern void usbfs_update_special(void);
-extern int usbfs_init(void);
-extern void usbfs_cleanup(void);
-
-#else /* CONFIG_USB_DEVICEFS */
-
-static inline void usbfs_update_special(void) {}
-static inline int usbfs_init(void) { return 0; }
-static inline void usbfs_cleanup(void) { }
-
-#endif /* CONFIG_USB_DEVICEFS */
-
-/*-------------------------------------------------------------------------*/
-
-#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
-
-struct usb_mon_operations {
- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
- /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
-};
-
-extern struct usb_mon_operations *mon_ops;
-
-static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
-{
- if (bus->monitored)
- (*mon_ops->urb_submit)(bus, urb);
-}
-
-static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb,
- int error)
-{
- if (bus->monitored)
- (*mon_ops->urb_submit_error)(bus, urb, error);
-}
-
-static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
- int status)
-{
- if (bus->monitored)
- (*mon_ops->urb_complete)(bus, urb, status);
-}
-
-int usb_mon_register(struct usb_mon_operations *ops);
-void usb_mon_deregister(void);
-
-#else
-
-static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb) {}
-static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb,
- int error) {}
-static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
- int status) {}
-
-#endif /* CONFIG_USB_MON || CONFIG_USB_MON_MODULE */
-
-/*-------------------------------------------------------------------------*/
-
-/* hub.h ... DeviceRemovable in 2.4.2-ac11, gone in 2.4.10 */
-/* bleech -- resurfaced in 2.4.11 or 2.4.12 */
-#define bitmap DeviceRemovable
-
-
-/*-------------------------------------------------------------------------*/
-
-/* random stuff */
-
-#define RUN_CONTEXT (in_irq() ? "in_irq" \
- : (in_interrupt() ? "in_interrupt" : "can sleep"))
-
-
-/* This rwsem is for use only by the hub driver and ehci-hcd.
- * Nobody else should touch it.
- */
-extern struct rw_semaphore ehci_cf_port_reset_rwsem;
-
-/* Keep track of which host controller drivers are loaded */
-#define USB_UHCI_LOADED 0
-#define USB_OHCI_LOADED 1
-#define USB_EHCI_LOADED 2
-extern unsigned long usb_hcds_loaded;
-
-#endif /* __KERNEL__ */
-
-#endif /* __USB_CORE_HCD_H */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0940ccd..83e7bbb 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -19,6 +19,7 @@
#include <linux/ioctl.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
+#include <linux/usb/hcd.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
@@ -28,8 +29,6 @@
#include <asm/byteorder.h>
#include "usb.h"
-#include "hcd.h"
-#include "hub.h"
/* if we are in debug mode, always announce new devices */
#ifdef DEBUG
@@ -154,11 +153,11 @@ static int usb_reset_and_verify_device(struct usb_device *udev);
static inline char *portspeed(int portstatus)
{
- if (portstatus & (1 << USB_PORT_FEAT_HIGHSPEED))
+ if (portstatus & USB_PORT_STAT_HIGH_SPEED)
return "480 Mb/s";
- else if (portstatus & (1 << USB_PORT_FEAT_LOWSPEED))
+ else if (portstatus & USB_PORT_STAT_LOW_SPEED)
return "1.5 Mb/s";
- else if (portstatus & (1 << USB_PORT_FEAT_SUPERSPEED))
+ else if (portstatus & USB_PORT_STAT_SUPER_SPEED)
return "5.0 Gb/s";
else
return "12 Mb/s";
@@ -745,8 +744,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
!(portstatus & USB_PORT_STAT_CONNECTION) ||
!udev ||
udev->state == USB_STATE_NOTATTACHED)) {
- clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE);
- portstatus &= ~USB_PORT_STAT_ENABLE;
+ /*
+ * USB3 protocol ports automatically transition to the Enabled
+ * state when they detect a USB 3.0 device attach.
+ * Do not disable USB3 protocol ports.
+ * FIXME: USB3 root hub and external hubs are treated
+ * differently here.
+ */
+ if (hdev->descriptor.bDeviceProtocol != 3 ||
+ (!hdev->parent &&
+ !(portstatus & USB_PORT_STAT_SUPER_SPEED))) {
+ clear_port_feature(hdev, port1,
+ USB_PORT_FEAT_ENABLE);
+ portstatus &= ~USB_PORT_STAT_ENABLE;
+ }
}
/* Clear status-change flags; we'll debounce later */
@@ -1784,7 +1795,6 @@ int usb_new_device(struct usb_device *udev)
* sysfs power/wakeup controls wakeup enabled/disabled
*/
device_init_wakeup(&udev->dev, 0);
- device_set_wakeup_enable(&udev->dev, 1);
}
/* Tell the runtime-PM framework the device is active */
@@ -3038,7 +3048,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
/* maybe switch power back on (e.g. root hub was reset) */
if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2
- && !(portstatus & (1 << USB_PORT_FEAT_POWER)))
+ && !(portstatus & USB_PORT_STAT_POWER))
set_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
if (portstatus & USB_PORT_STAT_ENABLE)
@@ -3076,7 +3086,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
if (!(hcd->driver->flags & HCD_USB3))
udev->speed = USB_SPEED_UNKNOWN;
else if ((hdev->parent == NULL) &&
- (portstatus & (1 << USB_PORT_FEAT_SUPERSPEED)))
+ (portstatus & USB_PORT_STAT_SUPER_SPEED))
udev->speed = USB_SPEED_SUPER;
else
udev->speed = USB_SPEED_UNKNOWN;
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
deleted file mode 100644
index de8081f..0000000
--- a/drivers/usb/core/hub.h
+++ /dev/null
@@ -1,205 +0,0 @@
-#ifndef __LINUX_HUB_H
-#define __LINUX_HUB_H
-
-/*
- * Hub protocol and driver data structures.
- *
- * Some of these are known to the "virtual root hub" code
- * in host controller drivers.
- */
-
-#include <linux/list.h>
-#include <linux/workqueue.h>
-#include <linux/compiler.h> /* likely()/unlikely() */
-
-/*
- * Hub request types
- */
-
-#define USB_RT_HUB (USB_TYPE_CLASS | USB_RECIP_DEVICE)
-#define USB_RT_PORT (USB_TYPE_CLASS | USB_RECIP_OTHER)
-
-/*
- * Hub class requests
- * See USB 2.0 spec Table 11-16
- */
-#define HUB_CLEAR_TT_BUFFER 8
-#define HUB_RESET_TT 9
-#define HUB_GET_TT_STATE 10
-#define HUB_STOP_TT 11
-
-/*
- * Hub Class feature numbers
- * See USB 2.0 spec Table 11-17
- */
-#define C_HUB_LOCAL_POWER 0
-#define C_HUB_OVER_CURRENT 1
-
-/*
- * Port feature numbers
- * See USB 2.0 spec Table 11-17
- */
-#define USB_PORT_FEAT_CONNECTION 0
-#define USB_PORT_FEAT_ENABLE 1
-#define USB_PORT_FEAT_SUSPEND 2 /* L2 suspend */
-#define USB_PORT_FEAT_OVER_CURRENT 3
-#define USB_PORT_FEAT_RESET 4
-#define USB_PORT_FEAT_L1 5 /* L1 suspend */
-#define USB_PORT_FEAT_POWER 8
-#define USB_PORT_FEAT_LOWSPEED 9
-/* This value was never in Table 11-17 */
-#define USB_PORT_FEAT_HIGHSPEED 10
-/* This value is also fake */
-#define USB_PORT_FEAT_SUPERSPEED 11
-#define USB_PORT_FEAT_C_CONNECTION 16
-#define USB_PORT_FEAT_C_ENABLE 17
-#define USB_PORT_FEAT_C_SUSPEND 18
-#define USB_PORT_FEAT_C_OVER_CURRENT 19
-#define USB_PORT_FEAT_C_RESET 20
-#define USB_PORT_FEAT_TEST 21
-#define USB_PORT_FEAT_INDICATOR 22
-#define USB_PORT_FEAT_C_PORT_L1 23
-
-/*
- * Hub Status and Hub Change results
- * See USB 2.0 spec Table 11-19 and Table 11-20
- */
-struct usb_port_status {
- __le16 wPortStatus;
- __le16 wPortChange;
-} __attribute__ ((packed));
-
-/*
- * wPortStatus bit field
- * See USB 2.0 spec Table 11-21
- */
-#define USB_PORT_STAT_CONNECTION 0x0001
-#define USB_PORT_STAT_ENABLE 0x0002
-#define USB_PORT_STAT_SUSPEND 0x0004
-#define USB_PORT_STAT_OVERCURRENT 0x0008
-#define USB_PORT_STAT_RESET 0x0010
-#define USB_PORT_STAT_L1 0x0020
-/* bits 6 to 7 are reserved */
-#define USB_PORT_STAT_POWER 0x0100
-#define USB_PORT_STAT_LOW_SPEED 0x0200
-#define USB_PORT_STAT_HIGH_SPEED 0x0400
-#define USB_PORT_STAT_TEST 0x0800
-#define USB_PORT_STAT_INDICATOR 0x1000
-/* bits 13 to 15 are reserved */
-
-/*
- * wPortChange bit field
- * See USB 2.0 spec Table 11-22
- * Bits 0 to 4 shown, bits 5 to 15 are reserved
- */
-#define USB_PORT_STAT_C_CONNECTION 0x0001
-#define USB_PORT_STAT_C_ENABLE 0x0002
-#define USB_PORT_STAT_C_SUSPEND 0x0004
-#define USB_PORT_STAT_C_OVERCURRENT 0x0008
-#define USB_PORT_STAT_C_RESET 0x0010
-#define USB_PORT_STAT_C_L1 0x0020
-
-/*
- * wHubCharacteristics (masks)
- * See USB 2.0 spec Table 11-13, offset 3
- */
-#define HUB_CHAR_LPSM 0x0003 /* D1 .. D0 */
-#define HUB_CHAR_COMPOUND 0x0004 /* D2 */
-#define HUB_CHAR_OCPM 0x0018 /* D4 .. D3 */
-#define HUB_CHAR_TTTT 0x0060 /* D6 .. D5 */
-#define HUB_CHAR_PORTIND 0x0080 /* D7 */
-
-struct usb_hub_status {
- __le16 wHubStatus;
- __le16 wHubChange;
-} __attribute__ ((packed));
-
-/*
- * Hub Status & Hub Change bit masks
- * See USB 2.0 spec Table 11-19 and Table 11-20
- * Bits 0 and 1 for wHubStatus and wHubChange
- * Bits 2 to 15 are reserved for both
- */
-#define HUB_STATUS_LOCAL_POWER 0x0001
-#define HUB_STATUS_OVERCURRENT 0x0002
-#define HUB_CHANGE_LOCAL_POWER 0x0001
-#define HUB_CHANGE_OVERCURRENT 0x0002
-
-
-/*
- * Hub descriptor
- * See USB 2.0 spec Table 11-13
- */
-
-#define USB_DT_HUB (USB_TYPE_CLASS | 0x09)
-#define USB_DT_HUB_NONVAR_SIZE 7
-
-struct usb_hub_descriptor {
- __u8 bDescLength;
- __u8 bDescriptorType;
- __u8 bNbrPorts;
- __le16 wHubCharacteristics;
- __u8 bPwrOn2PwrGood;
- __u8 bHubContrCurrent;
- /* add 1 bit for hub status change; round to bytes */
- __u8 DeviceRemovable[(USB_MAXCHILDREN + 1 + 7) / 8];
- __u8 PortPwrCtrlMask[(USB_MAXCHILDREN + 1 + 7) / 8];
-} __attribute__ ((packed));
-
-
-/* port indicator status selectors, tables 11-7 and 11-25 */
-#define HUB_LED_AUTO 0
-#define HUB_LED_AMBER 1
-#define HUB_LED_GREEN 2
-#define HUB_LED_OFF 3
-
-enum hub_led_mode {
- INDICATOR_AUTO = 0,
- INDICATOR_CYCLE,
- /* software blinks for attention: software, hardware, reserved */
- INDICATOR_GREEN_BLINK, INDICATOR_GREEN_BLINK_OFF,
- INDICATOR_AMBER_BLINK, INDICATOR_AMBER_BLINK_OFF,
- INDICATOR_ALT_BLINK, INDICATOR_ALT_BLINK_OFF
-} __attribute__ ((packed));
-
-struct usb_device;
-
-/* Transaction Translator Think Times, in bits */
-#define HUB_TTTT_8_BITS 0x00
-#define HUB_TTTT_16_BITS 0x20
-#define HUB_TTTT_24_BITS 0x40
-#define HUB_TTTT_32_BITS 0x60
-
-/*
- * As of USB 2.0, full/low speed devices are segregated into trees.
- * One type grows from USB 1.1 host controllers (OHCI, UHCI etc).
- * The other type grows from high speed hubs when they connect to
- * full/low speed devices using "Transaction Translators" (TTs).
- *
- * TTs should only be known to the hub driver, and high speed bus
- * drivers (only EHCI for now). They affect periodic scheduling and
- * sometimes control/bulk error recovery.
- */
-struct usb_tt {
- struct usb_device *hub; /* upstream highspeed hub */
- int multi; /* true means one TT per port */
- unsigned think_time; /* think time in ns */
-
- /* for control/bulk error recovery (CLEAR_TT_BUFFER) */
- spinlock_t lock;
- struct list_head clear_list; /* of usb_tt_clear */
- struct work_struct clear_work;
-};
-
-struct usb_tt_clear {
- struct list_head clear_list;
- unsigned tt;
- u16 devinfo;
- struct usb_hcd *hcd;
- struct usb_host_endpoint *ep;
-};
-
-extern int usb_hub_clear_tt_buffer(struct urb *urb);
-extern void usb_ep0_reinit(struct usb_device *);
-
-#endif /* __LINUX_HUB_H */
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 111a01a..1a27618 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -40,9 +40,9 @@
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
+#include <linux/usb/hcd.h>
#include <asm/byteorder.h>
#include "usb.h"
-#include "hcd.h"
#define USBFS_DEFAULT_DEVMODE (S_IWUSR | S_IRUGO)
#define USBFS_DEFAULT_BUSMODE (S_IXUGO | S_IRUGO)
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index cd22027..a73e08f 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -14,9 +14,9 @@
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/usb/quirks.h>
+#include <linux/usb/hcd.h> /* for usbcore internals */
#include <asm/byteorder.h>
-#include "hcd.h" /* for usbcore internals */
#include "usb.h"
static void cancel_async_set_config(struct usb_device *udev);
@@ -226,8 +226,7 @@ int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
struct urb *urb;
struct usb_host_endpoint *ep;
- ep = (usb_pipein(pipe) ? usb_dev->ep_in : usb_dev->ep_out)
- [usb_pipeendpoint(pipe)];
+ ep = usb_pipe_endpoint(usb_dev, pipe);
if (!ep || len < 0)
return -EINVAL;
@@ -259,9 +258,6 @@ static void sg_clean(struct usb_sg_request *io)
kfree(io->urbs);
io->urbs = NULL;
}
- if (io->dev->dev.dma_mask != NULL)
- usb_buffer_unmap_sg(io->dev, usb_pipein(io->pipe),
- io->sg, io->nents);
io->dev = NULL;
}
@@ -364,7 +360,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
{
int i;
int urb_flags;
- int dma;
int use_sg;
if (!io || !dev || !sg
@@ -376,114 +371,76 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
spin_lock_init(&io->lock);
io->dev = dev;
io->pipe = pipe;
- io->sg = sg;
- io->nents = nents;
-
- /* not all host controllers use DMA (like the mainstream pci ones);
- * they can use PIO (sl811) or be software over another transport.
- */
- dma = (dev->dev.dma_mask != NULL);
- if (dma)
- io->entries = usb_buffer_map_sg(dev, usb_pipein(pipe),
- sg, nents);
- else
- io->entries = nents;
-
- /* initialize all the urbs we'll use */
- if (io->entries <= 0)
- return io->entries;
if (dev->bus->sg_tablesize > 0) {
- io->urbs = kmalloc(sizeof *io->urbs, mem_flags);
use_sg = true;
+ io->entries = 1;
} else {
- io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
use_sg = false;
+ io->entries = nents;
}
+
+ /* initialize all the urbs we'll use */
+ io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
if (!io->urbs)
goto nomem;
- urb_flags = 0;
- if (dma)
- urb_flags |= URB_NO_TRANSFER_DMA_MAP;
+ urb_flags = URB_NO_INTERRUPT;
if (usb_pipein(pipe))
urb_flags |= URB_SHORT_NOT_OK;
- if (use_sg) {
- io->urbs[0] = usb_alloc_urb(0, mem_flags);
- if (!io->urbs[0]) {
- io->entries = 0;
- goto nomem;
- }
+ for_each_sg(sg, sg, io->entries, i) {
+ struct urb *urb;
+ unsigned len;
- io->urbs[0]->dev = NULL;
- io->urbs[0]->pipe = pipe;
- io->urbs[0]->interval = period;
- io->urbs[0]->transfer_flags = urb_flags;
-
- io->urbs[0]->complete = sg_complete;
- io->urbs[0]->context = io;
- /* A length of zero means transfer the whole sg list */
- io->urbs[0]->transfer_buffer_length = length;
- if (length == 0) {
- for_each_sg(sg, sg, io->entries, i) {
- io->urbs[0]->transfer_buffer_length +=
- sg_dma_len(sg);
- }
+ urb = usb_alloc_urb(0, mem_flags);
+ if (!urb) {
+ io->entries = i;
+ goto nomem;
}
- io->urbs[0]->sg = io;
- io->urbs[0]->num_sgs = io->entries;
- io->entries = 1;
- } else {
- urb_flags |= URB_NO_INTERRUPT;
- for_each_sg(sg, sg, io->entries, i) {
- unsigned len;
-
- io->urbs[i] = usb_alloc_urb(0, mem_flags);
- if (!io->urbs[i]) {
- io->entries = i;
- goto nomem;
+ io->urbs[i] = urb;
+
+ urb->dev = NULL;
+ urb->pipe = pipe;
+ urb->interval = period;
+ urb->transfer_flags = urb_flags;
+ urb->complete = sg_complete;
+ urb->context = io;
+ urb->sg = sg;
+
+ if (use_sg) {
+ /* There is no single transfer buffer */
+ urb->transfer_buffer = NULL;
+ urb->num_sgs = nents;
+
+ /* A length of zero means transfer the whole sg list */
+ len = length;
+ if (len == 0) {
+ for_each_sg(sg, sg, nents, i)
+ len += sg->length;
}
-
- io->urbs[i]->dev = NULL;
- io->urbs[i]->pipe = pipe;
- io->urbs[i]->interval = period;
- io->urbs[i]->transfer_flags = urb_flags;
-
- io->urbs[i]->complete = sg_complete;
- io->urbs[i]->context = io;
-
+ } else {
/*
- * Some systems need to revert to PIO when DMA is temporarily
- * unavailable. For their sakes, both transfer_buffer and
- * transfer_dma are set when possible.
- *
- * Note that if IOMMU coalescing occurred, we cannot
- * trust sg_page anymore, so check if S/G list shrunk.
+ * Some systems can't use DMA; they use PIO instead.
+ * For their sakes, transfer_buffer is set whenever
+ * possible.
*/
- if (io->nents == io->entries && !PageHighMem(sg_page(sg)))
- io->urbs[i]->transfer_buffer = sg_virt(sg);
+ if (!PageHighMem(sg_page(sg)))
+ urb->transfer_buffer = sg_virt(sg);
else
- io->urbs[i]->transfer_buffer = NULL;
-
- if (dma) {
- io->urbs[i]->transfer_dma = sg_dma_address(sg);
- len = sg_dma_len(sg);
- } else {
- /* hc may use _only_ transfer_buffer */
- len = sg->length;
- }
+ urb->transfer_buffer = NULL;
+ len = sg->length;
if (length) {
len = min_t(unsigned, len, length);
length -= len;
if (length == 0)
io->entries = i + 1;
}
- io->urbs[i]->transfer_buffer_length = len;
}
- io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
+ urb->transfer_buffer_length = len;
}
+ io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
/* transaction state */
io->count = io->entries;
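For reference, a caller-side sketch (not part of this patch) of usb_sg_init()/usb_sg_wait(), whose internals the hunks above rework to defer DMA mapping to the HCD core; the endpoint number and buffer setup are placeholders.

#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/usb.h>

/* Hypothetical example, not from the patch. */
static int example_sg_read(struct usb_device *udev, struct scatterlist *sg,
			   int nents, size_t total_len)
{
	struct usb_sg_request io;
	int ret;

	/* DMA mapping now happens in the HCD core at submit time, so the
	 * caller just hands over the scatterlist; endpoint 1 is a placeholder. */
	ret = usb_sg_init(&io, udev, usb_rcvbulkpipe(udev, 1), 0,
			  sg, nents, total_len, GFP_KERNEL);
	if (ret)
		return ret;

	usb_sg_wait(&io);		/* blocks until every URB completes */
	return io.status;		/* 0 on success, else the first fatal error */
}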
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f073c5c..f22d03d 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -71,6 +71,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* SKYMEDI USB_DRIVE */
{ USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* BUILDWIN Photo Frame */
+ { USB_DEVICE(0x1908, 0x1315), .driver_info =
+ USB_QUIRK_HONOR_BNUMINTERFACES },
+
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 43c002e..448f5b4 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -383,13 +383,24 @@ static DEVICE_ATTR(autosuspend, S_IRUGO | S_IWUSR,
static const char on_string[] = "on";
static const char auto_string[] = "auto";
+static void warn_level(void) {
+ static int level_warned;
+
+ if (!level_warned) {
+ level_warned = 1;
+ printk(KERN_WARNING "WARNING! power/level is deprecated; "
+ "use power/control instead\n");
+ }
+}
+
static ssize_t
show_level(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
const char *p = auto_string;
- if (udev->state != USB_STATE_SUSPENDED && udev->autosuspend_disabled)
+ warn_level();
+ if (udev->state != USB_STATE_SUSPENDED && !udev->dev.power.runtime_auto)
p = on_string;
return sprintf(buf, "%s\n", p);
}
@@ -401,8 +412,9 @@ set_level(struct device *dev, struct device_attribute *attr,
struct usb_device *udev = to_usb_device(dev);
int len = count;
char *cp;
- int rc;
+ int rc = count;
+ warn_level();
cp = memchr(buf, '\n', count);
if (cp)
len = cp - buf;
@@ -411,17 +423,17 @@ set_level(struct device *dev, struct device_attribute *attr,
if (len == sizeof on_string - 1 &&
strncmp(buf, on_string, len) == 0)
- rc = usb_disable_autosuspend(udev);
+ usb_disable_autosuspend(udev);
else if (len == sizeof auto_string - 1 &&
strncmp(buf, auto_string, len) == 0)
- rc = usb_enable_autosuspend(udev);
+ usb_enable_autosuspend(udev);
else
rc = -EINVAL;
usb_unlock_device(udev);
- return (rc < 0 ? rc : count);
+ return rc;
}
static DEVICE_ATTR(level, S_IRUGO | S_IWUSR, show_level, set_level);
@@ -646,7 +658,8 @@ const struct attribute_group *usb_device_groups[] = {
/* Binary descriptors */
static ssize_t
-read_descriptors(struct kobject *kobj, struct bin_attribute *attr,
+read_descriptors(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
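For reference, a userspace sketch (not part of this patch) of the replacement for the now-deprecated power/level attribute; the sysfs device path "1-1" is a made-up example.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical example, not from the patch. */
static int set_usb_power_control(const char *value)	/* "auto" or "on" */
{
	/* power/level still works but logs the deprecation warning above;
	 * power/control is the runtime-PM replacement. */
	int fd = open("/sys/bus/usb/devices/1-1/power/control", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, value, strlen(value));
	close(fd);
	return n < 0 ? -1 : 0;
}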
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 45a32da..7c05555 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -6,7 +6,7 @@
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
-#include "hcd.h"
+#include <linux/usb/hcd.h>
#define to_urb(d) container_of(d, struct urb, kref)
@@ -308,8 +308,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
* will be required to set urb->ep directly and we will eliminate
* urb->pipe.
*/
- ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
- [usb_pipeendpoint(urb->pipe)];
+ ep = usb_pipe_endpoint(dev, urb->pipe);
if (!ep)
return -ENOENT;
@@ -333,9 +332,12 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
is_out = usb_endpoint_dir_out(&ep->desc);
}
- /* Cache the direction for later use */
- urb->transfer_flags = (urb->transfer_flags & ~URB_DIR_MASK) |
- (is_out ? URB_DIR_OUT : URB_DIR_IN);
+ /* Clear the internal flags and cache the direction for later use */
+ urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
+ URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
+ URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
+ URB_DMA_SG_COMBINED);
+ urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);
if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
dev->state < USB_STATE_CONFIGURED)
@@ -396,8 +398,8 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
return -EPIPE; /* The most suitable error code :-) */
/* enforce simple/standard policy */
- allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP |
- URB_NO_INTERRUPT | URB_DIR_MASK | URB_FREE_BUFFER);
+ allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
+ URB_FREE_BUFFER);
switch (xfertype) {
case USB_ENDPOINT_XFER_BULK:
if (is_out)
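For reference, a minimal sketch (not part of this patch) of a driver that still pre-maps its own transfer buffer: URB_NO_TRANSFER_DMA_MAP remains in the allowed set, while URB_NO_SETUP_DMA_MAP is dropped above because setup packets of control URBs are now mapped unconditionally by the HCD core. Endpoint number, buffer, and completion handler are placeholders.

#include <linux/gfp.h>
#include <linux/usb.h>

/* Hypothetical example, not from the patch; endpoint 2 is a placeholder. */
static int example_submit_premapped(struct usb_device *udev, struct urb *urb,
				    void *buf, dma_addr_t buf_dma, int len,
				    usb_complete_t done, void *ctx)
{
	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 2),
			  buf, len, done, ctx);
	urb->transfer_dma = buf_dma;			/* caller-owned mapping */
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;	/* core won't remap it */
	return usb_submit_urb(urb, GFP_KERNEL);
}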
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 0561430..5ae14f6 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -32,6 +32,7 @@
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
@@ -41,7 +42,6 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
-#include "hcd.h"
#include "usb.h"
@@ -573,7 +573,7 @@ int usb_lock_device_for_reset(struct usb_device *udev,
iface->condition == USB_INTERFACE_UNBOUND))
return -EINTR;
- while (usb_trylock_device(udev) != 0) {
+ while (!usb_trylock_device(udev)) {
/* If we can't acquire the lock after waiting one second,
* we're probably deadlocked */
@@ -593,76 +593,6 @@ int usb_lock_device_for_reset(struct usb_device *udev,
}
EXPORT_SYMBOL_GPL(usb_lock_device_for_reset);
-static struct usb_device *match_device(struct usb_device *dev,
- u16 vendor_id, u16 product_id)
-{
- struct usb_device *ret_dev = NULL;
- int child;
-
- dev_dbg(&dev->dev, "check for vendor %04x, product %04x ...\n",
- le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct));
-
- /* see if this device matches */
- if ((vendor_id == le16_to_cpu(dev->descriptor.idVendor)) &&
- (product_id == le16_to_cpu(dev->descriptor.idProduct))) {
- dev_dbg(&dev->dev, "matched this device!\n");
- ret_dev = usb_get_dev(dev);
- goto exit;
- }
-
- /* look through all of the children of this device */
- for (child = 0; child < dev->maxchild; ++child) {
- if (dev->children[child]) {
- usb_lock_device(dev->children[child]);
- ret_dev = match_device(dev->children[child],
- vendor_id, product_id);
- usb_unlock_device(dev->children[child]);
- if (ret_dev)
- goto exit;
- }
- }
-exit:
- return ret_dev;
-}
-
-/**
- * usb_find_device - find a specific usb device in the system
- * @vendor_id: the vendor id of the device to find
- * @product_id: the product id of the device to find
- *
- * Returns a pointer to a struct usb_device if such a specified usb
- * device is present in the system currently. The usage count of the
- * device will be incremented if a device is found. Make sure to call
- * usb_put_dev() when the caller is finished with the device.
- *
- * If a device with the specified vendor and product id is not found,
- * NULL is returned.
- */
-struct usb_device *usb_find_device(u16 vendor_id, u16 product_id)
-{
- struct list_head *buslist;
- struct usb_bus *bus;
- struct usb_device *dev = NULL;
-
- mutex_lock(&usb_bus_list_lock);
- for (buslist = usb_bus_list.next;
- buslist != &usb_bus_list;
- buslist = buslist->next) {
- bus = container_of(buslist, struct usb_bus, bus_list);
- if (!bus->root_hub)
- continue;
- usb_lock_device(bus->root_hub);
- dev = match_device(bus->root_hub, vendor_id, product_id);
- usb_unlock_device(bus->root_hub);
- if (dev)
- goto exit;
- }
-exit:
- mutex_unlock(&usb_bus_list_lock);
- return dev;
-}
-
/**
* usb_get_current_frame_number - return current bus frame number
* @dev: the device whose bus is being queried
@@ -775,7 +705,7 @@ EXPORT_SYMBOL_GPL(usb_free_coherent);
* @urb: urb whose transfer_buffer/setup_packet will be mapped
*
* Return value is either null (indicating no buffer could be mapped), or
- * the parameter. URB_NO_TRANSFER_DMA_MAP and URB_NO_SETUP_DMA_MAP are
+ * the parameter. URB_NO_TRANSFER_DMA_MAP is
* added to urb->transfer_flags if the operation succeeds. If the device
* is connected to this system through a non-DMA controller, this operation
* always succeeds.
@@ -803,17 +733,11 @@ struct urb *usb_buffer_map(struct urb *urb)
urb->transfer_buffer, urb->transfer_buffer_length,
usb_pipein(urb->pipe)
? DMA_FROM_DEVICE : DMA_TO_DEVICE);
- if (usb_pipecontrol(urb->pipe))
- urb->setup_dma = dma_map_single(controller,
- urb->setup_packet,
- sizeof(struct usb_ctrlrequest),
- DMA_TO_DEVICE);
/* FIXME generic api broken like pci, can't report errors */
/* if (urb->transfer_dma == DMA_ADDR_INVALID) return 0; */
} else
urb->transfer_dma = ~0;
- urb->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP
- | URB_NO_SETUP_DMA_MAP);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
return urb;
}
EXPORT_SYMBOL_GPL(usb_buffer_map);
@@ -881,18 +805,13 @@ void usb_buffer_unmap(struct urb *urb)
urb->transfer_dma, urb->transfer_buffer_length,
usb_pipein(urb->pipe)
? DMA_FROM_DEVICE : DMA_TO_DEVICE);
- if (usb_pipecontrol(urb->pipe))
- dma_unmap_single(controller,
- urb->setup_dma,
- sizeof(struct usb_ctrlrequest),
- DMA_TO_DEVICE);
}
- urb->transfer_flags &= ~(URB_NO_TRANSFER_DMA_MAP
- | URB_NO_SETUP_DMA_MAP);
+ urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
}
EXPORT_SYMBOL_GPL(usb_buffer_unmap);
#endif /* 0 */
+#if 0
/**
* usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint
* @dev: device to which the scatterlist will be mapped
@@ -936,6 +855,7 @@ int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE) ? : -ENOMEM;
}
EXPORT_SYMBOL_GPL(usb_buffer_map_sg);
+#endif
/* XXX DISABLED, no users currently. If you wish to re-enable this
* XXX please determine whether the sync is to transfer ownership of
@@ -972,6 +892,7 @@ void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg);
#endif
+#if 0
/**
* usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist
* @dev: device to which the scatterlist will be mapped
@@ -997,6 +918,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg);
+#endif
/* To disable USB, kernel command line is 'nousb' not 'usbcore.nousb' */
#ifdef MODULE
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 11a3e0f..649c0c5 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -710,6 +710,43 @@ config USB_GADGETFS
Say "y" to link the driver statically, or "m" to build a
dynamically linked module called "gadgetfs".
+config USB_FUNCTIONFS
+ tristate "Function Filesystem (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ The Function Filesystem (FunctionFS) lets one create USB
+ composite functions in user space in the same way as GadgetFS
+ lets one create USB gadgets in user space. This allows creation
+ of composite gadgets such that some of the functions are
+ implemented in kernel space (for instance Ethernet, serial or
+ mass storage) and others are implemented in user space.
+
+ Say "y" to link the driver statically, or "m" to build
+ a dynamically linked module called "g_ffs".
+
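A minimal user-space sketch of the flow this help text describes, assuming the filesystem is already mounted at /dev/ffs (see f_fs.c added later in this patch); the descriptor and string blobs follow the layout declared in <linux/usb/functionfs.h> and are left as hypothetical placeholders here:

#include <fcntl.h>
#include <stddef.h>
#include <unistd.h>

/* Hypothetical blobs built according to <linux/usb/functionfs.h>. */
extern const char ffs_descriptors[];
extern const size_t ffs_descriptors_len;
extern const char ffs_strings[];
extern const size_t ffs_strings_len;

/* Bring one FunctionFS function up: write descriptors, then strings;
 * afterwards ep0 delivers events (the FFS_ACTIVE state in f_fs.c). */
static int ffs_start(void)
{
	int ep0 = open("/dev/ffs/ep0", O_RDWR);

	if (ep0 < 0)
		return -1;
	if (write(ep0, ffs_descriptors, ffs_descriptors_len) < 0 ||
	    write(ep0, ffs_strings, ffs_strings_len) < 0) {
		close(ep0);
		return -1;
	}
	return ep0;	/* keep it open; read(2) now returns events */
}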
+config USB_FUNCTIONFS_ETH
+ bool "Include CDC ECM (Ethernet) function"
+ depends on USB_FUNCTIONFS && NET
+ help
+ Include a CDC ECM (Ethernet) function in the Function
+ Filesystem. If you also say "y" to the RNDIS query below the
+ gadget will have two configurations.
+
+config USB_FUNCTIONFS_RNDIS
+ bool "Include RNDIS (Ethernet) function"
+ depends on USB_FUNCTIONFS && NET
+ help
+ Include an RNDIS (Ethernet) function in the Function Filesystem.
+ If you also say "y" to the CDC ECM query above the gadget will
+ have two configurations.
+
+config USB_FUNCTIONFS_GENERIC
+ bool "Include 'pure' configuration"
+ depends on USB_FUNCTIONFS && (USB_FUNCTIONFS_ETH || USB_FUNCTIONFS_RNDIS)
+ help
+ Include a 'pure' configuration with only FunctionFS and no
+ Ethernet function.
+
config USB_FILE_STORAGE
tristate "File-backed Storage Gadget"
depends on BLOCK
@@ -863,11 +900,30 @@ config USB_G_MULTI_CDC
If unsure, say "y".
+config USB_G_HID
+ tristate "HID Gadget"
+ help
+ The HID gadget driver provides generic emulation of USB
+ Human Interface Devices (HID).
+
+ For more information, see Documentation/usb/gadget_hid.txt which
+ includes sample code for accessing the device files.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "g_hid".
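As a rough user-space illustration (the device node name /dev/hidg0 and the 8-byte boot-keyboard report layout are assumptions taken from the referenced documentation, not something this Kconfig entry guarantees):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Press and release the 'a' key through the HID gadget. */
static int send_key(void)
{
	unsigned char report[8];
	int fd = open("/dev/hidg0", O_RDWR);

	if (fd < 0)
		return -1;

	memset(report, 0, sizeof(report));
	report[2] = 0x04;			/* HID usage ID for 'a' */
	if (write(fd, report, sizeof(report)) != sizeof(report))
		goto fail;

	memset(report, 0, sizeof(report));	/* all keys released */
	if (write(fd, report, sizeof(report)) != sizeof(report))
		goto fail;

	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}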
# put drivers that need isochronous transfer support (for audio
# or video class gadget drivers), or specific hardware, here.
+config USB_G_WEBCAM
+ tristate "USB Webcam Gadget"
+ depends on VIDEO_DEV
+ help
+ The Webcam Gadget acts as a composite USB Audio and Video Class
+ device. It provides a userspace API to process UVC control requests
+ and stream video data to the host.
-# - none yet
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "g_webcam".
endchoice
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 43b51da..9bcde11 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
fsl_usb2_udc-objs := fsl_udc_core.o
ifeq ($(CONFIG_ARCH_MXC),y)
-fsl_usb2_udc-objs += fsl_mx3_udc.o
+fsl_usb2_udc-objs += fsl_mxc_udc.o
endif
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
@@ -43,18 +43,24 @@ g_mass_storage-objs := mass_storage.o
g_printer-objs := printer.o
g_cdc-objs := cdc2.o
g_multi-objs := multi.o
+g_hid-objs := hid.o
g_nokia-objs := nokia.o
+g_webcam-objs := webcam.o
obj-$(CONFIG_USB_ZERO) += g_zero.o
obj-$(CONFIG_USB_AUDIO) += g_audio.o
obj-$(CONFIG_USB_ETH) += g_ether.o
obj-$(CONFIG_USB_GADGETFS) += gadgetfs.o
+obj-$(CONFIG_USB_FUNCTIONFS) += g_ffs.o
+obj-$(CONFIG_USB_ETH_FUNCTIONFS) += g_eth_ffs.o
obj-$(CONFIG_USB_FILE_STORAGE) += g_file_storage.o
obj-$(CONFIG_USB_MASS_STORAGE) += g_mass_storage.o
obj-$(CONFIG_USB_G_SERIAL) += g_serial.o
obj-$(CONFIG_USB_G_PRINTER) += g_printer.o
obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o
obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o
+obj-$(CONFIG_USB_G_HID) += g_hid.o
obj-$(CONFIG_USB_G_MULTI) += g_multi.o
obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o
+obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 75a256f..d623c7b 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -48,10 +48,9 @@ static int queue_dbg_open(struct inode *inode, struct file *file)
spin_lock_irq(&ep->udc->lock);
list_for_each_entry(req, &ep->queue, queue) {
- req_copy = kmalloc(sizeof(*req_copy), GFP_ATOMIC);
+ req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC);
if (!req_copy)
goto fail;
- memcpy(req_copy, req, sizeof(*req_copy));
list_add_tail(&req_copy->queue, queue_data);
}
spin_unlock_irq(&ep->udc->lock);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 09289bb..391d169 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -36,7 +36,7 @@
*/
/* big enough to hold our biggest descriptor */
-#define USB_BUFSIZ 512
+#define USB_BUFSIZ 1024
static struct usb_composite_driver *composite;
@@ -85,7 +85,7 @@ MODULE_PARM_DESC(iSerialNumber, "SerialNumber string");
* This function returns the value of the function's bind(), which is
* zero for success else a negative errno value.
*/
-int __init usb_add_function(struct usb_configuration *config,
+int usb_add_function(struct usb_configuration *config,
struct usb_function *function)
{
int value = -EINVAL;
@@ -215,7 +215,7 @@ int usb_function_activate(struct usb_function *function)
* Returns the interface ID which was allocated; or -ENODEV if no
* more interface IDs can be allocated.
*/
-int __init usb_interface_id(struct usb_configuration *config,
+int usb_interface_id(struct usb_configuration *config,
struct usb_function *function)
{
unsigned id = config->next_interface_id;
@@ -480,7 +480,7 @@ done:
* assigns global resources including string IDs, and per-configuration
* resources such as interface IDs and endpoints.
*/
-int __init usb_add_config(struct usb_composite_dev *cdev,
+int usb_add_config(struct usb_composite_dev *cdev,
struct usb_configuration *config)
{
int status = -EINVAL;
@@ -677,7 +677,7 @@ static int get_string(struct usb_composite_dev *cdev,
* ensure that for example different functions don't wrongly assign
* different meanings to the same identifier.
*/
-int __init usb_string_id(struct usb_composite_dev *cdev)
+int usb_string_id(struct usb_composite_dev *cdev)
{
if (cdev->next_string_id < 254) {
/* string id 0 is reserved */
@@ -898,7 +898,19 @@ static void composite_disconnect(struct usb_gadget *gadget)
/*-------------------------------------------------------------------------*/
-static void /* __init_or_exit */
+static ssize_t composite_show_suspended(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+ struct usb_composite_dev *cdev = get_gadget_data(gadget);
+
+ return sprintf(buf, "%d\n", cdev->suspended);
+}
+
+static DEVICE_ATTR(suspended, 0444, composite_show_suspended, NULL);
+
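The new attribute can be polled from user space; a small sketch, with the sysfs path of the "suspended" file passed in by the caller since its exact location depends on the UDC:

#include <stdio.h>

/* Print the gadget's suspend state as exposed by the attribute above. */
static int print_suspended(const char *path)
{
	char buf[4] = "";
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	printf("suspended=%s", buf);
	return 0;
}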
+static void
composite_unbind(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
@@ -944,10 +956,11 @@ composite_unbind(struct usb_gadget *gadget)
}
kfree(cdev);
set_gadget_data(gadget, NULL);
+ device_remove_file(&gadget->dev, &dev_attr_suspended);
composite = NULL;
}
-static void __init
+static void
string_override_one(struct usb_gadget_strings *tab, u8 id, const char *s)
{
struct usb_string *str = tab->strings;
@@ -960,7 +973,7 @@ string_override_one(struct usb_gadget_strings *tab, u8 id, const char *s)
}
}
-static void __init
+static void
string_override(struct usb_gadget_strings **tab, u8 id, const char *s)
{
while (*tab) {
@@ -969,7 +982,7 @@ string_override(struct usb_gadget_strings **tab, u8 id, const char *s)
}
}
-static int __init composite_bind(struct usb_gadget *gadget)
+static int composite_bind(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev;
int status = -ENOMEM;
@@ -1004,6 +1017,14 @@ static int __init composite_bind(struct usb_gadget *gadget)
*/
usb_ep_autoconfig_reset(cdev->gadget);
+ /* standardized runtime overrides for device ID data */
+ if (idVendor)
+ cdev->desc.idVendor = cpu_to_le16(idVendor);
+ if (idProduct)
+ cdev->desc.idProduct = cpu_to_le16(idProduct);
+ if (bcdDevice)
+ cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);
+
/* composite gadget needs to assign strings for whole device (like
* serial number), register function drivers, potentially update
* power state and consumption, etc
@@ -1015,14 +1036,6 @@ static int __init composite_bind(struct usb_gadget *gadget)
cdev->desc = *composite->dev;
cdev->desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
- /* standardized runtime overrides for device ID data */
- if (idVendor)
- cdev->desc.idVendor = cpu_to_le16(idVendor);
- if (idProduct)
- cdev->desc.idProduct = cpu_to_le16(idProduct);
- if (bcdDevice)
- cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);
-
/* strings can't be assigned before bind() allocates the
* relevant identifiers
*/
@@ -1036,6 +1049,10 @@ static int __init composite_bind(struct usb_gadget *gadget)
string_override(composite->strings,
cdev->desc.iSerialNumber, iSerialNumber);
+ status = device_create_file(&gadget->dev, &dev_attr_suspended);
+ if (status)
+ goto fail;
+
INFO(cdev, "%s ready\n", composite->name);
return 0;
@@ -1064,6 +1081,8 @@ composite_suspend(struct usb_gadget *gadget)
}
if (composite->suspend)
composite->suspend(cdev);
+
+ cdev->suspended = 1;
}
static void
@@ -1084,6 +1103,8 @@ composite_resume(struct usb_gadget *gadget)
f->resume(f);
}
}
+
+ cdev->suspended = 0;
}
/*-------------------------------------------------------------------------*/
@@ -1092,7 +1113,6 @@ static struct usb_gadget_driver composite_driver = {
.speed = USB_SPEED_HIGH,
.bind = composite_bind,
- /* .unbind = __exit_p(composite_unbind), */
.unbind = composite_unbind,
.setup = composite_setup,
@@ -1121,7 +1141,7 @@ static struct usb_gadget_driver composite_driver = {
* while it was binding. That would usually be done in order to wait for
* some userspace participation.
*/
-int __init usb_composite_register(struct usb_composite_driver *driver)
+int usb_composite_register(struct usb_composite_driver *driver)
{
if (!driver || !driver->dev || !driver->bind || composite)
return -EINVAL;
@@ -1142,7 +1162,7 @@ int __init usb_composite_register(struct usb_composite_driver *driver)
* This function is used to unregister drivers using the composite
* driver framework.
*/
-void /* __exit */ usb_composite_unregister(struct usb_composite_driver *driver)
+void usb_composite_unregister(struct usb_composite_driver *driver)
{
if (composite != driver)
return;
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 47e8e72..09084fd 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -128,7 +128,7 @@ int usb_gadget_config_buf(
* with identifiers (for interfaces, strings, endpoints, and more)
* as needed by a given function instance.
*/
-struct usb_descriptor_header **__init
+struct usb_descriptor_header **
usb_copy_descriptors(struct usb_descriptor_header **src)
{
struct usb_descriptor_header **tmp;
@@ -175,7 +175,7 @@ usb_copy_descriptors(struct usb_descriptor_header **src)
* intended use is to help remembering the endpoint descriptor to use
* when enabling a given endpoint.
*/
-struct usb_endpoint_descriptor *__init
+struct usb_endpoint_descriptor *
usb_find_endpoint(
struct usb_descriptor_header **src,
struct usb_descriptor_header **copy,
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 5e09664..4f9e578 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -47,6 +47,7 @@
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -55,9 +56,6 @@
#include <asm/unaligned.h>
-#include "../core/hcd.h"
-
-
#define DRIVER_DESC "USB Host+Gadget Emulator"
#define DRIVER_VERSION "02 May 2005"
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 3568de2..8a83248 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -34,12 +34,12 @@
/* we must assign addresses for configurable endpoints (like net2280) */
-static __initdata unsigned epnum;
+static unsigned epnum;
// #define MANY_ENDPOINTS
#ifdef MANY_ENDPOINTS
/* more than 15 configurable endpoints */
-static __initdata unsigned in_epnum;
+static unsigned in_epnum;
#endif
@@ -59,7 +59,7 @@ static __initdata unsigned in_epnum;
* NOTE: each endpoint is unidirectional, as specified by its USB
* descriptor; and isn't specific to a configuration or altsetting.
*/
-static int __init
+static int
ep_matches (
struct usb_gadget *gadget,
struct usb_ep *ep,
@@ -187,7 +187,7 @@ ep_matches (
return 1;
}
-static struct usb_ep * __init
+static struct usb_ep *
find_ep (struct usb_gadget *gadget, const char *name)
{
struct usb_ep *ep;
@@ -229,7 +229,7 @@ find_ep (struct usb_gadget *gadget, const char *name)
*
* On failure, this returns a null endpoint descriptor.
*/
-struct usb_ep * __init usb_ep_autoconfig (
+struct usb_ep *usb_ep_autoconfig (
struct usb_gadget *gadget,
struct usb_endpoint_descriptor *desc
)
@@ -304,7 +304,7 @@ struct usb_ep * __init usb_ep_autoconfig (
* state such as ep->driver_data and the record of assigned endpoints
* used by usb_ep_autoconfig().
*/
-void __init usb_ep_autoconfig_reset (struct usb_gadget *gadget)
+void usb_ep_autoconfig_reset (struct usb_gadget *gadget)
{
struct usb_ep *ep;
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index 400e1eb..d47a123 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -116,7 +116,7 @@ acm_iad_descriptor = {
};
-static struct usb_interface_descriptor acm_control_interface_desc __initdata = {
+static struct usb_interface_descriptor acm_control_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
@@ -127,7 +127,7 @@ static struct usb_interface_descriptor acm_control_interface_desc __initdata = {
/* .iInterface = DYNAMIC */
};
-static struct usb_interface_descriptor acm_data_interface_desc __initdata = {
+static struct usb_interface_descriptor acm_data_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
@@ -138,7 +138,7 @@ static struct usb_interface_descriptor acm_data_interface_desc __initdata = {
/* .iInterface = DYNAMIC */
};
-static struct usb_cdc_header_desc acm_header_desc __initdata = {
+static struct usb_cdc_header_desc acm_header_desc = {
.bLength = sizeof(acm_header_desc),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_HEADER_TYPE,
@@ -146,7 +146,7 @@ static struct usb_cdc_header_desc acm_header_desc __initdata = {
};
static struct usb_cdc_call_mgmt_descriptor
-acm_call_mgmt_descriptor __initdata = {
+acm_call_mgmt_descriptor = {
.bLength = sizeof(acm_call_mgmt_descriptor),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
@@ -154,14 +154,14 @@ acm_call_mgmt_descriptor __initdata = {
/* .bDataInterface = DYNAMIC */
};
-static struct usb_cdc_acm_descriptor acm_descriptor __initdata = {
+static struct usb_cdc_acm_descriptor acm_descriptor = {
.bLength = sizeof(acm_descriptor),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_ACM_TYPE,
.bmCapabilities = USB_CDC_CAP_LINE,
};
-static struct usb_cdc_union_desc acm_union_desc __initdata = {
+static struct usb_cdc_union_desc acm_union_desc = {
.bLength = sizeof(acm_union_desc),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_UNION_TYPE,
@@ -171,7 +171,7 @@ static struct usb_cdc_union_desc acm_union_desc __initdata = {
/* full speed support: */
-static struct usb_endpoint_descriptor acm_fs_notify_desc __initdata = {
+static struct usb_endpoint_descriptor acm_fs_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
@@ -180,21 +180,21 @@ static struct usb_endpoint_descriptor acm_fs_notify_desc __initdata = {
.bInterval = 1 << GS_LOG2_NOTIFY_INTERVAL,
};
-static struct usb_endpoint_descriptor acm_fs_in_desc __initdata = {
+static struct usb_endpoint_descriptor acm_fs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
-static struct usb_endpoint_descriptor acm_fs_out_desc __initdata = {
+static struct usb_endpoint_descriptor acm_fs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
-static struct usb_descriptor_header *acm_fs_function[] __initdata = {
+static struct usb_descriptor_header *acm_fs_function[] = {
(struct usb_descriptor_header *) &acm_iad_descriptor,
(struct usb_descriptor_header *) &acm_control_interface_desc,
(struct usb_descriptor_header *) &acm_header_desc,
@@ -210,7 +210,7 @@ static struct usb_descriptor_header *acm_fs_function[] __initdata = {
/* high speed support: */
-static struct usb_endpoint_descriptor acm_hs_notify_desc __initdata = {
+static struct usb_endpoint_descriptor acm_hs_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
@@ -219,21 +219,21 @@ static struct usb_endpoint_descriptor acm_hs_notify_desc __initdata = {
.bInterval = GS_LOG2_NOTIFY_INTERVAL+4,
};
-static struct usb_endpoint_descriptor acm_hs_in_desc __initdata = {
+static struct usb_endpoint_descriptor acm_hs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
-static struct usb_endpoint_descriptor acm_hs_out_desc __initdata = {
+static struct usb_endpoint_descriptor acm_hs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
-static struct usb_descriptor_header *acm_hs_function[] __initdata = {
+static struct usb_descriptor_header *acm_hs_function[] = {
(struct usb_descriptor_header *) &acm_iad_descriptor,
(struct usb_descriptor_header *) &acm_control_interface_desc,
(struct usb_descriptor_header *) &acm_header_desc,
@@ -571,7 +571,7 @@ static int acm_send_break(struct gserial *port, int duration)
/*-------------------------------------------------------------------------*/
/* ACM function driver setup/binding */
-static int __init
+static int
acm_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
@@ -719,7 +719,7 @@ static inline bool can_support_cdc(struct usb_configuration *c)
* handle all the ones it binds. Caller is also responsible
* for calling @gserial_cleanup() before module unload.
*/
-int __init acm_bind_config(struct usb_configuration *c, u8 port_num)
+int acm_bind_config(struct usb_configuration *c, u8 port_num)
{
struct f_acm *acm;
int status;
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 4e59532..544257a8 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -113,7 +113,7 @@ static inline unsigned ecm_bitrate(struct usb_gadget *g)
/* interface descriptor: */
-static struct usb_interface_descriptor ecm_control_intf __initdata = {
+static struct usb_interface_descriptor ecm_control_intf = {
.bLength = sizeof ecm_control_intf,
.bDescriptorType = USB_DT_INTERFACE,
@@ -126,7 +126,7 @@ static struct usb_interface_descriptor ecm_control_intf __initdata = {
/* .iInterface = DYNAMIC */
};
-static struct usb_cdc_header_desc ecm_header_desc __initdata = {
+static struct usb_cdc_header_desc ecm_header_desc = {
.bLength = sizeof ecm_header_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_HEADER_TYPE,
@@ -134,7 +134,7 @@ static struct usb_cdc_header_desc ecm_header_desc __initdata = {
.bcdCDC = cpu_to_le16(0x0110),
};
-static struct usb_cdc_union_desc ecm_union_desc __initdata = {
+static struct usb_cdc_union_desc ecm_union_desc = {
.bLength = sizeof(ecm_union_desc),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_UNION_TYPE,
@@ -142,7 +142,7 @@ static struct usb_cdc_union_desc ecm_union_desc __initdata = {
/* .bSlaveInterface0 = DYNAMIC */
};
-static struct usb_cdc_ether_desc ecm_desc __initdata = {
+static struct usb_cdc_ether_desc ecm_desc = {
.bLength = sizeof ecm_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_ETHERNET_TYPE,
@@ -157,7 +157,7 @@ static struct usb_cdc_ether_desc ecm_desc __initdata = {
/* the default data interface has no endpoints ... */
-static struct usb_interface_descriptor ecm_data_nop_intf __initdata = {
+static struct usb_interface_descriptor ecm_data_nop_intf = {
.bLength = sizeof ecm_data_nop_intf,
.bDescriptorType = USB_DT_INTERFACE,
@@ -172,7 +172,7 @@ static struct usb_interface_descriptor ecm_data_nop_intf __initdata = {
/* ... but the "real" data interface has two bulk endpoints */
-static struct usb_interface_descriptor ecm_data_intf __initdata = {
+static struct usb_interface_descriptor ecm_data_intf = {
.bLength = sizeof ecm_data_intf,
.bDescriptorType = USB_DT_INTERFACE,
@@ -187,7 +187,7 @@ static struct usb_interface_descriptor ecm_data_intf __initdata = {
/* full speed support: */
-static struct usb_endpoint_descriptor fs_ecm_notify_desc __initdata = {
+static struct usb_endpoint_descriptor fs_ecm_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -197,7 +197,7 @@ static struct usb_endpoint_descriptor fs_ecm_notify_desc __initdata = {
.bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
};
-static struct usb_endpoint_descriptor fs_ecm_in_desc __initdata = {
+static struct usb_endpoint_descriptor fs_ecm_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -205,7 +205,7 @@ static struct usb_endpoint_descriptor fs_ecm_in_desc __initdata = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
-static struct usb_endpoint_descriptor fs_ecm_out_desc __initdata = {
+static struct usb_endpoint_descriptor fs_ecm_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -213,7 +213,7 @@ static struct usb_endpoint_descriptor fs_ecm_out_desc __initdata = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
-static struct usb_descriptor_header *ecm_fs_function[] __initdata = {
+static struct usb_descriptor_header *ecm_fs_function[] = {
/* CDC ECM control descriptors */
(struct usb_descriptor_header *) &ecm_control_intf,
(struct usb_descriptor_header *) &ecm_header_desc,
@@ -231,7 +231,7 @@ static struct usb_descriptor_header *ecm_fs_function[] __initdata = {
/* high speed support: */
-static struct usb_endpoint_descriptor hs_ecm_notify_desc __initdata = {
+static struct usb_endpoint_descriptor hs_ecm_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -240,7 +240,7 @@ static struct usb_endpoint_descriptor hs_ecm_notify_desc __initdata = {
.wMaxPacketSize = cpu_to_le16(ECM_STATUS_BYTECOUNT),
.bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
};
-static struct usb_endpoint_descriptor hs_ecm_in_desc __initdata = {
+static struct usb_endpoint_descriptor hs_ecm_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -249,7 +249,7 @@ static struct usb_endpoint_descriptor hs_ecm_in_desc __initdata = {
.wMaxPacketSize = cpu_to_le16(512),
};
-static struct usb_endpoint_descriptor hs_ecm_out_desc __initdata = {
+static struct usb_endpoint_descriptor hs_ecm_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -258,7 +258,7 @@ static struct usb_endpoint_descriptor hs_ecm_out_desc __initdata = {
.wMaxPacketSize = cpu_to_le16(512),
};
-static struct usb_descriptor_header *ecm_hs_function[] __initdata = {
+static struct usb_descriptor_header *ecm_hs_function[] = {
/* CDC ECM control descriptors */
(struct usb_descriptor_header *) &ecm_control_intf,
(struct usb_descriptor_header *) &ecm_header_desc,
@@ -597,7 +597,7 @@ static void ecm_close(struct gether *geth)
/* ethernet function driver setup/binding */
-static int __init
+static int
ecm_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
@@ -763,7 +763,8 @@ ecm_unbind(struct usb_configuration *c, struct usb_function *f)
* Caller must have called @gether_setup(). Caller is also responsible
* for calling @gether_cleanup() before module unload.
*/
-int __init ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+int
+ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
{
struct f_ecm *ecm;
int status;
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
new file mode 100644
index 0000000..d69eccf
--- /dev/null
+++ b/drivers/usb/gadget/f_fs.c
@@ -0,0 +1,2442 @@
+/*
+ * f_fs.c -- user mode filesystem api for usb composite function controllers
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
+ *
+ * Based on inode.c (GadgetFS):
+ * Copyright (C) 2003-2004 David Brownell
+ * Copyright (C) 2003 Agilent Technologies
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/blkdev.h>
+#include <linux/pagemap.h>
+#include <asm/unaligned.h>
+#include <linux/smp_lock.h>
+
+#include <linux/usb/composite.h>
+#include <linux/usb/functionfs.h>
+
+
+#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by an honest dice roll ;) */
+
+
+/* Debugging ****************************************************************/
+
+#define ffs_printk(level, fmt, args...) printk(level "f_fs: " fmt "\n", ## args)
+
+#define FERR(...) ffs_printk(KERN_ERR, __VA_ARGS__)
+#define FINFO(...) ffs_printk(KERN_INFO, __VA_ARGS__)
+
+#ifdef DEBUG
+# define FDBG(...) ffs_printk(KERN_DEBUG, __VA_ARGS__)
+#else
+# define FDBG(...) do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+# define FVDBG FDBG
+#else
+# define FVDBG(...) do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define ENTER() FVDBG("%s()", __func__)
+
+#ifdef VERBOSE_DEBUG
+# define ffs_dump_mem(prefix, ptr, len) \
+ print_hex_dump_bytes("f_fs" prefix ": ", DUMP_PREFIX_NONE, ptr, len)
+#else
+# define ffs_dump_mem(prefix, ptr, len) do { } while (0)
+#endif
+
+
+/* The data structure and setup file ****************************************/
+
+enum ffs_state {
+ /* Waiting for descriptors and strings. */
+ /* In this state no open(2), read(2) or write(2) on epfiles
+ * may succeed (which should not be a problem as there
+ * should be no such files opened in the first place). */
+ FFS_READ_DESCRIPTORS,
+ FFS_READ_STRINGS,
+
+ /* We've got descriptors and strings. We are about to call or
+ * have already called functionfs_ready_callback().
+ * functionfs_bind() may have been called but we don't know. */
+ /* This is the only state in which operations on epfiles may
+ * succeed. */
+ FFS_ACTIVE,
+
+ /* All endpoints have been closed. This state is also set if
+ * we encounter an unrecoverable error. The only
+ * unrecoverable error is the situation where, after reading strings
+ * from user space we fail to initialise EP files or
+ * functionfs_ready_callback() returns with error (<0). */
+ /* In this state no open(2), read(2) or write(2) (both on ep0
+ * as well as epfile) may succeed (at this point epfiles are
+ * unlinked and all closed so this is not a problem; ep0 is
+ * also closed but ep0 file exists and so open(2) on ep0 must
+ * fail). */
+ FFS_CLOSING
+};
+
+
+enum ffs_setup_state {
+ /* There is no setup request pending. */
+ FFS_NO_SETUP,
+ /* User has read events and there was a setup request event
+ * there. The next read/write on ep0 will handle the
+ * request. */
+ FFS_SETUP_PENDING,
+ /* There was an event pending but before user space handled it
+ * some other event was introduced which canceled the existing
+ * setup. If this state is set, read/write on ep0 return
+ * -EIDRM. This state is only set when adding an event. */
+ FFS_SETUP_CANCELED
+};
+
+
+
+struct ffs_epfile;
+struct ffs_function;
+
+struct ffs_data {
+ struct usb_gadget *gadget;
+
+ /* Protect access to read/write operations, only one read/write
+ * at a time. As a consequence protects ep0req and company.
+ * While a setup request is being processed (queued) this is
+ * held. */
+ struct mutex mutex;
+
+ /* Protect access to endpoint related structures (basically
+ * usb_ep_queue(), usb_ep_dequeue(), etc. calls) except for
+ * endpoint zero. */
+ spinlock_t eps_lock;
+
+ /* XXX REVISIT do we need our own request? Since we are not
+ * handling setup requests immediately, user space may be so
+ * slow that another setup will be sent to the gadget but this
+ * time not to us but another function and then there could be
+ * a race. Is that the case? Or maybe we can use cdev->req
+ * after all, maybe we just need some spinlock for that? */
+ struct usb_request *ep0req; /* P: mutex */
+ struct completion ep0req_completion; /* P: mutex */
+ int ep0req_status; /* P: mutex */
+
+ /* reference counter */
+ atomic_t ref;
+ /* how many files are opened (EP0 and others) */
+ atomic_t opened;
+
+ /* EP0 state */
+ enum ffs_state state;
+
+ /*
+ * Possible transitions:
+ * + FFS_NO_SETUP -> FFS_SETUP_PENDING -- P: ev.waitq.lock
+ * happens only in ep0 read which is P: mutex
+ * + FFS_SETUP_PENDING -> FFS_NO_SETUP -- P: ev.waitq.lock
+ * happens only in ep0 i/o which is P: mutex
+ * + FFS_SETUP_PENDING -> FFS_SETUP_CANCELED -- P: ev.waitq.lock
+ * + FFS_SETUP_CANCELED -> FFS_NO_SETUP -- cmpxchg
+ */
+ enum ffs_setup_state setup_state;
+
+#define FFS_SETUP_STATE(ffs) \
+ ((enum ffs_setup_state)cmpxchg(&(ffs)->setup_state, \
+ FFS_SETUP_CANCELED, FFS_NO_SETUP))
+
+ /* Events & such. */
+ struct {
+ u8 types[4];
+ unsigned short count;
+ /* XXX REVISIT need to update it in some places, or do we? */
+ unsigned short can_stall;
+ struct usb_ctrlrequest setup;
+
+ wait_queue_head_t waitq;
+ } ev; /* the whole structure, P: ev.waitq.lock */
+
+ /* Flags */
+ unsigned long flags;
+#define FFS_FL_CALL_CLOSED_CALLBACK 0
+#define FFS_FL_BOUND 1
+
+ /* Active function */
+ struct ffs_function *func;
+
+ /* Device name, write once when file system is mounted.
+ * Intended for user to read if she wants. */
+ const char *dev_name;
+ /* Private data for our user (ie. gadget). Managed by
+ * user. */
+ void *private_data;
+
+ /* filled by __ffs_data_got_descs() */
+ /* real descriptors are 16 bytes after raw_descs (so you need
+ * to skip 16 bytes (ie. ffs->raw_descs + 16) to get to the
+ * first full speed descriptor). raw_descs_length and
+ * raw_fs_descs_length do not have those 16 bytes added. */
+ const void *raw_descs;
+ unsigned raw_descs_length;
+ unsigned raw_fs_descs_length;
+ unsigned fs_descs_count;
+ unsigned hs_descs_count;
+
+ unsigned short strings_count;
+ unsigned short interfaces_count;
+ unsigned short eps_count;
+ unsigned short _pad1;
+
+ /* filled by __ffs_data_got_strings() */
+ /* ids in stringtabs are set in functionfs_bind() */
+ const void *raw_strings;
+ struct usb_gadget_strings **stringtabs;
+
+ /* File system's super block, write once when file system is mounted. */
+ struct super_block *sb;
+
+ /* File permissions, written once when fs is mounted */
+ struct ffs_file_perms {
+ umode_t mode;
+ uid_t uid;
+ gid_t gid;
+ } file_perms;
+
+ /* The endpoint files, filled by ffs_epfiles_create(),
+ * destroyed by ffs_epfiles_destroy(). */
+ struct ffs_epfile *epfiles;
+};
+
+/* Reference counter handling */
+static void ffs_data_get(struct ffs_data *ffs);
+static void ffs_data_put(struct ffs_data *ffs);
+/* Creates new ffs_data object. */
+static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
+
+/* Opened counter handling. */
+static void ffs_data_opened(struct ffs_data *ffs);
+static void ffs_data_closed(struct ffs_data *ffs);
+
+/* Called with ffs->mutex held; take over ownership of data. */
+static int __must_check
+__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
+static int __must_check
+__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
+
+
+/* The function structure ***************************************************/
+
+struct ffs_ep;
+
+struct ffs_function {
+ struct usb_configuration *conf;
+ struct usb_gadget *gadget;
+ struct ffs_data *ffs;
+
+ struct ffs_ep *eps;
+ u8 eps_revmap[16];
+ short *interfaces_nums;
+
+ struct usb_function function;
+};
+
+
+static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
+{
+ return container_of(f, struct ffs_function, function);
+}
+
+static void ffs_func_free(struct ffs_function *func);
+
+
+static void ffs_func_eps_disable(struct ffs_function *func);
+static int __must_check ffs_func_eps_enable(struct ffs_function *func);
+
+
+static int ffs_func_bind(struct usb_configuration *,
+ struct usb_function *);
+static void ffs_func_unbind(struct usb_configuration *,
+ struct usb_function *);
+static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
+static void ffs_func_disable(struct usb_function *);
+static int ffs_func_setup(struct usb_function *,
+ const struct usb_ctrlrequest *);
+static void ffs_func_suspend(struct usb_function *);
+static void ffs_func_resume(struct usb_function *);
+
+
+static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
+static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
+
+
+
+/* The endpoints structures *************************************************/
+
+struct ffs_ep {
+ struct usb_ep *ep; /* P: ffs->eps_lock */
+ struct usb_request *req; /* P: epfile->mutex */
+
+ /* [0]: full speed, [1]: high speed */
+ struct usb_endpoint_descriptor *descs[2];
+
+ u8 num;
+
+ int status; /* P: epfile->mutex */
+};
+
+struct ffs_epfile {
+ /* Protects ep->ep and ep->req. */
+ struct mutex mutex;
+ wait_queue_head_t wait;
+
+ struct ffs_data *ffs;
+ struct ffs_ep *ep; /* P: ffs->eps_lock */
+
+ struct dentry *dentry;
+
+ char name[5];
+
+ unsigned char in; /* P: ffs->eps_lock */
+ unsigned char isoc; /* P: ffs->eps_lock */
+
+ unsigned char _pad;
+};
+
+
+static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
+static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
+
+static struct inode *__must_check
+ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
+ const struct file_operations *fops,
+ struct dentry **dentry_p);
+
+
+/* Misc helper functions ****************************************************/
+
+static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
+ __attribute__((warn_unused_result, nonnull));
+static char *ffs_prepare_buffer(const char * __user buf, size_t len)
+ __attribute__((warn_unused_result, nonnull));
+
+
+/* Control file aka ep0 *****************************************************/
+
+static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct ffs_data *ffs = req->context;
+
+ complete_all(&ffs->ep0req_completion);
+}
+
+
+static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
+{
+ struct usb_request *req = ffs->ep0req;
+ int ret;
+
+ req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
+
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+
+ req->buf = data;
+ req->length = len;
+
+ INIT_COMPLETION(ffs->ep0req_completion);
+
+ ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
+ if (unlikely(ret < 0))
+ return ret;
+
+ ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
+ if (unlikely(ret)) {
+ usb_ep_dequeue(ffs->gadget->ep0, req);
+ return -EINTR;
+ }
+
+ ffs->setup_state = FFS_NO_SETUP;
+ return ffs->ep0req_status;
+}
+
+static int __ffs_ep0_stall(struct ffs_data *ffs)
+{
+ if (ffs->ev.can_stall) {
+ FVDBG("ep0 stall\n");
+ usb_ep_set_halt(ffs->gadget->ep0);
+ ffs->setup_state = FFS_NO_SETUP;
+ return -EL2HLT;
+ } else {
+ FDBG("bogus ep0 stall!\n");
+ return -ESRCH;
+ }
+}
+
+
+static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ptr)
+{
+ struct ffs_data *ffs = file->private_data;
+ ssize_t ret;
+ char *data;
+
+ ENTER();
+
+ /* Fast check if setup was canceled */
+ if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
+ return -EIDRM;
+
+ /* Acquire mutex */
+ ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
+ if (unlikely(ret < 0))
+ return ret;
+
+
+ /* Check state */
+ switch (ffs->state) {
+ case FFS_READ_DESCRIPTORS:
+ case FFS_READ_STRINGS:
+ /* Copy data */
+ if (unlikely(len < 16)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ data = ffs_prepare_buffer(buf, len);
+ if (unlikely(IS_ERR(data))) {
+ ret = PTR_ERR(data);
+ break;
+ }
+
+ /* Handle data */
+ if (ffs->state == FFS_READ_DESCRIPTORS) {
+ FINFO("read descriptors");
+ ret = __ffs_data_got_descs(ffs, data, len);
+ if (unlikely(ret < 0))
+ break;
+
+ ffs->state = FFS_READ_STRINGS;
+ ret = len;
+ } else {
+ FINFO("read strings");
+ ret = __ffs_data_got_strings(ffs, data, len);
+ if (unlikely(ret < 0))
+ break;
+
+ ret = ffs_epfiles_create(ffs);
+ if (unlikely(ret)) {
+ ffs->state = FFS_CLOSING;
+ break;
+ }
+
+ ffs->state = FFS_ACTIVE;
+ mutex_unlock(&ffs->mutex);
+
+ ret = functionfs_ready_callback(ffs);
+ if (unlikely(ret < 0)) {
+ ffs->state = FFS_CLOSING;
+ return ret;
+ }
+
+ set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
+ return len;
+ }
+ break;
+
+
+ case FFS_ACTIVE:
+ data = NULL;
+ /* We're called from user space, we can use _irq
+ * rather than _irqsave */
+ spin_lock_irq(&ffs->ev.waitq.lock);
+ switch (FFS_SETUP_STATE(ffs)) {
+ case FFS_SETUP_CANCELED:
+ ret = -EIDRM;
+ goto done_spin;
+
+ case FFS_NO_SETUP:
+ ret = -ESRCH;
+ goto done_spin;
+
+ case FFS_SETUP_PENDING:
+ break;
+ }
+
+ /* FFS_SETUP_PENDING */
+ if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+ ret = __ffs_ep0_stall(ffs);
+ break;
+ }
+
+ /* FFS_SETUP_PENDING and not stall */
+ len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
+
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+
+ data = ffs_prepare_buffer(buf, len);
+ if (unlikely(IS_ERR(data))) {
+ ret = PTR_ERR(data);
+ break;
+ }
+
+ spin_lock_irq(&ffs->ev.waitq.lock);
+
+ /* We are guaranteed to be still in FFS_ACTIVE state
+ * but the state of setup could have changed from
+ * FFS_SETUP_PENDING to FFS_SETUP_CANCELED so we need
+ * to check for that. If that happened we copied data
+ * from user space in vain but it's unlikely. */
+ /* For sure we are not in FFS_NO_SETUP since this is
+ * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
+ * transition can be performed and it's protected by
+ * mutex. */
+
+ if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
+ ret = -EIDRM;
+done_spin:
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+ } else {
+ /* unlocks spinlock */
+ ret = __ffs_ep0_queue_wait(ffs, data, len);
+ }
+ kfree(data);
+ break;
+
+
+ default:
+ ret = -EBADFD;
+ break;
+ }
+
+
+ mutex_unlock(&ffs->mutex);
+ return ret;
+}
+
+
+
+static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
+ size_t n)
+{
+ /* We are holding ffs->ev.waitq.lock and ffs->mutex and we need
+ * to release them. */
+
+ struct usb_functionfs_event events[n];
+ unsigned i = 0;
+
+ memset(events, 0, sizeof events);
+
+ do {
+ events[i].type = ffs->ev.types[i];
+ if (events[i].type == FUNCTIONFS_SETUP) {
+ events[i].u.setup = ffs->ev.setup;
+ ffs->setup_state = FFS_SETUP_PENDING;
+ }
+ } while (++i < n);
+
+ if (n < ffs->ev.count) {
+ ffs->ev.count -= n;
+ memmove(ffs->ev.types, ffs->ev.types + n,
+ ffs->ev.count * sizeof *ffs->ev.types);
+ } else {
+ ffs->ev.count = 0;
+ }
+
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+ mutex_unlock(&ffs->mutex);
+
+ return unlikely(__copy_to_user(buf, events, sizeof events))
+ ? -EFAULT : sizeof events;
+}
+
+
+static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ptr)
+{
+ struct ffs_data *ffs = file->private_data;
+ char *data = NULL;
+ size_t n;
+ int ret;
+
+ ENTER();
+
+ /* Fast check if setup was canceled */
+ if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
+ return -EIDRM;
+
+ /* Acquire mutex */
+ ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
+ if (unlikely(ret < 0))
+ return ret;
+
+
+ /* Check state */
+ if (ffs->state != FFS_ACTIVE) {
+ ret = -EBADFD;
+ goto done_mutex;
+ }
+
+
+ /* We're called from user space, we can use _irq rather than
+ * _irqsave */
+ spin_lock_irq(&ffs->ev.waitq.lock);
+
+ switch (FFS_SETUP_STATE(ffs)) {
+ case FFS_SETUP_CANCELED:
+ ret = -EIDRM;
+ break;
+
+ case FFS_NO_SETUP:
+ n = len / sizeof(struct usb_functionfs_event);
+ if (unlikely(!n)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (unlikely(wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq, ffs->ev.count))) {
+ ret = -EINTR;
+ break;
+ }
+
+ return __ffs_ep0_read_events(ffs, buf,
+ min(n, (size_t)ffs->ev.count));
+
+
+ case FFS_SETUP_PENDING:
+ if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+ ret = __ffs_ep0_stall(ffs);
+ goto done_mutex;
+ }
+
+ len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
+
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+
+ if (likely(len)) {
+ data = kmalloc(len, GFP_KERNEL);
+ if (unlikely(!data)) {
+ ret = -ENOMEM;
+ goto done_mutex;
+ }
+ }
+
+ spin_lock_irq(&ffs->ev.waitq.lock);
+
+ /* See ffs_ep0_write() */
+ if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
+ ret = -EIDRM;
+ break;
+ }
+
+ /* unlocks spinlock */
+ ret = __ffs_ep0_queue_wait(ffs, data, len);
+ if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len)))
+ ret = -EFAULT;
+ goto done_mutex;
+
+ default:
+ ret = -EBADFD;
+ break;
+ }
+
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+done_mutex:
+ mutex_unlock(&ffs->mutex);
+ kfree(data);
+ return ret;
+}
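From user space the same ep0 file descriptor is simply read in a loop; a sketch, assuming the <linux/usb/functionfs.h> header added by this series is available to user space and handling only the FUNCTIONFS_SETUP event type used above:

#include <unistd.h>
#include <linux/usb/functionfs.h>

/* Drain events from an ep0 descriptor configured as in the earlier sketch. */
static int handle_ep0_events(int ep0)
{
	struct usb_functionfs_event event;

	for (;;) {
		ssize_t n = read(ep0, &event, sizeof(event));

		if (n < (ssize_t)sizeof(event))
			return -1;
		if (event.type == FUNCTIONFS_SETUP) {
			/* A control request addressed to this function;
			 * answer it with read(2)/write(2) on ep0. */
		}
	}
}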
+
+
+
+static int ffs_ep0_open(struct inode *inode, struct file *file)
+{
+ struct ffs_data *ffs = inode->i_private;
+
+ ENTER();
+
+ if (unlikely(ffs->state == FFS_CLOSING))
+ return -EBUSY;
+
+ file->private_data = ffs;
+ ffs_data_opened(ffs);
+
+ return 0;
+}
+
+
+static int ffs_ep0_release(struct inode *inode, struct file *file)
+{
+ struct ffs_data *ffs = file->private_data;
+
+ ENTER();
+
+ ffs_data_closed(ffs);
+
+ return 0;
+}
+
+
+static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
+{
+ struct ffs_data *ffs = file->private_data;
+ struct usb_gadget *gadget = ffs->gadget;
+ long ret;
+
+ ENTER();
+
+ if (code == FUNCTIONFS_INTERFACE_REVMAP) {
+ struct ffs_function *func = ffs->func;
+ ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
+ } else if (gadget->ops->ioctl) {
+ lock_kernel();
+ ret = gadget->ops->ioctl(gadget, code, value);
+ unlock_kernel();
+ } else {
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
+
+static const struct file_operations ffs_ep0_operations = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+
+ .open = ffs_ep0_open,
+ .write = ffs_ep0_write,
+ .read = ffs_ep0_read,
+ .release = ffs_ep0_release,
+ .unlocked_ioctl = ffs_ep0_ioctl,
+};
+
+
+/* "Normal" endpoints operations ********************************************/
+
+
+static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
+{
+ ENTER();
+ if (likely(req->context)) {
+ struct ffs_ep *ep = _ep->driver_data;
+ ep->status = req->status ? req->status : req->actual;
+ complete(req->context);
+ }
+}
+
+
+static ssize_t ffs_epfile_io(struct file *file,
+ char __user *buf, size_t len, int read)
+{
+ struct ffs_epfile *epfile = file->private_data;
+ struct ffs_ep *ep;
+ char *data = NULL;
+ ssize_t ret;
+ int halt;
+
+ goto first_try;
+ do {
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ mutex_unlock(&epfile->mutex);
+
+first_try:
+ /* Are we still active? */
+ if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ /* Wait for endpoint to be enabled */
+ ep = epfile->ep;
+ if (!ep) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto error;
+ }
+
+ if (unlikely(wait_event_interruptible
+ (epfile->wait, (ep = epfile->ep)))) {
+ ret = -EINTR;
+ goto error;
+ }
+ }
+
+ /* Do we halt? */
+ halt = !read == !epfile->in;
+ if (halt && epfile->isoc) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Allocate & copy */
+ if (!halt && !data) {
+ data = kzalloc(len, GFP_KERNEL);
+ if (unlikely(!data))
+ return -ENOMEM;
+
+ if (!read &&
+ unlikely(__copy_from_user(data, buf, len))) {
+ ret = -EFAULT;
+ goto error;
+ }
+ }
+
+ /* We will be using request */
+ ret = ffs_mutex_lock(&epfile->mutex,
+ file->f_flags & O_NONBLOCK);
+ if (unlikely(ret))
+ goto error;
+
+ /* We're called from user space, we can use _irq rather than
+ * _irqsave */
+ spin_lock_irq(&epfile->ffs->eps_lock);
+
+ /* Did the endpoint get disabled or changed while we
+ * were acquiring the mutex? */
+ } while (unlikely(epfile->ep != ep));
+
+ /* Halt */
+ if (unlikely(halt)) {
+ if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
+ usb_ep_set_halt(ep->ep);
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ ret = -EBADMSG;
+ } else {
+ /* Fire the request */
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ struct usb_request *req = ep->req;
+ req->context = &done;
+ req->complete = ffs_epfile_io_complete;
+ req->buf = data;
+ req->length = len;
+
+ ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+
+ if (unlikely(ret < 0)) {
+ /* nop */
+ } else if (unlikely(wait_for_completion_interruptible(&done))) {
+ ret = -EINTR;
+ usb_ep_dequeue(ep->ep, req);
+ } else {
+ ret = ep->status;
+ if (read && ret > 0 &&
+ unlikely(copy_to_user(buf, data, ret)))
+ ret = -EFAULT;
+ }
+ }
+
+ mutex_unlock(&epfile->mutex);
+error:
+ kfree(data);
+ return ret;
+}
+
+
+static ssize_t
+ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
+ loff_t *ptr)
+{
+ ENTER();
+
+ return ffs_epfile_io(file, (char __user *)buf, len, 0);
+}
+
+static ssize_t
+ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
+{
+ ENTER();
+
+ return ffs_epfile_io(file, buf, len, 1);
+}
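User space reaches this path through plain read(2)/write(2) on the ep files created below; a minimal sketch for an endpoint exposed as ep1 and described as IN in the supplied descriptors (the mount point is assumed):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Queue one bulk IN transfer: data written to ep1 is handed to the host. */
static int send_to_host(const char *msg)
{
	int fd = open("/dev/ffs/ep1", O_RDWR);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, msg, strlen(msg));	/* blocks until the transfer completes */
	close(fd);
	return n < 0 ? -1 : 0;
}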
+
+static int
+ffs_epfile_open(struct inode *inode, struct file *file)
+{
+ struct ffs_epfile *epfile = inode->i_private;
+
+ ENTER();
+
+ if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
+ return -ENODEV;
+
+ file->private_data = epfile;
+ ffs_data_opened(epfile->ffs);
+
+ return 0;
+}
+
+static int
+ffs_epfile_release(struct inode *inode, struct file *file)
+{
+ struct ffs_epfile *epfile = inode->i_private;
+
+ ENTER();
+
+ ffs_data_closed(epfile->ffs);
+
+ return 0;
+}
+
+
+static long ffs_epfile_ioctl(struct file *file, unsigned code,
+ unsigned long value)
+{
+ struct ffs_epfile *epfile = file->private_data;
+ int ret;
+
+ ENTER();
+
+ if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
+ return -ENODEV;
+
+ spin_lock_irq(&epfile->ffs->eps_lock);
+ if (likely(epfile->ep)) {
+ switch (code) {
+ case FUNCTIONFS_FIFO_STATUS:
+ ret = usb_ep_fifo_status(epfile->ep->ep);
+ break;
+ case FUNCTIONFS_FIFO_FLUSH:
+ usb_ep_fifo_flush(epfile->ep->ep);
+ ret = 0;
+ break;
+ case FUNCTIONFS_CLEAR_HALT:
+ ret = usb_ep_clear_halt(epfile->ep->ep);
+ break;
+ case FUNCTIONFS_ENDPOINT_REVMAP:
+ ret = epfile->ep->num;
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+ } else {
+ ret = -ENODEV;
+ }
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+
+ return ret;
+}
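The same codes are issued from user space with ioctl(2); for instance, recovering a halted endpoint file (a sketch, reusing a descriptor opened as in the previous example):

#include <sys/ioctl.h>
#include <linux/usb/functionfs.h>

/* Clear a stall on an endpoint file and report how many bytes sit in its FIFO. */
static int unstick_endpoint(int fd)
{
	if (ioctl(fd, FUNCTIONFS_CLEAR_HALT) < 0)
		return -1;
	return ioctl(fd, FUNCTIONFS_FIFO_STATUS);
}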
+
+
+static const struct file_operations ffs_epfile_operations = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+
+ .open = ffs_epfile_open,
+ .write = ffs_epfile_write,
+ .read = ffs_epfile_read,
+ .release = ffs_epfile_release,
+ .unlocked_ioctl = ffs_epfile_ioctl,
+};
+
+
+
+/* File system and super block operations ***********************************/
+
+/*
+ * Mounting the filesystem creates a controller file, used first for
+ * function configuration then later for event monitoring.
+ */
+
+
+static struct inode *__must_check
+ffs_sb_make_inode(struct super_block *sb, void *data,
+ const struct file_operations *fops,
+ const struct inode_operations *iops,
+ struct ffs_file_perms *perms)
+{
+ struct inode *inode;
+
+ ENTER();
+
+ inode = new_inode(sb);
+
+ if (likely(inode)) {
+ struct timespec current_time = CURRENT_TIME;
+
+ inode->i_mode = perms->mode;
+ inode->i_uid = perms->uid;
+ inode->i_gid = perms->gid;
+ inode->i_atime = current_time;
+ inode->i_mtime = current_time;
+ inode->i_ctime = current_time;
+ inode->i_private = data;
+ if (fops)
+ inode->i_fop = fops;
+ if (iops)
+ inode->i_op = iops;
+ }
+
+ return inode;
+}
+
+
+/* Create "regular" file */
+
+static struct inode *ffs_sb_create_file(struct super_block *sb,
+ const char *name, void *data,
+ const struct file_operations *fops,
+ struct dentry **dentry_p)
+{
+ struct ffs_data *ffs = sb->s_fs_info;
+ struct dentry *dentry;
+ struct inode *inode;
+
+ ENTER();
+
+ dentry = d_alloc_name(sb->s_root, name);
+ if (unlikely(!dentry))
+ return NULL;
+
+ inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
+ if (unlikely(!inode)) {
+ dput(dentry);
+ return NULL;
+ }
+
+ d_add(dentry, inode);
+ if (dentry_p)
+ *dentry_p = dentry;
+
+ return inode;
+}
+
+
+/* Super block */
+
+static const struct super_operations ffs_sb_operations = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+};
+
+struct ffs_sb_fill_data {
+ struct ffs_file_perms perms;
+ umode_t root_mode;
+ const char *dev_name;
+};
+
+static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
+{
+ struct ffs_sb_fill_data *data = _data;
+ struct inode *inode;
+ struct dentry *d;
+ struct ffs_data *ffs;
+
+ ENTER();
+
+ /* Initialize data */
+ ffs = ffs_data_new();
+ if (unlikely(!ffs))
+ goto enomem0;
+
+ ffs->sb = sb;
+ ffs->dev_name = data->dev_name;
+ ffs->file_perms = data->perms;
+
+ sb->s_fs_info = ffs;
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = FUNCTIONFS_MAGIC;
+ sb->s_op = &ffs_sb_operations;
+ sb->s_time_gran = 1;
+
+ /* Root inode */
+ data->perms.mode = data->root_mode;
+ inode = ffs_sb_make_inode(sb, NULL,
+ &simple_dir_operations,
+ &simple_dir_inode_operations,
+ &data->perms);
+ if (unlikely(!inode))
+ goto enomem1;
+ d = d_alloc_root(inode);
+ if (unlikely(!d))
+ goto enomem2;
+ sb->s_root = d;
+
+ /* EP0 file */
+ if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
+ &ffs_ep0_operations, NULL)))
+ goto enomem3;
+
+ return 0;
+
+enomem3:
+ dput(d);
+enomem2:
+ iput(inode);
+enomem1:
+ ffs_data_put(ffs);
+enomem0:
+ return -ENOMEM;
+}
+
+
+static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
+{
+ ENTER();
+
+ if (!opts || !*opts)
+ return 0;
+
+ for (;;) {
+ char *end, *eq, *comma;
+ unsigned long value;
+
+ /* Option limit */
+ comma = strchr(opts, ',');
+ if (comma)
+ *comma = 0;
+
+ /* Value limit */
+ eq = strchr(opts, '=');
+ if (unlikely(!eq)) {
+ FERR("'=' missing in %s", opts);
+ return -EINVAL;
+ }
+ *eq = 0;
+
+ /* Parse value */
+ value = simple_strtoul(eq + 1, &end, 0);
+ if (unlikely(*end != ',' && *end != 0)) {
+ FERR("%s: invalid value: %s", opts, eq + 1);
+ return -EINVAL;
+ }
+
+ /* Interpret option */
+ switch (eq - opts) {
+ case 5:
+ if (!memcmp(opts, "rmode", 5))
+ data->root_mode = (value & 0555) | S_IFDIR;
+ else if (!memcmp(opts, "fmode", 5))
+ data->perms.mode = (value & 0666) | S_IFREG;
+ else
+ goto invalid;
+ break;
+
+ case 4:
+ if (!memcmp(opts, "mode", 4)) {
+ data->root_mode = (value & 0555) | S_IFDIR;
+ data->perms.mode = (value & 0666) | S_IFREG;
+ } else {
+ goto invalid;
+ }
+ break;
+
+ case 3:
+ if (!memcmp(opts, "uid", 3))
+ data->perms.uid = value;
+ else if (!memcmp(opts, "gid", 3))
+ data->perms.gid = value;
+ else
+ goto invalid;
+ break;
+
+ default:
+invalid:
+ FERR("%s: invalid option", opts);
+ return -EINVAL;
+ }
+
+ /* Next iteration */
+ if (!comma)
+ break;
+ opts = comma + 1;
+ }
+
+ return 0;
+}
+
+
+/* "mount -t functionfs dev_name /dev/function" ends up here */
+
+static int
+ffs_fs_get_sb(struct file_system_type *t, int flags,
+ const char *dev_name, void *opts, struct vfsmount *mnt)
+{
+ struct ffs_sb_fill_data data = {
+ .perms = {
+ .mode = S_IFREG | 0600,
+ .uid = 0,
+ .gid = 0
+ },
+ .root_mode = S_IFDIR | 0500,
+ };
+ int ret;
+
+ ENTER();
+
+ ret = functionfs_check_dev_callback(dev_name);
+ if (unlikely(ret < 0))
+ return ret;
+
+ ret = ffs_fs_parse_opts(&data, opts);
+ if (unlikely(ret < 0))
+ return ret;
+
+ data.dev_name = dev_name;
+ return get_sb_single(t, flags, &data, ffs_sb_fill, mnt);
+}
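The option string parsed by ffs_fs_parse_opts() above arrives as the data argument of mount(2); a user-space sketch (device name, mount point and the numeric IDs are illustrative only):

#include <sys/mount.h>

/* Mount one FunctionFS instance, handing the ep files to uid/gid 1000. */
static int mount_ffs(void)
{
	return mount("my_func", "/dev/ffs", "functionfs", 0,
		     "uid=1000,gid=1000,fmode=0660,rmode=0550");
}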
+
+static void
+ffs_fs_kill_sb(struct super_block *sb)
+{
+ void *ptr;
+
+ ENTER();
+
+ kill_litter_super(sb);
+ ptr = xchg(&sb->s_fs_info, NULL);
+ if (ptr)
+ ffs_data_put(ptr);
+}
+
+static struct file_system_type ffs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "functionfs",
+ .get_sb = ffs_fs_get_sb,
+ .kill_sb = ffs_fs_kill_sb,
+};
+
+
+
+/* Driver's main init/cleanup functions *************************************/
+
+
+static int functionfs_init(void)
+{
+ int ret;
+
+ ENTER();
+
+ ret = register_filesystem(&ffs_fs_type);
+ if (likely(!ret))
+ FINFO("file system registered");
+ else
+ FERR("failed registering file system (%d)", ret);
+
+ return ret;
+}
+
+static void functionfs_cleanup(void)
+{
+ ENTER();
+
+ FINFO("unloading");
+ unregister_filesystem(&ffs_fs_type);
+}
+
+
+
+/* ffs_data and ffs_function construction and destruction code **************/
+
+static void ffs_data_clear(struct ffs_data *ffs);
+static void ffs_data_reset(struct ffs_data *ffs);
+
+
+static void ffs_data_get(struct ffs_data *ffs)
+{
+ ENTER();
+
+ atomic_inc(&ffs->ref);
+}
+
+static void ffs_data_opened(struct ffs_data *ffs)
+{
+ ENTER();
+
+ atomic_inc(&ffs->ref);
+ atomic_inc(&ffs->opened);
+}
+
+static void ffs_data_put(struct ffs_data *ffs)
+{
+ ENTER();
+
+ if (unlikely(atomic_dec_and_test(&ffs->ref))) {
+ FINFO("%s(): freeing", __func__);
+ ffs_data_clear(ffs);
+ BUG_ON(mutex_is_locked(&ffs->mutex) ||
+ spin_is_locked(&ffs->ev.waitq.lock) ||
+ waitqueue_active(&ffs->ev.waitq) ||
+ waitqueue_active(&ffs->ep0req_completion.wait));
+ kfree(ffs);
+ }
+}
+
+
+
+static void ffs_data_closed(struct ffs_data *ffs)
+{
+ ENTER();
+
+ if (atomic_dec_and_test(&ffs->opened)) {
+ ffs->state = FFS_CLOSING;
+ ffs_data_reset(ffs);
+ }
+
+ ffs_data_put(ffs);
+}
+
+
+static struct ffs_data *ffs_data_new(void)
+{
+ struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
+ if (unlikely(!ffs))
+ return 0;
+
+ ENTER();
+
+ atomic_set(&ffs->ref, 1);
+ atomic_set(&ffs->opened, 0);
+ ffs->state = FFS_READ_DESCRIPTORS;
+ mutex_init(&ffs->mutex);
+ spin_lock_init(&ffs->eps_lock);
+ init_waitqueue_head(&ffs->ev.waitq);
+ init_completion(&ffs->ep0req_completion);
+
+ /* XXX REVISIT need to update it in some places, or do we? */
+ ffs->ev.can_stall = 1;
+
+ return ffs;
+}
+
+
+static void ffs_data_clear(struct ffs_data *ffs)
+{
+ ENTER();
+
+ if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
+ functionfs_closed_callback(ffs);
+
+ BUG_ON(ffs->gadget);
+
+ if (ffs->epfiles)
+ ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
+
+ kfree(ffs->raw_descs);
+ kfree(ffs->raw_strings);
+ kfree(ffs->stringtabs);
+}
+
+
+static void ffs_data_reset(struct ffs_data *ffs)
+{
+ ENTER();
+
+ ffs_data_clear(ffs);
+
+ ffs->epfiles = NULL;
+ ffs->raw_descs = NULL;
+ ffs->raw_strings = NULL;
+ ffs->stringtabs = NULL;
+
+ ffs->raw_descs_length = 0;
+ ffs->raw_fs_descs_length = 0;
+ ffs->fs_descs_count = 0;
+ ffs->hs_descs_count = 0;
+
+ ffs->strings_count = 0;
+ ffs->interfaces_count = 0;
+ ffs->eps_count = 0;
+
+ ffs->ev.count = 0;
+
+ ffs->state = FFS_READ_DESCRIPTORS;
+ ffs->setup_state = FFS_NO_SETUP;
+ ffs->flags = 0;
+}
+
+
+static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
+{
+ unsigned i, count;
+
+ ENTER();
+
+ if (WARN_ON(ffs->state != FFS_ACTIVE
+ || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
+ return -EBADFD;
+
+ ffs_data_get(ffs);
+
+ ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
+ if (unlikely(!ffs->ep0req))
+ return -ENOMEM;
+ ffs->ep0req->complete = ffs_ep0_complete;
+ ffs->ep0req->context = ffs;
+
+ /* Get strings identifiers */
+ for (count = ffs->strings_count, i = 0; i < count; ++i) {
+ struct usb_gadget_strings **lang;
+
+ int id = usb_string_id(cdev);
+ if (unlikely(id < 0)) {
+ usb_ep_free_request(cdev->gadget->ep0, ffs->ep0req);
+ ffs->ep0req = NULL;
+ return id;
+ }
+
+ lang = ffs->stringtabs;
+ do {
+ (*lang)->strings[i].id = id;
+ ++lang;
+ } while (*lang);
+ }
+
+ ffs->gadget = cdev->gadget;
+ return 0;
+}
+
+
+static void functionfs_unbind(struct ffs_data *ffs)
+{
+ ENTER();
+
+ if (!WARN_ON(!ffs->gadget)) {
+ usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
+ ffs->ep0req = NULL;
+ ffs->gadget = NULL;
+ ffs_data_put(ffs);
+ }
+}
+
+
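+/* Create one "epN" file (N = 1..eps_count) in the file system root for
+ * every endpoint declared by the descriptors.  On failure the files
+ * created so far are torn down again via ffs_epfiles_destroy(). */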
+static int ffs_epfiles_create(struct ffs_data *ffs)
+{
+ struct ffs_epfile *epfile, *epfiles;
+ unsigned i, count;
+
+ ENTER();
+
+ count = ffs->eps_count;
+ epfiles = kzalloc(count * sizeof *epfiles, GFP_KERNEL);
+ if (!epfiles)
+ return -ENOMEM;
+
+ epfile = epfiles;
+ for (i = 1; i <= count; ++i, ++epfile) {
+ epfile->ffs = ffs;
+ mutex_init(&epfile->mutex);
+ init_waitqueue_head(&epfile->wait);
+ sprintf(epfile->name, "ep%u", i);
+ if (unlikely(!ffs_sb_create_file(ffs->sb, epfile->name, epfile,
+ &ffs_epfile_operations,
+ &epfile->dentry))) {
+ ffs_epfiles_destroy(epfiles, i - 1);
+ return -ENOMEM;
+ }
+ }
+
+ ffs->epfiles = epfiles;
+ return 0;
+}
+
+
+static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
+{
+ struct ffs_epfile *epfile = epfiles;
+
+ ENTER();
+
+ for (; count; --count, ++epfile) {
+ BUG_ON(mutex_is_locked(&epfile->mutex) ||
+ waitqueue_active(&epfile->wait));
+ if (epfile->dentry) {
+ d_delete(epfile->dentry);
+ dput(epfile->dentry);
+ epfile->dentry = NULL;
+ }
+ }
+
+ kfree(epfiles);
+}
+
+
+static int functionfs_add(struct usb_composite_dev *cdev,
+ struct usb_configuration *c,
+ struct ffs_data *ffs)
+{
+ struct ffs_function *func;
+ int ret;
+
+ ENTER();
+
+ func = kzalloc(sizeof *func, GFP_KERNEL);
+ if (unlikely(!func))
+ return -ENOMEM;
+
+ func->function.name = "Function FS Gadget";
+ func->function.strings = ffs->stringtabs;
+
+ func->function.bind = ffs_func_bind;
+ func->function.unbind = ffs_func_unbind;
+ func->function.set_alt = ffs_func_set_alt;
+ /*func->function.get_alt = ffs_func_get_alt;*/
+ func->function.disable = ffs_func_disable;
+ func->function.setup = ffs_func_setup;
+ func->function.suspend = ffs_func_suspend;
+ func->function.resume = ffs_func_resume;
+
+ func->conf = c;
+ func->gadget = cdev->gadget;
+ func->ffs = ffs;
+ ffs_data_get(ffs);
+
+ ret = usb_add_function(c, &func->function);
+ if (unlikely(ret))
+ ffs_func_free(func);
+
+ return ret;
+}
+
+static void ffs_func_free(struct ffs_function *func)
+{
+ ENTER();
+
+ ffs_data_put(func->ffs);
+
+ kfree(func->eps);
+ /* eps and interfaces_nums are allocated in the same chunk so
+ * only one free is required. Descriptors are also allocated
+ * in the same chunk. */
+
+ kfree(func);
+}
+
+
+static void ffs_func_eps_disable(struct ffs_function *func)
+{
+ struct ffs_ep *ep = func->eps;
+ struct ffs_epfile *epfile = func->ffs->epfiles;
+ unsigned count = func->ffs->eps_count;
+ unsigned long flags;
+
+ spin_lock_irqsave(&func->ffs->eps_lock, flags);
+ do {
+ /* pending requests get nuked */
+ if (likely(ep->ep))
+ usb_ep_disable(ep->ep);
+ epfile->ep = NULL;
+
+ ++ep;
+ ++epfile;
+ } while (--count);
+ spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+}
+
+static int ffs_func_eps_enable(struct ffs_function *func)
+{
+ struct ffs_data *ffs = func->ffs;
+ struct ffs_ep *ep = func->eps;
+ struct ffs_epfile *epfile = ffs->epfiles;
+ unsigned count = ffs->eps_count;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&func->ffs->eps_lock, flags);
+ do {
+ struct usb_endpoint_descriptor *ds;
+ ds = ep->descs[ep->descs[1] ? 1 : 0];
+
+ ep->ep->driver_data = ep;
+ ret = usb_ep_enable(ep->ep, ds);
+ if (likely(!ret)) {
+ epfile->ep = ep;
+ epfile->in = usb_endpoint_dir_in(ds);
+ epfile->isoc = usb_endpoint_xfer_isoc(ds);
+ } else {
+ break;
+ }
+
+ wake_up(&epfile->wait);
+
+ ++ep;
+ ++epfile;
+ } while (--count);
+ spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+
+ return ret;
+}
+
+
+/* Parsing and building descriptors and strings *****************************/
+
+
+/* This validates the data pointed to by data as a USB descriptor and
+ * records how many interfaces, endpoints and strings the given
+ * configuration requires.  Returns the descriptor's length on success
+ * or a negative error code if the data is invalid. */
+
+enum ffs_entity_type {
+ FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
+};
+
+typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
+ u8 *valuep,
+ struct usb_descriptor_header *desc,
+ void *priv);
+
+static int __must_check ffs_do_desc(char *data, unsigned len,
+ ffs_entity_callback entity, void *priv)
+{
+ struct usb_descriptor_header *_ds = (void *)data;
+ u8 length;
+ int ret;
+
+ ENTER();
+
+ /* At least two bytes are required: length and type */
+ if (len < 2) {
+ FVDBG("descriptor too short");
+ return -EINVAL;
+ }
+
+ /* Do we have at least as many bytes as the descriptor takes? */
+ length = _ds->bLength;
+ if (len < length) {
+ FVDBG("descriptor longer then available data");
+ return -EINVAL;
+ }
+
+#define __entity_check_INTERFACE(val) 1
+#define __entity_check_STRING(val) (val)
+#define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK)
+#define __entity(type, val) do { \
+ FVDBG("entity " #type "(%02x)", (val)); \
+ if (unlikely(!__entity_check_ ##type(val))) { \
+ FVDBG("invalid entity's value"); \
+ return -EINVAL; \
+ } \
+ ret = entity(FFS_ ##type, &val, _ds, priv); \
+ if (unlikely(ret < 0)) { \
+ FDBG("entity " #type "(%02x); ret = %d", \
+ (val), ret); \
+ return ret; \
+ } \
+ } while (0)
+
+ /* Parse descriptor depending on type. */
+ switch (_ds->bDescriptorType) {
+ case USB_DT_DEVICE:
+ case USB_DT_CONFIG:
+ case USB_DT_STRING:
+ case USB_DT_DEVICE_QUALIFIER:
+ /* function can't have any of those */
+ FVDBG("descriptor reserved for gadget: %d", _ds->bDescriptorType);
+ return -EINVAL;
+
+ case USB_DT_INTERFACE: {
+ struct usb_interface_descriptor *ds = (void *)_ds;
+ FVDBG("interface descriptor");
+ if (length != sizeof *ds)
+ goto inv_length;
+
+ __entity(INTERFACE, ds->bInterfaceNumber);
+ if (ds->iInterface)
+ __entity(STRING, ds->iInterface);
+ }
+ break;
+
+ case USB_DT_ENDPOINT: {
+ struct usb_endpoint_descriptor *ds = (void *)_ds;
+ FVDBG("endpoint descriptor");
+ if (length != USB_DT_ENDPOINT_SIZE &&
+ length != USB_DT_ENDPOINT_AUDIO_SIZE)
+ goto inv_length;
+ __entity(ENDPOINT, ds->bEndpointAddress);
+ }
+ break;
+
+ case USB_DT_OTG:
+ if (length != sizeof(struct usb_otg_descriptor))
+ goto inv_length;
+ break;
+
+ case USB_DT_INTERFACE_ASSOCIATION: {
+ struct usb_interface_assoc_descriptor *ds = (void *)_ds;
+ FVDBG("interface association descriptor");
+ if (length != sizeof *ds)
+ goto inv_length;
+ if (ds->iFunction)
+ __entity(STRING, ds->iFunction);
+ }
+ break;
+
+ case USB_DT_OTHER_SPEED_CONFIG:
+ case USB_DT_INTERFACE_POWER:
+ case USB_DT_DEBUG:
+ case USB_DT_SECURITY:
+ case USB_DT_CS_RADIO_CONTROL:
+ /* TODO */
+ FVDBG("unimplemented descriptor: %d", _ds->bDescriptorType);
+ return -EINVAL;
+
+ default:
+ /* We should never be here */
+ FVDBG("unknown descriptor: %d", _ds->bDescriptorType);
+ return -EINVAL;
+
+ inv_length:
+ FVDBG("invalid length: %d (descriptor %d)",
+ _ds->bLength, _ds->bDescriptorType);
+ return -EINVAL;
+ }
+
+#undef __entity
+#undef __entity_check_DESCRIPTOR
+#undef __entity_check_INTERFACE
+#undef __entity_check_STRING
+#undef __entity_check_ENDPOINT
+
+ return length;
+}
+
+
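+/* Walk `count' descriptors stored in `data'.  For every descriptor the
+ * entity callback is invoked with FFS_DESCRIPTOR (the descriptor index
+ * cast to a pointer) and then ffs_do_desc() reports the entities found
+ * inside it.  After the last descriptor the callback is called once
+ * more with data == NULL.  Returns the number of bytes consumed or a
+ * negative error code. */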
+static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
+ ffs_entity_callback entity, void *priv)
+{
+ const unsigned _len = len;
+ unsigned long num = 0;
+
+ ENTER();
+
+ for (;;) {
+ int ret;
+
+ if (num == count)
+ data = NULL;
+
+ /* Record "descriptor" entitny */
+ ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
+ if (unlikely(ret < 0)) {
+ FDBG("entity DESCRIPTOR(%02lx); ret = %d", num, ret);
+ return ret;
+ }
+
+ if (!data)
+ return _len - len;
+
+ ret = ffs_do_desc(data, len, entity, priv);
+ if (unlikely(ret < 0)) {
+ FDBG("%s returns %d", __func__, ret);
+ return ret;
+ }
+
+ len -= ret;
+ data += ret;
+ ++num;
+ }
+}
+
+
+static int __ffs_data_do_entity(enum ffs_entity_type type,
+ u8 *valuep, struct usb_descriptor_header *desc,
+ void *priv)
+{
+ struct ffs_data *ffs = priv;
+
+ ENTER();
+
+ switch (type) {
+ case FFS_DESCRIPTOR:
+ break;
+
+ case FFS_INTERFACE:
+ /* Interfaces are indexed from zero so if we
+ * encountered interface "n" then there are at least
+ * "n+1" interfaces. */
+ if (*valuep >= ffs->interfaces_count)
+ ffs->interfaces_count = *valuep + 1;
+ break;
+
+ case FFS_STRING:
+ /* Strings are indexed from 1 (0 is magic ;) reserved
+ * for languages list or some such) */
+ if (*valuep > ffs->strings_count)
+ ffs->strings_count = *valuep;
+ break;
+
+ case FFS_ENDPOINT:
+ /* Endpoints are indexed from 1 as well. */
+ if ((*valuep & USB_ENDPOINT_NUMBER_MASK) > ffs->eps_count)
+ ffs->eps_count = (*valuep & USB_ENDPOINT_NUMBER_MASK);
+ break;
+ }
+
+ return 0;
+}
+
+
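+/* Parse the descriptors blob provided by user space.  Expected layout
+ * (all fields little-endian): FUNCTIONFS_DESCRIPTORS_MAGIC, total
+ * length, full-speed descriptor count, high-speed descriptor count,
+ * followed by the raw full-speed and then high-speed descriptors.  The
+ * blob is kept as ffs->raw_descs on success and freed on error. */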
+static int __ffs_data_got_descs(struct ffs_data *ffs,
+ char *const _data, size_t len)
+{
+ unsigned fs_count, hs_count;
+ int fs_len, ret = -EINVAL;
+ char *data = _data;
+
+ ENTER();
+
+ if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_DESCRIPTORS_MAGIC ||
+ get_unaligned_le32(data + 4) != len))
+ goto error;
+ fs_count = get_unaligned_le32(data + 8);
+ hs_count = get_unaligned_le32(data + 12);
+
+ if (!fs_count && !hs_count)
+ goto einval;
+
+ data += 16;
+ len -= 16;
+
+ if (likely(fs_count)) {
+ fs_len = ffs_do_descs(fs_count, data, len,
+ __ffs_data_do_entity, ffs);
+ if (unlikely(fs_len < 0)) {
+ ret = fs_len;
+ goto error;
+ }
+
+ data += fs_len;
+ len -= fs_len;
+ } else {
+ fs_len = 0;
+ }
+
+ if (likely(hs_count)) {
+ ret = ffs_do_descs(hs_count, data, len,
+ __ffs_data_do_entity, ffs);
+ if (unlikely(ret < 0))
+ goto error;
+ } else {
+ ret = 0;
+ }
+
+ if (unlikely(len != ret))
+ goto einval;
+
+ ffs->raw_fs_descs_length = fs_len;
+ ffs->raw_descs_length = fs_len + ret;
+ ffs->raw_descs = _data;
+ ffs->fs_descs_count = fs_count;
+ ffs->hs_descs_count = hs_count;
+
+ return 0;
+
+einval:
+ ret = -EINVAL;
+error:
+ kfree(_data);
+ return ret;
+}
+
+
+
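+/* Parse the strings blob provided by user space.  Layout:
+ * FUNCTIONFS_STRINGS_MAGIC, total length, string count and language
+ * count, followed for each language by a 16-bit language id and
+ * `str_count' NUL-terminated strings.  Builds the usb_gadget_strings
+ * tables used later when the function is bound. */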
+static int __ffs_data_got_strings(struct ffs_data *ffs,
+ char *const _data, size_t len)
+{
+ u32 str_count, needed_count, lang_count;
+ struct usb_gadget_strings **stringtabs, *t;
+ struct usb_string *strings, *s;
+ const char *data = _data;
+
+ ENTER();
+
+ if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
+ get_unaligned_le32(data + 4) != len))
+ goto error;
+ str_count = get_unaligned_le32(data + 8);
+ lang_count = get_unaligned_le32(data + 12);
+
+ /* if one is zero the other must be zero */
+ if (unlikely(!str_count != !lang_count))
+ goto error;
+
+ /* Do we have at least as many strings as descriptors need? */
+ needed_count = ffs->strings_count;
+ if (unlikely(str_count < needed_count))
+ goto error;
+
+ /* If we don't need any strings just return and free all
+ * memory */
+ if (!needed_count) {
+ kfree(_data);
+ return 0;
+ }
+
+ /* Allocate */
+ {
+ /* Allocate everything in one chunk so there's less
+ * maintenance. */
+ struct {
+ struct usb_gadget_strings *stringtabs[lang_count + 1];
+ struct usb_gadget_strings stringtab[lang_count];
+ struct usb_string strings[lang_count*(needed_count+1)];
+ } *d;
+ unsigned i = 0;
+
+ d = kmalloc(sizeof *d, GFP_KERNEL);
+ if (unlikely(!d)) {
+ kfree(_data);
+ return -ENOMEM;
+ }
+
+ stringtabs = d->stringtabs;
+ t = d->stringtab;
+ i = lang_count;
+ do {
+ *stringtabs++ = t++;
+ } while (--i);
+ *stringtabs = NULL;
+
+ stringtabs = d->stringtabs;
+ t = d->stringtab;
+ s = d->strings;
+ strings = s;
+ }
+
+ /* For each language */
+ data += 16;
+ len -= 16;
+
+ do { /* lang_count > 0 so we can use do-while */
+ unsigned needed = needed_count;
+
+ if (unlikely(len < 3))
+ goto error_free;
+ t->language = get_unaligned_le16(data);
+ t->strings = s;
+ ++t;
+
+ data += 2;
+ len -= 2;
+
+ /* For each string */
+ do { /* str_count > 0 so we can use do-while */
+ size_t length = strnlen(data, len);
+
+ if (unlikely(length == len))
+ goto error_free;
+
+ /* user may provide more strings than we need,
+ * if that's the case we simply ignore the
+ * rest */
+ if (likely(needed)) {
+ /* s->id will be set while adding
+ * function to configuration so for
+ * now just leave garbage here. */
+ s->s = data;
+ --needed;
+ ++s;
+ }
+
+ data += length + 1;
+ len -= length + 1;
+ } while (--str_count);
+
+ s->id = 0; /* terminator */
+ s->s = NULL;
+ ++s;
+
+ } while (--lang_count);
+
+ /* Some garbage left? */
+ if (unlikely(len))
+ goto error_free;
+
+ /* Done! */
+ ffs->stringtabs = stringtabs;
+ ffs->raw_strings = _data;
+
+ return 0;
+
+error_free:
+ kfree(stringtabs);
+error:
+ kfree(_data);
+ return -EINVAL;
+}
+
+
+
+
+/* Events handling and management *******************************************/
+
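+/* Add an event to the queue; must be called with ffs->ev.waitq.lock
+ * held (ffs_event_add() below takes the lock itself).  Events that are
+ * superseded by the new one are purged from the queue first. */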
+static void __ffs_event_add(struct ffs_data *ffs,
+ enum usb_functionfs_event_type type)
+{
+ enum usb_functionfs_event_type rem_type1, rem_type2 = type;
+ int neg = 0;
+
+ /* Abort any unhandled setup */
+ /* We do not need to worry about a cmpxchg() changing the
+ * value of ffs->setup_state without holding the lock, because
+ * when the state is FFS_SETUP_PENDING the cmpxchg() calls
+ * elsewhere in the source do nothing. */
+ if (ffs->setup_state == FFS_SETUP_PENDING)
+ ffs->setup_state = FFS_SETUP_CANCELED;
+
+ switch (type) {
+ case FUNCTIONFS_RESUME:
+ rem_type2 = FUNCTIONFS_SUSPEND;
+ /* FALL THROUGH */
+ case FUNCTIONFS_SUSPEND:
+ case FUNCTIONFS_SETUP:
+ rem_type1 = type;
+ /* discard all similar events */
+ break;
+
+ case FUNCTIONFS_BIND:
+ case FUNCTIONFS_UNBIND:
+ case FUNCTIONFS_DISABLE:
+ case FUNCTIONFS_ENABLE:
+ /* discard everything other than power management. */
+ rem_type1 = FUNCTIONFS_SUSPEND;
+ rem_type2 = FUNCTIONFS_RESUME;
+ neg = 1;
+ break;
+
+ default:
+ BUG();
+ }
+
+ {
+ u8 *ev = ffs->ev.types, *out = ev;
+ unsigned n = ffs->ev.count;
+ for (; n; --n, ++ev)
+ if ((*ev == rem_type1 || *ev == rem_type2) == neg)
+ *out++ = *ev;
+ else
+ FVDBG("purging event %d", *ev);
+ ffs->ev.count = out - ffs->ev.types;
+ }
+
+ FVDBG("adding event %d", type);
+ ffs->ev.types[ffs->ev.count++] = type;
+ wake_up_locked(&ffs->ev.waitq);
+}
+
+static void ffs_event_add(struct ffs_data *ffs,
+ enum usb_functionfs_event_type type)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
+ __ffs_event_add(ffs, type);
+ spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+}
+
+
+/* Bind/unbind USB function hooks *******************************************/
+
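+/* First pass over the raw descriptors during bind: copy descriptor
+ * pointers into the function's descriptor arrays and, for endpoint
+ * descriptors, claim an endpoint from the gadget (usb_ep_autoconfig)
+ * and preallocate its usb_request. */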
+static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
+ struct usb_descriptor_header *desc,
+ void *priv)
+{
+ struct usb_endpoint_descriptor *ds = (void *)desc;
+ struct ffs_function *func = priv;
+ struct ffs_ep *ffs_ep;
+
+ /* If hs_descriptors is not NULL then we are reading hs
+ * descriptors now */
+ const int isHS = func->function.hs_descriptors != NULL;
+ unsigned idx;
+
+ if (type != FFS_DESCRIPTOR)
+ return 0;
+
+ if (isHS)
+ func->function.hs_descriptors[(long)valuep] = desc;
+ else
+ func->function.descriptors[(long)valuep] = desc;
+
+ if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return 0;
+
+ idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1;
+ ffs_ep = func->eps + idx;
+
+ if (unlikely(ffs_ep->descs[isHS])) {
+ FVDBG("two %sspeed descriptors for EP %d",
+ isHS ? "high" : "full",
+ ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ return -EINVAL;
+ }
+ ffs_ep->descs[isHS] = ds;
+
+ ffs_dump_mem(": Original ep desc", ds, ds->bLength);
+ if (ffs_ep->ep) {
+ ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
+ if (!ds->wMaxPacketSize)
+ ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
+ } else {
+ struct usb_request *req;
+ struct usb_ep *ep;
+
+ FVDBG("autoconfig");
+ ep = usb_ep_autoconfig(func->gadget, ds);
+ if (unlikely(!ep))
+ return -ENOTSUPP;
+ ep->driver_data = func->eps + idx;
+
+ req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (unlikely(!req))
+ return -ENOMEM;
+
+ ffs_ep->ep = ep;
+ ffs_ep->req = req;
+ func->eps_revmap[ds->bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK] = idx + 1;
+ }
+ ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
+
+ return 0;
+}
+
+
+static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
+ struct usb_descriptor_header *desc,
+ void *priv)
+{
+ struct ffs_function *func = priv;
+ unsigned idx;
+ u8 newValue;
+
+ switch (type) {
+ default:
+ case FFS_DESCRIPTOR:
+ /* Handled in previous pass by __ffs_func_bind_do_descs() */
+ return 0;
+
+ case FFS_INTERFACE:
+ idx = *valuep;
+ if (func->interfaces_nums[idx] < 0) {
+ int id = usb_interface_id(func->conf, &func->function);
+ if (unlikely(id < 0))
+ return id;
+ func->interfaces_nums[idx] = id;
+ }
+ newValue = func->interfaces_nums[idx];
+ break;
+
+ case FFS_STRING:
+ /* String IDs are allocated when ffs_data is bound to cdev */
+ newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
+ break;
+
+ case FFS_ENDPOINT:
+ /* USB_DT_ENDPOINT are handled in
+ * __ffs_func_bind_do_descs(). */
+ if (desc->bDescriptorType == USB_DT_ENDPOINT)
+ return 0;
+
+ idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
+ if (unlikely(!func->eps[idx].ep))
+ return -EINVAL;
+
+ {
+ struct usb_endpoint_descriptor **descs;
+ descs = func->eps[idx].descs;
+ newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
+ }
+ break;
+ }
+
+ FVDBG("%02x -> %02x", *valuep, newValue);
+ *valuep = newValue;
+ return 0;
+}
+
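+/* Bind the function: allocate the per-bind state in a single chunk,
+ * then run two passes over the descriptors -- the first
+ * (__ffs_func_bind_do_descs) allocates endpoints, the second
+ * (__ffs_func_bind_do_nums) rewrites interface, string and endpoint
+ * numbers to the values assigned by composite. */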
+static int ffs_func_bind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ struct ffs_function *func = ffs_func_from_usb(f);
+ struct ffs_data *ffs = func->ffs;
+
+ const int full = !!func->ffs->fs_descs_count;
+ const int high = gadget_is_dualspeed(func->gadget) &&
+ func->ffs->hs_descs_count;
+
+ int ret;
+
+ /* Make it a single chunk, less management later on */
+ struct {
+ struct ffs_ep eps[ffs->eps_count];
+ struct usb_descriptor_header
+ *fs_descs[full ? ffs->fs_descs_count + 1 : 0];
+ struct usb_descriptor_header
+ *hs_descs[high ? ffs->hs_descs_count + 1 : 0];
+ short inums[ffs->interfaces_count];
+ char raw_descs[high ? ffs->raw_descs_length
+ : ffs->raw_fs_descs_length];
+ } *data;
+
+ ENTER();
+
+ /* Has descriptors only for speeds not supported by the gadget? */
+ if (unlikely(!(full | high)))
+ return -ENOTSUPP;
+
+ /* Allocate */
+ data = kmalloc(sizeof *data, GFP_KERNEL);
+ if (unlikely(!data))
+ return -ENOMEM;
+
+ /* Zero */
+ memset(data->eps, 0, sizeof data->eps);
+ memcpy(data->raw_descs, ffs->raw_descs + 16, sizeof data->raw_descs);
+ memset(data->inums, 0xff, sizeof data->inums);
+ for (ret = ffs->eps_count; ret; --ret)
+ data->eps[ret].num = -1;
+
+ /* Save pointers */
+ func->eps = data->eps;
+ func->interfaces_nums = data->inums;
+
+ /* Go through all the endpoint descriptors and allocate
+ * endpoints first, so that later we can rewrite the endpoint
+ * numbers without worrying that an endpoint may be described
+ * further down. */
+ if (likely(full)) {
+ func->function.descriptors = data->fs_descs;
+ ret = ffs_do_descs(ffs->fs_descs_count,
+ data->raw_descs,
+ sizeof data->raw_descs,
+ __ffs_func_bind_do_descs, func);
+ if (unlikely(ret < 0))
+ goto error;
+ } else {
+ ret = 0;
+ }
+
+ if (likely(high)) {
+ func->function.hs_descriptors = data->hs_descs;
+ ret = ffs_do_descs(ffs->hs_descs_count,
+ data->raw_descs + ret,
+ (sizeof data->raw_descs) - ret,
+ __ffs_func_bind_do_descs, func);
+ }
+
+ /* Now handle interface number allocation and interface and
+ * endpoint number rewriting.  We can do that in one go
+ * now. */
+ ret = ffs_do_descs(ffs->fs_descs_count +
+ (high ? ffs->hs_descs_count : 0),
+ data->raw_descs, sizeof data->raw_descs,
+ __ffs_func_bind_do_nums, func);
+ if (unlikely(ret < 0))
+ goto error;
+
+ /* And we're done */
+ ffs_event_add(ffs, FUNCTIONFS_BIND);
+ return 0;
+
+error:
+ /* XXX Do we need to release all claimed endpoints here? */
+ return ret;
+}
+
+
+/* Other USB function hooks *************************************************/
+
+static void ffs_func_unbind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ struct ffs_function *func = ffs_func_from_usb(f);
+ struct ffs_data *ffs = func->ffs;
+
+ ENTER();
+
+ if (ffs->func == func) {
+ ffs_func_eps_disable(func);
+ ffs->func = NULL;
+ }
+
+ ffs_event_add(ffs, FUNCTIONFS_UNBIND);
+
+ ffs_func_free(func);
+}
+
+
+static int ffs_func_set_alt(struct usb_function *f,
+ unsigned interface, unsigned alt)
+{
+ struct ffs_function *func = ffs_func_from_usb(f);
+ struct ffs_data *ffs = func->ffs;
+ int ret = 0, intf;
+
+ if (alt != (unsigned)-1) {
+ intf = ffs_func_revmap_intf(func, interface);
+ if (unlikely(intf < 0))
+ return intf;
+ }
+
+ if (ffs->func)
+ ffs_func_eps_disable(ffs->func);
+
+ if (ffs->state != FFS_ACTIVE)
+ return -ENODEV;
+
+ if (alt == (unsigned)-1) {
+ ffs->func = NULL;
+ ffs_event_add(ffs, FUNCTIONFS_DISABLE);
+ return 0;
+ }
+
+ ffs->func = func;
+ ret = ffs_func_eps_enable(func);
+ if (likely(ret >= 0))
+ ffs_event_add(ffs, FUNCTIONFS_ENABLE);
+ return ret;
+}
+
+static void ffs_func_disable(struct usb_function *f)
+{
+ ffs_func_set_alt(f, 0, (unsigned)-1);
+}
+
+static int ffs_func_setup(struct usb_function *f,
+ const struct usb_ctrlrequest *creq)
+{
+ struct ffs_function *func = ffs_func_from_usb(f);
+ struct ffs_data *ffs = func->ffs;
+ unsigned long flags;
+ int ret;
+
+ ENTER();
+
+ FVDBG("creq->bRequestType = %02x", creq->bRequestType);
+ FVDBG("creq->bRequest = %02x", creq->bRequest);
+ FVDBG("creq->wValue = %04x", le16_to_cpu(creq->wValue));
+ FVDBG("creq->wIndex = %04x", le16_to_cpu(creq->wIndex));
+ FVDBG("creq->wLength = %04x", le16_to_cpu(creq->wLength));
+
+ /* Most requests directed to an interface go through here
+ * (notable exceptions are set/get interface), so we need to
+ * handle them.  All others are either handled by composite or
+ * passed to usb_configuration->setup() (if one is set).  In
+ * any case we also handle requests directed to an endpoint
+ * here (as it's straightforward), but what to do with any
+ * other request? */
+
+ if (ffs->state != FFS_ACTIVE)
+ return -ENODEV;
+
+ switch (creq->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_INTERFACE:
+ ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
+ if (unlikely(ret < 0))
+ return ret;
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
+ if (unlikely(ret < 0))
+ return ret;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
+ ffs->ev.setup = *creq;
+ ffs->ev.setup.wIndex = cpu_to_le16(ret);
+ __ffs_event_add(ffs, FUNCTIONFS_SETUP);
+ spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+
+ return 0;
+}
+
+static void ffs_func_suspend(struct usb_function *f)
+{
+ ENTER();
+ ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
+}
+
+static void ffs_func_resume(struct usb_function *f)
+{
+ ENTER();
+ ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
+}
+
+
+
+/* Endpoint and interface numbers reverse mapping ***************************/
+
+static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
+{
+ num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
+ return num ? num : -EDOM;
+}
+
+static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
+{
+ short *nums = func->interfaces_nums;
+ unsigned count = func->ffs->interfaces_count;
+
+ for (; count; --count, ++nums) {
+ if (*nums >= 0 && *nums == intf)
+ return nums - func->interfaces_nums;
+ }
+
+ return -EDOM;
+}
+
+
+/* Misc helper functions ****************************************************/
+
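+/* Like mutex_lock_interruptible() but, when `nonblock' is set, only
+ * tries to take the lock and returns -EAGAIN if it is contended. */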
+static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
+{
+ return nonblock
+ ? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
+ : mutex_lock_interruptible(mutex);
+}
+
+
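+/* Copy `len' bytes from user space into a freshly allocated kernel
+ * buffer.  Returns NULL for a zero-length request, an ERR_PTR() on
+ * failure, or the buffer (which the caller must kfree()). */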
+static char *ffs_prepare_buffer(const char * __user buf, size_t len)
+{
+ char *data;
+
+ if (unlikely(!len))
+ return NULL;
+
+ data = kmalloc(len, GFP_KERNEL);
+ if (unlikely(!data))
+ return ERR_PTR(-ENOMEM);
+
+ if (unlikely(__copy_from_user(data, buf, len))) {
+ kfree(data);
+ return ERR_PTR(-EFAULT);
+ }
+
+ FVDBG("Buffer from user space:");
+ ffs_dump_mem("", data, len);
+
+ return data;
+}
diff --git a/drivers/usb/gadget/f_hid.c b/drivers/usb/gadget/f_hid.c
new file mode 100644
index 0000000..1e00ff9
--- /dev/null
+++ b/drivers/usb/gadget/f_hid.c
@@ -0,0 +1,673 @@
+/*
+ * f_hid.c -- USB HID function driver
+ *
+ * Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/module.h>
+#include <linux/hid.h>
+#include <linux/cdev.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/smp_lock.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/usb/g_hid.h>
+
+static int major, minors;
+static struct class *hidg_class;
+
+/*-------------------------------------------------------------------------*/
+/* HID gadget struct */
+
+struct f_hidg {
+ /* configuration */
+ unsigned char bInterfaceSubClass;
+ unsigned char bInterfaceProtocol;
+ unsigned short report_desc_length;
+ char *report_desc;
+ unsigned short report_length;
+
+ /* recv report */
+ char *set_report_buff;
+ unsigned short set_report_length;
+ spinlock_t spinlock;
+ wait_queue_head_t read_queue;
+
+ /* send report */
+ struct mutex lock;
+ bool write_pending;
+ wait_queue_head_t write_queue;
+ struct usb_request *req;
+
+ int minor;
+ struct cdev cdev;
+ struct usb_function func;
+ struct usb_ep *in_ep;
+ struct usb_endpoint_descriptor *fs_in_ep_desc;
+ struct usb_endpoint_descriptor *hs_in_ep_desc;
+};
+
+static inline struct f_hidg *func_to_hidg(struct usb_function *f)
+{
+ return container_of(f, struct f_hidg, func);
+}
+
+/*-------------------------------------------------------------------------*/
+/* Static descriptors */
+
+static struct usb_interface_descriptor hidg_interface_desc = {
+ .bLength = sizeof hidg_interface_desc,
+ .bDescriptorType = USB_DT_INTERFACE,
+ /* .bInterfaceNumber = DYNAMIC */
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_HID,
+ /* .bInterfaceSubClass = DYNAMIC */
+ /* .bInterfaceProtocol = DYNAMIC */
+ /* .iInterface = DYNAMIC */
+};
+
+static struct hid_descriptor hidg_desc = {
+ .bLength = sizeof hidg_desc,
+ .bDescriptorType = HID_DT_HID,
+ .bcdHID = 0x0101,
+ .bCountryCode = 0x00,
+ .bNumDescriptors = 0x1,
+ /*.desc[0].bDescriptorType = DYNAMIC */
+ /*.desc[0].wDescriptorLength = DYNAMIC */
+};
+
+/* High-Speed Support */
+
+static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ /*.wMaxPacketSize = DYNAMIC */
+ .bInterval = 4, /* FIXME: Add this field in the
+ * HID gadget configuration?
+ * (struct hidg_func_descriptor)
+ */
+};
+
+static struct usb_descriptor_header *hidg_hs_descriptors[] = {
+ (struct usb_descriptor_header *)&hidg_interface_desc,
+ (struct usb_descriptor_header *)&hidg_desc,
+ (struct usb_descriptor_header *)&hidg_hs_in_ep_desc,
+ NULL,
+};
+
+/* Full-Speed Support */
+
+static struct usb_endpoint_descriptor hidg_fs_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ /*.wMaxPacketSize = DYNAMIC */
+ .bInterval = 10, /* FIXME: Add this field in the
+ * HID gadget configuration?
+ * (struct hidg_func_descriptor)
+ */
+};
+
+static struct usb_descriptor_header *hidg_fs_descriptors[] = {
+ (struct usb_descriptor_header *)&hidg_interface_desc,
+ (struct usb_descriptor_header *)&hidg_desc,
+ (struct usb_descriptor_header *)&hidg_fs_in_ep_desc,
+ NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+/* Char Device */
+
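+/* Read the most recent SET_REPORT payload received from the host.
+ * Blocks (unless O_NONBLOCK) until a report is available; the buffer
+ * is consumed by the read. */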
+static ssize_t f_hidg_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ptr)
+{
+ struct f_hidg *hidg = (struct f_hidg *)file->private_data;
+ char *tmp_buff = NULL;
+ unsigned long flags;
+
+ if (!count)
+ return 0;
+
+ if (!access_ok(VERIFY_WRITE, buffer, count))
+ return -EFAULT;
+
+ spin_lock_irqsave(&hidg->spinlock, flags);
+
+#define READ_COND (hidg->set_report_buff != NULL)
+
+ while (!READ_COND) {
+ spin_unlock_irqrestore(&hidg->spinlock, flags);
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(hidg->read_queue, READ_COND))
+ return -ERESTARTSYS;
+
+ spin_lock_irqsave(&hidg->spinlock, flags);
+ }
+
+
+ count = min_t(unsigned, count, hidg->set_report_length);
+ tmp_buff = hidg->set_report_buff;
+ hidg->set_report_buff = NULL;
+
+ spin_unlock_irqrestore(&hidg->spinlock, flags);
+
+ if (tmp_buff != NULL) {
+ /* copy to user outside spinlock */
+ count -= copy_to_user(buffer, tmp_buff, count);
+ kfree(tmp_buff);
+ } else
+ count = -ENOMEM;
+
+ return count;
+}
+
+static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_hidg *hidg = (struct f_hidg *)ep->driver_data;
+
+ if (req->status != 0) {
+ ERROR(hidg->func.config->cdev,
+ "End Point Request ERROR: %d\n", req->status);
+ }
+
+ hidg->write_pending = 0;
+ wake_up(&hidg->write_queue);
+}
+
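+/* Send an input report to the host.  Blocks (unless O_NONBLOCK) until
+ * the previous report has been transmitted, then copies the data into
+ * the preallocated request and queues it on the interrupt IN
+ * endpoint. */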
+static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *offp)
+{
+ struct f_hidg *hidg = (struct f_hidg *)file->private_data;
+ ssize_t status = -ENOMEM;
+
+ if (!access_ok(VERIFY_READ, buffer, count))
+ return -EFAULT;
+
+ mutex_lock(&hidg->lock);
+
+#define WRITE_COND (!hidg->write_pending)
+
+ /* write queue */
+ while (!WRITE_COND) {
+ mutex_unlock(&hidg->lock);
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible_exclusive(
+ hidg->write_queue, WRITE_COND))
+ return -ERESTARTSYS;
+
+ mutex_lock(&hidg->lock);
+ }
+
+ count = min_t(unsigned, count, hidg->report_length);
+ status = copy_from_user(hidg->req->buf, buffer, count);
+
+ if (status != 0) {
+ ERROR(hidg->func.config->cdev,
+ "copy_from_user error\n");
+ mutex_unlock(&hidg->lock);
+ return -EINVAL;
+ }
+
+ hidg->req->status = 0;
+ hidg->req->zero = 0;
+ hidg->req->length = count;
+ hidg->req->complete = f_hidg_req_complete;
+ hidg->req->context = hidg;
+ hidg->write_pending = 1;
+
+ status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
+ if (status < 0) {
+ ERROR(hidg->func.config->cdev,
+ "usb_ep_queue error on int endpoint %zd\n", status);
+ hidg->write_pending = 0;
+ wake_up(&hidg->write_queue);
+ } else {
+ status = count;
+ }
+
+ mutex_unlock(&hidg->lock);
+
+ return status;
+}
+
+static unsigned int f_hidg_poll(struct file *file, poll_table *wait)
+{
+ struct f_hidg *hidg = (struct f_hidg *)file->private_data;
+ unsigned int ret = 0;
+
+ poll_wait(file, &hidg->read_queue, wait);
+ poll_wait(file, &hidg->write_queue, wait);
+
+ if (WRITE_COND)
+ ret |= POLLOUT | POLLWRNORM;
+
+ if (READ_COND)
+ ret |= POLLIN | POLLRDNORM;
+
+ return ret;
+}
+
+#undef WRITE_COND
+#undef READ_COND
+
+static int f_hidg_release(struct inode *inode, struct file *fd)
+{
+ fd->private_data = NULL;
+ return 0;
+}
+
+static int f_hidg_open(struct inode *inode, struct file *fd)
+{
+ struct f_hidg *hidg =
+ container_of(inode->i_cdev, struct f_hidg, cdev);
+
+ fd->private_data = hidg;
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+/* usb_function */
+
+static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_hidg *hidg = (struct f_hidg *)req->context;
+
+ if (req->status != 0 || req->buf == NULL || req->actual == 0) {
+ ERROR(hidg->func.config->cdev, "%s FAILED\n", __func__);
+ return;
+ }
+
+ spin_lock(&hidg->spinlock);
+
+ hidg->set_report_buff = krealloc(hidg->set_report_buff,
+ req->actual, GFP_ATOMIC);
+
+ if (hidg->set_report_buff == NULL) {
+ spin_unlock(&hidg->spinlock);
+ return;
+ }
+ hidg->set_report_length = req->actual;
+ memcpy(hidg->set_report_buff, req->buf, req->actual);
+
+ spin_unlock(&hidg->spinlock);
+
+ wake_up(&hidg->read_queue);
+
+ return;
+}
+
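+/* Handle HID class-specific and standard control requests on ep0:
+ * GET_REPORT answers with an empty report, SET_REPORT hands the data
+ * off to hidg_set_report_complete(), GET_DESCRIPTOR(HID_DT_REPORT)
+ * returns the report descriptor; everything else is stalled. */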
+static int hidg_setup(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct f_hidg *hidg = func_to_hidg(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int status = 0;
+ __u16 value, length;
+
+ value = __le16_to_cpu(ctrl->wValue);
+ length = __le16_to_cpu(ctrl->wLength);
+
+ VDBG(cdev, "hid_setup crtl_request : bRequestType:0x%x bRequest:0x%x "
+ "Value:0x%x\n", ctrl->bRequestType, ctrl->bRequest, value);
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+ | HID_REQ_GET_REPORT):
+ VDBG(cdev, "get_report\n");
+
+ /* send an empty report */
+ length = min_t(unsigned, length, hidg->report_length);
+ memset(req->buf, 0x0, length);
+
+ goto respond;
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+ | HID_REQ_GET_PROTOCOL):
+ VDBG(cdev, "get_protocol\n");
+ goto stall;
+ break;
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+ | HID_REQ_SET_REPORT):
+ VDBG(cdev, "set_report | wLenght=%d\n", ctrl->wLength);
+ req->context = hidg;
+ req->complete = hidg_set_report_complete;
+ goto respond;
+ break;
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+ | HID_REQ_SET_PROTOCOL):
+ VDBG(cdev, "set_protocol\n");
+ goto stall;
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8
+ | USB_REQ_GET_DESCRIPTOR):
+ switch (value >> 8) {
+ case HID_DT_REPORT:
+ VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n");
+ length = min_t(unsigned short, length,
+ hidg->report_desc_length);
+ memcpy(req->buf, hidg->report_desc, length);
+ goto respond;
+ break;
+
+ default:
+ VDBG(cdev, "Unknown decriptor request 0x%x\n",
+ value >> 8);
+ goto stall;
+ break;
+ }
+ break;
+
+ default:
+ VDBG(cdev, "Unknown request 0x%x\n",
+ ctrl->bRequest);
+ goto stall;
+ break;
+ }
+
+stall:
+ return -EOPNOTSUPP;
+
+respond:
+ req->zero = 0;
+ req->length = length;
+ status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (status < 0)
+ ERROR(cdev, "usb_ep_queue error on ep0 %d\n", value);
+ return status;
+}
+
+static void hidg_disable(struct usb_function *f)
+{
+ struct f_hidg *hidg = func_to_hidg(f);
+
+ usb_ep_disable(hidg->in_ep);
+ hidg->in_ep->driver_data = NULL;
+
+ return;
+}
+
+static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct f_hidg *hidg = func_to_hidg(f);
+ const struct usb_endpoint_descriptor *ep_desc;
+ int status = 0;
+
+ VDBG(cdev, "hidg_set_alt intf:%d alt:%d\n", intf, alt);
+
+ if (hidg->in_ep != NULL) {
+ /* restart endpoint */
+ if (hidg->in_ep->driver_data != NULL)
+ usb_ep_disable(hidg->in_ep);
+
+ ep_desc = ep_choose(f->config->cdev->gadget,
+ hidg->hs_in_ep_desc, hidg->fs_in_ep_desc);
+ status = usb_ep_enable(hidg->in_ep, ep_desc);
+ if (status < 0) {
+ ERROR(cdev, "Enable endpoint FAILED!\n");
+ goto fail;
+ }
+ hidg->in_ep->driver_data = hidg;
+ }
+fail:
+ return status;
+}
+
+const struct file_operations f_hidg_fops = {
+ .owner = THIS_MODULE,
+ .open = f_hidg_open,
+ .release = f_hidg_release,
+ .write = f_hidg_write,
+ .read = f_hidg_read,
+ .poll = f_hidg_poll,
+};
+
+static int __init hidg_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_ep *ep;
+ struct f_hidg *hidg = func_to_hidg(f);
+ int status;
+ dev_t dev;
+
+ /* allocate instance-specific interface IDs, and patch descriptors */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ hidg_interface_desc.bInterfaceNumber = status;
+
+
+ /* allocate instance-specific endpoints */
+ status = -ENODEV;
+ ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_in_ep_desc);
+ if (!ep)
+ goto fail;
+ ep->driver_data = c->cdev; /* claim */
+ hidg->in_ep = ep;
+
+ /* preallocate request and buffer */
+ status = -ENOMEM;
+ hidg->req = usb_ep_alloc_request(hidg->in_ep, GFP_KERNEL);
+ if (!hidg->req)
+ goto fail;
+
+
+ hidg->req->buf = kmalloc(hidg->report_length, GFP_KERNEL);
+ if (!hidg->req->buf)
+ goto fail;
+
+ /* set descriptor dynamic values */
+ hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
+ hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
+ hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT;
+ hidg_desc.desc[0].wDescriptorLength =
+ cpu_to_le16(hidg->report_desc_length);
+
+ hidg->set_report_buff = NULL;
+
+ /* copy descriptors */
+ f->descriptors = usb_copy_descriptors(hidg_fs_descriptors);
+ if (!f->descriptors)
+ goto fail;
+
+ hidg->fs_in_ep_desc = usb_find_endpoint(hidg_fs_descriptors,
+ f->descriptors,
+ &hidg_fs_in_ep_desc);
+
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ hidg_hs_in_ep_desc.bEndpointAddress =
+ hidg_fs_in_ep_desc.bEndpointAddress;
+ f->hs_descriptors = usb_copy_descriptors(hidg_hs_descriptors);
+ if (!f->hs_descriptors)
+ goto fail;
+ hidg->hs_in_ep_desc = usb_find_endpoint(hidg_hs_descriptors,
+ f->hs_descriptors,
+ &hidg_hs_in_ep_desc);
+ } else {
+ hidg->hs_in_ep_desc = NULL;
+ }
+
+ mutex_init(&hidg->lock);
+ spin_lock_init(&hidg->spinlock);
+ init_waitqueue_head(&hidg->write_queue);
+ init_waitqueue_head(&hidg->read_queue);
+
+ /* create char device */
+ cdev_init(&hidg->cdev, &f_hidg_fops);
+ dev = MKDEV(major, hidg->minor);
+ status = cdev_add(&hidg->cdev, dev, 1);
+ if (status)
+ goto fail;
+
+ device_create(hidg_class, NULL, dev, NULL, "%s%d", "hidg", hidg->minor);
+
+ return 0;
+
+fail:
+ ERROR(f->config->cdev, "hidg_bind FAILED\n");
+ if (hidg->req != NULL) {
+ kfree(hidg->req->buf);
+ if (hidg->in_ep != NULL)
+ usb_ep_free_request(hidg->in_ep, hidg->req);
+ }
+
+ usb_free_descriptors(f->hs_descriptors);
+ usb_free_descriptors(f->descriptors);
+
+ return status;
+}
+
+static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_hidg *hidg = func_to_hidg(f);
+
+ device_destroy(hidg_class, MKDEV(major, hidg->minor));
+ cdev_del(&hidg->cdev);
+
+ /* disable/free request and end point */
+ usb_ep_disable(hidg->in_ep);
+ usb_ep_dequeue(hidg->in_ep, hidg->req);
+ kfree(hidg->req->buf);
+ usb_ep_free_request(hidg->in_ep, hidg->req);
+
+ /* free descriptors copies */
+ usb_free_descriptors(f->hs_descriptors);
+ usb_free_descriptors(f->descriptors);
+
+ kfree(hidg->report_desc);
+ kfree(hidg->set_report_buff);
+ kfree(hidg);
+}
+
+/*-------------------------------------------------------------------------*/
+/* Strings */
+
+#define CT_FUNC_HID_IDX 0
+
+static struct usb_string ct_func_string_defs[] = {
+ [CT_FUNC_HID_IDX].s = "HID Interface",
+ {}, /* end of list */
+};
+
+static struct usb_gadget_strings ct_func_string_table = {
+ .language = 0x0409, /* en-US */
+ .strings = ct_func_string_defs,
+};
+
+static struct usb_gadget_strings *ct_func_strings[] = {
+ &ct_func_string_table,
+ NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+/* usb_configuration */
+
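+/* Add one HID function instance to a configuration.  `index' selects
+ * the char device minor and must be below the number of minors
+ * reserved by ghid_setup(); the report descriptor and report lengths
+ * come from the supplied hidg_func_descriptor. */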
+int __init hidg_bind_config(struct usb_configuration *c,
+ struct hidg_func_descriptor *fdesc, int index)
+{
+ struct f_hidg *hidg;
+ int status;
+
+ if (index >= minors)
+ return -ENOENT;
+
+ /* maybe allocate device-global string IDs, and patch descriptors */
+ if (ct_func_string_defs[CT_FUNC_HID_IDX].id == 0) {
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ ct_func_string_defs[CT_FUNC_HID_IDX].id = status;
+ hidg_interface_desc.iInterface = status;
+ }
+
+ /* allocate and initialize one new instance */
+ hidg = kzalloc(sizeof *hidg, GFP_KERNEL);
+ if (!hidg)
+ return -ENOMEM;
+
+ hidg->minor = index;
+ hidg->bInterfaceSubClass = fdesc->subclass;
+ hidg->bInterfaceProtocol = fdesc->protocol;
+ hidg->report_length = fdesc->report_length;
+ hidg->report_desc_length = fdesc->report_desc_length;
+ hidg->report_desc = kmemdup(fdesc->report_desc,
+ fdesc->report_desc_length,
+ GFP_KERNEL);
+ if (!hidg->report_desc) {
+ kfree(hidg);
+ return -ENOMEM;
+ }
+
+ hidg->func.name = "hid";
+ hidg->func.strings = ct_func_strings;
+ hidg->func.bind = hidg_bind;
+ hidg->func.unbind = hidg_unbind;
+ hidg->func.set_alt = hidg_set_alt;
+ hidg->func.disable = hidg_disable;
+ hidg->func.setup = hidg_setup;
+
+ status = usb_add_function(c, &hidg->func);
+ if (status)
+ kfree(hidg);
+
+ return status;
+}
+
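+/* One-time setup for the HID gadget functions: create the "hidg"
+ * device class and reserve a char device region of `count' minors.
+ * Call before binding any HID function; ghid_cleanup() undoes it. */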
+int __init ghid_setup(struct usb_gadget *g, int count)
+{
+ int status;
+ dev_t dev;
+
+ hidg_class = class_create(THIS_MODULE, "hidg");
+
+ status = alloc_chrdev_region(&dev, 0, count, "hidg");
+ if (!status) {
+ major = MAJOR(dev);
+ minors = count;
+ }
+
+ return status;
+}
+
+void ghid_cleanup(void)
+{
+ if (major) {
+ unregister_chrdev_region(MKDEV(major, 0), minors);
+ major = minors = 0;
+ }
+
+ class_destroy(hidg_class);
+ hidg_class = NULL;
+}
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index f4911c0..7d05a0b 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -163,6 +163,10 @@
* ro setting are not allowed when the medium is loaded or if CD-ROM
* emulation is being used.
*
+ * When a LUN receives an "eject" SCSI request (Start/Stop Unit),
+ * if the LUN is removable, the backing file is released to simulate
+ * ejection.
+ *
*
* This function is heavily based on "File-backed Storage Gadget" by
* Alan Stern which in turn is heavily based on "Gadget Zero" by David
@@ -302,7 +306,6 @@ static const char fsg_string_interface[] = "Mass Storage";
#define FSG_NO_INTR_EP 1
-#define FSG_BUFFHD_STATIC_BUFFER 1
#define FSG_NO_DEVICE_STRINGS 1
#define FSG_NO_OTG 1
#define FSG_NO_INTR_EP 1
@@ -1385,12 +1388,50 @@ static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
static int do_start_stop(struct fsg_common *common)
{
- if (!common->curlun) {
+ struct fsg_lun *curlun = common->curlun;
+ int loej, start;
+
+ if (!curlun) {
return -EINVAL;
- } else if (!common->curlun->removable) {
- common->curlun->sense_data = SS_INVALID_COMMAND;
+ } else if (!curlun->removable) {
+ curlun->sense_data = SS_INVALID_COMMAND;
return -EINVAL;
}
+
+ loej = common->cmnd[4] & 0x02;
+ start = common->cmnd[4] & 0x01;
+
+ /* eject code from file_storage.c:do_start_stop() */
+
+ if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */
+ (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */
+ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+ return -EINVAL;
+ }
+
+ if (!start) {
+ /* Are we allowed to unload the media? */
+ if (curlun->prevent_medium_removal) {
+ LDBG(curlun, "unload attempt prevented\n");
+ curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
+ return -EINVAL;
+ }
+ if (loej) { /* Simulate an unload/eject */
+ up_read(&common->filesem);
+ down_write(&common->filesem);
+ fsg_lun_close(curlun);
+ up_write(&common->filesem);
+ down_read(&common->filesem);
+ }
+ } else {
+
+ /* Our emulation doesn't support mounting; the medium is
+ * available for use as soon as it is loaded. */
+ if (!fsg_lun_is_open(curlun)) {
+ curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
+ return -EINVAL;
+ }
+ }
return 0;
}
@@ -2701,10 +2742,8 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
/* Maybe allocate device-global string IDs, and patch descriptors */
if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
rc = usb_string_id(cdev);
- if (rc < 0) {
- kfree(common);
- return ERR_PTR(rc);
- }
+ if (unlikely(rc < 0))
+ goto error_release;
fsg_strings[FSG_STRING_INTERFACE].id = rc;
fsg_intf_desc.iInterface = rc;
}
@@ -2712,9 +2751,9 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
/* Create the LUNs, open their backing files, and register the
* LUN devices in sysfs. */
curlun = kzalloc(nluns * sizeof *curlun, GFP_KERNEL);
- if (!curlun) {
- kfree(common);
- return ERR_PTR(-ENOMEM);
+ if (unlikely(!curlun)) {
+ rc = -ENOMEM;
+ goto error_release;
}
common->luns = curlun;
@@ -2762,13 +2801,19 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
/* Data buffers cyclic list */
- /* Buffers in buffhds are static -- no need for additional
- * allocation. */
bh = common->buffhds;
- i = FSG_NUM_BUFFERS - 1;
+ i = FSG_NUM_BUFFERS;
+ goto buffhds_first_it;
do {
bh->next = bh + 1;
- } while (++bh, --i);
+ ++bh;
+buffhds_first_it:
+ bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
+ if (unlikely(!bh->buf)) {
+ rc = -ENOMEM;
+ goto error_release;
+ }
+ } while (--i);
bh->next = common->buffhds;
@@ -2867,10 +2912,7 @@ error_release:
static void fsg_common_release(struct kref *ref)
{
- struct fsg_common *common =
- container_of(ref, struct fsg_common, ref);
- unsigned i = common->nluns;
- struct fsg_lun *lun = common->luns;
+ struct fsg_common *common = container_of(ref, struct fsg_common, ref);
/* If the thread isn't already dead, tell it to exit now */
if (common->state != FSG_STATE_TERMINATED) {
@@ -2881,17 +2923,29 @@ static void fsg_common_release(struct kref *ref)
complete(&common->thread_notifier);
}
- /* Beware tempting for -> do-while optimization: when in error
- * recovery nluns may be zero. */
+ if (likely(common->luns)) {
+ struct fsg_lun *lun = common->luns;
+ unsigned i = common->nluns;
+
+ /* In error recovery common->nluns may be zero. */
+ for (; i; --i, ++lun) {
+ device_remove_file(&lun->dev, &dev_attr_ro);
+ device_remove_file(&lun->dev, &dev_attr_file);
+ fsg_lun_close(lun);
+ device_unregister(&lun->dev);
+ }
+
+ kfree(common->luns);
+ }
- for (; i; --i, ++lun) {
- device_remove_file(&lun->dev, &dev_attr_ro);
- device_remove_file(&lun->dev, &dev_attr_file);
- fsg_lun_close(lun);
- device_unregister(&lun->dev);
+ {
+ struct fsg_buffhd *bh = common->buffhds;
+ unsigned i = FSG_NUM_BUFFERS;
+ do {
+ kfree(bh->buf);
+ } while (++bh, --i);
}
- kfree(common->luns);
if (common->free_storage_on_release)
kfree(common);
}
@@ -2906,11 +2960,13 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
DBG(fsg, "unbind\n");
fsg_common_put(fsg->common);
+ usb_free_descriptors(fsg->function.descriptors);
+ usb_free_descriptors(fsg->function.hs_descriptors);
kfree(fsg);
}
-static int __init fsg_bind(struct usb_configuration *c, struct usb_function *f)
+static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
{
struct fsg_dev *fsg = fsg_from_func(f);
struct usb_gadget *gadget = c->cdev->gadget;
@@ -2946,7 +3002,9 @@ static int __init fsg_bind(struct usb_configuration *c, struct usb_function *f)
fsg_fs_bulk_in_desc.bEndpointAddress;
fsg_hs_bulk_out_desc.bEndpointAddress =
fsg_fs_bulk_out_desc.bEndpointAddress;
- f->hs_descriptors = fsg_hs_function;
+ f->hs_descriptors = usb_copy_descriptors(fsg_hs_function);
+ if (unlikely(!f->hs_descriptors))
+ return -ENOMEM;
}
return 0;
@@ -2978,7 +3036,11 @@ static int fsg_add(struct usb_composite_dev *cdev,
fsg->function.name = FSG_DRIVER_DESC;
fsg->function.strings = fsg_strings_array;
- fsg->function.descriptors = fsg_fs_function;
+ fsg->function.descriptors = usb_copy_descriptors(fsg_fs_function);
+ if (unlikely(!fsg->function.descriptors)) {
+ rc = -ENOMEM;
+ goto error_free_fsg;
+ }
fsg->function.bind = fsg_bind;
fsg->function.unbind = fsg_unbind;
fsg->function.setup = fsg_setup;
@@ -2993,11 +3055,19 @@ static int fsg_add(struct usb_composite_dev *cdev,
* call to usb_add_function() was successful. */
rc = usb_add_function(c, &fsg->function);
+ if (unlikely(rc))
+ goto error_free_all;
- if (likely(rc == 0))
- fsg_common_get(fsg->common);
- else
- kfree(fsg);
+ fsg_common_get(fsg->common);
+ return 0;
+
+error_free_all:
+ usb_free_descriptors(fsg->function.descriptors);
+ /* fsg_bind() might have copied those; or maybe not? who cares
+ * -- free it just in case. */
+ usb_free_descriptors(fsg->function.hs_descriptors);
+error_free_fsg:
+ kfree(fsg);
return rc;
}
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 56b0221..882484a 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -122,7 +122,7 @@ static unsigned int bitrate(struct usb_gadget *g)
/* interface descriptor: */
-static struct usb_interface_descriptor rndis_control_intf __initdata = {
+static struct usb_interface_descriptor rndis_control_intf = {
.bLength = sizeof rndis_control_intf,
.bDescriptorType = USB_DT_INTERFACE,
@@ -135,7 +135,7 @@ static struct usb_interface_descriptor rndis_control_intf __initdata = {
/* .iInterface = DYNAMIC */
};
-static struct usb_cdc_header_desc header_desc __initdata = {
+static struct usb_cdc_header_desc header_desc = {
.bLength = sizeof header_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_HEADER_TYPE,
@@ -143,7 +143,7 @@ static struct usb_cdc_header_desc header_desc __initdata = {
.bcdCDC = cpu_to_le16(0x0110),
};
-static struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor __initdata = {
+static struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor = {
.bLength = sizeof call_mgmt_descriptor,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
@@ -152,7 +152,7 @@ static struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor __initdata = {
.bDataInterface = 0x01,
};
-static struct usb_cdc_acm_descriptor rndis_acm_descriptor __initdata = {
+static struct usb_cdc_acm_descriptor rndis_acm_descriptor = {
.bLength = sizeof rndis_acm_descriptor,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_ACM_TYPE,
@@ -160,7 +160,7 @@ static struct usb_cdc_acm_descriptor rndis_acm_descriptor __initdata = {
.bmCapabilities = 0x00,
};
-static struct usb_cdc_union_desc rndis_union_desc __initdata = {
+static struct usb_cdc_union_desc rndis_union_desc = {
.bLength = sizeof(rndis_union_desc),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_UNION_TYPE,
@@ -170,7 +170,7 @@ static struct usb_cdc_union_desc rndis_union_desc __initdata = {
/* the data interface has two bulk endpoints */
-static struct usb_interface_descriptor rndis_data_intf __initdata = {
+static struct usb_interface_descriptor rndis_data_intf = {
.bLength = sizeof rndis_data_intf,
.bDescriptorType = USB_DT_INTERFACE,
@@ -198,7 +198,7 @@ rndis_iad_descriptor = {
/* full speed support: */
-static struct usb_endpoint_descriptor fs_notify_desc __initdata = {
+static struct usb_endpoint_descriptor fs_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -208,7 +208,7 @@ static struct usb_endpoint_descriptor fs_notify_desc __initdata = {
.bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
};
-static struct usb_endpoint_descriptor fs_in_desc __initdata = {
+static struct usb_endpoint_descriptor fs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -216,7 +216,7 @@ static struct usb_endpoint_descriptor fs_in_desc __initdata = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
-static struct usb_endpoint_descriptor fs_out_desc __initdata = {
+static struct usb_endpoint_descriptor fs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -224,7 +224,7 @@ static struct usb_endpoint_descriptor fs_out_desc __initdata = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
-static struct usb_descriptor_header *eth_fs_function[] __initdata = {
+static struct usb_descriptor_header *eth_fs_function[] = {
(struct usb_descriptor_header *) &rndis_iad_descriptor,
/* control interface matches ACM, not Ethernet */
(struct usb_descriptor_header *) &rndis_control_intf,
@@ -242,7 +242,7 @@ static struct usb_descriptor_header *eth_fs_function[] __initdata = {
/* high speed support: */
-static struct usb_endpoint_descriptor hs_notify_desc __initdata = {
+static struct usb_endpoint_descriptor hs_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -251,7 +251,7 @@ static struct usb_endpoint_descriptor hs_notify_desc __initdata = {
.wMaxPacketSize = cpu_to_le16(STATUS_BYTECOUNT),
.bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
};
-static struct usb_endpoint_descriptor hs_in_desc __initdata = {
+static struct usb_endpoint_descriptor hs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -260,7 +260,7 @@ static struct usb_endpoint_descriptor hs_in_desc __initdata = {
.wMaxPacketSize = cpu_to_le16(512),
};
-static struct usb_endpoint_descriptor hs_out_desc __initdata = {
+static struct usb_endpoint_descriptor hs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -269,7 +269,7 @@ static struct usb_endpoint_descriptor hs_out_desc __initdata = {
.wMaxPacketSize = cpu_to_le16(512),
};
-static struct usb_descriptor_header *eth_hs_function[] __initdata = {
+static struct usb_descriptor_header *eth_hs_function[] = {
(struct usb_descriptor_header *) &rndis_iad_descriptor,
/* control interface matches ACM, not Ethernet */
(struct usb_descriptor_header *) &rndis_control_intf,
@@ -594,7 +594,7 @@ static void rndis_close(struct gether *geth)
/* ethernet function driver setup/binding */
-static int __init
+static int
rndis_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
@@ -786,7 +786,8 @@ static inline bool can_support_rndis(struct usb_configuration *c)
* Caller must have called @gether_setup(). Caller is also responsible
* for calling @gether_cleanup() before module unload.
*/
-int __init rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+int
+rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
{
struct f_rndis *rndis;
int status;
diff --git a/drivers/usb/gadget/f_uvc.c b/drivers/usb/gadget/f_uvc.c
new file mode 100644
index 0000000..fc2611f
--- /dev/null
+++ b/drivers/usb/gadget/f_uvc.c
@@ -0,0 +1,661 @@
+/*
+ * uvc_gadget.c -- USB Video Class Gadget driver
+ *
+ * Copyright (C) 2009-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/video.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-event.h>
+
+#include "uvc.h"
+
+unsigned int uvc_trace_param;
+
+/* --------------------------------------------------------------------------
+ * Function descriptors
+ */
+
+/* string IDs are assigned dynamically */
+
+#define UVC_STRING_ASSOCIATION_IDX 0
+#define UVC_STRING_CONTROL_IDX 1
+#define UVC_STRING_STREAMING_IDX 2
+
+static struct usb_string uvc_en_us_strings[] = {
+ [UVC_STRING_ASSOCIATION_IDX].s = "UVC Camera",
+ [UVC_STRING_CONTROL_IDX].s = "Video Control",
+ [UVC_STRING_STREAMING_IDX].s = "Video Streaming",
+ { }
+};
+
+static struct usb_gadget_strings uvc_stringtab = {
+ .language = 0x0409, /* en-us */
+ .strings = uvc_en_us_strings,
+};
+
+static struct usb_gadget_strings *uvc_function_strings[] = {
+ &uvc_stringtab,
+ NULL,
+};
+
+#define UVC_INTF_VIDEO_CONTROL 0
+#define UVC_INTF_VIDEO_STREAMING 1
+
+static struct usb_interface_assoc_descriptor uvc_iad __initdata = {
+ .bLength = USB_DT_INTERFACE_ASSOCIATION_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+ .bFirstInterface = 0,
+ .bInterfaceCount = 2,
+ .bFunctionClass = USB_CLASS_VIDEO,
+ .bFunctionSubClass = 0x03,
+ .bFunctionProtocol = 0x00,
+ .iFunction = 0,
+};
+
+static struct usb_interface_descriptor uvc_control_intf __initdata = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = UVC_INTF_VIDEO_CONTROL,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 0x01,
+ .bInterfaceProtocol = 0x00,
+ .iInterface = 0,
+};
+
+static struct usb_endpoint_descriptor uvc_control_ep __initdata = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(16),
+ .bInterval = 8,
+};
+
+static struct uvc_control_endpoint_descriptor uvc_control_cs_ep __initdata = {
+ .bLength = UVC_DT_CONTROL_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_CS_ENDPOINT,
+ .bDescriptorSubType = UVC_EP_INTERRUPT,
+ .wMaxTransferSize = cpu_to_le16(16),
+};
+
+static struct usb_interface_descriptor uvc_streaming_intf_alt0 __initdata = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = UVC_INTF_VIDEO_STREAMING,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 0x02,
+ .bInterfaceProtocol = 0x00,
+ .iInterface = 0,
+};
+
+static struct usb_interface_descriptor uvc_streaming_intf_alt1 __initdata = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = UVC_INTF_VIDEO_STREAMING,
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 0x02,
+ .bInterfaceProtocol = 0x00,
+ .iInterface = 0,
+};
+
+static struct usb_endpoint_descriptor uvc_streaming_ep = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
+ .wMaxPacketSize = cpu_to_le16(512),
+ .bInterval = 1,
+};
+
+static const struct usb_descriptor_header * const uvc_fs_streaming[] = {
+ (struct usb_descriptor_header *) &uvc_streaming_intf_alt1,
+ (struct usb_descriptor_header *) &uvc_streaming_ep,
+ NULL,
+};
+
+static const struct usb_descriptor_header * const uvc_hs_streaming[] = {
+ (struct usb_descriptor_header *) &uvc_streaming_intf_alt1,
+ (struct usb_descriptor_header *) &uvc_streaming_ep,
+ NULL,
+};
+
+/* --------------------------------------------------------------------------
+ * Control requests
+ */
+
+static void
+uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct uvc_device *uvc = req->context;
+ struct v4l2_event v4l2_event;
+ struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
+
+ if (uvc->event_setup_out) {
+ uvc->event_setup_out = 0;
+
+ memset(&v4l2_event, 0, sizeof(v4l2_event));
+ v4l2_event.type = UVC_EVENT_DATA;
+ uvc_event->data.length = req->actual;
+ memcpy(&uvc_event->data.data, req->buf, req->actual);
+ v4l2_event_queue(uvc->vdev, &v4l2_event);
+ }
+}
+
+static int
+uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct uvc_device *uvc = to_uvc(f);
+ struct v4l2_event v4l2_event;
+ struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
+
+ /* printk(KERN_INFO "setup request %02x %02x value %04x index %04x %04x\n",
+ * ctrl->bRequestType, ctrl->bRequest, le16_to_cpu(ctrl->wValue),
+ * le16_to_cpu(ctrl->wIndex), le16_to_cpu(ctrl->wLength));
+ */
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS) {
+ INFO(f->config->cdev, "invalid request type\n");
+ return -EINVAL;
+ }
+
+ /* Stall too big requests. */
+ if (le16_to_cpu(ctrl->wLength) > UVC_MAX_REQUEST_SIZE)
+ return -EINVAL;
+
+ memset(&v4l2_event, 0, sizeof(v4l2_event));
+ v4l2_event.type = UVC_EVENT_SETUP;
+ memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
+ v4l2_event_queue(uvc->vdev, &v4l2_event);
+
+ return 0;
+}
+
+static int
+uvc_function_get_alt(struct usb_function *f, unsigned interface)
+{
+ struct uvc_device *uvc = to_uvc(f);
+
+ INFO(f->config->cdev, "uvc_function_get_alt(%u)\n", interface);
+
+ if (interface == uvc->control_intf)
+ return 0;
+ else if (interface != uvc->streaming_intf)
+ return -EINVAL;
+ else
+ return uvc->state == UVC_STATE_STREAMING ? 1 : 0;
+}
+
+static int
+uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
+{
+ struct uvc_device *uvc = to_uvc(f);
+ struct v4l2_event v4l2_event;
+ struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
+
+ INFO(f->config->cdev, "uvc_function_set_alt(%u, %u)\n", interface, alt);
+
+ if (interface == uvc->control_intf) {
+ if (alt)
+ return -EINVAL;
+
+ if (uvc->state == UVC_STATE_DISCONNECTED) {
+ memset(&v4l2_event, 0, sizeof(v4l2_event));
+ v4l2_event.type = UVC_EVENT_CONNECT;
+ uvc_event->speed = f->config->cdev->gadget->speed;
+ v4l2_event_queue(uvc->vdev, &v4l2_event);
+
+ uvc->state = UVC_STATE_CONNECTED;
+ }
+
+ return 0;
+ }
+
+ if (interface != uvc->streaming_intf)
+ return -EINVAL;
+
+ /* TODO
+ if (usb_endpoint_xfer_bulk(&uvc->desc.vs_ep))
+ return alt ? -EINVAL : 0;
+ */
+
+ switch (alt) {
+ case 0:
+ if (uvc->state != UVC_STATE_STREAMING)
+ return 0;
+
+ if (uvc->video.ep)
+ usb_ep_disable(uvc->video.ep);
+
+ memset(&v4l2_event, 0, sizeof(v4l2_event));
+ v4l2_event.type = UVC_EVENT_STREAMOFF;
+ v4l2_event_queue(uvc->vdev, &v4l2_event);
+
+ uvc->state = UVC_STATE_CONNECTED;
+ break;
+
+ case 1:
+ if (uvc->state != UVC_STATE_CONNECTED)
+ return 0;
+
+ if (uvc->video.ep)
+ usb_ep_enable(uvc->video.ep, &uvc_streaming_ep);
+
+ memset(&v4l2_event, 0, sizeof(v4l2_event));
+ v4l2_event.type = UVC_EVENT_STREAMON;
+ v4l2_event_queue(uvc->vdev, &v4l2_event);
+
+ uvc->state = UVC_STATE_STREAMING;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+uvc_function_disable(struct usb_function *f)
+{
+ struct uvc_device *uvc = to_uvc(f);
+ struct v4l2_event v4l2_event;
+
+ INFO(f->config->cdev, "uvc_function_disable\n");
+
+ memset(&v4l2_event, 0, sizeof(v4l2_event));
+ v4l2_event.type = UVC_EVENT_DISCONNECT;
+ v4l2_event_queue(uvc->vdev, &v4l2_event);
+
+ uvc->state = UVC_STATE_DISCONNECTED;
+}
+
+/* --------------------------------------------------------------------------
+ * Connection / disconnection
+ */
+
+void
+uvc_function_connect(struct uvc_device *uvc)
+{
+ struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ int ret;
+
+ if ((ret = usb_function_activate(&uvc->func)) < 0)
+ INFO(cdev, "UVC connect failed with %d\n", ret);
+}
+
+void
+uvc_function_disconnect(struct uvc_device *uvc)
+{
+ struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ int ret;
+
+ if ((ret = usb_function_deactivate(&uvc->func)) < 0)
+ INFO(cdev, "UVC disconnect failed with %d\n", ret);
+}
+
+/* --------------------------------------------------------------------------
+ * USB probe and disconnect
+ */
+
+static int
+uvc_register_video(struct uvc_device *uvc)
+{
+ struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ struct video_device *video;
+
+ /* TODO reference counting. */
+ video = video_device_alloc();
+ if (video == NULL)
+ return -ENOMEM;
+
+ video->parent = &cdev->gadget->dev;
+ video->minor = -1;
+ video->fops = &uvc_v4l2_fops;
+ video->release = video_device_release;
+ strncpy(video->name, cdev->gadget->name, sizeof(video->name));
+
+ uvc->vdev = video;
+ video_set_drvdata(video, uvc);
+
+ return video_register_device(video, VFL_TYPE_GRABBER, -1);
+}
+
+#define UVC_COPY_DESCRIPTOR(mem, dst, desc) \
+ do { \
+ memcpy(mem, desc, (desc)->bLength); \
+ *(dst)++ = mem; \
+ mem += (desc)->bLength; \
+	} while (0)
+
+#define UVC_COPY_DESCRIPTORS(mem, dst, src) \
+ do { \
+ const struct usb_descriptor_header * const *__src; \
+ for (__src = src; *__src; ++__src) { \
+ memcpy(mem, *__src, (*__src)->bLength); \
+ *dst++ = mem; \
+ mem += (*__src)->bLength; \
+ } \
+ } while (0)
+
+static struct usb_descriptor_header ** __init
+uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
+{
+ struct uvc_input_header_descriptor *uvc_streaming_header;
+ struct uvc_header_descriptor *uvc_control_header;
+ const struct uvc_descriptor_header * const *uvc_streaming_cls;
+ const struct usb_descriptor_header * const *uvc_streaming_std;
+ const struct usb_descriptor_header * const *src;
+ struct usb_descriptor_header **dst;
+ struct usb_descriptor_header **hdr;
+ unsigned int control_size;
+ unsigned int streaming_size;
+ unsigned int n_desc;
+ unsigned int bytes;
+ void *mem;
+
+ uvc_streaming_cls = (speed == USB_SPEED_FULL)
+ ? uvc->desc.fs_streaming : uvc->desc.hs_streaming;
+ uvc_streaming_std = (speed == USB_SPEED_FULL)
+ ? uvc_fs_streaming : uvc_hs_streaming;
+
+ /* Descriptors layout
+ *
+ * uvc_iad
+ * uvc_control_intf
+ * Class-specific UVC control descriptors
+ * uvc_control_ep
+ * uvc_control_cs_ep
+ * uvc_streaming_intf_alt0
+ * Class-specific UVC streaming descriptors
+ * uvc_{fs|hs}_streaming
+ */
+
+ /* Count descriptors and compute their size. */
+ control_size = 0;
+ streaming_size = 0;
+ bytes = uvc_iad.bLength + uvc_control_intf.bLength
+ + uvc_control_ep.bLength + uvc_control_cs_ep.bLength
+ + uvc_streaming_intf_alt0.bLength;
+ n_desc = 5;
+
+ for (src = (const struct usb_descriptor_header**)uvc->desc.control; *src; ++src) {
+ control_size += (*src)->bLength;
+ bytes += (*src)->bLength;
+ n_desc++;
+ }
+ for (src = (const struct usb_descriptor_header**)uvc_streaming_cls; *src; ++src) {
+ streaming_size += (*src)->bLength;
+ bytes += (*src)->bLength;
+ n_desc++;
+ }
+ for (src = uvc_streaming_std; *src; ++src) {
+ bytes += (*src)->bLength;
+ n_desc++;
+ }
+
+ mem = kmalloc((n_desc + 1) * sizeof(*src) + bytes, GFP_KERNEL);
+ if (mem == NULL)
+ return NULL;
+
+ hdr = mem;
+ dst = mem;
+ mem += (n_desc + 1) * sizeof(*src);
+
+ /* Copy the descriptors. */
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_iad);
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_intf);
+
+ uvc_control_header = mem;
+ UVC_COPY_DESCRIPTORS(mem, dst,
+ (const struct usb_descriptor_header**)uvc->desc.control);
+ uvc_control_header->wTotalLength = cpu_to_le16(control_size);
+ uvc_control_header->bInCollection = 1;
+ uvc_control_header->baInterfaceNr[0] = uvc->streaming_intf;
+
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_ep);
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_cs_ep);
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_streaming_intf_alt0);
+
+ uvc_streaming_header = mem;
+ UVC_COPY_DESCRIPTORS(mem, dst,
+ (const struct usb_descriptor_header**)uvc_streaming_cls);
+ uvc_streaming_header->wTotalLength = cpu_to_le16(streaming_size);
+ uvc_streaming_header->bEndpointAddress = uvc_streaming_ep.bEndpointAddress;
+
+ UVC_COPY_DESCRIPTORS(mem, dst, uvc_streaming_std);
+
+ *dst = NULL;
+ return hdr;
+}
+
+static void
+uvc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct uvc_device *uvc = to_uvc(f);
+
+ INFO(cdev, "uvc_function_unbind\n");
+
+ if (uvc->vdev) {
+ if (uvc->vdev->minor == -1)
+ video_device_release(uvc->vdev);
+ else
+ video_unregister_device(uvc->vdev);
+ uvc->vdev = NULL;
+ }
+
+ if (uvc->control_ep)
+ uvc->control_ep->driver_data = NULL;
+ if (uvc->video.ep)
+ uvc->video.ep->driver_data = NULL;
+
+ if (uvc->control_req) {
+ usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
+ kfree(uvc->control_buf);
+ }
+
+ kfree(f->descriptors);
+ kfree(f->hs_descriptors);
+
+ kfree(uvc);
+}
+
+static int __init
+uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct uvc_device *uvc = to_uvc(f);
+ struct usb_ep *ep;
+ int ret = -EINVAL;
+
+ INFO(cdev, "uvc_function_bind\n");
+
+ /* Allocate endpoints. */
+ ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
+ if (!ep) {
+ INFO(cdev, "Unable to allocate control EP\n");
+ goto error;
+ }
+ uvc->control_ep = ep;
+ ep->driver_data = uvc;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &uvc_streaming_ep);
+ if (!ep) {
+ INFO(cdev, "Unable to allocate streaming EP\n");
+ goto error;
+ }
+ uvc->video.ep = ep;
+ ep->driver_data = uvc;
+
+ /* Allocate interface IDs. */
+ if ((ret = usb_interface_id(c, f)) < 0)
+ goto error;
+ uvc_iad.bFirstInterface = ret;
+ uvc_control_intf.bInterfaceNumber = ret;
+ uvc->control_intf = ret;
+
+ if ((ret = usb_interface_id(c, f)) < 0)
+ goto error;
+ uvc_streaming_intf_alt0.bInterfaceNumber = ret;
+ uvc_streaming_intf_alt1.bInterfaceNumber = ret;
+ uvc->streaming_intf = ret;
+
+ /* Copy descriptors. */
+ f->descriptors = uvc_copy_descriptors(uvc, USB_SPEED_FULL);
+ f->hs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_HIGH);
+
+ /* Preallocate control endpoint request. */
+ uvc->control_req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
+ uvc->control_buf = kmalloc(UVC_MAX_REQUEST_SIZE, GFP_KERNEL);
+ if (uvc->control_req == NULL || uvc->control_buf == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ uvc->control_req->buf = uvc->control_buf;
+ uvc->control_req->complete = uvc_function_ep0_complete;
+ uvc->control_req->context = uvc;
+
+ /* Avoid letting this gadget enumerate until the userspace server is
+ * active.
+ */
+ if ((ret = usb_function_deactivate(f)) < 0)
+ goto error;
+
+ /* Initialise video. */
+ ret = uvc_video_init(&uvc->video);
+ if (ret < 0)
+ goto error;
+
+ /* Register a V4L2 device. */
+ ret = uvc_register_video(uvc);
+ if (ret < 0) {
+ printk(KERN_INFO "Unable to register video device\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ uvc_function_unbind(c, f);
+ return ret;
+}
+
+/* --------------------------------------------------------------------------
+ * USB gadget function
+ */
+
+/**
+ * uvc_bind_config - add a UVC function to a configuration
+ * @c: the configuration to support the UVC instance
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @uvc_setup(). Caller is also responsible for
+ * calling @uvc_cleanup() before module unload.
+ */
+int __init
+uvc_bind_config(struct usb_configuration *c,
+ const struct uvc_descriptor_header * const *control,
+ const struct uvc_descriptor_header * const *fs_streaming,
+ const struct uvc_descriptor_header * const *hs_streaming)
+{
+ struct uvc_device *uvc;
+	int ret = -EINVAL;
+
+ /* TODO Check if the USB device controller supports the required
+ * features.
+ */
+ if (!gadget_is_dualspeed(c->cdev->gadget))
+ return -EINVAL;
+
+ uvc = kzalloc(sizeof(*uvc), GFP_KERNEL);
+ if (uvc == NULL)
+ return -ENOMEM;
+
+ uvc->state = UVC_STATE_DISCONNECTED;
+
+ /* Validate the descriptors. */
+ if (control == NULL || control[0] == NULL ||
+ control[0]->bDescriptorSubType != UVC_DT_HEADER)
+ goto error;
+
+ if (fs_streaming == NULL || fs_streaming[0] == NULL ||
+ fs_streaming[0]->bDescriptorSubType != UVC_DT_INPUT_HEADER)
+ goto error;
+
+ if (hs_streaming == NULL || hs_streaming[0] == NULL ||
+ hs_streaming[0]->bDescriptorSubType != UVC_DT_INPUT_HEADER)
+ goto error;
+
+ uvc->desc.control = control;
+ uvc->desc.fs_streaming = fs_streaming;
+ uvc->desc.hs_streaming = hs_streaming;
+
+ /* Allocate string descriptor numbers. */
+ if ((ret = usb_string_id(c->cdev)) < 0)
+ goto error;
+ uvc_en_us_strings[UVC_STRING_ASSOCIATION_IDX].id = ret;
+ uvc_iad.iFunction = ret;
+
+ if ((ret = usb_string_id(c->cdev)) < 0)
+ goto error;
+ uvc_en_us_strings[UVC_STRING_CONTROL_IDX].id = ret;
+ uvc_control_intf.iInterface = ret;
+
+ if ((ret = usb_string_id(c->cdev)) < 0)
+ goto error;
+ uvc_en_us_strings[UVC_STRING_STREAMING_IDX].id = ret;
+ uvc_streaming_intf_alt0.iInterface = ret;
+ uvc_streaming_intf_alt1.iInterface = ret;
+
+ /* Register the function. */
+ uvc->func.name = "uvc";
+ uvc->func.strings = uvc_function_strings;
+ uvc->func.bind = uvc_function_bind;
+ uvc->func.unbind = uvc_function_unbind;
+ uvc->func.get_alt = uvc_function_get_alt;
+ uvc->func.set_alt = uvc_function_set_alt;
+ uvc->func.disable = uvc_function_disable;
+ uvc->func.setup = uvc_function_setup;
+
+ ret = usb_add_function(c, &uvc->func);
+ if (ret)
+ kfree(uvc);
+
+	return ret;
+
+error:
+ kfree(uvc);
+ return ret;
+}
+
+module_param_named(trace, uvc_trace_param, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(trace, "Trace level bitmask");
+
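Note that the setup and set_alt handlers above do not decode UVC class requests themselves; they forward them to user space as private V4L2 events, and a user-space server is expected to answer setup packets through the UVCIOC_SEND_RESPONSE ioctl declared in uvc.h. A rough user-space sketch of that event loop follows. It is illustrative only and not part of the patch: the function name, the simplified response handling, and the assumption that the gadget's uvc.h header is available to user space are all placeholders.

/*
 * Illustrative user-space sketch (not part of this patch): wait for the
 * gadget's private V4L2 events and answer forwarded setup requests.
 * A real server would parse the probe/commit controls and fill the
 * response data instead of sending a zeroed reply.
 */
#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include "uvc.h"		/* UVC_EVENT_*, UVCIOC_SEND_RESPONSE */

static void uvc_events_loop(int fd)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;
	struct uvc_event *uvc_ev = (void *)&ev.u.data;
	struct uvc_request_data resp;
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };

	/* Subscribe to all UVC gadget events. */
	memset(&sub, 0, sizeof sub);
	for (sub.type = UVC_EVENT_FIRST; sub.type <= UVC_EVENT_LAST; ++sub.type)
		ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);

	while (poll(&pfd, 1, -1) > 0) {
		if (ioctl(fd, VIDIOC_DQEVENT, &ev) < 0)
			continue;

		switch (ev.type) {
		case UVC_EVENT_SETUP:
			/* uvc_ev->req holds the usb_ctrlrequest forwarded by
			 * uvc_function_setup(); build the reply from it. */
			memset(&resp, 0, sizeof resp);
			resp.length = uvc_ev->req.wLength; /* simplified */
			ioctl(fd, UVCIOC_SEND_RESPONSE, &resp);
			break;
		case UVC_EVENT_STREAMON:
		case UVC_EVENT_STREAMOFF:
			/* Start or stop exchanging buffers with VIDIOC_QBUF
			 * and VIDIOC_DQBUF on the same device node. */
			break;
		}
	}
}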
diff --git a/drivers/usb/gadget/f_uvc.h b/drivers/usb/gadget/f_uvc.h
new file mode 100644
index 0000000..8a5db7c
--- /dev/null
+++ b/drivers/usb/gadget/f_uvc.h
@@ -0,0 +1,376 @@
+/*
+ * f_uvc.h -- USB Video Class Gadget driver
+ *
+ * Copyright (C) 2009-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef _F_UVC_H_
+#define _F_UVC_H_
+
+#include <linux/usb/composite.h>
+
+#define USB_CLASS_VIDEO_CONTROL 1
+#define USB_CLASS_VIDEO_STREAMING 2
+
+struct uvc_descriptor_header {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+} __attribute__ ((packed));
+
+struct uvc_header_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u16 bcdUVC;
+ __u16 wTotalLength;
+ __u32 dwClockFrequency;
+ __u8 bInCollection;
+ __u8 baInterfaceNr[];
+} __attribute__((__packed__));
+
+#define UVC_HEADER_DESCRIPTOR(n) uvc_header_descriptor_##n
+
+#define DECLARE_UVC_HEADER_DESCRIPTOR(n) \
+struct UVC_HEADER_DESCRIPTOR(n) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u16 bcdUVC; \
+ __u16 wTotalLength; \
+ __u32 dwClockFrequency; \
+ __u8 bInCollection; \
+ __u8 baInterfaceNr[n]; \
+} __attribute__ ((packed))
+
+struct uvc_input_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bTerminalID;
+ __u16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 iTerminal;
+} __attribute__((__packed__));
+
+struct uvc_output_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bTerminalID;
+ __u16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bSourceID;
+ __u8 iTerminal;
+} __attribute__((__packed__));
+
+struct uvc_camera_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bTerminalID;
+ __u16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 iTerminal;
+ __u16 wObjectiveFocalLengthMin;
+ __u16 wObjectiveFocalLengthMax;
+ __u16 wOcularFocalLength;
+ __u8 bControlSize;
+ __u8 bmControls[3];
+} __attribute__((__packed__));
+
+struct uvc_selector_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bUnitID;
+ __u8 bNrInPins;
+ __u8 baSourceID[0];
+ __u8 iSelector;
+} __attribute__((__packed__));
+
+#define UVC_SELECTOR_UNIT_DESCRIPTOR(n) \
+ uvc_selector_unit_descriptor_##n
+
+#define DECLARE_UVC_SELECTOR_UNIT_DESCRIPTOR(n) \
+struct UVC_SELECTOR_UNIT_DESCRIPTOR(n) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bUnitID; \
+ __u8 bNrInPins; \
+ __u8 baSourceID[n]; \
+ __u8 iSelector; \
+} __attribute__ ((packed))
+
+struct uvc_processing_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bUnitID;
+ __u8 bSourceID;
+ __u16 wMaxMultiplier;
+ __u8 bControlSize;
+ __u8 bmControls[2];
+ __u8 iProcessing;
+} __attribute__((__packed__));
+
+struct uvc_extension_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bUnitID;
+ __u8 guidExtensionCode[16];
+ __u8 bNumControls;
+ __u8 bNrInPins;
+ __u8 baSourceID[0];
+ __u8 bControlSize;
+ __u8 bmControls[0];
+ __u8 iExtension;
+} __attribute__((__packed__));
+
+#define UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) \
+	uvc_extension_unit_descriptor_##p##_##n
+
+#define DECLARE_UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) \
+struct UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bUnitID; \
+ __u8 guidExtensionCode[16]; \
+ __u8 bNumControls; \
+ __u8 bNrInPins; \
+ __u8 baSourceID[p]; \
+ __u8 bControlSize; \
+ __u8 bmControls[n]; \
+ __u8 iExtension; \
+} __attribute__ ((packed))
+
+struct uvc_control_endpoint_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u16 wMaxTransferSize;
+} __attribute__((__packed__));
+
+#define UVC_DT_HEADER 1
+#define UVC_DT_INPUT_TERMINAL 2
+#define UVC_DT_OUTPUT_TERMINAL 3
+#define UVC_DT_SELECTOR_UNIT 4
+#define UVC_DT_PROCESSING_UNIT 5
+#define UVC_DT_EXTENSION_UNIT 6
+
+#define UVC_DT_HEADER_SIZE(n) (12+(n))
+#define UVC_DT_INPUT_TERMINAL_SIZE 8
+#define UVC_DT_OUTPUT_TERMINAL_SIZE 9
+#define UVC_DT_CAMERA_TERMINAL_SIZE(n) (15+(n))
+#define UVC_DT_SELECTOR_UNIT_SIZE(n) (6+(n))
+#define UVC_DT_PROCESSING_UNIT_SIZE(n) (9+(n))
+#define UVC_DT_EXTENSION_UNIT_SIZE(p,n) (24+(p)+(n))
+#define UVC_DT_CONTROL_ENDPOINT_SIZE 5
+
+struct uvc_input_header_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bNumFormats;
+ __u16 wTotalLength;
+ __u8 bEndpointAddress;
+ __u8 bmInfo;
+ __u8 bTerminalLink;
+ __u8 bStillCaptureMethod;
+ __u8 bTriggerSupport;
+ __u8 bTriggerUsage;
+ __u8 bControlSize;
+ __u8 bmaControls[];
+} __attribute__((__packed__));
+
+#define UVC_INPUT_HEADER_DESCRIPTOR(n, p) \
+	uvc_input_header_descriptor_##n##_##p
+
+#define DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(n, p) \
+struct UVC_INPUT_HEADER_DESCRIPTOR(n, p) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bNumFormats; \
+ __u16 wTotalLength; \
+ __u8 bEndpointAddress; \
+ __u8 bmInfo; \
+ __u8 bTerminalLink; \
+ __u8 bStillCaptureMethod; \
+ __u8 bTriggerSupport; \
+ __u8 bTriggerUsage; \
+ __u8 bControlSize; \
+ __u8 bmaControls[p][n]; \
+} __attribute__ ((packed))
+
+struct uvc_output_header_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bNumFormats;
+ __u16 wTotalLength;
+ __u8 bEndpointAddress;
+ __u8 bTerminalLink;
+ __u8 bControlSize;
+ __u8 bmaControls[];
+} __attribute__((__packed__));
+
+#define UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \
+	uvc_output_header_descriptor_##n##_##p
+
+#define DECLARE_UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \
+struct UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bNumFormats; \
+ __u16 wTotalLength; \
+ __u8 bEndpointAddress; \
+ __u8 bTerminalLink; \
+ __u8 bControlSize; \
+ __u8 bmaControls[p][n]; \
+} __attribute__ ((packed))
+
+struct uvc_format_uncompressed {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFormatIndex;
+ __u8 bNumFrameDescriptors;
+ __u8 guidFormat[16];
+ __u8 bBitsPerPixel;
+ __u8 bDefaultFrameIndex;
+ __u8 bAspectRatioX;
+ __u8 bAspectRatioY;
+ __u8 bmInterfaceFlags;
+ __u8 bCopyProtect;
+} __attribute__((__packed__));
+
+struct uvc_frame_uncompressed {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFrameIndex;
+ __u8 bmCapabilities;
+ __u16 wWidth;
+ __u16 wHeight;
+ __u32 dwMinBitRate;
+ __u32 dwMaxBitRate;
+ __u32 dwMaxVideoFrameBufferSize;
+ __u32 dwDefaultFrameInterval;
+ __u8 bFrameIntervalType;
+ __u32 dwFrameInterval[];
+} __attribute__((__packed__));
+
+#define UVC_FRAME_UNCOMPRESSED(n) \
+ uvc_frame_uncompressed_##n
+
+#define DECLARE_UVC_FRAME_UNCOMPRESSED(n) \
+struct UVC_FRAME_UNCOMPRESSED(n) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bFrameIndex; \
+ __u8 bmCapabilities; \
+ __u16 wWidth; \
+ __u16 wHeight; \
+ __u32 dwMinBitRate; \
+ __u32 dwMaxBitRate; \
+ __u32 dwMaxVideoFrameBufferSize; \
+ __u32 dwDefaultFrameInterval; \
+ __u8 bFrameIntervalType; \
+ __u32 dwFrameInterval[n]; \
+} __attribute__ ((packed))
+
+struct uvc_format_mjpeg {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFormatIndex;
+ __u8 bNumFrameDescriptors;
+ __u8 bmFlags;
+ __u8 bDefaultFrameIndex;
+ __u8 bAspectRatioX;
+ __u8 bAspectRatioY;
+ __u8 bmInterfaceFlags;
+ __u8 bCopyProtect;
+} __attribute__((__packed__));
+
+struct uvc_frame_mjpeg {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFrameIndex;
+ __u8 bmCapabilities;
+ __u16 wWidth;
+ __u16 wHeight;
+ __u32 dwMinBitRate;
+ __u32 dwMaxBitRate;
+ __u32 dwMaxVideoFrameBufferSize;
+ __u32 dwDefaultFrameInterval;
+ __u8 bFrameIntervalType;
+ __u32 dwFrameInterval[];
+} __attribute__((__packed__));
+
+#define UVC_FRAME_MJPEG(n) \
+ uvc_frame_mjpeg_##n
+
+#define DECLARE_UVC_FRAME_MJPEG(n) \
+struct UVC_FRAME_MJPEG(n) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bFrameIndex; \
+ __u8 bmCapabilities; \
+ __u16 wWidth; \
+ __u16 wHeight; \
+ __u32 dwMinBitRate; \
+ __u32 dwMaxBitRate; \
+ __u32 dwMaxVideoFrameBufferSize; \
+ __u32 dwDefaultFrameInterval; \
+ __u8 bFrameIntervalType; \
+ __u32 dwFrameInterval[n]; \
+} __attribute__ ((packed))
+
+struct uvc_color_matching_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bColorPrimaries;
+ __u8 bTransferCharacteristics;
+ __u8 bMatrixCoefficients;
+} __attribute__((__packed__));
+
+#define UVC_DT_INPUT_HEADER 1
+#define UVC_DT_OUTPUT_HEADER 2
+#define UVC_DT_FORMAT_UNCOMPRESSED 4
+#define UVC_DT_FRAME_UNCOMPRESSED 5
+#define UVC_DT_FORMAT_MJPEG 6
+#define UVC_DT_FRAME_MJPEG 7
+#define UVC_DT_COLOR_MATCHING 13
+
+#define UVC_DT_INPUT_HEADER_SIZE(n, p) (13+(n*p))
+#define UVC_DT_OUTPUT_HEADER_SIZE(n, p) (9+(n*p))
+#define UVC_DT_FORMAT_UNCOMPRESSED_SIZE 27
+#define UVC_DT_FRAME_UNCOMPRESSED_SIZE(n) (26+4*(n))
+#define UVC_DT_FORMAT_MJPEG_SIZE 11
+#define UVC_DT_FRAME_MJPEG_SIZE(n) (26+4*(n))
+#define UVC_DT_COLOR_MATCHING_SIZE 6
+
+extern int uvc_bind_config(struct usb_configuration *c,
+ const struct uvc_descriptor_header * const *control,
+ const struct uvc_descriptor_header * const *fs_streaming,
+ const struct uvc_descriptor_header * const *hs_streaming);
+
+#endif /* _F_UVC_H_ */
+
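The DECLARE_UVC_*_DESCRIPTOR() macros above let a gadget driver declare fixed-size instances of these variable-length class descriptors and pass them to uvc_bind_config() as NULL-terminated arrays of uvc_descriptor_header pointers. A rough sketch of such a control table is shown below; it is illustrative only, the field values are placeholders, and the streaming tables are assumed to be defined elsewhere in the same driver.

/*
 * Illustrative sketch (not part of this patch): build the class-specific
 * control descriptor table consumed by uvc_bind_config(). All values are
 * placeholders.
 */
#include "f_uvc.h"

DECLARE_UVC_HEADER_DESCRIPTOR(1);

static const struct UVC_HEADER_DESCRIPTOR(1) uvc_control_header = {
	.bLength		= UVC_DT_HEADER_SIZE(1),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubType	= UVC_DT_HEADER,
	.bcdUVC			= cpu_to_le16(0x0100),
	.wTotalLength		= 0,	/* patched by uvc_copy_descriptors() */
	.dwClockFrequency	= cpu_to_le32(48000000),
	.bInCollection		= 0,	/* patched by uvc_copy_descriptors() */
	.baInterfaceNr[0]	= 0,	/* patched by uvc_copy_descriptors() */
};

static const struct uvc_camera_terminal_descriptor uvc_camera_terminal = {
	.bLength		= UVC_DT_CAMERA_TERMINAL_SIZE(3),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubType	= UVC_DT_INPUT_TERMINAL,
	.bTerminalID		= 1,
	.wTerminalType		= cpu_to_le16(0x0201),	/* ITT_CAMERA */
	.bControlSize		= 3,
};

static const struct uvc_descriptor_header * const uvc_control_cls[] = {
	(const struct uvc_descriptor_header *) &uvc_control_header,
	(const struct uvc_descriptor_header *) &uvc_camera_terminal,
	NULL,
};

/* Streaming class descriptors (input header, formats, frames), assumed to
 * be declared the same way for full speed and high speed. */
extern const struct uvc_descriptor_header * const uvc_fs_streaming_cls[];
extern const struct uvc_descriptor_header * const uvc_hs_streaming_cls[];

static int webcam_config_bind(struct usb_configuration *c)
{
	return uvc_bind_config(c, uvc_control_cls,
			       uvc_fs_streaming_cls, uvc_hs_streaming_cls);
}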
diff --git a/drivers/usb/gadget/fsl_mx3_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c
index 20a802e..d0b8bde 100644
--- a/drivers/usb/gadget/fsl_mx3_udc.c
+++ b/drivers/usb/gadget/fsl_mxc_udc.c
@@ -50,12 +50,14 @@ int fsl_udc_clk_init(struct platform_device *pdev)
goto egusb;
}
- freq = clk_get_rate(mxc_usb_clk);
- if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
- (freq < 59999000 || freq > 60001000)) {
- dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
- ret = -EINVAL;
- goto eclkrate;
+ if (!cpu_is_mx51()) {
+ freq = clk_get_rate(mxc_usb_clk);
+ if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
+ (freq < 59999000 || freq > 60001000)) {
+ dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
+ ret = -EINVAL;
+ goto eclkrate;
+ }
}
ret = clk_enable(mxc_usb_clk);
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index fa3d142..08a9a62 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -489,7 +489,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
case USB_ENDPOINT_XFER_ISOC:
/* Calculate transactions needed for high bandwidth iso */
mult = (unsigned char)(1 + ((max >> 11) & 0x03));
- max = max & 0x8ff; /* bit 0~10 */
+ max = max & 0x7ff; /* bit 0~10 */
/* 3 transactions at most */
if (mult > 3)
goto en_done;
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
new file mode 100644
index 0000000..4b0e4a0
--- /dev/null
+++ b/drivers/usb/gadget/g_ffs.c
@@ -0,0 +1,426 @@
+#include <linux/module.h>
+#include <linux/utsname.h>
+
+
+/*
+ * kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#if defined CONFIG_USB_FUNCTIONFS_ETH || defined CONFIG_USB_FUNCTIONFS_RNDIS
+# if defined USB_ETH_RNDIS
+# undef USB_ETH_RNDIS
+# endif
+# ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+# define USB_ETH_RNDIS y
+# endif
+
+# include "f_ecm.c"
+# include "f_subset.c"
+# ifdef USB_ETH_RNDIS
+# include "f_rndis.c"
+# include "rndis.c"
+# endif
+# include "u_ether.c"
+
+static u8 gfs_hostaddr[ETH_ALEN];
+#else
+# if !defined CONFIG_USB_FUNCTIONFS_GENERIC
+# define CONFIG_USB_FUNCTIONFS_GENERIC
+# endif
+# define gether_cleanup() do { } while (0)
+# define gether_setup(gadget, hostaddr) ((int)0)
+#endif
+
+#include "f_fs.c"
+
+
+#define DRIVER_NAME "g_ffs"
+#define DRIVER_DESC "USB Function Filesystem"
+#define DRIVER_VERSION "24 Aug 2004"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Michal Nazarewicz");
+MODULE_LICENSE("GPL");
+
+
+static unsigned short gfs_vendor_id = 0x0525; /* XXX NetChip */
+static unsigned short gfs_product_id = 0xa4ac; /* XXX */
+
+static struct usb_device_descriptor gfs_dev_desc = {
+ .bLength = sizeof gfs_dev_desc,
+ .bDescriptorType = USB_DT_DEVICE,
+
+ .bcdUSB = cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_PER_INTERFACE,
+
+ /* Vendor and product id can be overridden by module parameters. */
+ /* .idVendor = cpu_to_le16(gfs_vendor_id), */
+ /* .idProduct = cpu_to_le16(gfs_product_id), */
+ /* .bcdDevice = f(hardware) */
+ /* .iManufacturer = DYNAMIC */
+ /* .iProduct = DYNAMIC */
+ /* NO SERIAL NUMBER */
+ .bNumConfigurations = 1,
+};
+
+#define GFS_MODULE_PARAM_DESC(name, field) \
+ MODULE_PARM_DESC(name, "Value of the " #field " field of the device descriptor sent to the host. Takes effect only prior to the user-space driver registering to the FunctionFS.")
+
+module_param_named(usb_class, gfs_dev_desc.bDeviceClass, byte, 0644);
+GFS_MODULE_PARAM_DESC(usb_class, bDeviceClass);
+module_param_named(usb_subclass, gfs_dev_desc.bDeviceSubClass, byte, 0644);
+GFS_MODULE_PARAM_DESC(usb_subclass, bDeviceSubClass);
+module_param_named(usb_protocol, gfs_dev_desc.bDeviceProtocol, byte, 0644);
+GFS_MODULE_PARAM_DESC(usb_protocol, bDeviceProtocol);
+module_param_named(usb_vendor, gfs_vendor_id, ushort, 0644);
+GFS_MODULE_PARAM_DESC(usb_vendor, idVendor);
+module_param_named(usb_product, gfs_product_id, ushort, 0644);
+GFS_MODULE_PARAM_DESC(usb_product, idProduct);
+
+
+
+static const struct usb_descriptor_header *gfs_otg_desc[] = {
+ (const struct usb_descriptor_header *)
+ &(const struct usb_otg_descriptor) {
+ .bLength = sizeof(struct usb_otg_descriptor),
+ .bDescriptorType = USB_DT_OTG,
+
+ /* REVISIT SRP-only hardware is possible, although
+ * it would not be called "OTG" ... */
+ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
+ },
+
+ NULL
+};
+
+/* string IDs are assigned dynamically */
+
+enum {
+ GFS_STRING_MANUFACTURER_IDX,
+ GFS_STRING_PRODUCT_IDX,
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+ GFS_STRING_RNDIS_CONFIG_IDX,
+#endif
+#ifdef CONFIG_USB_FUNCTIONFS_ETH
+ GFS_STRING_ECM_CONFIG_IDX,
+#endif
+#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
+ GFS_STRING_GENERIC_CONFIG_IDX,
+#endif
+};
+
+static char gfs_manufacturer[50];
+static const char gfs_driver_desc[] = DRIVER_DESC;
+static const char gfs_short_name[] = DRIVER_NAME;
+
+static struct usb_string gfs_strings[] = {
+ [GFS_STRING_MANUFACTURER_IDX].s = gfs_manufacturer,
+ [GFS_STRING_PRODUCT_IDX].s = gfs_driver_desc,
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+ [GFS_STRING_RNDIS_CONFIG_IDX].s = "FunctionFS + RNDIS",
+#endif
+#ifdef CONFIG_USB_FUNCTIONFS_ETH
+ [GFS_STRING_ECM_CONFIG_IDX].s = "FunctionFS + ECM",
+#endif
+#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
+ [GFS_STRING_GENERIC_CONFIG_IDX].s = "FunctionFS",
+#endif
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings *gfs_dev_strings[] = {
+ &(struct usb_gadget_strings) {
+ .language = 0x0409, /* en-us */
+ .strings = gfs_strings,
+ },
+ NULL,
+};
+
+
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+static int gfs_do_rndis_config(struct usb_configuration *c);
+
+static struct usb_configuration gfs_rndis_config_driver = {
+ .label = "FunctionFS + RNDIS",
+ .bind = gfs_do_rndis_config,
+ .bConfigurationValue = 1,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+};
+# define gfs_add_rndis_config(cdev) \
+ usb_add_config(cdev, &gfs_rndis_config_driver)
+#else
+# define gfs_add_rndis_config(cdev) 0
+#endif
+
+
+#ifdef CONFIG_USB_FUNCTIONFS_ETH
+static int gfs_do_ecm_config(struct usb_configuration *c);
+
+static struct usb_configuration gfs_ecm_config_driver = {
+ .label = "FunctionFS + ECM",
+ .bind = gfs_do_ecm_config,
+ .bConfigurationValue = 1,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+};
+# define gfs_add_ecm_config(cdev) \
+ usb_add_config(cdev, &gfs_ecm_config_driver)
+#else
+# define gfs_add_ecm_config(cdev) 0
+#endif
+
+
+#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
+static int gfs_do_generic_config(struct usb_configuration *c);
+
+static struct usb_configuration gfs_generic_config_driver = {
+ .label = "FunctionFS",
+ .bind = gfs_do_generic_config,
+ .bConfigurationValue = 2,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+};
+# define gfs_add_generic_config(cdev) \
+ usb_add_config(cdev, &gfs_generic_config_driver)
+#else
+# define gfs_add_generic_config(cdev) 0
+#endif
+
+
+static int gfs_bind(struct usb_composite_dev *cdev);
+static int gfs_unbind(struct usb_composite_dev *cdev);
+
+static struct usb_composite_driver gfs_driver = {
+ .name = gfs_short_name,
+ .dev = &gfs_dev_desc,
+ .strings = gfs_dev_strings,
+ .bind = gfs_bind,
+ .unbind = gfs_unbind,
+};
+
+
+static struct ffs_data *gfs_ffs_data;
+static unsigned long gfs_registered;
+
+
+static int gfs_init(void)
+{
+ ENTER();
+
+ return functionfs_init();
+}
+module_init(gfs_init);
+
+static void gfs_exit(void)
+{
+ ENTER();
+
+ if (test_and_clear_bit(0, &gfs_registered))
+ usb_composite_unregister(&gfs_driver);
+
+ functionfs_cleanup();
+}
+module_exit(gfs_exit);
+
+
+static int functionfs_ready_callback(struct ffs_data *ffs)
+{
+ int ret;
+
+ ENTER();
+
+ if (WARN_ON(test_and_set_bit(0, &gfs_registered)))
+ return -EBUSY;
+
+ gfs_ffs_data = ffs;
+ ret = usb_composite_register(&gfs_driver);
+ if (unlikely(ret < 0))
+ clear_bit(0, &gfs_registered);
+ return ret;
+}
+
+static void functionfs_closed_callback(struct ffs_data *ffs)
+{
+ ENTER();
+
+ if (test_and_clear_bit(0, &gfs_registered))
+ usb_composite_unregister(&gfs_driver);
+}
+
+
+static int functionfs_check_dev_callback(const char *dev_name)
+{
+ return 0;
+}
+
+
+
+static int gfs_bind(struct usb_composite_dev *cdev)
+{
+ int ret;
+
+ ENTER();
+
+ if (WARN_ON(!gfs_ffs_data))
+ return -ENODEV;
+
+ ret = gether_setup(cdev->gadget, gfs_hostaddr);
+ if (unlikely(ret < 0))
+ goto error_quick;
+
+ gfs_dev_desc.idVendor = cpu_to_le16(gfs_vendor_id);
+ gfs_dev_desc.idProduct = cpu_to_le16(gfs_product_id);
+
+ snprintf(gfs_manufacturer, sizeof gfs_manufacturer, "%s %s with %s",
+ init_utsname()->sysname, init_utsname()->release,
+ cdev->gadget->name);
+ ret = usb_string_id(cdev);
+ if (unlikely(ret < 0))
+ goto error;
+ gfs_strings[GFS_STRING_MANUFACTURER_IDX].id = ret;
+ gfs_dev_desc.iManufacturer = ret;
+
+ ret = usb_string_id(cdev);
+ if (unlikely(ret < 0))
+ goto error;
+ gfs_strings[GFS_STRING_PRODUCT_IDX].id = ret;
+ gfs_dev_desc.iProduct = ret;
+
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+ ret = usb_string_id(cdev);
+ if (unlikely(ret < 0))
+ goto error;
+ gfs_strings[GFS_STRING_RNDIS_CONFIG_IDX].id = ret;
+ gfs_rndis_config_driver.iConfiguration = ret;
+#endif
+
+#ifdef CONFIG_USB_FUNCTIONFS_ETH
+ ret = usb_string_id(cdev);
+ if (unlikely(ret < 0))
+ goto error;
+ gfs_strings[GFS_STRING_ECM_CONFIG_IDX].id = ret;
+ gfs_ecm_config_driver.iConfiguration = ret;
+#endif
+
+#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
+ ret = usb_string_id(cdev);
+ if (unlikely(ret < 0))
+ goto error;
+ gfs_strings[GFS_STRING_GENERIC_CONFIG_IDX].id = ret;
+ gfs_generic_config_driver.iConfiguration = ret;
+#endif
+
+ ret = functionfs_bind(gfs_ffs_data, cdev);
+ if (unlikely(ret < 0))
+ goto error;
+
+ ret = gfs_add_rndis_config(cdev);
+ if (unlikely(ret < 0))
+ goto error_unbind;
+
+ ret = gfs_add_ecm_config(cdev);
+ if (unlikely(ret < 0))
+ goto error_unbind;
+
+ ret = gfs_add_generic_config(cdev);
+ if (unlikely(ret < 0))
+ goto error_unbind;
+
+ return 0;
+
+error_unbind:
+ functionfs_unbind(gfs_ffs_data);
+error:
+ gether_cleanup();
+error_quick:
+ gfs_ffs_data = NULL;
+ return ret;
+}
+
+static int gfs_unbind(struct usb_composite_dev *cdev)
+{
+ ENTER();
+
+	/* We may have been called in an error recovery from
+	 * composite_bind() after gfs_bind() failure, so we need to
+	 * check that gfs_ffs_data is not NULL since gfs_bind() handles
+	 * all error recovery itself.  I'd rather we weren't called
+	 * from composite on error recovery, but what can you do...? */
+
+ if (gfs_ffs_data) {
+ gether_cleanup();
+ functionfs_unbind(gfs_ffs_data);
+ gfs_ffs_data = NULL;
+ }
+
+ return 0;
+}
+
+
+static int __gfs_do_config(struct usb_configuration *c,
+ int (*eth)(struct usb_configuration *c, u8 *ethaddr),
+ u8 *ethaddr)
+{
+ int ret;
+
+ if (WARN_ON(!gfs_ffs_data))
+ return -ENODEV;
+
+ if (gadget_is_otg(c->cdev->gadget)) {
+ c->descriptors = gfs_otg_desc;
+ c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ }
+
+ if (eth) {
+ ret = eth(c, ethaddr);
+ if (unlikely(ret < 0))
+ return ret;
+ }
+
+ ret = functionfs_add(c->cdev, c, gfs_ffs_data);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return 0;
+}
+
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+static int gfs_do_rndis_config(struct usb_configuration *c)
+{
+ ENTER();
+
+ return __gfs_do_config(c, rndis_bind_config, gfs_hostaddr);
+}
+#endif
+
+#ifdef CONFIG_USB_FUNCTIONFS_ETH
+static int gfs_do_ecm_config(struct usb_configuration *c)
+{
+ ENTER();
+
+ return __gfs_do_config(c,
+ can_support_ecm(c->cdev->gadget)
+ ? ecm_bind_config : geth_bind_config,
+ gfs_hostaddr);
+}
+#endif
+
+#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
+static int gfs_do_generic_config(struct usb_configuration *c)
+{
+ ENTER();
+
+ return __gfs_do_config(c, NULL, NULL);
+}
+#endif
diff --git a/drivers/usb/gadget/hid.c b/drivers/usb/gadget/hid.c
new file mode 100644
index 0000000..7757226
--- /dev/null
+++ b/drivers/usb/gadget/hid.c
@@ -0,0 +1,298 @@
+/*
+ * hid.c -- HID Composite driver
+ *
+ * Based on multi.c
+ *
+ * Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+
+#define DRIVER_DESC "HID Gadget"
+#define DRIVER_VERSION "2010/03/16"
+
+/*-------------------------------------------------------------------------*/
+
+#define HIDG_VENDOR_NUM 0x0525 /* XXX NetChip */
+#define HIDG_PRODUCT_NUM 0xa4ac /* Linux-USB HID gadget */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "f_hid.c"
+
+
+struct hidg_func_node {
+ struct list_head node;
+ struct hidg_func_descriptor *func;
+};
+
+static LIST_HEAD(hidg_func_list);
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor device_desc = {
+ .bLength = sizeof device_desc,
+ .bDescriptorType = USB_DT_DEVICE,
+
+ .bcdUSB = cpu_to_le16(0x0200),
+
+ /* .bDeviceClass = USB_CLASS_COMM, */
+ /* .bDeviceSubClass = 0, */
+ /* .bDeviceProtocol = 0, */
+ .bDeviceClass = 0xEF,
+ .bDeviceSubClass = 2,
+ .bDeviceProtocol = 1,
+ /* .bMaxPacketSize0 = f(hardware) */
+
+ /* Vendor and product id can be overridden by module parameters. */
+ .idVendor = cpu_to_le16(HIDG_VENDOR_NUM),
+ .idProduct = cpu_to_le16(HIDG_PRODUCT_NUM),
+ /* .bcdDevice = f(hardware) */
+ /* .iManufacturer = DYNAMIC */
+ /* .iProduct = DYNAMIC */
+ /* NO SERIAL NUMBER */
+ .bNumConfigurations = 1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+ .bLength = sizeof otg_descriptor,
+ .bDescriptorType = USB_DT_OTG,
+
+ /* REVISIT SRP-only hardware is possible, although
+ * it would not be called "OTG" ...
+ */
+ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+ (struct usb_descriptor_header *) &otg_descriptor,
+ NULL,
+};
+
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+ [STRING_MANUFACTURER_IDX].s = manufacturer,
+ [STRING_PRODUCT_IDX].s = DRIVER_DESC,
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+ &stringtab_dev,
+ NULL,
+};
+
+
+
+/****************************** Configurations ******************************/
+
+static int __init do_config(struct usb_configuration *c)
+{
+ struct hidg_func_node *e;
+ int func = 0, status = 0;
+
+ if (gadget_is_otg(c->cdev->gadget)) {
+ c->descriptors = otg_desc;
+ c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ }
+
+ list_for_each_entry(e, &hidg_func_list, node) {
+ status = hidg_bind_config(c, e->func, func++);
+ if (status)
+ break;
+ }
+
+ return status;
+}
+
+static struct usb_configuration config_driver = {
+ .label = "HID Gadget",
+ .bind = do_config,
+ .bConfigurationValue = 1,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+};
+
+/****************************** Gadget Bind ******************************/
+
+static int __init hid_bind(struct usb_composite_dev *cdev)
+{
+ struct usb_gadget *gadget = cdev->gadget;
+ struct list_head *tmp;
+ int status, gcnum, funcs = 0;
+
+ list_for_each(tmp, &hidg_func_list)
+ funcs++;
+
+ if (!funcs)
+ return -ENODEV;
+
+ /* set up HID */
+ status = ghid_setup(cdev->gadget, funcs);
+ if (status < 0)
+ return status;
+
+ gcnum = usb_gadget_controller_number(gadget);
+ if (gcnum >= 0)
+ device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+ else
+ device_desc.bcdDevice = cpu_to_le16(0x0300 | 0x0099);
+
+
+ /* Allocate string descriptor numbers ... note that string
+ * contents can be overridden by the composite_dev glue.
+ */
+
+ /* device descriptor strings: manufacturer, product */
+ snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+ init_utsname()->sysname, init_utsname()->release,
+ gadget->name);
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ strings_dev[STRING_MANUFACTURER_IDX].id = status;
+ device_desc.iManufacturer = status;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ strings_dev[STRING_PRODUCT_IDX].id = status;
+ device_desc.iProduct = status;
+
+ /* register our configuration */
+ status = usb_add_config(cdev, &config_driver);
+ if (status < 0)
+ return status;
+
+ dev_info(&gadget->dev, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
+
+ return 0;
+}
+
+static int __exit hid_unbind(struct usb_composite_dev *cdev)
+{
+ ghid_cleanup();
+ return 0;
+}
+
+static int __init hidg_plat_driver_probe(struct platform_device *pdev)
+{
+ struct hidg_func_descriptor *func = pdev->dev.platform_data;
+ struct hidg_func_node *entry;
+
+ if (!func) {
+ dev_err(&pdev->dev, "Platform data missing\n");
+ return -ENODEV;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->func = func;
+ list_add_tail(&entry->node, &hidg_func_list);
+
+ return 0;
+}
+
+static int __devexit hidg_plat_driver_remove(struct platform_device *pdev)
+{
+ struct hidg_func_node *e, *n;
+
+ list_for_each_entry_safe(e, n, &hidg_func_list, node) {
+ list_del(&e->node);
+ kfree(e);
+ }
+
+ return 0;
+}
+
+
+/****************************** Some noise ******************************/
+
+
+static struct usb_composite_driver hidg_driver = {
+ .name = "g_hid",
+ .dev = &device_desc,
+ .strings = dev_strings,
+ .bind = hid_bind,
+ .unbind = __exit_p(hid_unbind),
+};
+
+static struct platform_driver hidg_plat_driver = {
+ .remove = __devexit_p(hidg_plat_driver_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "hidg",
+ },
+};
+
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Fabien Chouteau, Peter Korsgaard");
+MODULE_LICENSE("GPL");
+
+static int __init hidg_init(void)
+{
+ int status;
+
+ status = platform_driver_probe(&hidg_plat_driver,
+ hidg_plat_driver_probe);
+ if (status < 0)
+ return status;
+
+ status = usb_composite_register(&hidg_driver);
+ if (status < 0)
+ platform_driver_unregister(&hidg_plat_driver);
+
+ return status;
+}
+module_init(hidg_init);
+
+static void __exit hidg_cleanup(void)
+{
+ platform_driver_unregister(&hidg_plat_driver);
+ usb_composite_unregister(&hidg_driver);
+}
+module_exit(hidg_cleanup);
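hid.c binds one HID function for each platform device named "hidg" whose platform_data points to a struct hidg_func_descriptor; that structure comes from the companion f_hid.c, which is not shown in this hunk. A rough sketch of the board code that would feed the probe hook follows; the header path, the field names and the vendor-defined report descriptor are assumptions, not contents of this patch.

/*
 * Illustrative board-code sketch (not part of this patch): register a
 * "hidg" platform device so hidg_plat_driver_probe() finds it. The field
 * names of struct hidg_func_descriptor are assumed from the companion
 * f_hid.c.
 */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/usb/g_hid.h>

static struct hidg_func_descriptor my_hid_data = {
	.subclass		= 0,	/* no boot subclass */
	.protocol		= 0,	/* none */
	.report_length		= 8,
	.report_desc_length	= 25,
	.report_desc		= {	/* minimal vendor-defined report */
		0x06, 0x00, 0xff,	/* USAGE_PAGE (Vendor Defined)	*/
		0x09, 0x01,		/* USAGE (Vendor Usage 1)	*/
		0xa1, 0x01,		/* COLLECTION (Application)	*/
		0x15, 0x00,		/*   LOGICAL_MINIMUM (0)	*/
		0x26, 0xff, 0x00,	/*   LOGICAL_MAXIMUM (255)	*/
		0x75, 0x08,		/*   REPORT_SIZE (8)		*/
		0x95, 0x08,		/*   REPORT_COUNT (8)		*/
		0x09, 0x01,		/*   USAGE (Vendor Usage 1)	*/
		0x81, 0x02,		/*   INPUT (Data,Var,Abs)	*/
		0x09, 0x01,		/*   USAGE (Vendor Usage 1)	*/
		0x91, 0x02,		/*   OUTPUT (Data,Var,Abs)	*/
		0xc0			/* END_COLLECTION		*/
	},
};

static struct platform_device my_hid_device = {
	.name	= "hidg",
	.id	= 0,
	.dev	= {
		.platform_data = &my_hid_data,
	},
};

static int __init board_hid_init(void)
{
	return platform_device_register(&my_hid_device);
}
arch_initcall(board_hid_init);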
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
index ff61e48..cd16231 100644
--- a/drivers/usb/gadget/pxa27x_udc.h
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -360,7 +360,7 @@ struct pxa_ep {
* Specific pxa endpoint data, needed for hardware initialization
*/
unsigned dir_in:1;
- unsigned addr:3;
+ unsigned addr:4;
unsigned config:2;
unsigned interface:3;
unsigned alternate:3;
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 07f4178..1da755a 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -715,7 +715,7 @@ static u8 __init nibble(unsigned char c)
return 0;
}
-static int __init get_ether_addr(const char *str, u8 *dev_addr)
+static int get_ether_addr(const char *str, u8 *dev_addr)
{
if (str) {
unsigned i;
@@ -764,7 +764,7 @@ static struct device_type gadget_type = {
*
* Returns negative errno, or zero on success
*/
-int __init gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
+int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
{
struct eth_dev *dev;
struct net_device *net;
diff --git a/drivers/usb/gadget/uvc.h b/drivers/usb/gadget/uvc.h
new file mode 100644
index 0000000..0a705e6
--- /dev/null
+++ b/drivers/usb/gadget/uvc.h
@@ -0,0 +1,241 @@
+/*
+ * uvc_gadget.h -- USB Video Class Gadget driver
+ *
+ * Copyright (C) 2009-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef _UVC_GADGET_H_
+#define _UVC_GADGET_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/usb/ch9.h>
+
+#define UVC_EVENT_FIRST (V4L2_EVENT_PRIVATE_START + 0)
+#define UVC_EVENT_CONNECT (V4L2_EVENT_PRIVATE_START + 0)
+#define UVC_EVENT_DISCONNECT (V4L2_EVENT_PRIVATE_START + 1)
+#define UVC_EVENT_STREAMON (V4L2_EVENT_PRIVATE_START + 2)
+#define UVC_EVENT_STREAMOFF (V4L2_EVENT_PRIVATE_START + 3)
+#define UVC_EVENT_SETUP (V4L2_EVENT_PRIVATE_START + 4)
+#define UVC_EVENT_DATA (V4L2_EVENT_PRIVATE_START + 5)
+#define UVC_EVENT_LAST (V4L2_EVENT_PRIVATE_START + 5)
+
+struct uvc_request_data
+{
+ unsigned int length;
+ __u8 data[60];
+};
+
+struct uvc_event
+{
+ union {
+ enum usb_device_speed speed;
+ struct usb_ctrlrequest req;
+ struct uvc_request_data data;
+ };
+};
+
+#define UVCIOC_SEND_RESPONSE _IOW('U', 1, struct uvc_request_data)
+
+#define UVC_INTF_CONTROL 0
+#define UVC_INTF_STREAMING 1
+
+/* ------------------------------------------------------------------------
+ * UVC constants & structures
+ */
+
+/* Values for bmHeaderInfo (Video and Still Image Payload Headers, 2.4.3.3) */
+#define UVC_STREAM_EOH (1 << 7)
+#define UVC_STREAM_ERR (1 << 6)
+#define UVC_STREAM_STI (1 << 5)
+#define UVC_STREAM_RES (1 << 4)
+#define UVC_STREAM_SCR (1 << 3)
+#define UVC_STREAM_PTS (1 << 2)
+#define UVC_STREAM_EOF (1 << 1)
+#define UVC_STREAM_FID (1 << 0)
+
+struct uvc_streaming_control {
+ __u16 bmHint;
+ __u8 bFormatIndex;
+ __u8 bFrameIndex;
+ __u32 dwFrameInterval;
+ __u16 wKeyFrameRate;
+ __u16 wPFrameRate;
+ __u16 wCompQuality;
+ __u16 wCompWindowSize;
+ __u16 wDelay;
+ __u32 dwMaxVideoFrameSize;
+ __u32 dwMaxPayloadTransferSize;
+ __u32 dwClockFrequency;
+ __u8 bmFramingInfo;
+ __u8 bPreferedVersion;
+ __u8 bMinVersion;
+ __u8 bMaxVersion;
+} __attribute__((__packed__));
+
+/* ------------------------------------------------------------------------
+ * Debugging, printing and logging
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/usb.h> /* For usb_endpoint_* */
+#include <linux/usb/gadget.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-fh.h>
+
+#include "uvc_queue.h"
+
+#define UVC_TRACE_PROBE (1 << 0)
+#define UVC_TRACE_DESCR (1 << 1)
+#define UVC_TRACE_CONTROL (1 << 2)
+#define UVC_TRACE_FORMAT (1 << 3)
+#define UVC_TRACE_CAPTURE (1 << 4)
+#define UVC_TRACE_CALLS (1 << 5)
+#define UVC_TRACE_IOCTL (1 << 6)
+#define UVC_TRACE_FRAME (1 << 7)
+#define UVC_TRACE_SUSPEND (1 << 8)
+#define UVC_TRACE_STATUS (1 << 9)
+
+#define UVC_WARN_MINMAX 0
+#define UVC_WARN_PROBE_DEF 1
+
+extern unsigned int uvc_trace_param;
+
+#define uvc_trace(flag, msg...) \
+ do { \
+ if (uvc_trace_param & flag) \
+ printk(KERN_DEBUG "uvcvideo: " msg); \
+ } while (0)
+
+#define uvc_warn_once(dev, warn, msg...) \
+ do { \
+ if (!test_and_set_bit(warn, &dev->warnings)) \
+ printk(KERN_INFO "uvcvideo: " msg); \
+ } while (0)
+
+#define uvc_printk(level, msg...) \
+ printk(level "uvcvideo: " msg)
+
+/* ------------------------------------------------------------------------
+ * Driver specific constants
+ */
+
+#define DRIVER_VERSION "0.1.0"
+#define DRIVER_VERSION_NUMBER KERNEL_VERSION(0, 1, 0)
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+#define UVC_NUM_REQUESTS 4
+#define UVC_MAX_REQUEST_SIZE 64
+#define UVC_MAX_EVENTS 4
+
+#define USB_DT_INTERFACE_ASSOCIATION_SIZE 8
+#define USB_CLASS_MISC 0xef
+
+/* ------------------------------------------------------------------------
+ * Structures
+ */
+
+struct uvc_video
+{
+ struct usb_ep *ep;
+
+ /* Frame parameters */
+ u8 bpp;
+ u32 fcc;
+ unsigned int width;
+ unsigned int height;
+ unsigned int imagesize;
+
+ /* Requests */
+ unsigned int req_size;
+ struct usb_request *req[UVC_NUM_REQUESTS];
+ __u8 *req_buffer[UVC_NUM_REQUESTS];
+ struct list_head req_free;
+ spinlock_t req_lock;
+
+ void (*encode) (struct usb_request *req, struct uvc_video *video,
+ struct uvc_buffer *buf);
+
+ /* Context data used by the completion handler */
+ __u32 payload_size;
+ __u32 max_payload_size;
+
+ struct uvc_video_queue queue;
+ unsigned int fid;
+};
+
+enum uvc_state
+{
+ UVC_STATE_DISCONNECTED,
+ UVC_STATE_CONNECTED,
+ UVC_STATE_STREAMING,
+};
+
+struct uvc_device
+{
+ struct video_device *vdev;
+ enum uvc_state state;
+ struct usb_function func;
+ struct uvc_video video;
+
+ /* Descriptors */
+ struct {
+ const struct uvc_descriptor_header * const *control;
+ const struct uvc_descriptor_header * const *fs_streaming;
+ const struct uvc_descriptor_header * const *hs_streaming;
+ } desc;
+
+ unsigned int control_intf;
+ struct usb_ep *control_ep;
+ struct usb_request *control_req;
+ void *control_buf;
+
+ unsigned int streaming_intf;
+
+ /* Events */
+ unsigned int event_length;
+ unsigned int event_setup_out : 1;
+};
+
+static inline struct uvc_device *to_uvc(struct usb_function *f)
+{
+ return container_of(f, struct uvc_device, func);
+}
+
+struct uvc_file_handle
+{
+ struct v4l2_fh vfh;
+ struct uvc_video *device;
+};
+
+#define to_uvc_file_handle(handle) \
+ container_of(handle, struct uvc_file_handle, vfh)
+
+extern struct v4l2_file_operations uvc_v4l2_fops;
+
+/* ------------------------------------------------------------------------
+ * Functions
+ */
+
+extern int uvc_video_enable(struct uvc_video *video, int enable);
+extern int uvc_video_init(struct uvc_video *video);
+extern int uvc_video_pump(struct uvc_video *video);
+
+extern void uvc_endpoint_stream(struct uvc_device *dev);
+
+extern void uvc_function_connect(struct uvc_device *uvc);
+extern void uvc_function_disconnect(struct uvc_device *uvc);
+
+#endif /* __KERNEL__ */
+
+#endif /* _UVC_GADGET_H_ */
+
diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
new file mode 100644
index 0000000..4389199
--- /dev/null
+++ b/drivers/usb/gadget/uvc_queue.c
@@ -0,0 +1,583 @@
+/*
+ * uvc_queue.c -- USB Video Class driver - Buffers management
+ *
+ * Copyright (C) 2005-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/videodev2.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <asm/atomic.h>
+
+#include "uvc.h"
+
+/* ------------------------------------------------------------------------
+ * Video buffers queue management.
+ *
+ * The video buffers queue is initialized by uvc_queue_init(). The function
+ * performs basic initialization of the uvc_video_queue struct and never fails.
+ *
+ * Video buffer allocation and freeing are performed by uvc_alloc_buffers and
+ * uvc_free_buffers respectively. The former acquires the video queue lock,
+ * while the latter must be called with the lock held (so that allocation can
+ * free previously allocated buffers). Trying to free buffers that are mapped
+ * to user space will return -EBUSY.
+ *
+ * Video buffers are managed using two queues. However, unlike most USB video
+ * drivers that use an in queue and an out queue, we use a main queue to hold
+ * all queued buffers (both 'empty' and 'done' buffers), and an irq queue to
+ * hold empty buffers. This design (copied from video-buf) minimizes locking
+ * in interrupt, as only one queue is shared between interrupt and user
+ * contexts.
+ *
+ * Use cases
+ * ---------
+ *
+ * Unless stated otherwise, all operations that modify the irq buffers queue
+ * are protected by the irq spinlock.
+ *
+ * 1. The user queues the buffers, starts streaming and dequeues a buffer.
+ *
+ * The buffers are added to the main and irq queues. Both operations are
+ * protected by the queue lock, and the latter is protected by the irq
+ * spinlock as well.
+ *
+ * The completion handler fetches a buffer from the irq queue and fills it
+ * with video data. If no buffer is available (irq queue empty), the handler
+ * returns immediately.
+ *
+ * When the buffer is full, the completion handler removes it from the irq
+ * queue, marks it as ready (UVC_BUF_STATE_DONE) and wakes its wait queue.
+ * At that point, any process waiting on the buffer will be woken up. If a
+ * process tries to dequeue a buffer after it has been marked ready, the
+ * dequeuing will succeed immediately.
+ *
+ * 2. Buffers are queued, user is waiting on a buffer and the device gets
+ * disconnected.
+ *
+ * When the device is disconnected, the kernel calls the completion handler
+ * with an appropriate status code. The handler marks all buffers in the
+ * irq queue as being erroneous (UVC_BUF_STATE_ERROR) and wakes them up so
+ * that any process waiting on a buffer gets woken up.
+ *
+ * Waking up the first buffer on the irq list is not enough, as the
+ * process waiting on the buffer might restart the dequeue operation
+ * immediately.
+ *
+ */
+
+void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
+{
+ mutex_init(&queue->mutex);
+ spin_lock_init(&queue->irqlock);
+ INIT_LIST_HEAD(&queue->mainqueue);
+ INIT_LIST_HEAD(&queue->irqqueue);
+ queue->type = type;
+}
+
+/*
+ * Allocate the video buffers.
+ *
+ * Pages are reserved to make sure they will not be swapped, as they will be
+ * filled in the URB completion handler.
+ *
+ * Buffers will be individually mapped, so they must all be page aligned.
+ */
+int uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
+ unsigned int buflength)
+{
+ unsigned int bufsize = PAGE_ALIGN(buflength);
+ unsigned int i;
+ void *mem = NULL;
+ int ret;
+
+ if (nbuffers > UVC_MAX_VIDEO_BUFFERS)
+ nbuffers = UVC_MAX_VIDEO_BUFFERS;
+
+ mutex_lock(&queue->mutex);
+
+ if ((ret = uvc_free_buffers(queue)) < 0)
+ goto done;
+
+ /* Bail out if no buffers should be allocated. */
+ if (nbuffers == 0)
+ goto done;
+
+ /* Decrement the number of buffers until allocation succeeds. */
+ for (; nbuffers > 0; --nbuffers) {
+ mem = vmalloc_32(nbuffers * bufsize);
+ if (mem != NULL)
+ break;
+ }
+
+ if (mem == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < nbuffers; ++i) {
+ memset(&queue->buffer[i], 0, sizeof queue->buffer[i]);
+ queue->buffer[i].buf.index = i;
+ queue->buffer[i].buf.m.offset = i * bufsize;
+ queue->buffer[i].buf.length = buflength;
+ queue->buffer[i].buf.type = queue->type;
+ queue->buffer[i].buf.sequence = 0;
+ queue->buffer[i].buf.field = V4L2_FIELD_NONE;
+ queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP;
+ queue->buffer[i].buf.flags = 0;
+ init_waitqueue_head(&queue->buffer[i].wait);
+ }
+
+ queue->mem = mem;
+ queue->count = nbuffers;
+ queue->buf_size = bufsize;
+ ret = nbuffers;
+
+done:
+ mutex_unlock(&queue->mutex);
+ return ret;
+}
+
+/*
+ * Free the video buffers.
+ *
+ * This function must be called with the queue lock held.
+ */
+int uvc_free_buffers(struct uvc_video_queue *queue)
+{
+ unsigned int i;
+
+ for (i = 0; i < queue->count; ++i) {
+ if (queue->buffer[i].vma_use_count != 0)
+ return -EBUSY;
+ }
+
+ if (queue->count) {
+ vfree(queue->mem);
+ queue->count = 0;
+ }
+
+ return 0;
+}
+
+static void __uvc_query_buffer(struct uvc_buffer *buf,
+ struct v4l2_buffer *v4l2_buf)
+{
+ memcpy(v4l2_buf, &buf->buf, sizeof *v4l2_buf);
+
+ if (buf->vma_use_count)
+ v4l2_buf->flags |= V4L2_BUF_FLAG_MAPPED;
+
+ switch (buf->state) {
+ case UVC_BUF_STATE_ERROR:
+ case UVC_BUF_STATE_DONE:
+ v4l2_buf->flags |= V4L2_BUF_FLAG_DONE;
+ break;
+ case UVC_BUF_STATE_QUEUED:
+ case UVC_BUF_STATE_ACTIVE:
+ v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED;
+ break;
+ case UVC_BUF_STATE_IDLE:
+ default:
+ break;
+ }
+}
+
+int uvc_query_buffer(struct uvc_video_queue *queue,
+ struct v4l2_buffer *v4l2_buf)
+{
+ int ret = 0;
+
+ mutex_lock(&queue->mutex);
+ if (v4l2_buf->index >= queue->count) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ __uvc_query_buffer(&queue->buffer[v4l2_buf->index], v4l2_buf);
+
+done:
+ mutex_unlock(&queue->mutex);
+ return ret;
+}
+
+/*
+ * Queue a video buffer. Attempting to queue a buffer that has already been
+ * queued will return -EINVAL.
+ */
+int uvc_queue_buffer(struct uvc_video_queue *queue,
+ struct v4l2_buffer *v4l2_buf)
+{
+ struct uvc_buffer *buf;
+ unsigned long flags;
+ int ret = 0;
+
+ uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index);
+
+ if (v4l2_buf->type != queue->type ||
+ v4l2_buf->memory != V4L2_MEMORY_MMAP) {
+ uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
+ "and/or memory (%u).\n", v4l2_buf->type,
+ v4l2_buf->memory);
+ return -EINVAL;
+ }
+
+ mutex_lock(&queue->mutex);
+ if (v4l2_buf->index >= queue->count) {
+ uvc_trace(UVC_TRACE_CAPTURE, "[E] Out of range index.\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ buf = &queue->buffer[v4l2_buf->index];
+ if (buf->state != UVC_BUF_STATE_IDLE) {
+ uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state "
+ "(%u).\n", buf->state);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ v4l2_buf->bytesused > buf->buf.length) {
+ uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ buf->buf.bytesused = 0;
+ else
+ buf->buf.bytesused = v4l2_buf->bytesused;
+
+ spin_lock_irqsave(&queue->irqlock, flags);
+ if (queue->flags & UVC_QUEUE_DISCONNECTED) {
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+ ret = -ENODEV;
+ goto done;
+ }
+ buf->state = UVC_BUF_STATE_QUEUED;
+
+ ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
+ queue->flags &= ~UVC_QUEUE_PAUSED;
+
+ list_add_tail(&buf->stream, &queue->mainqueue);
+ list_add_tail(&buf->queue, &queue->irqqueue);
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+
+done:
+ mutex_unlock(&queue->mutex);
+ return ret;
+}
+
+static int uvc_queue_waiton(struct uvc_buffer *buf, int nonblocking)
+{
+ if (nonblocking) {
+ return (buf->state != UVC_BUF_STATE_QUEUED &&
+ buf->state != UVC_BUF_STATE_ACTIVE)
+ ? 0 : -EAGAIN;
+ }
+
+ return wait_event_interruptible(buf->wait,
+ buf->state != UVC_BUF_STATE_QUEUED &&
+ buf->state != UVC_BUF_STATE_ACTIVE);
+}
+
+/*
+ * Dequeue a video buffer. If nonblocking is false, block until a buffer is
+ * available.
+ */
+int uvc_dequeue_buffer(struct uvc_video_queue *queue,
+ struct v4l2_buffer *v4l2_buf, int nonblocking)
+{
+ struct uvc_buffer *buf;
+ int ret = 0;
+
+ if (v4l2_buf->type != queue->type ||
+ v4l2_buf->memory != V4L2_MEMORY_MMAP) {
+ uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
+ "and/or memory (%u).\n", v4l2_buf->type,
+ v4l2_buf->memory);
+ return -EINVAL;
+ }
+
+ mutex_lock(&queue->mutex);
+ if (list_empty(&queue->mainqueue)) {
+ uvc_trace(UVC_TRACE_CAPTURE, "[E] Empty buffer queue.\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
+ if ((ret = uvc_queue_waiton(buf, nonblocking)) < 0)
+ goto done;
+
+ uvc_trace(UVC_TRACE_CAPTURE, "Dequeuing buffer %u (%u, %u bytes).\n",
+ buf->buf.index, buf->state, buf->buf.bytesused);
+
+ switch (buf->state) {
+ case UVC_BUF_STATE_ERROR:
+ uvc_trace(UVC_TRACE_CAPTURE, "[W] Corrupted data "
+ "(transmission error).\n");
+ ret = -EIO;
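+ /* fall through */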
+ case UVC_BUF_STATE_DONE:
+ buf->state = UVC_BUF_STATE_IDLE;
+ break;
+
+ case UVC_BUF_STATE_IDLE:
+ case UVC_BUF_STATE_QUEUED:
+ case UVC_BUF_STATE_ACTIVE:
+ default:
+ uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state %u "
+ "(driver bug?).\n", buf->state);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ list_del(&buf->stream);
+ __uvc_query_buffer(buf, v4l2_buf);
+
+done:
+ mutex_unlock(&queue->mutex);
+ return ret;
+}
+
+/*
+ * Poll the video queue.
+ *
+ * This function implements video queue polling and is intended to be used by
+ * the device poll handler.
+ */
+unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
+ poll_table *wait)
+{
+ struct uvc_buffer *buf;
+ unsigned int mask = 0;
+
+ mutex_lock(&queue->mutex);
+ if (list_empty(&queue->mainqueue))
+ goto done;
+
+ buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
+
+ poll_wait(file, &buf->wait, wait);
+ if (buf->state == UVC_BUF_STATE_DONE ||
+ buf->state == UVC_BUF_STATE_ERROR)
+ mask |= POLLOUT | POLLWRNORM;
+
+done:
+ mutex_unlock(&queue->mutex);
+ return mask;
+}
+
+/*
+ * VMA operations.
+ */
+static void uvc_vm_open(struct vm_area_struct *vma)
+{
+ struct uvc_buffer *buffer = vma->vm_private_data;
+ buffer->vma_use_count++;
+}
+
+static void uvc_vm_close(struct vm_area_struct *vma)
+{
+ struct uvc_buffer *buffer = vma->vm_private_data;
+ buffer->vma_use_count--;
+}
+
+static struct vm_operations_struct uvc_vm_ops = {
+ .open = uvc_vm_open,
+ .close = uvc_vm_close,
+};
+
+/*
+ * Memory-map a buffer.
+ *
+ * This function implements video buffer memory mapping and is intended to be
+ * used by the device mmap handler.
+ */
+int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
+{
+ struct uvc_buffer *uninitialized_var(buffer);
+ struct page *page;
+ unsigned long addr, start, size;
+ unsigned int i;
+ int ret = 0;
+
+ start = vma->vm_start;
+ size = vma->vm_end - vma->vm_start;
+
+ mutex_lock(&queue->mutex);
+
+ for (i = 0; i < queue->count; ++i) {
+ buffer = &queue->buffer[i];
+ if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
+ break;
+ }
+
+ if (i == queue->count || size != queue->buf_size) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * VM_IO marks the area as being a memory-mapped region for I/O to a
+ * device. It also prevents the region from being core dumped.
+ */
+ vma->vm_flags |= VM_IO;
+
+ addr = (unsigned long)queue->mem + buffer->buf.m.offset;
+ while (size > 0) {
+ page = vmalloc_to_page((void *)addr);
+ if ((ret = vm_insert_page(vma, start, page)) < 0)
+ goto done;
+
+ start += PAGE_SIZE;
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+
+ vma->vm_ops = &uvc_vm_ops;
+ vma->vm_private_data = buffer;
+ uvc_vm_open(vma);
+
+done:
+ mutex_unlock(&queue->mutex);
+ return ret;
+}
+
+/*
+ * Enable or disable the video buffers queue.
+ *
+ * The queue must be enabled before starting video acquisition and must be
+ * disabled after stopping it. This ensures that the video buffers queue
+ * state can be properly initialized before buffers are accessed from the
+ * interrupt handler.
+ *
+ * Enabling the video queue initializes parameters (such as sequence number,
+ * sync pattern, ...). If the queue is already enabled, return -EBUSY.
+ *
+ * Disabling the video queue cancels the queue and removes all buffers from
+ * the main queue.
+ *
+ * This function can't be called from interrupt context. Use
+ * uvc_queue_cancel() instead.
+ */
+int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
+{
+ unsigned int i;
+ int ret = 0;
+
+ mutex_lock(&queue->mutex);
+ if (enable) {
+ if (uvc_queue_streaming(queue)) {
+ ret = -EBUSY;
+ goto done;
+ }
+ queue->sequence = 0;
+ queue->flags |= UVC_QUEUE_STREAMING;
+ queue->buf_used = 0;
+ } else {
+ uvc_queue_cancel(queue, 0);
+ INIT_LIST_HEAD(&queue->mainqueue);
+
+ for (i = 0; i < queue->count; ++i)
+ queue->buffer[i].state = UVC_BUF_STATE_IDLE;
+
+ queue->flags &= ~UVC_QUEUE_STREAMING;
+ }
+
+done:
+ mutex_unlock(&queue->mutex);
+ return ret;
+}
+
+/*
+ * Cancel the video buffers queue.
+ *
+ * Cancelling the queue marks all buffers on the irq queue as erroneous,
+ * wakes them up and removes them from the queue.
+ *
+ * If the disconnect parameter is set, further calls to uvc_queue_buffer will
+ * fail with -ENODEV.
+ *
+ * This function acquires the irq spinlock and can be called from interrupt
+ * context.
+ */
+void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
+{
+ struct uvc_buffer *buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->irqlock, flags);
+ while (!list_empty(&queue->irqqueue)) {
+ buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
+ queue);
+ list_del(&buf->queue);
+ buf->state = UVC_BUF_STATE_ERROR;
+ wake_up(&buf->wait);
+ }
+ /* This must be protected by the irqlock spinlock to avoid race
+ * conditions between uvc_queue_buffer and the disconnection event that
+ * could result in an interruptible wait in uvc_dequeue_buffer. Do not
+ * blindly replace this logic by checking for the UVC_DEV_DISCONNECTED
+ * state outside the queue code.
+ */
+ if (disconnect)
+ queue->flags |= UVC_QUEUE_DISCONNECTED;
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+}
+
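+/*
+ * Complete the current buffer and return the next buffer to fill, if any.
+ *
+ * If the UVC_QUEUE_DROP_INCOMPLETE flag is set and the buffer has not been
+ * completely filled, the buffer is recycled in place (its state is reset to
+ * queued) instead of being handed back to user space. Otherwise the buffer
+ * is removed from the irq queue, stamped with a sequence number and
+ * timestamp, and the process waiting on it is woken up.
+ */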
+struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
+ struct uvc_buffer *buf)
+{
+ struct uvc_buffer *nextbuf;
+ unsigned long flags;
+
+ if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
+ buf->buf.length != buf->buf.bytesused) {
+ buf->state = UVC_BUF_STATE_QUEUED;
+ buf->buf.bytesused = 0;
+ return buf;
+ }
+
+ spin_lock_irqsave(&queue->irqlock, flags);
+ list_del(&buf->queue);
+ if (!list_empty(&queue->irqqueue))
+ nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
+ queue);
+ else
+ nextbuf = NULL;
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+
+ buf->buf.sequence = queue->sequence++;
+ do_gettimeofday(&buf->buf.timestamp);
+
+ wake_up(&buf->wait);
+ return nextbuf;
+}
+
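+/*
+ * Return the buffer at the head of the irq queue, or NULL if the queue is
+ * empty. In the latter case the UVC_QUEUE_PAUSED flag is set so that the
+ * next uvc_queue_buffer() call knows that streaming must be restarted.
+ * Called with the irq spinlock held.
+ */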
+struct uvc_buffer *uvc_queue_head(struct uvc_video_queue *queue)
+{
+ struct uvc_buffer *buf = NULL;
+
+ if (!list_empty(&queue->irqqueue))
+ buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
+ queue);
+ else
+ queue->flags |= UVC_QUEUE_PAUSED;
+
+ return buf;
+}
+
diff --git a/drivers/usb/gadget/uvc_queue.h b/drivers/usb/gadget/uvc_queue.h
new file mode 100644
index 0000000..7f5a33f
--- /dev/null
+++ b/drivers/usb/gadget/uvc_queue.h
@@ -0,0 +1,89 @@
+#ifndef _UVC_QUEUE_H_
+#define _UVC_QUEUE_H_
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/videodev2.h>
+
+/* Maximum frame size in bytes, for sanity checking. */
+#define UVC_MAX_FRAME_SIZE (16*1024*1024)
+/* Maximum number of video buffers. */
+#define UVC_MAX_VIDEO_BUFFERS 32
+
+/* ------------------------------------------------------------------------
+ * Structures.
+ */
+
+enum uvc_buffer_state {
+ UVC_BUF_STATE_IDLE = 0,
+ UVC_BUF_STATE_QUEUED = 1,
+ UVC_BUF_STATE_ACTIVE = 2,
+ UVC_BUF_STATE_DONE = 3,
+ UVC_BUF_STATE_ERROR = 4,
+};
+
+struct uvc_buffer {
+ unsigned long vma_use_count;
+ struct list_head stream;
+
+ /* Touched by interrupt handler. */
+ struct v4l2_buffer buf;
+ struct list_head queue;
+ wait_queue_head_t wait;
+ enum uvc_buffer_state state;
+};
+
+#define UVC_QUEUE_STREAMING (1 << 0) /* queue enabled, stream running */
+#define UVC_QUEUE_DISCONNECTED (1 << 1) /* device gone, QBUF fails with -ENODEV */
+#define UVC_QUEUE_DROP_INCOMPLETE (1 << 2) /* recycle partially filled buffers */
+#define UVC_QUEUE_PAUSED (1 << 3) /* irq queue ran empty, restart needed */
+
+struct uvc_video_queue {
+ enum v4l2_buf_type type;
+
+ void *mem;
+ unsigned int flags;
+ __u32 sequence;
+
+ unsigned int count;
+ unsigned int buf_size;
+ unsigned int buf_used;
+ struct uvc_buffer buffer[UVC_MAX_VIDEO_BUFFERS];
+ struct mutex mutex; /* protects buffers and mainqueue */
+ spinlock_t irqlock; /* protects irqqueue */
+
+ struct list_head mainqueue;
+ struct list_head irqqueue;
+};
+
+extern void uvc_queue_init(struct uvc_video_queue *queue,
+ enum v4l2_buf_type type);
+extern int uvc_alloc_buffers(struct uvc_video_queue *queue,
+ unsigned int nbuffers, unsigned int buflength);
+extern int uvc_free_buffers(struct uvc_video_queue *queue);
+extern int uvc_query_buffer(struct uvc_video_queue *queue,
+ struct v4l2_buffer *v4l2_buf);
+extern int uvc_queue_buffer(struct uvc_video_queue *queue,
+ struct v4l2_buffer *v4l2_buf);
+extern int uvc_dequeue_buffer(struct uvc_video_queue *queue,
+ struct v4l2_buffer *v4l2_buf, int nonblocking);
+extern int uvc_queue_enable(struct uvc_video_queue *queue, int enable);
+extern void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect);
+extern struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
+ struct uvc_buffer *buf);
+extern unsigned int uvc_queue_poll(struct uvc_video_queue *queue,
+ struct file *file, poll_table *wait);
+extern int uvc_queue_mmap(struct uvc_video_queue *queue,
+ struct vm_area_struct *vma);
+static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
+{
+ return queue->flags & UVC_QUEUE_STREAMING;
+}
+extern struct uvc_buffer *uvc_queue_head(struct uvc_video_queue *queue);
+
+#endif /* __KERNEL__ */
+
+#endif /* _UVC_QUEUE_H_ */
+
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
new file mode 100644
index 0000000..a7989f2
--- /dev/null
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -0,0 +1,374 @@
+/*
+ * uvc_v4l2.c -- USB Video Class Gadget driver
+ *
+ * Copyright (C) 2009-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#include "uvc.h"
+#include "uvc_queue.h"
+
+/* --------------------------------------------------------------------------
+ * Requests handling
+ */
+
+static int
+uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
+{
+ struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ struct usb_request *req = uvc->control_req;
+
+ if (data->length < 0)
+ return usb_ep_set_halt(cdev->gadget->ep0);
+
+ req->length = min(uvc->event_length, data->length);
+ req->zero = data->length < uvc->event_length;
+ req->dma = DMA_ADDR_INVALID;
+
+ memcpy(req->buf, data->data, data->length);
+
+ return usb_ep_queue(cdev->gadget->ep0, req, GFP_KERNEL);
+}
+
+/* --------------------------------------------------------------------------
+ * V4L2
+ */
+
+struct uvc_format
+{
+ u8 bpp;
+ u32 fcc;
+};
+
+static struct uvc_format uvc_formats[] = {
+ { 16, V4L2_PIX_FMT_YUYV },
+ { 0, V4L2_PIX_FMT_MJPEG },
+};
+
+static int
+uvc_v4l2_get_format(struct uvc_video *video, struct v4l2_format *fmt)
+{
+ fmt->fmt.pix.pixelformat = video->fcc;
+ fmt->fmt.pix.width = video->width;
+ fmt->fmt.pix.height = video->height;
+ fmt->fmt.pix.field = V4L2_FIELD_NONE;
+ fmt->fmt.pix.bytesperline = video->bpp * video->width / 8;
+ fmt->fmt.pix.sizeimage = video->imagesize;
+ fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->fmt.pix.priv = 0;
+
+ return 0;
+}
+
+static int
+uvc_v4l2_set_format(struct uvc_video *video, struct v4l2_format *fmt)
+{
+ struct uvc_format *format;
+ unsigned int imagesize;
+ unsigned int bpl;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(uvc_formats); ++i) {
+ format = &uvc_formats[i];
+ if (format->fcc == fmt->fmt.pix.pixelformat)
+ break;
+ }
+
+ if (format == NULL || format->fcc != fmt->fmt.pix.pixelformat) {
+ printk(KERN_INFO "Unsupported format 0x%08x.\n",
+ fmt->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ bpl = format->bpp * fmt->fmt.pix.width / 8;
+ imagesize = bpl ? bpl * fmt->fmt.pix.height : fmt->fmt.pix.sizeimage;
+
+ video->fcc = format->fcc;
+ video->bpp = format->bpp;
+ video->width = fmt->fmt.pix.width;
+ video->height = fmt->fmt.pix.height;
+ video->imagesize = imagesize;
+
+ fmt->fmt.pix.field = V4L2_FIELD_NONE;
+ fmt->fmt.pix.bytesperline = bpl;
+ fmt->fmt.pix.sizeimage = imagesize;
+ fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->fmt.pix.priv = 0;
+
+ return 0;
+}
+
+static int
+uvc_v4l2_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvc_file_handle *handle;
+ int ret;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (handle == NULL)
+ return -ENOMEM;
+
+ ret = v4l2_fh_init(&handle->vfh, vdev);
+ if (ret < 0)
+ goto error;
+
+ ret = v4l2_event_init(&handle->vfh);
+ if (ret < 0)
+ goto error;
+
+ ret = v4l2_event_alloc(&handle->vfh, 8);
+ if (ret < 0)
+ goto error;
+
+ v4l2_fh_add(&handle->vfh);
+
+ handle->device = &uvc->video;
+ file->private_data = &handle->vfh;
+
+ uvc_function_connect(uvc);
+ return 0;
+
+error:
+ v4l2_fh_exit(&handle->vfh);
+ return ret;
+}
+
+static int
+uvc_v4l2_release(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
+ struct uvc_video *video = handle->device;
+
+ uvc_function_disconnect(uvc);
+
+ uvc_video_enable(video, 0);
+ mutex_lock(&video->queue.mutex);
+ if (uvc_free_buffers(&video->queue) < 0)
+ printk(KERN_ERR "uvc_v4l2_release: Unable to free "
+ "buffers.\n");
+ mutex_unlock(&video->queue.mutex);
+
+ file->private_data = NULL;
+ v4l2_fh_del(&handle->vfh);
+ v4l2_fh_exit(&handle->vfh);
+ kfree(handle);
+ return 0;
+}
+
+static long
+uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
+ struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ struct uvc_video *video = &uvc->video;
+ int ret = 0;
+
+ switch (cmd) {
+ /* Query capabilities */
+ case VIDIOC_QUERYCAP:
+ {
+ struct v4l2_capability *cap = arg;
+
+ memset(cap, 0, sizeof *cap);
+ strncpy(cap->driver, "g_uvc", sizeof(cap->driver));
+ strncpy(cap->card, cdev->gadget->name, sizeof(cap->card));
+ strncpy(cap->bus_info, dev_name(&cdev->gadget->dev),
+ sizeof cap->bus_info);
+ cap->version = DRIVER_VERSION_NUMBER;
+ cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ break;
+ }
+
+ /* Get & Set format */
+ case VIDIOC_G_FMT:
+ {
+ struct v4l2_format *fmt = arg;
+
+ if (fmt->type != video->queue.type)
+ return -EINVAL;
+
+ return uvc_v4l2_get_format(video, fmt);
+ }
+
+ case VIDIOC_S_FMT:
+ {
+ struct v4l2_format *fmt = arg;
+
+ if (fmt->type != video->queue.type)
+ return -EINVAL;
+
+ return uvc_v4l2_set_format(video, fmt);
+ }
+
+ /* Buffers & streaming */
+ case VIDIOC_REQBUFS:
+ {
+ struct v4l2_requestbuffers *rb = arg;
+
+ if (rb->type != video->queue.type ||
+ rb->memory != V4L2_MEMORY_MMAP)
+ return -EINVAL;
+
+ ret = uvc_alloc_buffers(&video->queue, rb->count,
+ video->imagesize);
+ if (ret < 0)
+ return ret;
+
+ rb->count = ret;
+ ret = 0;
+ break;
+ }
+
+ case VIDIOC_QUERYBUF:
+ {
+ struct v4l2_buffer *buf = arg;
+
+ if (buf->type != video->queue.type)
+ return -EINVAL;
+
+ return uvc_query_buffer(&video->queue, buf);
+ }
+
+ case VIDIOC_QBUF:
+ if ((ret = uvc_queue_buffer(&video->queue, arg)) < 0)
+ return ret;
+
+ return uvc_video_pump(video);
+
+ case VIDIOC_DQBUF:
+ return uvc_dequeue_buffer(&video->queue, arg,
+ file->f_flags & O_NONBLOCK);
+
+ case VIDIOC_STREAMON:
+ {
+ int *type = arg;
+
+ if (*type != video->queue.type)
+ return -EINVAL;
+
+ return uvc_video_enable(video, 1);
+ }
+
+ case VIDIOC_STREAMOFF:
+ {
+ int *type = arg;
+
+ if (*type != video->queue.type)
+ return -EINVAL;
+
+ return uvc_video_enable(video, 0);
+ }
+
+ /* Events */
+ case VIDIOC_DQEVENT:
+ {
+ struct v4l2_event *event = arg;
+
+ ret = v4l2_event_dequeue(&handle->vfh, event,
+ file->f_flags & O_NONBLOCK);
+ if (ret == 0 && event->type == UVC_EVENT_SETUP) {
+ struct uvc_event *uvc_event = (void *)&event->u.data;
+
+ /* Tell the complete callback to generate an event for
+ * the next request that will be enqueued by
+ * uvc_event_write.
+ */
+ uvc->event_setup_out =
+ !(uvc_event->req.bRequestType & USB_DIR_IN);
+ uvc->event_length = uvc_event->req.wLength;
+ }
+
+ return ret;
+ }
+
+ case VIDIOC_SUBSCRIBE_EVENT:
+ {
+ struct v4l2_event_subscription *sub = arg;
+
+ if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST)
+ return -EINVAL;
+
+ return v4l2_event_subscribe(&handle->vfh, arg);
+ }
+
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ return v4l2_event_unsubscribe(&handle->vfh, arg);
+
+ case UVCIOC_SEND_RESPONSE:
+ ret = uvc_send_response(uvc, arg);
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return ret;
+}
+
+static long
+uvc_v4l2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, uvc_v4l2_do_ioctl);
+}
+
+static int
+uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+
+ return uvc_queue_mmap(&uvc->video.queue, vma);
+}
+
+static unsigned int
+uvc_v4l2_poll(struct file *file, poll_table *wait)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
+ unsigned int mask = 0;
+
+ poll_wait(file, &handle->vfh.events->wait, wait);
+ if (v4l2_event_pending(&handle->vfh))
+ mask |= POLLPRI;
+
+ mask |= uvc_queue_poll(&uvc->video.queue, file, wait);
+
+ return mask;
+}
+
+struct v4l2_file_operations uvc_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .open = uvc_v4l2_open,
+ .release = uvc_v4l2_release,
+ .ioctl = uvc_v4l2_ioctl,
+ .mmap = uvc_v4l2_mmap,
+ .poll = uvc_v4l2_poll,
+};
+
diff --git a/drivers/usb/gadget/uvc_video.c b/drivers/usb/gadget/uvc_video.c
new file mode 100644
index 0000000..de8cbc4
--- /dev/null
+++ b/drivers/usb/gadget/uvc_video.c
@@ -0,0 +1,386 @@
+/*
+ * uvc_video.c -- USB Video Class Gadget driver
+ *
+ * Copyright (C) 2009-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include <media/v4l2-dev.h>
+
+#include "uvc.h"
+#include "uvc_queue.h"
+
+/* --------------------------------------------------------------------------
+ * Video codecs
+ */
+
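+/*
+ * Prepend the 2-byte UVC payload header: data[0] carries the header length
+ * (2 bytes) and data[1] carries the bmHeaderInfo field with the
+ * End-of-Header bit, the current Frame ID toggle and, on the last payload
+ * of a frame, the End-of-Frame bit.
+ */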
+static int
+uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
+ u8 *data, int len)
+{
+ data[0] = 2;
+ data[1] = UVC_STREAM_EOH | video->fid;
+
+ if (buf->buf.bytesused - video->queue.buf_used <= len - 2)
+ data[1] |= UVC_STREAM_EOF;
+
+ return 2;
+}
+
+static int
+uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf,
+ u8 *data, int len)
+{
+ struct uvc_video_queue *queue = &video->queue;
+ unsigned int nbytes;
+ void *mem;
+
+ /* Copy video data to the USB buffer. */
+ mem = queue->mem + buf->buf.m.offset + queue->buf_used;
+ nbytes = min((unsigned int)len, buf->buf.bytesused - queue->buf_used);
+
+ memcpy(data, mem, nbytes);
+ queue->buf_used += nbytes;
+
+ return nbytes;
+}
+
+static void
+uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
+ struct uvc_buffer *buf)
+{
+ void *mem = req->buf;
+ int len = video->req_size;
+ int ret;
+
+ /* Add a header at the beginning of the payload. */
+ if (video->payload_size == 0) {
+ ret = uvc_video_encode_header(video, buf, mem, len);
+ video->payload_size += ret;
+ mem += ret;
+ len -= ret;
+ }
+
+ /* Process video data. */
+ len = min((int)(video->max_payload_size - video->payload_size), len);
+ ret = uvc_video_encode_data(video, buf, mem, len);
+
+ video->payload_size += ret;
+ len -= ret;
+
+ req->length = video->req_size - len;
+ req->zero = video->payload_size == video->max_payload_size;
+
+ if (buf->buf.bytesused == video->queue.buf_used) {
+ video->queue.buf_used = 0;
+ buf->state = UVC_BUF_STATE_DONE;
+ uvc_queue_next_buffer(&video->queue, buf);
+ video->fid ^= UVC_STREAM_FID;
+
+ video->payload_size = 0;
+ }
+
+ if (video->payload_size == video->max_payload_size ||
+ buf->buf.bytesused == video->queue.buf_used)
+ video->payload_size = 0;
+}
+
+static void
+uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
+ struct uvc_buffer *buf)
+{
+ void *mem = req->buf;
+ int len = video->req_size;
+ int ret;
+
+ /* Add the header. */
+ ret = uvc_video_encode_header(video, buf, mem, len);
+ mem += ret;
+ len -= ret;
+
+ /* Process video data. */
+ ret = uvc_video_encode_data(video, buf, mem, len);
+ len -= ret;
+
+ req->length = video->req_size - len;
+
+ if (buf->buf.bytesused == video->queue.buf_used) {
+ video->queue.buf_used = 0;
+ buf->state = UVC_BUF_STATE_DONE;
+ uvc_queue_next_buffer(&video->queue, buf);
+ video->fid ^= UVC_STREAM_FID;
+ }
+}
+
+/* --------------------------------------------------------------------------
+ * Request handling
+ */
+
+/*
+ * I somehow feel that synchronisation won't be easy to achieve here. We have
+ * three events that control USB request submission:
+ *
+ * - USB request completion: the completion handler will resubmit the request
+ * if a video buffer is available.
+ *
+ * - USB interface setting selection: in response to a SET_INTERFACE request,
+ * the handler will start streaming if a video buffer is available and if
+ * video is not currently streaming.
+ *
+ * - V4L2 buffer queueing: the driver will start streaming if video is not
+ * currently streaming.
+ *
+ * Race conditions between those 3 events might lead to deadlocks or other
+ * nasty side effects.
+ *
+ * The "video currently streaming" condition can't be detected by the irqqueue
+ * being empty, as a request can still be in flight. A separate "queue paused"
+ * flag is thus needed.
+ *
+ * The paused flag will be set when we try to retrieve the irqqueue head if the
+ * queue is empty, and cleared when we queue a buffer.
+ *
+ * The USB request completion handler will get the buffer at the irqqueue head
+ * under protection of the queue spinlock. If the queue is empty, the streaming
+ * paused flag will be set. Right after releasing the spinlock a userspace
+ * application can queue a buffer. The flag will then be cleared, and the ioctl
+ * handler will restart the video stream.
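+ *
+ * As a sketch of the restart path (inferred from the queue and pump code,
+ * not a normative description): the completion handler finds the irq queue
+ * empty, sets UVC_QUEUE_PAUSED and parks its USB request on the req_free
+ * list. A later VIDIOC_QBUF clears the flag in uvc_queue_buffer(), and the
+ * ioctl handler then calls uvc_video_pump(), which picks a parked request
+ * back up, fills it from the newly queued buffer and resubmits it to the
+ * endpoint.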
+ */
+static void
+uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct uvc_video *video = req->context;
+ struct uvc_buffer *buf;
+ unsigned long flags;
+ int ret;
+
+ switch (req->status) {
+ case 0:
+ break;
+
+ case -ESHUTDOWN:
+ printk(KERN_INFO "VS request cancelled.\n");
+ goto requeue;
+
+ default:
+ printk(KERN_INFO "VS request completed with status %d.\n",
+ req->status);
+ goto requeue;
+ }
+
+ spin_lock_irqsave(&video->queue.irqlock, flags);
+ buf = uvc_queue_head(&video->queue);
+ if (buf == NULL) {
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+ goto requeue;
+ }
+
+ video->encode(req, video, buf);
+
+ if ((ret = usb_ep_queue(ep, req, GFP_ATOMIC)) < 0) {
+ printk(KERN_INFO "Failed to queue request (%d).\n", ret);
+ usb_ep_set_halt(ep);
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+ goto requeue;
+ }
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+
+ return;
+
+requeue:
+ spin_lock_irqsave(&video->req_lock, flags);
+ list_add_tail(&req->list, &video->req_free);
+ spin_unlock_irqrestore(&video->req_lock, flags);
+}
+
+static int
+uvc_video_free_requests(struct uvc_video *video)
+{
+ unsigned int i;
+
+ for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
+ if (video->req[i]) {
+ usb_ep_free_request(video->ep, video->req[i]);
+ video->req[i] = NULL;
+ }
+
+ if (video->req_buffer[i]) {
+ kfree(video->req_buffer[i]);
+ video->req_buffer[i] = NULL;
+ }
+ }
+
+ INIT_LIST_HEAD(&video->req_free);
+ video->req_size = 0;
+ return 0;
+}
+
+static int
+uvc_video_alloc_requests(struct uvc_video *video)
+{
+ unsigned int i;
+ int ret = -ENOMEM;
+
+ BUG_ON(video->req_size);
+
+ for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
+ video->req_buffer[i] = kmalloc(video->ep->maxpacket, GFP_KERNEL);
+ if (video->req_buffer[i] == NULL)
+ goto error;
+
+ video->req[i] = usb_ep_alloc_request(video->ep, GFP_KERNEL);
+ if (video->req[i] == NULL)
+ goto error;
+
+ video->req[i]->buf = video->req_buffer[i];
+ video->req[i]->length = 0;
+ video->req[i]->dma = DMA_ADDR_INVALID;
+ video->req[i]->complete = uvc_video_complete;
+ video->req[i]->context = video;
+
+ list_add_tail(&video->req[i]->list, &video->req_free);
+ }
+
+ video->req_size = video->ep->maxpacket;
+ return 0;
+
+error:
+ uvc_video_free_requests(video);
+ return ret;
+}
+
+/* --------------------------------------------------------------------------
+ * Video streaming
+ */
+
+/*
+ * uvc_video_pump - Pump video data into the USB requests
+ *
+ * This function fills the available USB requests (listed in req_free) with
+ * video data from the queued buffers.
+ */
+int
+uvc_video_pump(struct uvc_video *video)
+{
+ struct usb_request *req;
+ struct uvc_buffer *buf;
+ unsigned long flags;
+ int ret;
+
+ /* FIXME TODO Race between uvc_video_pump and requests completion
+ * handler ???
+ */
+
+ while (1) {
+ /* Retrieve the first available USB request, protected by the
+ * request lock.
+ */
+ spin_lock_irqsave(&video->req_lock, flags);
+ if (list_empty(&video->req_free)) {
+ spin_unlock_irqrestore(&video->req_lock, flags);
+ return 0;
+ }
+ req = list_first_entry(&video->req_free, struct usb_request,
+ list);
+ list_del(&req->list);
+ spin_unlock_irqrestore(&video->req_lock, flags);
+
+ /* Retrieve the first available video buffer and fill the
+ * request, protected by the video queue irqlock.
+ */
+ spin_lock_irqsave(&video->queue.irqlock, flags);
+ buf = uvc_queue_head(&video->queue);
+ if (buf == NULL) {
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+ break;
+ }
+
+ video->encode(req, video, buf);
+
+ /* Queue the USB request */
+ if ((ret = usb_ep_queue(video->ep, req, GFP_KERNEL)) < 0) {
+ printk(KERN_INFO "Failed to queue request (%d)\n", ret);
+ usb_ep_set_halt(video->ep);
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+ }
+
+ spin_lock_irqsave(&video->req_lock, flags);
+ list_add_tail(&req->list, &video->req_free);
+ spin_unlock_irqrestore(&video->req_lock, flags);
+ return 0;
+}
+
+/*
+ * Enable or disable the video stream.
+ */
+int
+uvc_video_enable(struct uvc_video *video, int enable)
+{
+ unsigned int i;
+ int ret;
+
+ if (video->ep == NULL) {
+ printk(KERN_INFO "Video enable failed, device is "
+ "uninitialized.\n");
+ return -ENODEV;
+ }
+
+ if (!enable) {
+ for (i = 0; i < UVC_NUM_REQUESTS; ++i)
+ usb_ep_dequeue(video->ep, video->req[i]);
+
+ uvc_video_free_requests(video);
+ uvc_queue_enable(&video->queue, 0);
+ return 0;
+ }
+
+ if ((ret = uvc_queue_enable(&video->queue, 1)) < 0)
+ return ret;
+
+ if ((ret = uvc_video_alloc_requests(video)) < 0)
+ return ret;
+
+ if (video->max_payload_size) {
+ video->encode = uvc_video_encode_bulk;
+ video->payload_size = 0;
+ } else
+ video->encode = uvc_video_encode_isoc;
+
+ return uvc_video_pump(video);
+}
+
+/*
+ * Initialize the UVC video stream.
+ */
+int
+uvc_video_init(struct uvc_video *video)
+{
+ INIT_LIST_HEAD(&video->req_free);
+ spin_lock_init(&video->req_lock);
+
+ video->fcc = V4L2_PIX_FMT_YUYV;
+ video->bpp = 16;
+ video->width = 320;
+ video->height = 240;
+ video->imagesize = 320 * 240 * 2;
+
+ /* Initialize the video buffers queue. */
+ uvc_queue_init(&video->queue, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ return 0;
+}
+
diff --git a/drivers/usb/gadget/webcam.c b/drivers/usb/gadget/webcam.c
new file mode 100644
index 0000000..417fd68
--- /dev/null
+++ b/drivers/usb/gadget/webcam.c
@@ -0,0 +1,399 @@
+/*
+ * webcam.c -- USB webcam gadget driver
+ *
+ * Copyright (C) 2009-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/usb/video.h>
+
+#include "f_uvc.h"
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "f_uvc.c"
+#include "uvc_queue.c"
+#include "uvc_v4l2.c"
+#include "uvc_video.c"
+
+/* --------------------------------------------------------------------------
+ * Device descriptor
+ */
+
+#define WEBCAM_VENDOR_ID 0x1d6b /* Linux Foundation */
+#define WEBCAM_PRODUCT_ID 0x0102 /* Webcam A/V gadget */
+#define WEBCAM_DEVICE_BCD 0x0010 /* 0.10 */
+
+static char webcam_vendor_label[] = "Linux Foundation";
+static char webcam_product_label[] = "Webcam gadget";
+static char webcam_config_label[] = "Video";
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+#define STRING_DESCRIPTION_IDX 2
+
+static struct usb_string webcam_strings[] = {
+ [STRING_MANUFACTURER_IDX].s = webcam_vendor_label,
+ [STRING_PRODUCT_IDX].s = webcam_product_label,
+ [STRING_DESCRIPTION_IDX].s = webcam_config_label,
+ { }
+};
+
+static struct usb_gadget_strings webcam_stringtab = {
+ .language = 0x0409, /* en-us */
+ .strings = webcam_strings,
+};
+
+static struct usb_gadget_strings *webcam_device_strings[] = {
+ &webcam_stringtab,
+ NULL,
+};
+
+static struct usb_device_descriptor webcam_device_descriptor = {
+ .bLength = USB_DT_DEVICE_SIZE,
+ .bDescriptorType = USB_DT_DEVICE,
+ .bcdUSB = cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_MISC,
+ .bDeviceSubClass = 0x02,
+ .bDeviceProtocol = 0x01,
+ .bMaxPacketSize0 = 0, /* dynamic */
+ .idVendor = cpu_to_le16(WEBCAM_VENDOR_ID),
+ .idProduct = cpu_to_le16(WEBCAM_PRODUCT_ID),
+ .bcdDevice = cpu_to_le16(WEBCAM_DEVICE_BCD),
+ .iManufacturer = 0, /* dynamic */
+ .iProduct = 0, /* dynamic */
+ .iSerialNumber = 0, /* dynamic */
+ .bNumConfigurations = 0, /* dynamic */
+};
+
+DECLARE_UVC_HEADER_DESCRIPTOR(1);
+
+static const struct UVC_HEADER_DESCRIPTOR(1) uvc_control_header = {
+ .bLength = UVC_DT_HEADER_SIZE(1),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_HEADER,
+ .bcdUVC = cpu_to_le16(0x0100),
+ .wTotalLength = 0, /* dynamic */
+ .dwClockFrequency = cpu_to_le32(48000000),
+ .bInCollection = 0, /* dynamic */
+ .baInterfaceNr[0] = 0, /* dynamic */
+};
+
+static const struct uvc_camera_terminal_descriptor uvc_camera_terminal = {
+ .bLength = UVC_DT_CAMERA_TERMINAL_SIZE(3),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_INPUT_TERMINAL,
+ .bTerminalID = 1,
+ .wTerminalType = cpu_to_le16(0x0201),
+ .bAssocTerminal = 0,
+ .iTerminal = 0,
+ .wObjectiveFocalLengthMin = cpu_to_le16(0),
+ .wObjectiveFocalLengthMax = cpu_to_le16(0),
+ .wOcularFocalLength = cpu_to_le16(0),
+ .bControlSize = 3,
+ .bmControls[0] = 2,
+ .bmControls[1] = 0,
+ .bmControls[2] = 0,
+};
+
+static const struct uvc_processing_unit_descriptor uvc_processing = {
+ .bLength = UVC_DT_PROCESSING_UNIT_SIZE(2),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_PROCESSING_UNIT,
+ .bUnitID = 2,
+ .bSourceID = 1,
+ .wMaxMultiplier = cpu_to_le16(16*1024),
+ .bControlSize = 2,
+ .bmControls[0] = 1,
+ .bmControls[1] = 0,
+ .iProcessing = 0,
+};
+
+static const struct uvc_output_terminal_descriptor uvc_output_terminal = {
+ .bLength = UVC_DT_OUTPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_OUTPUT_TERMINAL,
+ .bTerminalID = 3,
+ .wTerminalType = cpu_to_le16(0x0101),
+ .bAssocTerminal = 0,
+ .bSourceID = 2,
+ .iTerminal = 0,
+};
+
+DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(1, 2);
+
+static const struct UVC_INPUT_HEADER_DESCRIPTOR(1, 2) uvc_input_header = {
+ .bLength = UVC_DT_INPUT_HEADER_SIZE(1, 2),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_INPUT_HEADER,
+ .bNumFormats = 2,
+ .wTotalLength = 0, /* dynamic */
+ .bEndpointAddress = 0, /* dynamic */
+ .bmInfo = 0,
+ .bTerminalLink = 3,
+ .bStillCaptureMethod = 0,
+ .bTriggerSupport = 0,
+ .bTriggerUsage = 0,
+ .bControlSize = 1,
+ .bmaControls[0][0] = 0,
+ .bmaControls[1][0] = 4,
+};
+
+static const struct uvc_format_uncompressed uvc_format_yuv = {
+ .bLength = UVC_DT_FORMAT_UNCOMPRESSED_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_FORMAT_UNCOMPRESSED,
+ .bFormatIndex = 1,
+ .bNumFrameDescriptors = 2,
+ .guidFormat =
+ { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00,
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71},
+ .bBitsPerPixel = 16,
+ .bDefaultFrameIndex = 1,
+ .bAspectRatioX = 0,
+ .bAspectRatioY = 0,
+ .bmInterfaceFlags = 0,
+ .bCopyProtect = 0,
+};
+
+DECLARE_UVC_FRAME_UNCOMPRESSED(1);
+DECLARE_UVC_FRAME_UNCOMPRESSED(3);
+
+static const struct UVC_FRAME_UNCOMPRESSED(3) uvc_frame_yuv_360p = {
+ .bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(3),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_FRAME_UNCOMPRESSED,
+ .bFrameIndex = 1,
+ .bmCapabilities = 0,
+ .wWidth = cpu_to_le16(640),
+ .wHeight = cpu_to_le16(360),
+ .dwMinBitRate = cpu_to_le32(18432000),
+ .dwMaxBitRate = cpu_to_le32(55296000),
+ .dwMaxVideoFrameBufferSize = cpu_to_le32(460800),
+ .dwDefaultFrameInterval = cpu_to_le32(666666),
+ .bFrameIntervalType = 3,
+ .dwFrameInterval[0] = cpu_to_le32(666666),
+ .dwFrameInterval[1] = cpu_to_le32(1000000),
+ .dwFrameInterval[2] = cpu_to_le32(5000000),
+};
+
+static const struct UVC_FRAME_UNCOMPRESSED(1) uvc_frame_yuv_720p = {
+ .bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(1),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_FRAME_UNCOMPRESSED,
+ .bFrameIndex = 2,
+ .bmCapabilities = 0,
+ .wWidth = cpu_to_le16(1280),
+ .wHeight = cpu_to_le16(720),
+ .dwMinBitRate = cpu_to_le32(29491200),
+ .dwMaxBitRate = cpu_to_le32(29491200),
+ .dwMaxVideoFrameBufferSize = cpu_to_le32(1843200),
+ .dwDefaultFrameInterval = cpu_to_le32(5000000),
+ .bFrameIntervalType = 1,
+ .dwFrameInterval[0] = cpu_to_le32(5000000),
+};
+
+static const struct uvc_format_mjpeg uvc_format_mjpg = {
+ .bLength = UVC_DT_FORMAT_MJPEG_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_FORMAT_MJPEG,
+ .bFormatIndex = 2,
+ .bNumFrameDescriptors = 2,
+ .bmFlags = 0,
+ .bDefaultFrameIndex = 1,
+ .bAspectRatioX = 0,
+ .bAspectRatioY = 0,
+ .bmInterfaceFlags = 0,
+ .bCopyProtect = 0,
+};
+
+DECLARE_UVC_FRAME_MJPEG(1);
+DECLARE_UVC_FRAME_MJPEG(3);
+
+static const struct UVC_FRAME_MJPEG(3) uvc_frame_mjpg_360p = {
+ .bLength = UVC_DT_FRAME_MJPEG_SIZE(3),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_FRAME_MJPEG,
+ .bFrameIndex = 1,
+ .bmCapabilities = 0,
+ .wWidth = cpu_to_le16(640),
+ .wHeight = cpu_to_le16(360),
+ .dwMinBitRate = cpu_to_le32(18432000),
+ .dwMaxBitRate = cpu_to_le32(55296000),
+ .dwMaxVideoFrameBufferSize = cpu_to_le32(460800),
+ .dwDefaultFrameInterval = cpu_to_le32(666666),
+ .bFrameIntervalType = 3,
+ .dwFrameInterval[0] = cpu_to_le32(666666),
+ .dwFrameInterval[1] = cpu_to_le32(1000000),
+ .dwFrameInterval[2] = cpu_to_le32(5000000),
+};
+
+static const struct UVC_FRAME_MJPEG(1) uvc_frame_mjpg_720p = {
+ .bLength = UVC_DT_FRAME_MJPEG_SIZE(1),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_FRAME_MJPEG,
+ .bFrameIndex = 2,
+ .bmCapabilities = 0,
+ .wWidth = cpu_to_le16(1280),
+ .wHeight = cpu_to_le16(720),
+ .dwMinBitRate = cpu_to_le32(29491200),
+ .dwMaxBitRate = cpu_to_le32(29491200),
+ .dwMaxVideoFrameBufferSize = cpu_to_le32(1843200),
+ .dwDefaultFrameInterval = cpu_to_le32(5000000),
+ .bFrameIntervalType = 1,
+ .dwFrameInterval[0] = cpu_to_le32(5000000),
+};
+
+static const struct uvc_color_matching_descriptor uvc_color_matching = {
+ .bLength = UVC_DT_COLOR_MATCHING_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_COLOR_MATCHING,
+ .bColorPrimaries = 1,
+ .bTransferCharacteristics = 1,
+ .bMatrixCoefficients = 4,
+};
+
+static const struct uvc_descriptor_header * const uvc_control_cls[] = {
+ (const struct uvc_descriptor_header *) &uvc_control_header,
+ (const struct uvc_descriptor_header *) &uvc_camera_terminal,
+ (const struct uvc_descriptor_header *) &uvc_processing,
+ (const struct uvc_descriptor_header *) &uvc_output_terminal,
+ NULL,
+};
+
+static const struct uvc_descriptor_header * const uvc_fs_streaming_cls[] = {
+ (const struct uvc_descriptor_header *) &uvc_input_header,
+ (const struct uvc_descriptor_header *) &uvc_format_yuv,
+ (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
+ (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+ (const struct uvc_descriptor_header *) &uvc_format_mjpg,
+ (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
+ (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
+ (const struct uvc_descriptor_header *) &uvc_color_matching,
+ NULL,
+};
+
+static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = {
+ (const struct uvc_descriptor_header *) &uvc_input_header,
+ (const struct uvc_descriptor_header *) &uvc_format_yuv,
+ (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
+ (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+ (const struct uvc_descriptor_header *) &uvc_format_mjpg,
+ (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
+ (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
+ (const struct uvc_descriptor_header *) &uvc_color_matching,
+ NULL,
+};
+
+/* --------------------------------------------------------------------------
+ * USB configuration
+ */
+
+static int __init
+webcam_config_bind(struct usb_configuration *c)
+{
+ return uvc_bind_config(c, uvc_control_cls, uvc_fs_streaming_cls,
+ uvc_hs_streaming_cls);
+}
+
+static struct usb_configuration webcam_config_driver = {
+ .label = webcam_config_label,
+ .bind = webcam_config_bind,
+ .bConfigurationValue = 1,
+ .iConfiguration = 0, /* dynamic */
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+ .bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2,
+};
+
+static int /* __init_or_exit */
+webcam_unbind(struct usb_composite_dev *cdev)
+{
+ return 0;
+}
+
+static int __init
+webcam_bind(struct usb_composite_dev *cdev)
+{
+ int ret;
+
+ /* Allocate string descriptor numbers ... note that string contents
+ * can be overridden by the composite_dev glue.
+ */
+ if ((ret = usb_string_id(cdev)) < 0)
+ goto error;
+ webcam_strings[STRING_MANUFACTURER_IDX].id = ret;
+ webcam_device_descriptor.iManufacturer = ret;
+
+ if ((ret = usb_string_id(cdev)) < 0)
+ goto error;
+ webcam_strings[STRING_PRODUCT_IDX].id = ret;
+ webcam_device_descriptor.iProduct = ret;
+
+ if ((ret = usb_string_id(cdev)) < 0)
+ goto error;
+ webcam_strings[STRING_DESCRIPTION_IDX].id = ret;
+ webcam_config_driver.iConfiguration = ret;
+
+ /* Register our configuration. */
+ if ((ret = usb_add_config(cdev, &webcam_config_driver)) < 0)
+ goto error;
+
+ INFO(cdev, "Webcam Video Gadget\n");
+ return 0;
+
+error:
+ webcam_unbind(cdev);
+ return ret;
+}
+
+/* --------------------------------------------------------------------------
+ * Driver
+ */
+
+static struct usb_composite_driver webcam_driver = {
+ .name = "g_webcam",
+ .dev = &webcam_device_descriptor,
+ .strings = webcam_device_strings,
+ .bind = webcam_bind,
+ .unbind = webcam_unbind,
+};
+
+static int __init
+webcam_init(void)
+{
+ return usb_composite_register(&webcam_driver);
+}
+
+static void __exit
+webcam_cleanup(void)
+{
+ usb_composite_unregister(&webcam_driver);
+}
+
+module_init(webcam_init);
+module_exit(webcam_cleanup);
+
+MODULE_AUTHOR("Laurent Pinchart");
+MODULE_DESCRIPTION("Webcam Video Gadget");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.1.0");
+
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 8d3df03..f865be2 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -207,6 +207,21 @@ config USB_OHCI_HCD
To compile this driver as a module, choose M here: the
module will be called ohci-hcd.
+config USB_OHCI_HCD_OMAP1
+ bool "OHCI support for OMAP1/2 chips"
+ depends on USB_OHCI_HCD && (ARCH_OMAP1 || ARCH_OMAP2)
+ default y
+ ---help---
+ Enables support for the OHCI controller on OMAP1/2 chips.
+
+config USB_OHCI_HCD_OMAP3
+ bool "OHCI support for OMAP3 and later chips"
+ depends on USB_OHCI_HCD && (ARCH_OMAP3 || ARCH_OMAP4)
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ OMAP3 and later chips.
+
config USB_OHCI_HCD_PPC_SOC
bool "OHCI support for on-chip PPC USB controller"
depends on USB_OHCI_HCD && (STB03xxx || PPC_MPC52xx)
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index e3a74e7..faa6174 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -69,6 +69,15 @@ static void au1xxx_stop_ehc(void)
au_sync();
}
+static int au1xxx_ehci_setup(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int ret = ehci_init(hcd);
+
+ ehci->need_io_watchdog = 0;
+ return ret;
+}
+
static const struct hc_driver ehci_au1xxx_hc_driver = {
.description = hcd_name,
.product_desc = "Au1xxx EHCI",
@@ -86,7 +95,7 @@ static const struct hc_driver ehci_au1xxx_hc_driver = {
* FIXME -- ehci_init() doesn't do enough here.
* See ehci-ppc-soc for a complete implementation.
*/
- .reset = ehci_init,
+ .reset = au1xxx_ehci_setup,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
@@ -215,26 +224,17 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
msleep(10);
/* Root hub was already suspended. Disable irq emission and
- * mark HW unaccessible, bail out if RH has been resumed. Use
- * the spinlock to properly synchronize with possible pending
- * RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
+ * mark HW inaccessible. The PM and USB cores make sure that
+ * the root hub is either suspended or stopped.
*/
spin_lock_irqsave(&ehci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
- rc = -EINVAL;
- goto bail;
- }
+ ehci_prepare_ports_for_controller_suspend(ehci);
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void)ehci_readl(ehci, &ehci->regs->intr_enable);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
au1xxx_stop_ehc();
-
-bail:
spin_unlock_irqrestore(&ehci->lock, flags);
// could save FLADJ in case of Vaux power loss
@@ -264,6 +264,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev)
if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
int mask = INTR_MASK;
+ ehci_prepare_ports_for_controller_resume(ehci);
if (!hcd->self.root_hub->do_remote_wakeup)
mask &= ~STS_PCD;
ehci_writel(ehci, mask, &ehci->regs->intr_enable);
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 0e26aa1..5cd967d 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -313,6 +313,7 @@ static int ehci_fsl_drv_suspend(struct device *dev)
struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
void __iomem *non_ehci = hcd->regs;
+ ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd));
if (!fsl_deep_sleep())
return 0;
@@ -327,6 +328,7 @@ static int ehci_fsl_drv_resume(struct device *dev)
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
void __iomem *non_ehci = hcd->regs;
+ ehci_prepare_ports_for_controller_resume(ehci);
if (!fsl_deep_sleep())
return 0;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 13ead00..ef3e88f 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -31,13 +31,12 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
-#include "../core/hcd.h"
-
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index c7178bc..e7d3d8d 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -106,12 +106,75 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
ehci->owned_ports = 0;
}
+static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
+ bool suspending)
+{
+ int port;
+ u32 temp;
+
+ /* If remote wakeup is enabled for the root hub but disabled
+ * for the controller, we must adjust all the port wakeup flags
+ * when the controller is suspended or resumed. In all other
+ * cases they don't need to be changed.
+ */
+ if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup ||
+ device_may_wakeup(ehci_to_hcd(ehci)->self.controller))
+ return;
+
+ /* clear phy low-power mode before changing wakeup flags */
+ if (ehci->has_hostpc) {
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *hostpc_reg;
+
+ hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
+ + HOSTPC0 + 4 * port);
+ temp = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
+ }
+ msleep(5);
+ }
+
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *reg = &ehci->regs->port_status[port];
+ u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+ u32 t2 = t1 & ~PORT_WAKE_BITS;
+
+ /* If we are suspending the controller, clear the flags.
+ * If we are resuming the controller, set the wakeup flags.
+ */
+ if (!suspending) {
+ if (t1 & PORT_CONNECT)
+ t2 |= PORT_WKOC_E | PORT_WKDISC_E;
+ else
+ t2 |= PORT_WKOC_E | PORT_WKCONN_E;
+ }
+ ehci_vdbg(ehci, "port %d, %08x -> %08x\n",
+ port + 1, t1, t2);
+ ehci_writel(ehci, t2, reg);
+ }
+
+ /* enter phy low-power mode again */
+ if (ehci->has_hostpc) {
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *hostpc_reg;
+
+ hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
+ + HOSTPC0 + 4 * port);
+ temp = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg);
+ }
+ }
+}
+
static int ehci_bus_suspend (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
int port;
int mask;
- u32 __iomem *hostpc_reg = NULL;
+ int changed;
ehci_dbg(ehci, "suspend root hub\n");
@@ -155,15 +218,13 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
*/
ehci->bus_suspended = 0;
ehci->owned_ports = 0;
+ changed = 0;
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *reg = &ehci->regs->port_status [port];
u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
- u32 t2 = t1;
+ u32 t2 = t1 & ~PORT_WAKE_BITS;
- if (ehci->has_hostpc)
- hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs
- + HOSTPC0 + 4 * (port & 0xff));
/* keep track of which ports we suspend */
if (t1 & PORT_OWNER)
set_bit(port, &ehci->owned_ports);
@@ -172,40 +233,45 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
set_bit(port, &ehci->bus_suspended);
}
- /* enable remote wakeup on all ports */
+ /* enable remote wakeup on all ports, if told to do so */
if (hcd->self.root_hub->do_remote_wakeup) {
/* only enable appropriate wake bits, otherwise the
* hardware can not go phy low power mode. If a race
* condition happens here(connection change during bits
* set), the port change detection will finally fix it.
*/
- if (t1 & PORT_CONNECT) {
+ if (t1 & PORT_CONNECT)
t2 |= PORT_WKOC_E | PORT_WKDISC_E;
- t2 &= ~PORT_WKCONN_E;
- } else {
+ else
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
- t2 &= ~PORT_WKDISC_E;
- }
- } else
- t2 &= ~PORT_WAKE_BITS;
+ }
if (t1 != t2) {
ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
port + 1, t1, t2);
ehci_writel(ehci, t2, reg);
- if (hostpc_reg) {
- u32 t3;
+ changed = 1;
+ }
+ }
- spin_unlock_irq(&ehci->lock);
- msleep(5);/* 5ms for HCD enter low pwr mode */
- spin_lock_irq(&ehci->lock);
- t3 = ehci_readl(ehci, hostpc_reg);
- ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
- t3 = ehci_readl(ehci, hostpc_reg);
- ehci_dbg(ehci, "Port%d phy low pwr mode %s\n",
+ if (changed && ehci->has_hostpc) {
+ spin_unlock_irq(&ehci->lock);
+ msleep(5); /* 5 ms for HCD to enter low-power mode */
+ spin_lock_irq(&ehci->lock);
+
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *hostpc_reg;
+ u32 t3;
+
+ hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
+ + HOSTPC0 + 4 * port);
+ t3 = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
+ t3 = ehci_readl(ehci, hostpc_reg);
+ ehci_dbg(ehci, "Port %d phy low-power mode %s\n",
port, (t3 & HOSTPC_PHCD) ?
"succeeded" : "failed");
- }
}
}
@@ -291,6 +357,25 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
msleep(8);
spin_lock_irq(&ehci->lock);
+ /* clear phy low-power mode before resume */
+ if (ehci->bus_suspended && ehci->has_hostpc) {
+ i = HCS_N_PORTS(ehci->hcs_params);
+ while (i--) {
+ if (test_bit(i, &ehci->bus_suspended)) {
+ u32 __iomem *hostpc_reg;
+
+ hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
+ + HOSTPC0 + 4 * i);
+ temp = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp & ~HOSTPC_PHCD,
+ hostpc_reg);
+ }
+ }
+ spin_unlock_irq(&ehci->lock);
+ msleep(5);
+ spin_lock_irq(&ehci->lock);
+ }
+
/* manually resume the ports we suspended during bus_suspend() */
i = HCS_N_PORTS (ehci->hcs_params);
while (i--) {
@@ -659,7 +744,7 @@ static int ehci_hub_control (
* Even if OWNER is set, so the port is owned by the
* companion controller, khubd needs to be able to clear
* the port-change status bits (especially
- * USB_PORT_FEAT_C_CONNECTION).
+ * USB_PORT_STAT_C_CONNECTION).
*/
switch (wValue) {
@@ -675,16 +760,25 @@ static int ehci_hub_control (
goto error;
if (ehci->no_selective_suspend)
break;
- if (temp & PORT_SUSPEND) {
- if ((temp & PORT_PE) == 0)
- goto error;
- /* resume signaling for 20 msec */
- temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
- ehci_writel(ehci, temp | PORT_RESUME,
- status_reg);
- ehci->reset_done [wIndex] = jiffies
- + msecs_to_jiffies (20);
+ if (!(temp & PORT_SUSPEND))
+ break;
+ if ((temp & PORT_PE) == 0)
+ goto error;
+
+ /* clear phy low-power mode before resume */
+ if (hostpc_reg) {
+ temp1 = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
+ hostpc_reg);
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ msleep(5);/* wait to leave low-power mode */
+ spin_lock_irqsave(&ehci->lock, flags);
}
+ /* resume signaling for 20 msec */
+ temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
+ ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+ ehci->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(20);
break;
case USB_PORT_FEAT_C_SUSPEND:
clear_bit(wIndex, &ehci->port_c_suspend);
@@ -729,12 +823,12 @@ static int ehci_hub_control (
// wPortChange bits
if (temp & PORT_CSC)
- status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
if (temp & PORT_PEC)
- status |= 1 << USB_PORT_FEAT_C_ENABLE;
+ status |= USB_PORT_STAT_C_ENABLE << 16;
if ((temp & PORT_OCC) && !ignore_oc){
- status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
/*
* Hubs should disable port power on over-current.
@@ -791,7 +885,7 @@ static int ehci_hub_control (
if ((temp & PORT_RESET)
&& time_after_eq(jiffies,
ehci->reset_done[wIndex])) {
- status |= 1 << USB_PORT_FEAT_C_RESET;
+ status |= USB_PORT_STAT_C_RESET << 16;
ehci->reset_done [wIndex] = 0;
/* force reset to complete */
@@ -833,7 +927,7 @@ static int ehci_hub_control (
*/
if (temp & PORT_CONNECT) {
- status |= 1 << USB_PORT_FEAT_CONNECTION;
+ status |= USB_PORT_STAT_CONNECTION;
// status may be from integrated TT
if (ehci->has_hostpc) {
temp1 = ehci_readl(ehci, hostpc_reg);
@@ -842,11 +936,11 @@ static int ehci_hub_control (
status |= ehci_port_speed(ehci, temp);
}
if (temp & PORT_PE)
- status |= 1 << USB_PORT_FEAT_ENABLE;
+ status |= USB_PORT_STAT_ENABLE;
/* maybe the port was unsuspended without our knowledge */
if (temp & (PORT_SUSPEND|PORT_RESUME)) {
- status |= 1 << USB_PORT_FEAT_SUSPEND;
+ status |= USB_PORT_STAT_SUSPEND;
} else if (test_bit(wIndex, &ehci->suspended_ports)) {
clear_bit(wIndex, &ehci->suspended_ports);
ehci->reset_done[wIndex] = 0;
@@ -855,13 +949,13 @@ static int ehci_hub_control (
}
if (temp & PORT_OC)
- status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
+ status |= USB_PORT_STAT_OVERCURRENT;
if (temp & PORT_RESET)
- status |= 1 << USB_PORT_FEAT_RESET;
+ status |= USB_PORT_STAT_RESET;
if (temp & PORT_POWER)
- status |= 1 << USB_PORT_FEAT_POWER;
+ status |= USB_PORT_STAT_POWER;
if (test_bit(wIndex, &ehci->port_c_suspend))
- status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ status |= USB_PORT_STAT_C_SUSPEND << 16;
#ifndef VERBOSE_DEBUG
if (status & ~0xffff) /* only if wPortChange is interesting */
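
The mechanical conversion above relies on the layout of the value GetPortStatus returns: wPortStatus occupies the low 16 bits and wPortChange the high 16 bits, and the USB_PORT_STAT_* constants are masks rather than bit numbers, which is why status bits are OR'ed in directly while change bits get an explicit "<< 16". A minimal userspace sketch of that packing (mask values mirror include/linux/usb/ch11.h; the program itself is illustrative only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

#define USB_PORT_STAT_CONNECTION        0x0001
#define USB_PORT_STAT_ENABLE            0x0002
#define USB_PORT_STAT_HIGH_SPEED        0x0400
#define USB_PORT_STAT_C_CONNECTION      0x0001  /* wPortChange bit */

int main(void)
{
        uint32_t status = 0;

        /* wPortStatus lives in the low half... */
        status |= USB_PORT_STAT_CONNECTION;
        status |= USB_PORT_STAT_ENABLE;
        status |= USB_PORT_STAT_HIGH_SPEED;

        /* ...wPortChange in the high half, hence the "<< 16" */
        status |= USB_PORT_STAT_C_CONNECTION << 16;

        printf("wPortStatus=0x%04x wPortChange=0x%04x\n",
               status & 0xffff, status >> 16);
        return 0;
}
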
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 0cd6c77..5450e62 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -116,6 +116,8 @@
#define OMAP_UHH_DEBUG_CSR (0x44)
/* EHCI Register Set */
+#define EHCI_INSNREG04 (0xA0)
+#define EHCI_INSNREG04_DISABLE_UNSUSPEND (1 << 5)
#define EHCI_INSNREG05_ULPI (0xA4)
#define EHCI_INSNREG05_ULPI_CONTROL_SHIFT 31
#define EHCI_INSNREG05_ULPI_PORTSEL_SHIFT 24
@@ -352,8 +354,8 @@ static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
/* Bypass the TLL module for PHY mode operation */
- if (omap_rev() <= OMAP3430_REV_ES2_1) {
- dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1 \n");
+ if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) {
+ dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) ||
(omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) ||
(omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY))
@@ -382,6 +384,18 @@ static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
+ /*
+ * An undocumented "feature" in the OMAP3 EHCI controller
+ * causes suspended ports to be taken out of suspend when
+ * the USBCMD.Run/Stop bit is cleared (for example when
+ * we do ehci_bus_suspend).
+ * This breaks suspend-resume if the root-hub is allowed
+ * to suspend. Writing 1 to this undocumented register bit
+ * disables this feature and restores normal behavior.
+ */
+ ehci_omap_writel(omap->ehci_base, EHCI_INSNREG04,
+ EHCI_INSNREG04_DISABLE_UNSUSPEND);
+
if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) ||
(omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) ||
(omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)) {
@@ -659,6 +673,9 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
goto err_add_hcd;
}
+ /* root ports should always stay powered */
+ ehci_port_power(omap->ehci, 1);
+
return 0;
err_add_hcd:
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index ead5f4f..d43d176 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -109,6 +109,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
return retval;
switch (pdev->vendor) {
+ case PCI_VENDOR_ID_NEC:
+ ehci->need_io_watchdog = 0;
+ break;
case PCI_VENDOR_ID_INTEL:
ehci->need_io_watchdog = 0;
if (pdev->device == 0x27cc) {
@@ -284,23 +287,15 @@ static int ehci_pci_suspend(struct usb_hcd *hcd)
msleep(10);
/* Root hub was already suspended. Disable irq emission and
- * mark HW unaccessible, bail out if RH has been resumed. Use
- * the spinlock to properly synchronize with possible pending
- * RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
+ * mark HW unaccessible. The PM and USB cores make sure that
+ * the root hub is either suspended or stopped.
*/
spin_lock_irqsave (&ehci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
- rc = -EINVAL;
- goto bail;
- }
+ ehci_prepare_ports_for_controller_suspend(ehci);
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void)ehci_readl(ehci, &ehci->regs->intr_enable);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- bail:
spin_unlock_irqrestore (&ehci->lock, flags);
// could save FLADJ in case of Vaux power loss
@@ -330,6 +325,7 @@ static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
!hibernated) {
int mask = INTR_MASK;
+ ehci_prepare_ports_for_controller_resume(ehci);
if (!hcd->self.root_hub->do_remote_wakeup)
mask &= ~STS_PCD;
ehci_writel(ehci, mask, &ehci->regs->intr_enable);
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 8952177..11a79c4 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -663,7 +663,7 @@ qh_urb_transaction (
*/
i = urb->num_sgs;
if (len > 0 && i > 0) {
- sg = urb->sg->sg;
+ sg = urb->sg;
buf = sg_dma_address(sg);
/* urb->transfer_buffer_length may be smaller than the
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 556c0b4..650a687 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -536,6 +536,16 @@ struct ehci_fstn {
/*-------------------------------------------------------------------------*/
+/* Prepare the PORTSC wakeup flags during controller suspend/resume */
+
+#define ehci_prepare_ports_for_controller_suspend(ehci) \
+ ehci_adjust_port_wakeup_flags(ehci, true);
+
+#define ehci_prepare_ports_for_controller_resume(ehci) \
+ ehci_adjust_port_wakeup_flags(ehci, false);
+
+/*-------------------------------------------------------------------------*/
+
#ifdef CONFIG_USB_EHCI_ROOT_HUB_TT
/*
@@ -556,20 +566,20 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
case 0:
return 0;
case 1:
- return (1<<USB_PORT_FEAT_LOWSPEED);
+ return USB_PORT_STAT_LOW_SPEED;
case 2:
default:
- return (1<<USB_PORT_FEAT_HIGHSPEED);
+ return USB_PORT_STAT_HIGH_SPEED;
}
}
- return (1<<USB_PORT_FEAT_HIGHSPEED);
+ return USB_PORT_STAT_HIGH_SPEED;
}
#else
#define ehci_is_TDI(e) (0)
-#define ehci_port_speed(ehci, portsc) (1<<USB_PORT_FEAT_HIGHSPEED)
+#define ehci_port_speed(ehci, portsc) USB_PORT_STAT_HIGH_SPEED
#endif
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/fhci-dbg.c b/drivers/usb/host/fhci-dbg.c
index e799f86..6fe5500 100644
--- a/drivers/usb/host/fhci-dbg.c
+++ b/drivers/usb/host/fhci-dbg.c
@@ -20,7 +20,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/usb.h>
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "fhci.h"
void fhci_dbg_isr(struct fhci_hcd *fhci, int usb_er)
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 15379c6..9045337 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -25,12 +25,12 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <asm/qe.h>
#include <asm/fsl_gtm.h>
-#include "../core/hcd.h"
#include "fhci.h"
void fhci_start_sof_timer(struct fhci_hcd *fhci)
diff --git a/drivers/usb/host/fhci-hub.c b/drivers/usb/host/fhci-hub.c
index 0cfaedc..348fe62 100644
--- a/drivers/usb/host/fhci-hub.c
+++ b/drivers/usb/host/fhci-hub.c
@@ -22,9 +22,9 @@
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/gpio.h>
#include <asm/qe.h>
-#include "../core/hcd.h"
#include "fhci.h"
/* virtual root hub specific descriptor */
diff --git a/drivers/usb/host/fhci-mem.c b/drivers/usb/host/fhci-mem.c
index 5591bfb..b0b88f5 100644
--- a/drivers/usb/host/fhci-mem.c
+++ b/drivers/usb/host/fhci-mem.c
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/usb.h>
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "fhci.h"
static void init_td(struct td *td)
diff --git a/drivers/usb/host/fhci-q.c b/drivers/usb/host/fhci-q.c
index f73c923..03be749 100644
--- a/drivers/usb/host/fhci-q.c
+++ b/drivers/usb/host/fhci-q.c
@@ -22,7 +22,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/usb.h>
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "fhci.h"
/* maps the hardware error code to the USB error code */
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index ff43747..4f2cbdc 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -24,9 +24,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <asm/qe.h>
#include <asm/fsl_gtm.h>
-#include "../core/hcd.h"
#include "fhci.h"
static void recycle_frame(struct fhci_usb *usb, struct packet *pkt)
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index 5701347..7be548c 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -22,7 +22,7 @@
#include <linux/list.h>
#include <linux/io.h>
#include <linux/usb.h>
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "fhci.h"
#define DUMMY_BD_BUFFER 0xdeadbeef
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
index 72dae1c..71c3caa 100644
--- a/drivers/usb/host/fhci.h
+++ b/drivers/usb/host/fhci.h
@@ -20,13 +20,14 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/bug.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/io.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <asm/qe.h>
-#include "../core/hcd.h"
#define USB_CLOCK 48000000
@@ -515,9 +516,13 @@ static inline int cq_put(struct kfifo *kfifo, void *p)
static inline void *cq_get(struct kfifo *kfifo)
{
- void *p = NULL;
+ unsigned int sz;
+ void *p;
+
+ sz = kfifo_out(kfifo, (void *)&p, sizeof(p));
+ if (sz != sizeof(p))
+ return NULL;
- kfifo_out(kfifo, (void *)&p, sizeof(p));
return p;
}
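
The cq_get() change above hinges on kfifo_out() returning the number of bytes it actually copied, which can be short when the fifo is empty. A kernel-style sketch of the same pattern, assuming a fifo that stores raw pointers as fhci does (illustrative only, not part of the patch):

#include <linux/kfifo.h>

/*
 * Pop one pointer off a byte-oriented kfifo, or return NULL if the
 * fifo is empty (or holds only a partial element) -- mirrors the
 * corrected cq_get() above.
 */
static void *pop_ptr(struct kfifo *fifo)
{
        void *p;
        unsigned int copied;

        copied = kfifo_out(fifo, &p, sizeof(p));
        if (copied != sizeof(p))
                return NULL;

        return p;
}
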
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 8a12f29..ca0e98d 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -56,8 +56,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
-#include "../core/hcd.h"
#include "imx21-hcd.h"
#ifdef DEBUG
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 92de71d..d9e8212 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -65,6 +65,7 @@
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/isp116x.h>
+#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <asm/io.h>
@@ -72,7 +73,6 @@
#include <asm/system.h>
#include <asm/byteorder.h>
-#include "../core/hcd.h"
#include "isp116x.h"
#define DRIVER_VERSION "03 Nov 2005"
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 217fb51..20a0dfe 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -77,6 +77,7 @@
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/isp1362.h>
+#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/io.h>
@@ -95,7 +96,6 @@ module_param(dbg_level, int, 0);
#define STUB_DEBUG_FILE
#endif
-#include "../core/hcd.h"
#include "../core/usb.h"
#include "isp1362.h"
@@ -1265,7 +1265,7 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd,
/* don't submit to a dead or disabled port */
if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
- (1 << USB_PORT_FEAT_ENABLE)) ||
+ USB_PORT_STAT_ENABLE) ||
!HC_IS_RUNNING(hcd->state)) {
kfree(ep);
retval = -ENODEV;
@@ -2217,7 +2217,7 @@ static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
if (isp1362_hcd->pde)
- remove_proc_entry(proc_filename, 0);
+ remove_proc_entry(proc_filename, NULL);
}
#endif
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 9f01293..dbcafa2 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/io.h>
@@ -21,7 +22,6 @@
#include <asm/unaligned.h>
#include <asm/cacheflush.h>
-#include "../core/hcd.h"
#include "isp1760-hcd.h"
static struct kmem_cache *qtd_cachep;
@@ -111,7 +111,7 @@ struct isp1760_qh {
u32 ping;
};
-#define ehci_port_speed(priv, portsc) (1 << USB_PORT_FEAT_HIGHSPEED)
+#define ehci_port_speed(priv, portsc) USB_PORT_STAT_HIGH_SPEED
static unsigned int isp1760_readl(__u32 __iomem *regs)
{
@@ -713,12 +713,11 @@ static int check_error(struct ptd *ptd)
u32 dw3;
dw3 = le32_to_cpu(ptd->dw3);
- if (dw3 & DW3_HALT_BIT)
+ if (dw3 & DW3_HALT_BIT) {
error = -EPIPE;
- if (dw3 & DW3_ERROR_BIT) {
- printk(KERN_ERR "error bit is set in DW3\n");
- error = -EPIPE;
+ if (dw3 & DW3_ERROR_BIT)
+ pr_err("error bit is set in DW3\n");
}
if (dw3 & DW3_QTD_ACTIVE) {
@@ -1923,7 +1922,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
* Even if OWNER is set, so the port is owned by the
* companion controller, khubd needs to be able to clear
* the port-change status bits (especially
- * USB_PORT_FEAT_C_CONNECTION).
+ * USB_PORT_STAT_C_CONNECTION).
*/
switch (wValue) {
@@ -1987,7 +1986,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
/* wPortChange bits */
if (temp & PORT_CSC)
- status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
/* whoever resumes must GetPortStatus to complete it!! */
@@ -2007,7 +2006,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
/* resume completed? */
else if (time_after_eq(jiffies,
priv->reset_done)) {
- status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ status |= USB_PORT_STAT_C_SUSPEND << 16;
priv->reset_done = 0;
/* stop resume signaling */
@@ -2031,7 +2030,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
if ((temp & PORT_RESET)
&& time_after_eq(jiffies,
priv->reset_done)) {
- status |= 1 << USB_PORT_FEAT_C_RESET;
+ status |= USB_PORT_STAT_C_RESET << 16;
priv->reset_done = 0;
/* force reset to complete */
@@ -2062,18 +2061,18 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
printk(KERN_ERR "Warning: PORT_OWNER is set\n");
if (temp & PORT_CONNECT) {
- status |= 1 << USB_PORT_FEAT_CONNECTION;
+ status |= USB_PORT_STAT_CONNECTION;
/* status may be from integrated TT */
status |= ehci_port_speed(priv, temp);
}
if (temp & PORT_PE)
- status |= 1 << USB_PORT_FEAT_ENABLE;
+ status |= USB_PORT_STAT_ENABLE;
if (temp & (PORT_SUSPEND|PORT_RESUME))
- status |= 1 << USB_PORT_FEAT_SUSPEND;
+ status |= USB_PORT_STAT_SUSPEND;
if (temp & PORT_RESET)
- status |= 1 << USB_PORT_FEAT_RESET;
+ status |= USB_PORT_STAT_RESET;
if (temp & PORT_POWER)
- status |= 1 << USB_PORT_FEAT_POWER;
+ status |= USB_PORT_STAT_POWER;
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
break;
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 4293cfd..8f0259e 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -13,8 +13,8 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/usb/isp1760.h>
+#include <linux/usb/hcd.h>
-#include "../core/hcd.h"
#include "isp1760-hcd.h"
#ifdef CONFIG_PPC_OF
@@ -36,7 +36,7 @@ static int of_isp1760_probe(struct of_device *dev,
struct resource memory;
struct of_irq oirq;
int virq;
- u64 res_len;
+ resource_size_t res_len;
int ret;
const unsigned int *prop;
unsigned int devflags = 0;
@@ -45,13 +45,12 @@ static int of_isp1760_probe(struct of_device *dev,
if (ret)
return -ENXIO;
- res = request_mem_region(memory.start, memory.end - memory.start + 1,
- dev_name(&dev->dev));
+ res_len = resource_size(&memory);
+
+ res = request_mem_region(memory.start, res_len, dev_name(&dev->dev));
if (!res)
return -EBUSY;
- res_len = memory.end - memory.start + 1;
-
if (of_irq_map_one(dp, 0, &oirq)) {
ret = -ENODEV;
goto release_reg;
@@ -92,7 +91,7 @@ static int of_isp1760_probe(struct of_device *dev,
return ret;
release_reg:
- release_mem_region(memory.start, memory.end - memory.start + 1);
+ release_mem_region(memory.start, res_len);
return ret;
}
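
resource_size() in linux/ioport.h evaluates to end - start + 1, so the open-coded length and the helper above are equivalent; the gain is that request_mem_region() and release_mem_region() now share one computed length. A tiny userspace sketch of the arithmetic, with the struct reduced to the two fields involved (illustrative only):

#include <stdio.h>

struct resource {                 /* reduced to the fields that matter */
        unsigned long start;
        unsigned long end;        /* inclusive, as in the kernel */
};

static unsigned long resource_size(const struct resource *res)
{
        return res->end - res->start + 1;
}

int main(void)
{
        struct resource memory = { .start = 0x40000000, .end = 0x4000ffff };

        printf("length = 0x%lx\n", resource_size(&memory)); /* 0x10000 */
        return 0;
}
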
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index afe59be..fc57655 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -32,6 +32,7 @@
#include <linux/list.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
+#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>
@@ -43,7 +44,6 @@
#include <asm/unaligned.h>
#include <asm/byteorder.h>
-#include "../core/hcd.h"
#define DRIVER_AUTHOR "Roman Weissgaerber, David Brownell"
#define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver"
@@ -1006,9 +1006,14 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_hcd_s3c2410_driver
#endif
-#ifdef CONFIG_ARCH_OMAP
+#ifdef CONFIG_USB_OHCI_HCD_OMAP1
#include "ohci-omap.c"
-#define PLATFORM_DRIVER ohci_hcd_omap_driver
+#define OMAP1_PLATFORM_DRIVER ohci_hcd_omap_driver
+#endif
+
+#ifdef CONFIG_USB_OHCI_HCD_OMAP3
+#include "ohci-omap3.c"
+#define OMAP3_PLATFORM_DRIVER ohci_hcd_omap3_driver
#endif
#ifdef CONFIG_ARCH_LH7A404
@@ -1092,6 +1097,8 @@ MODULE_LICENSE ("GPL");
#if !defined(PCI_DRIVER) && \
!defined(PLATFORM_DRIVER) && \
+ !defined(OMAP1_PLATFORM_DRIVER) && \
+ !defined(OMAP3_PLATFORM_DRIVER) && \
!defined(OF_PLATFORM_DRIVER) && \
!defined(SA1111_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && \
@@ -1133,6 +1140,18 @@ static int __init ohci_hcd_mod_init(void)
goto error_platform;
#endif
+#ifdef OMAP1_PLATFORM_DRIVER
+ retval = platform_driver_register(&OMAP1_PLATFORM_DRIVER);
+ if (retval < 0)
+ goto error_omap1_platform;
+#endif
+
+#ifdef OMAP3_PLATFORM_DRIVER
+ retval = platform_driver_register(&OMAP3_PLATFORM_DRIVER);
+ if (retval < 0)
+ goto error_omap3_platform;
+#endif
+
#ifdef OF_PLATFORM_DRIVER
retval = of_register_platform_driver(&OF_PLATFORM_DRIVER);
if (retval < 0)
@@ -1200,6 +1219,14 @@ static int __init ohci_hcd_mod_init(void)
platform_driver_unregister(&PLATFORM_DRIVER);
error_platform:
#endif
+#ifdef OMAP1_PLATFORM_DRIVER
+ platform_driver_unregister(&OMAP1_PLATFORM_DRIVER);
+ error_omap1_platform:
+#endif
+#ifdef OMAP3_PLATFORM_DRIVER
+ platform_driver_unregister(&OMAP3_PLATFORM_DRIVER);
+ error_omap3_platform:
+#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
error_ps3:
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
new file mode 100644
index 0000000..2cc8a50
--- /dev/null
+++ b/drivers/usb/host/ohci-omap3.c
@@ -0,0 +1,735 @@
+/*
+ * ohci-omap3.c - driver for OHCI on OMAP3 and later processors
+ *
+ * Bus Glue for OMAP3 USBHOST 3 port OHCI controller
+ * This controller is also used in later OMAPs and AM35x chips
+ *
+ * Copyright (C) 2007-2010 Texas Instruments, Inc.
+ * Author: Vikram Pandita <vikram.pandita@ti.com>
+ * Author: Anand Gadiyar <gadiyar@ti.com>
+ *
+ * Based on ehci-omap.c and some other ohci glue layers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * TODO (last updated Mar 10th, 2010):
+ * - add kernel-doc
+ * - Factor out code common to EHCI to a separate file
+ * - Make EHCI and OHCI coexist together
+ * - needs newer silicon versions to actually work
+ * - the last one to be loaded currently steps on the other's toes
+ * - Add hooks for configuring transceivers, etc. at init/exit
+ * - Add aggressive clock-management code
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <plat/usb.h>
+
+/*
+ * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES
+ * Use ohci_omap_readl()/ohci_omap_writel() functions
+ */
+
+/* TLL Register Set */
+#define OMAP_USBTLL_REVISION (0x00)
+#define OMAP_USBTLL_SYSCONFIG (0x10)
+#define OMAP_USBTLL_SYSCONFIG_CACTIVITY (1 << 8)
+#define OMAP_USBTLL_SYSCONFIG_SIDLEMODE (1 << 3)
+#define OMAP_USBTLL_SYSCONFIG_ENAWAKEUP (1 << 2)
+#define OMAP_USBTLL_SYSCONFIG_SOFTRESET (1 << 1)
+#define OMAP_USBTLL_SYSCONFIG_AUTOIDLE (1 << 0)
+
+#define OMAP_USBTLL_SYSSTATUS (0x14)
+#define OMAP_USBTLL_SYSSTATUS_RESETDONE (1 << 0)
+
+#define OMAP_USBTLL_IRQSTATUS (0x18)
+#define OMAP_USBTLL_IRQENABLE (0x1C)
+
+#define OMAP_TLL_SHARED_CONF (0x30)
+#define OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN (1 << 6)
+#define OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN (1 << 5)
+#define OMAP_TLL_SHARED_CONF_USB_DIVRATION (1 << 2)
+#define OMAP_TLL_SHARED_CONF_FCLK_REQ (1 << 1)
+#define OMAP_TLL_SHARED_CONF_FCLK_IS_ON (1 << 0)
+
+#define OMAP_TLL_CHANNEL_CONF(num) (0x040 + 0x004 * num)
+#define OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT 24
+#define OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF (1 << 11)
+#define OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE (1 << 10)
+#define OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE (1 << 9)
+#define OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE (1 << 8)
+#define OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS (1 << 1)
+#define OMAP_TLL_CHANNEL_CONF_CHANEN (1 << 0)
+
+#define OMAP_TLL_CHANNEL_COUNT 3
+
+/* UHH Register Set */
+#define OMAP_UHH_REVISION (0x00)
+#define OMAP_UHH_SYSCONFIG (0x10)
+#define OMAP_UHH_SYSCONFIG_MIDLEMODE (1 << 12)
+#define OMAP_UHH_SYSCONFIG_CACTIVITY (1 << 8)
+#define OMAP_UHH_SYSCONFIG_SIDLEMODE (1 << 3)
+#define OMAP_UHH_SYSCONFIG_ENAWAKEUP (1 << 2)
+#define OMAP_UHH_SYSCONFIG_SOFTRESET (1 << 1)
+#define OMAP_UHH_SYSCONFIG_AUTOIDLE (1 << 0)
+
+#define OMAP_UHH_SYSSTATUS (0x14)
+#define OMAP_UHH_SYSSTATUS_UHHRESETDONE (1 << 0)
+#define OMAP_UHH_SYSSTATUS_OHCIRESETDONE (1 << 1)
+#define OMAP_UHH_SYSSTATUS_EHCIRESETDONE (1 << 2)
+#define OMAP_UHH_HOSTCONFIG (0x40)
+#define OMAP_UHH_HOSTCONFIG_ULPI_BYPASS (1 << 0)
+#define OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS (1 << 0)
+#define OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS (1 << 11)
+#define OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS (1 << 12)
+#define OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN (1 << 2)
+#define OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN (1 << 3)
+#define OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN (1 << 4)
+#define OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN (1 << 5)
+#define OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS (1 << 8)
+#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS (1 << 9)
+#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS (1 << 10)
+
+#define OMAP_UHH_DEBUG_CSR (0x44)
+
+/*-------------------------------------------------------------------------*/
+
+static inline void ohci_omap_writel(void __iomem *base, u32 reg, u32 val)
+{
+ __raw_writel(val, base + reg);
+}
+
+static inline u32 ohci_omap_readl(void __iomem *base, u32 reg)
+{
+ return __raw_readl(base + reg);
+}
+
+static inline void ohci_omap_writeb(void __iomem *base, u8 reg, u8 val)
+{
+ __raw_writeb(val, base + reg);
+}
+
+static inline u8 ohci_omap_readb(void __iomem *base, u8 reg)
+{
+ return __raw_readb(base + reg);
+}
+
+/*-------------------------------------------------------------------------*/
+
+struct ohci_hcd_omap3 {
+ struct ohci_hcd *ohci;
+ struct device *dev;
+
+ struct clk *usbhost_ick;
+ struct clk *usbhost2_120m_fck;
+ struct clk *usbhost1_48m_fck;
+ struct clk *usbtll_fck;
+ struct clk *usbtll_ick;
+
+ /* port_mode: TLL/PHY, 2/3/4/6-PIN, DP-DM/DAT-SE0 */
+ enum ohci_omap3_port_mode port_mode[OMAP3_HS_USB_PORTS];
+ void __iomem *uhh_base;
+ void __iomem *tll_base;
+ void __iomem *ohci_base;
+
+ unsigned es2_compatibility:1;
+};
+
+/*-------------------------------------------------------------------------*/
+
+static void ohci_omap3_clock_power(struct ohci_hcd_omap3 *omap, int on)
+{
+ if (on) {
+ clk_enable(omap->usbtll_ick);
+ clk_enable(omap->usbtll_fck);
+ clk_enable(omap->usbhost_ick);
+ clk_enable(omap->usbhost1_48m_fck);
+ clk_enable(omap->usbhost2_120m_fck);
+ } else {
+ clk_disable(omap->usbhost2_120m_fck);
+ clk_disable(omap->usbhost1_48m_fck);
+ clk_disable(omap->usbhost_ick);
+ clk_disable(omap->usbtll_fck);
+ clk_disable(omap->usbtll_ick);
+ }
+}
+
+static int ohci_omap3_init(struct usb_hcd *hcd)
+{
+ dev_dbg(hcd->self.controller, "starting OHCI controller\n");
+
+ return ohci_init(hcd_to_ohci(hcd));
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int ohci_omap3_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
+
+ /*
+ * RemoteWakeupConnected has to be set explicitly before
+ * calling ohci_run. The reset value of RWC is 0.
+ */
+ ohci->hc_control = OHCI_CTRL_RWC;
+ writel(OHCI_CTRL_RWC, &ohci->regs->control);
+
+ ret = ohci_run(ohci);
+
+ if (ret < 0) {
+ dev_err(hcd->self.controller, "can't start\n");
+ ohci_stop(hcd);
+ }
+
+ return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * convert the port-mode enum to a value we can use in the FSLSMODE
+ * field of USBTLL_CHANNEL_CONF
+ */
+static unsigned ohci_omap3_fslsmode(enum ohci_omap3_port_mode mode)
+{
+ switch (mode) {
+ case OMAP_OHCI_PORT_MODE_UNUSED:
+ case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
+ return 0x0;
+
+ case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
+ return 0x1;
+
+ case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
+ return 0x2;
+
+ case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
+ return 0x3;
+
+ case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
+ return 0x4;
+
+ case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
+ return 0x5;
+
+ case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
+ return 0x6;
+
+ case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
+ return 0x7;
+
+ case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
+ return 0xA;
+
+ case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
+ return 0xB;
+ default:
+ pr_warning("Invalid port mode, using default\n");
+ return 0x0;
+ }
+}
+
+static void ohci_omap3_tll_config(struct ohci_hcd_omap3 *omap)
+{
+ u32 reg;
+ int i;
+
+ /* Program TLL SHARED CONF */
+ reg = ohci_omap_readl(omap->tll_base, OMAP_TLL_SHARED_CONF);
+ reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN;
+ reg &= ~OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN;
+ reg |= OMAP_TLL_SHARED_CONF_USB_DIVRATION;
+ reg |= OMAP_TLL_SHARED_CONF_FCLK_IS_ON;
+ ohci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg);
+
+ /* Program each TLL channel */
+ /*
+ * REVISIT: Only the 3-pin and 4-pin PHY modes have
+ * actually been tested.
+ */
+ for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
+
+ /* Enable only those channels that are actually used */
+ if (omap->port_mode[i] == OMAP_OHCI_PORT_MODE_UNUSED)
+ continue;
+
+ reg = ohci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
+ reg |= ohci_omap3_fslsmode(omap->port_mode[i])
+ << OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT;
+ reg |= OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS;
+ reg |= OMAP_TLL_CHANNEL_CONF_CHANEN;
+ ohci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
+ }
+}
+
+/* omap3_start_ohci
+ * - Start the TI USBHOST controller
+ */
+static int omap3_start_ohci(struct ohci_hcd_omap3 *omap, struct usb_hcd *hcd)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ u32 reg = 0;
+ int ret = 0;
+
+ dev_dbg(omap->dev, "starting TI OHCI USB Controller\n");
+
+ /* Get all the clock handles we need */
+ omap->usbhost_ick = clk_get(omap->dev, "usbhost_ick");
+ if (IS_ERR(omap->usbhost_ick)) {
+ dev_err(omap->dev, "could not get usbhost_ick\n");
+ ret = PTR_ERR(omap->usbhost_ick);
+ goto err_host_ick;
+ }
+
+ omap->usbhost2_120m_fck = clk_get(omap->dev, "usbhost_120m_fck");
+ if (IS_ERR(omap->usbhost2_120m_fck)) {
+ dev_err(omap->dev, "could not get usbhost_120m_fck\n");
+ ret = PTR_ERR(omap->usbhost2_120m_fck);
+ goto err_host_120m_fck;
+ }
+
+ omap->usbhost1_48m_fck = clk_get(omap->dev, "usbhost_48m_fck");
+ if (IS_ERR(omap->usbhost1_48m_fck)) {
+ dev_err(omap->dev, "could not get usbhost_48m_fck\n");
+ ret = PTR_ERR(omap->usbhost1_48m_fck);
+ goto err_host_48m_fck;
+ }
+
+ omap->usbtll_fck = clk_get(omap->dev, "usbtll_fck");
+ if (IS_ERR(omap->usbtll_fck)) {
+ dev_err(omap->dev, "could not get usbtll_fck\n");
+ ret = PTR_ERR(omap->usbtll_fck);
+ goto err_tll_fck;
+ }
+
+ omap->usbtll_ick = clk_get(omap->dev, "usbtll_ick");
+ if (IS_ERR(omap->usbtll_ick)) {
+ dev_err(omap->dev, "could not get usbtll_ick\n");
+ ret = PTR_ERR(omap->usbtll_ick);
+ goto err_tll_ick;
+ }
+
+ /* Now enable all the clocks in the correct order */
+ ohci_omap3_clock_power(omap, 1);
+
+ /* perform TLL soft reset, and wait until reset is complete */
+ ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+ OMAP_USBTLL_SYSCONFIG_SOFTRESET);
+
+ /* Wait for TLL reset to complete */
+ while (!(ohci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+ & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(omap->dev, "operation timed out\n");
+ ret = -EINVAL;
+ goto err_sys_status;
+ }
+ }
+
+ dev_dbg(omap->dev, "TLL reset done\n");
+
+ /* (1<<3) = no idle mode only for initial debugging */
+ ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+ OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
+ OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
+ OMAP_USBTLL_SYSCONFIG_CACTIVITY);
+
+
+ /* Put UHH in NoIdle/NoStandby mode */
+ reg = ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
+ reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
+ | OMAP_UHH_SYSCONFIG_SIDLEMODE
+ | OMAP_UHH_SYSCONFIG_CACTIVITY
+ | OMAP_UHH_SYSCONFIG_MIDLEMODE);
+ reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
+ reg &= ~OMAP_UHH_SYSCONFIG_SOFTRESET;
+
+ ohci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
+
+ reg = ohci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
+
+ /* setup ULPI bypass and burst configurations */
+ reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
+ | OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN
+ | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
+ reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;
+
+ /*
+ * REVISIT: Pi_CONNECT_STATUS controls MStandby
+ * assertion and Swakeup generation - let us not
+ * worry about this for now. OMAP HWMOD framework
+ * might take care of this later. If not, we can
+ * update these registers when adding aggressive
+ * clock management code.
+ *
+ * For now, turn off all the Pi_CONNECT_STATUS bits
+ *
+ if (omap->port_mode[0] == OMAP_OHCI_PORT_MODE_UNUSED)
+ reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
+ if (omap->port_mode[1] == OMAP_OHCI_PORT_MODE_UNUSED)
+ reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
+ if (omap->port_mode[2] == OMAP_OHCI_PORT_MODE_UNUSED)
+ reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
+ */
+ reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
+ reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
+ reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
+
+ if (omap->es2_compatibility) {
+ /*
+ * All OHCI modes need to go through the TLL,
+ * unlike in the EHCI case. So use UTMI mode
+ * for all ports for OHCI, on ES2.x silicon
+ */
+ dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+ } else {
+ dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n");
+ if (omap->port_mode[0] == OMAP_OHCI_PORT_MODE_UNUSED)
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
+ else
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
+
+ if (omap->port_mode[1] == OMAP_OHCI_PORT_MODE_UNUSED)
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
+ else
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
+
+ if (omap->port_mode[2] == OMAP_OHCI_PORT_MODE_UNUSED)
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
+ else
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
+
+ }
+ ohci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
+ dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
+
+ ohci_omap3_tll_config(omap);
+
+ return 0;
+
+err_sys_status:
+ ohci_omap3_clock_power(omap, 0);
+ clk_put(omap->usbtll_ick);
+
+err_tll_ick:
+ clk_put(omap->usbtll_fck);
+
+err_tll_fck:
+ clk_put(omap->usbhost1_48m_fck);
+
+err_host_48m_fck:
+ clk_put(omap->usbhost2_120m_fck);
+
+err_host_120m_fck:
+ clk_put(omap->usbhost_ick);
+
+err_host_ick:
+ return ret;
+}
+
+static void omap3_stop_ohci(struct ohci_hcd_omap3 *omap, struct usb_hcd *hcd)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+ dev_dbg(omap->dev, "stopping TI EHCI USB Controller\n");
+
+ /* Reset USBHOST for insmod/rmmod to work */
+ ohci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG,
+ OMAP_UHH_SYSCONFIG_SOFTRESET);
+ while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+ & OMAP_UHH_SYSSTATUS_UHHRESETDONE)) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ dev_dbg(omap->dev, "operation timed out\n");
+ }
+
+ while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+ & OMAP_UHH_SYSSTATUS_OHCIRESETDONE)) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ dev_dbg(omap->dev, "operation timed out\n");
+ }
+
+ while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+ & OMAP_UHH_SYSSTATUS_EHCIRESETDONE)) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ dev_dbg(omap->dev, "operation timed out\n");
+ }
+
+ ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));
+
+ while (!(ohci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+ & (1 << 0))) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ dev_dbg(omap->dev, "operation timed out\n");
+ }
+
+ ohci_omap3_clock_power(omap, 0);
+
+ if (omap->usbtll_fck != NULL) {
+ clk_put(omap->usbtll_fck);
+ omap->usbtll_fck = NULL;
+ }
+
+ if (omap->usbhost_ick != NULL) {
+ clk_put(omap->usbhost_ick);
+ omap->usbhost_ick = NULL;
+ }
+
+ if (omap->usbhost1_48m_fck != NULL) {
+ clk_put(omap->usbhost1_48m_fck);
+ omap->usbhost1_48m_fck = NULL;
+ }
+
+ if (omap->usbhost2_120m_fck != NULL) {
+ clk_put(omap->usbhost2_120m_fck);
+ omap->usbhost2_120m_fck = NULL;
+ }
+
+ if (omap->usbtll_ick != NULL) {
+ clk_put(omap->usbtll_ick);
+ omap->usbtll_ick = NULL;
+ }
+
+ dev_dbg(omap->dev, "Clock to USB host has been disabled\n");
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct hc_driver ohci_omap3_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "OMAP3 OHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ohci_irq,
+ .flags = HCD_USB11 | HCD_MEMORY,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ohci_omap3_init,
+ .start = ohci_omap3_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
+#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * configure so an HC device and id are always provided
+ * always called with process context; sleeping is OK
+ */
+
+/**
+ * ohci_hcd_omap3_probe - initialize OMAP-based HCDs
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
+{
+ struct ohci_hcd_omap_platform_data *pdata = pdev->dev.platform_data;
+ struct ohci_hcd_omap3 *omap;
+ struct resource *res;
+ struct usb_hcd *hcd;
+ int ret = -ENODEV;
+ int irq;
+
+ if (usb_disabled())
+ goto err_disabled;
+
+ if (!pdata) {
+ dev_dbg(&pdev->dev, "missing platform_data\n");
+ goto err_pdata;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+
+ omap = kzalloc(sizeof(*omap), GFP_KERNEL);
+ if (!omap) {
+ ret = -ENOMEM;
+ goto err_disabled;
+ }
+
+ hcd = usb_create_hcd(&ohci_omap3_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ ret = -ENOMEM;
+ goto err_create_hcd;
+ }
+
+ platform_set_drvdata(pdev, omap);
+ omap->dev = &pdev->dev;
+ omap->port_mode[0] = pdata->port_mode[0];
+ omap->port_mode[1] = pdata->port_mode[1];
+ omap->port_mode[2] = pdata->port_mode[2];
+ omap->es2_compatibility = pdata->es2_compatibility;
+ omap->ohci = hcd_to_ohci(hcd);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "OHCI ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ omap->uhh_base = ioremap(res->start, resource_size(res));
+ if (!omap->uhh_base) {
+ dev_err(&pdev->dev, "UHH ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_uhh_ioremap;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ omap->tll_base = ioremap(res->start, resource_size(res));
+ if (!omap->tll_base) {
+ dev_err(&pdev->dev, "TLL ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_tll_ioremap;
+ }
+
+ ret = omap3_start_ohci(omap, hcd);
+ if (ret) {
+ dev_dbg(&pdev->dev, "failed to start ehci\n");
+ goto err_start;
+ }
+
+ ohci_hcd_init(omap->ohci);
+
+ ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ if (ret) {
+ dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+ goto err_add_hcd;
+ }
+
+ return 0;
+
+err_add_hcd:
+ omap3_stop_ohci(omap, hcd);
+
+err_start:
+ iounmap(omap->tll_base);
+
+err_tll_ioremap:
+ iounmap(omap->uhh_base);
+
+err_uhh_ioremap:
+ iounmap(hcd->regs);
+
+err_ioremap:
+ usb_put_hcd(hcd);
+
+err_create_hcd:
+ kfree(omap);
+err_pdata:
+err_disabled:
+ return ret;
+}
+
+/*
+ * may be called without controller electrically present
+ * may be called with controller, bus, and devices active
+ */
+
+/**
+ * ohci_hcd_omap3_remove - shutdown processing for OHCI HCDs
+ * @pdev: USB Host Controller being removed
+ *
+ * Reverses the effect of ohci_hcd_omap3_probe(), first invoking
+ * the HCD's stop() method. It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ */
+static int __devexit ohci_hcd_omap3_remove(struct platform_device *pdev)
+{
+ struct ohci_hcd_omap3 *omap = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ohci_to_hcd(omap->ohci);
+
+ usb_remove_hcd(hcd);
+ omap3_stop_ohci(omap, hcd);
+ iounmap(hcd->regs);
+ iounmap(omap->tll_base);
+ iounmap(omap->uhh_base);
+ usb_put_hcd(hcd);
+ kfree(omap);
+
+ return 0;
+}
+
+static void ohci_hcd_omap3_shutdown(struct platform_device *pdev)
+{
+ struct ohci_hcd_omap3 *omap = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ohci_to_hcd(omap->ohci);
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver ohci_hcd_omap3_driver = {
+ .probe = ohci_hcd_omap3_probe,
+ .remove = __devexit_p(ohci_hcd_omap3_remove),
+ .shutdown = ohci_hcd_omap3_shutdown,
+ .driver = {
+ .name = "ohci-omap3",
+ },
+};
+
+MODULE_ALIAS("platform:ohci-omap3");
+MODULE_AUTHOR("Anand Gadiyar <gadiyar@ti.com>");
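
omap3_start_ohci() above takes five clocks with clk_get() and unwinds through a goto ladder: each error label releases only what was successfully acquired before the failing step, in reverse order. A reduced sketch of the same shape with two hypothetical clocks (the clock names are made up; illustrative only, not part of the patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/device.h>

/*
 * Acquire two clocks in order; on failure release, in reverse order,
 * only what was already obtained -- the pattern used by
 * omap3_start_ohci() above.
 */
static int example_get_clocks(struct device *dev,
                              struct clk **ick, struct clk **fck)
{
        int ret;

        *ick = clk_get(dev, "example_ick");
        if (IS_ERR(*ick))
                return PTR_ERR(*ick);

        *fck = clk_get(dev, "example_fck");
        if (IS_ERR(*fck)) {
                ret = PTR_ERR(*fck);
                goto err_put_ick;
        }

        return 0;

err_put_ick:
        clk_put(*ick);
        return ret;
}
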
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index e62b30b..f608dfd 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -34,12 +34,11 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
-#include "../core/hcd.h"
-
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>
@@ -3154,10 +3153,10 @@ static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
case 0:
return 0;
case 1:
- return 1 << USB_PORT_FEAT_LOWSPEED;
+ return USB_PORT_STAT_LOW_SPEED;
case 2:
default:
- return 1 << USB_PORT_FEAT_HIGHSPEED;
+ return USB_PORT_STAT_HIGH_SPEED;
}
}
@@ -3202,7 +3201,7 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
* Even if OWNER is set, so the port is owned by the
* companion controller, khubd needs to be able to clear
* the port-change status bits (especially
- * USB_PORT_FEAT_C_CONNECTION).
+ * USB_PORT_STAT_C_CONNECTION).
*/
switch (wValue) {
@@ -3264,11 +3263,11 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
/* wPortChange bits */
if (temp & PORT_CSC)
- status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
if (temp & PORT_PEC)
- status |= 1 << USB_PORT_FEAT_C_ENABLE;
+ status |= USB_PORT_STAT_C_ENABLE << 16;
if ((temp & PORT_OCC) && !ignore_oc)
- status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
/* whoever resumes must GetPortStatus to complete it!! */
if (temp & PORT_RESUME) {
@@ -3286,7 +3285,7 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
/* resume completed? */
else if (time_after_eq(jiffies,
oxu->reset_done[wIndex])) {
- status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ status |= USB_PORT_STAT_C_SUSPEND << 16;
oxu->reset_done[wIndex] = 0;
/* stop resume signaling */
@@ -3309,7 +3308,7 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
if ((temp & PORT_RESET)
&& time_after_eq(jiffies,
oxu->reset_done[wIndex])) {
- status |= 1 << USB_PORT_FEAT_C_RESET;
+ status |= USB_PORT_STAT_C_RESET << 16;
oxu->reset_done[wIndex] = 0;
/* force reset to complete */
@@ -3348,20 +3347,20 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
*/
if (temp & PORT_CONNECT) {
- status |= 1 << USB_PORT_FEAT_CONNECTION;
+ status |= USB_PORT_STAT_CONNECTION;
/* status may be from integrated TT */
status |= oxu_port_speed(oxu, temp);
}
if (temp & PORT_PE)
- status |= 1 << USB_PORT_FEAT_ENABLE;
+ status |= USB_PORT_STAT_ENABLE;
if (temp & (PORT_SUSPEND|PORT_RESUME))
- status |= 1 << USB_PORT_FEAT_SUSPEND;
+ status |= USB_PORT_STAT_SUSPEND;
if (temp & PORT_OC)
- status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
+ status |= USB_PORT_STAT_OVERCURRENT;
if (temp & PORT_RESET)
- status |= 1 << USB_PORT_FEAT_RESET;
+ status |= USB_PORT_STAT_RESET;
if (temp & PORT_POWER)
- status |= 1 << USB_PORT_FEAT_POWER;
+ status |= USB_PORT_STAT_POWER;
#ifndef OXU_VERBOSE_DEBUG
if (status & ~0xffff) /* only if wPortChange is interesting */
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index d478ffa..6db57ab 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -33,6 +33,7 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/mm.h>
@@ -40,7 +41,6 @@
#include <linux/slab.h>
#include <asm/cacheflush.h>
-#include "../core/hcd.h"
#include "r8a66597.h"
MODULE_DESCRIPTION("R8A66597 USB Host Controller Driver");
@@ -1018,10 +1018,10 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
rh->scount = R8A66597_MAX_SAMPLING;
if (connect)
- rh->port |= 1 << USB_PORT_FEAT_CONNECTION;
+ rh->port |= USB_PORT_STAT_CONNECTION;
else
- rh->port &= ~(1 << USB_PORT_FEAT_CONNECTION);
- rh->port |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ rh->port &= ~USB_PORT_STAT_CONNECTION;
+ rh->port |= USB_PORT_STAT_C_CONNECTION << 16;
r8a66597_root_hub_start_polling(r8a66597);
}
@@ -1059,15 +1059,14 @@ static void r8a66597_usb_connect(struct r8a66597 *r8a66597, int port)
u16 speed = get_rh_usb_speed(r8a66597, port);
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
- rh->port &= ~((1 << USB_PORT_FEAT_HIGHSPEED) |
- (1 << USB_PORT_FEAT_LOWSPEED));
+ rh->port &= ~(USB_PORT_STAT_HIGH_SPEED | USB_PORT_STAT_LOW_SPEED);
if (speed == HSMODE)
- rh->port |= (1 << USB_PORT_FEAT_HIGHSPEED);
+ rh->port |= USB_PORT_STAT_HIGH_SPEED;
else if (speed == LSMODE)
- rh->port |= (1 << USB_PORT_FEAT_LOWSPEED);
+ rh->port |= USB_PORT_STAT_LOW_SPEED;
- rh->port &= ~(1 << USB_PORT_FEAT_RESET);
- rh->port |= 1 << USB_PORT_FEAT_ENABLE;
+ rh->port &= ~USB_PORT_STAT_RESET;
+ rh->port |= USB_PORT_STAT_ENABLE;
}
/* this function must be called with interrupt disabled */
@@ -1706,7 +1705,7 @@ static void r8a66597_root_hub_control(struct r8a66597 *r8a66597, int port)
u16 tmp;
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
- if (rh->port & (1 << USB_PORT_FEAT_RESET)) {
+ if (rh->port & USB_PORT_STAT_RESET) {
unsigned long dvstctr_reg = get_dvstctr_reg(port);
tmp = r8a66597_read(r8a66597, dvstctr_reg);
@@ -1718,7 +1717,7 @@ static void r8a66597_root_hub_control(struct r8a66597 *r8a66597, int port)
r8a66597_usb_connect(r8a66597, port);
}
- if (!(rh->port & (1 << USB_PORT_FEAT_CONNECTION))) {
+ if (!(rh->port & USB_PORT_STAT_CONNECTION)) {
r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
}
@@ -2186,7 +2185,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
- rh->port &= ~(1 << USB_PORT_FEAT_POWER);
+ rh->port &= ~USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_SUSPEND:
break;
@@ -2227,12 +2226,12 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case USB_PORT_FEAT_POWER:
r8a66597_port_power(r8a66597, port, 1);
- rh->port |= (1 << USB_PORT_FEAT_POWER);
+ rh->port |= USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_RESET: {
struct r8a66597_device *dev = rh->dev;
- rh->port |= (1 << USB_PORT_FEAT_RESET);
+ rh->port |= USB_PORT_STAT_RESET;
disable_r8a66597_pipe_all(r8a66597, dev);
free_usb_address(r8a66597, dev, 1);
@@ -2270,12 +2269,12 @@ static int r8a66597_bus_suspend(struct usb_hcd *hcd)
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
unsigned long dvstctr_reg = get_dvstctr_reg(port);
- if (!(rh->port & (1 << USB_PORT_FEAT_ENABLE)))
+ if (!(rh->port & USB_PORT_STAT_ENABLE))
continue;
dbg("suspend port = %d", port);
r8a66597_bclr(r8a66597, UACT, dvstctr_reg); /* suspend */
- rh->port |= 1 << USB_PORT_FEAT_SUSPEND;
+ rh->port |= USB_PORT_STAT_SUSPEND;
if (rh->dev->udev->do_remote_wakeup) {
msleep(3); /* waiting last SOF */
@@ -2301,12 +2300,12 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
unsigned long dvstctr_reg = get_dvstctr_reg(port);
- if (!(rh->port & (1 << USB_PORT_FEAT_SUSPEND)))
+ if (!(rh->port & USB_PORT_STAT_SUSPEND))
continue;
dbg("resume port = %d", port);
- rh->port &= ~(1 << USB_PORT_FEAT_SUSPEND);
- rh->port |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ rh->port &= ~USB_PORT_STAT_SUSPEND;
+ rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
msleep(50);
r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 3b867a8..bcf9f0e 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -45,6 +45,7 @@
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/sl811.h>
+#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <asm/io.h>
@@ -53,7 +54,6 @@
#include <asm/byteorder.h>
#include <asm/unaligned.h>
-#include "../core/hcd.h"
#include "sl811.h"
@@ -90,10 +90,10 @@ static void port_power(struct sl811 *sl811, int is_on)
/* hub is inactive unless the port is powered */
if (is_on) {
- if (sl811->port1 & (1 << USB_PORT_FEAT_POWER))
+ if (sl811->port1 & USB_PORT_STAT_POWER)
return;
- sl811->port1 = (1 << USB_PORT_FEAT_POWER);
+ sl811->port1 = USB_PORT_STAT_POWER;
sl811->irq_enable = SL11H_INTMASK_INSRMV;
} else {
sl811->port1 = 0;
@@ -407,7 +407,7 @@ static struct sl811h_ep *start(struct sl811 *sl811, u8 bank)
static inline void start_transfer(struct sl811 *sl811)
{
- if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND))
+ if (sl811->port1 & USB_PORT_STAT_SUSPEND)
return;
if (sl811->active_a == NULL) {
sl811->active_a = start(sl811, SL811_EP_A(SL811_HOST_BUF));
@@ -721,23 +721,23 @@ retry:
* force the reset and make khubd clean up later.
*/
if (irqstat & SL11H_INTMASK_RD)
- sl811->port1 &= ~(1 << USB_PORT_FEAT_CONNECTION);
+ sl811->port1 &= ~USB_PORT_STAT_CONNECTION;
else
- sl811->port1 |= 1 << USB_PORT_FEAT_CONNECTION;
+ sl811->port1 |= USB_PORT_STAT_CONNECTION;
- sl811->port1 |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ sl811->port1 |= USB_PORT_STAT_C_CONNECTION << 16;
} else if (irqstat & SL11H_INTMASK_RD) {
- if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)) {
+ if (sl811->port1 & USB_PORT_STAT_SUSPEND) {
DBG("wakeup\n");
- sl811->port1 |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ sl811->port1 |= USB_PORT_STAT_C_SUSPEND << 16;
sl811->stat_wake++;
} else
irqstat &= ~SL11H_INTMASK_RD;
}
if (irqstat) {
- if (sl811->port1 & (1 << USB_PORT_FEAT_ENABLE))
+ if (sl811->port1 & USB_PORT_STAT_ENABLE)
start_transfer(sl811);
ret = IRQ_HANDLED;
if (retries--)
@@ -819,7 +819,7 @@ static int sl811h_urb_enqueue(
spin_lock_irqsave(&sl811->lock, flags);
/* don't submit to a dead or disabled port */
- if (!(sl811->port1 & (1 << USB_PORT_FEAT_ENABLE))
+ if (!(sl811->port1 & USB_PORT_STAT_ENABLE)
|| !HC_IS_RUNNING(hcd->state)) {
retval = -ENODEV;
kfree(ep);
@@ -1119,9 +1119,9 @@ sl811h_timer(unsigned long _sl811)
unsigned long flags;
u8 irqstat;
u8 signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE;
- const u32 mask = (1 << USB_PORT_FEAT_CONNECTION)
- | (1 << USB_PORT_FEAT_ENABLE)
- | (1 << USB_PORT_FEAT_LOWSPEED);
+ const u32 mask = USB_PORT_STAT_CONNECTION
+ | USB_PORT_STAT_ENABLE
+ | USB_PORT_STAT_LOW_SPEED;
spin_lock_irqsave(&sl811->lock, flags);
@@ -1135,8 +1135,8 @@ sl811h_timer(unsigned long _sl811)
switch (signaling) {
case SL11H_CTL1MASK_SE0:
DBG("end reset\n");
- sl811->port1 = (1 << USB_PORT_FEAT_C_RESET)
- | (1 << USB_PORT_FEAT_POWER);
+ sl811->port1 = (USB_PORT_STAT_C_RESET << 16)
+ | USB_PORT_STAT_POWER;
sl811->ctrl1 = 0;
/* don't wrongly ack RD */
if (irqstat & SL11H_INTMASK_INSRMV)
@@ -1144,7 +1144,7 @@ sl811h_timer(unsigned long _sl811)
break;
case SL11H_CTL1MASK_K:
DBG("end resume\n");
- sl811->port1 &= ~(1 << USB_PORT_FEAT_SUSPEND);
+ sl811->port1 &= ~USB_PORT_STAT_SUSPEND;
break;
default:
DBG("odd timer signaling: %02x\n", signaling);
@@ -1154,26 +1154,26 @@ sl811h_timer(unsigned long _sl811)
if (irqstat & SL11H_INTMASK_RD) {
/* usbcore nukes all pending transactions on disconnect */
- if (sl811->port1 & (1 << USB_PORT_FEAT_CONNECTION))
- sl811->port1 |= (1 << USB_PORT_FEAT_C_CONNECTION)
- | (1 << USB_PORT_FEAT_C_ENABLE);
+ if (sl811->port1 & USB_PORT_STAT_CONNECTION)
+ sl811->port1 |= (USB_PORT_STAT_C_CONNECTION << 16)
+ | (USB_PORT_STAT_C_ENABLE << 16);
sl811->port1 &= ~mask;
sl811->irq_enable = SL11H_INTMASK_INSRMV;
} else {
sl811->port1 |= mask;
if (irqstat & SL11H_INTMASK_DP)
- sl811->port1 &= ~(1 << USB_PORT_FEAT_LOWSPEED);
+ sl811->port1 &= ~USB_PORT_STAT_LOW_SPEED;
sl811->irq_enable = SL11H_INTMASK_INSRMV | SL11H_INTMASK_RD;
}
- if (sl811->port1 & (1 << USB_PORT_FEAT_CONNECTION)) {
+ if (sl811->port1 & USB_PORT_STAT_CONNECTION) {
u8 ctrl2 = SL811HS_CTL2_INIT;
sl811->irq_enable |= SL11H_INTMASK_DONE_A;
#ifdef USE_B
sl811->irq_enable |= SL11H_INTMASK_DONE_B;
#endif
- if (sl811->port1 & (1 << USB_PORT_FEAT_LOWSPEED)) {
+ if (sl811->port1 & USB_PORT_STAT_LOW_SPEED) {
sl811->ctrl1 |= SL11H_CTL1MASK_LSPD;
ctrl2 |= SL811HS_CTL2MASK_DSWAP;
}
@@ -1233,7 +1233,7 @@ sl811h_hub_control(
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
- sl811->port1 &= (1 << USB_PORT_FEAT_POWER);
+ sl811->port1 &= USB_PORT_STAT_POWER;
sl811->ctrl1 = 0;
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
sl811->irq_enable = SL11H_INTMASK_INSRMV;
@@ -1241,7 +1241,7 @@ sl811h_hub_control(
sl811->irq_enable);
break;
case USB_PORT_FEAT_SUSPEND:
- if (!(sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)))
+ if (!(sl811->port1 & USB_PORT_STAT_SUSPEND))
break;
/* 20 msec of resume/K signaling, other irqs blocked */
@@ -1290,9 +1290,9 @@ sl811h_hub_control(
goto error;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- if (sl811->port1 & (1 << USB_PORT_FEAT_RESET))
+ if (sl811->port1 & USB_PORT_STAT_RESET)
goto error;
- if (!(sl811->port1 & (1 << USB_PORT_FEAT_ENABLE)))
+ if (!(sl811->port1 & USB_PORT_STAT_ENABLE))
goto error;
DBG("suspend...\n");
@@ -1303,9 +1303,9 @@ sl811h_hub_control(
port_power(sl811, 1);
break;
case USB_PORT_FEAT_RESET:
- if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND))
+ if (sl811->port1 & USB_PORT_STAT_SUSPEND)
goto error;
- if (!(sl811->port1 & (1 << USB_PORT_FEAT_POWER)))
+ if (!(sl811->port1 & USB_PORT_STAT_POWER))
break;
/* 50 msec of reset/SE0 signaling, irqs blocked */
@@ -1314,7 +1314,7 @@ sl811h_hub_control(
sl811->irq_enable);
sl811->ctrl1 = SL11H_CTL1MASK_SE0;
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
- sl811->port1 |= (1 << USB_PORT_FEAT_RESET);
+ sl811->port1 |= USB_PORT_STAT_RESET;
mod_timer(&sl811->timer, jiffies
+ msecs_to_jiffies(50));
break;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 228f2b0..5b31bae 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -49,6 +49,7 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
@@ -56,7 +57,6 @@
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/byteorder.h>
-#include "../core/hcd.h"
/* FIXME ohci.h is ONLY for internal use by the OHCI driver.
* If you're going to try stuff like this, you need to split
@@ -1446,9 +1446,9 @@ static void u132_hcd_endp_work_scheduler(struct work_struct *work)
return;
} else {
int retval;
- u8 address = u132->addr[endp->usb_addr].address;
struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
endp->queue_next];
+ address = u132->addr[endp->usb_addr].address;
endp->active = 1;
ring->curr_endp = endp;
ring->in_use = 1;
@@ -3120,8 +3120,8 @@ static int __devinit u132_probe(struct platform_device *pdev)
ftdi_elan_gone_away(pdev);
return -ENOMEM;
} else {
- int retval = 0;
struct u132 *u132 = hcd_to_u132(hcd);
+ retval = 0;
hcd->rsrc_start = 0;
mutex_lock(&u132_module_lock);
list_add_tail(&u132->u132_list, &u132_static_list);
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 0919706..6637e52 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -38,6 +38,7 @@
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/bitops.h>
#include <linux/dmi.h>
@@ -46,7 +47,6 @@
#include <asm/irq.h>
#include <asm/system.h>
-#include "../core/hcd.h"
#include "uhci-hcd.h"
#include "pci-quirks.h"
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
index c5305b5..767af26 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/usb/host/whci/debug.c
@@ -30,7 +30,7 @@ struct whc_dbg {
struct dentry *pzl_f;
};
-void qset_print(struct seq_file *s, struct whc_qset *qset)
+static void qset_print(struct seq_file *s, struct whc_qset *qset)
{
static const char *qh_type[] = {
"ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr", };
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index 141d049..ab5a14f 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -443,7 +443,7 @@ static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *u
remaining = urb->transfer_buffer_length;
- for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) {
+ for_each_sg(urb->sg, sg, urb->num_sgs, i) {
dma_addr_t dma_addr;
size_t dma_remaining;
dma_addr_t sp, ep;
@@ -561,7 +561,7 @@ static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
remaining = urb->transfer_buffer_length;
- for_each_sg(urb->sg->sg, sg, urb->sg->nents, i) {
+ for_each_sg(urb->sg, sg, urb->num_sgs, i) {
size_t len;
size_t sg_remaining;
void *orig;
@@ -646,7 +646,7 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
wurb->urb = urb;
INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
- if (urb->sg) {
+ if (urb->num_sgs) {
ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
if (ret == -EINVAL) {
qset_free_stds(qset, urb);
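
The qset changes above track the USB core's switch to passing a bare scatterlist in the URB: urb->sg now points at the first scatterlist entry and urb->num_sgs carries the element count, so for_each_sg() can walk it directly. A minimal sketch of that iteration under those assumptions (illustrative only, not part of the patch):

#include <linux/scatterlist.h>
#include <linux/usb.h>

/* Sum the bytes described by an urb's scatter-gather list. */
static size_t urb_sg_total_len(struct urb *urb)
{
        struct scatterlist *sg;
        size_t total = 0;
        int i;

        for_each_sg(urb->sg, sg, urb->num_sgs, i)
                total += sg->length;

        return total;
}
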
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 105fa8b..fcbf4ab 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -364,6 +364,30 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
xhci_debug_segment(xhci, seg);
}
+void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_virt_ep *ep)
+{
+ int i;
+ struct xhci_ring *ring;
+
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ for (i = 1; i < ep->stream_info->num_streams; i++) {
+ ring = ep->stream_info->stream_rings[i];
+ xhci_dbg(xhci, "Dev %d endpoint %d stream ID %d:\n",
+ slot_id, ep_index, i);
+ xhci_debug_segment(xhci, ring->deq_seg);
+ }
+ } else {
+ ring = ep->ring;
+ if (!ring)
+ return;
+ xhci_dbg(xhci, "Dev %d endpoint ring %d:\n",
+ slot_id, ep_index);
+ xhci_debug_segment(xhci, ring->deq_seg);
+ }
+}
+
void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
u32 addr = (u32) erst->erst_dma_addr;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 208b805..a1a7a97 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -64,15 +64,15 @@ static void xhci_hub_descriptor(struct xhci_hcd *xhci,
static unsigned int xhci_port_speed(unsigned int port_status)
{
if (DEV_LOWSPEED(port_status))
- return 1 << USB_PORT_FEAT_LOWSPEED;
+ return USB_PORT_STAT_LOW_SPEED;
if (DEV_HIGHSPEED(port_status))
- return 1 << USB_PORT_FEAT_HIGHSPEED;
+ return USB_PORT_STAT_HIGH_SPEED;
if (DEV_SUPERSPEED(port_status))
- return 1 << USB_PORT_FEAT_SUPERSPEED;
+ return USB_PORT_STAT_SUPER_SPEED;
/*
* FIXME: Yes, we should check for full speed, but the core uses that as
* a default in portspeed() in usb/core/hub.c (which is the only place
- * USB_PORT_FEAT_*SPEED is used).
+ * USB_PORT_STAT_*_SPEED is used).
*/
return 0;
}
@@ -205,27 +205,27 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* wPortChange bits */
if (temp & PORT_CSC)
- status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
if (temp & PORT_PEC)
- status |= 1 << USB_PORT_FEAT_C_ENABLE;
+ status |= USB_PORT_STAT_C_ENABLE << 16;
if ((temp & PORT_OCC))
- status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
/*
* FIXME ignoring suspend, reset, and USB 2.1/3.0 specific
* changes
*/
if (temp & PORT_CONNECT) {
- status |= 1 << USB_PORT_FEAT_CONNECTION;
+ status |= USB_PORT_STAT_CONNECTION;
status |= xhci_port_speed(temp);
}
if (temp & PORT_PE)
- status |= 1 << USB_PORT_FEAT_ENABLE;
+ status |= USB_PORT_STAT_ENABLE;
if (temp & PORT_OC)
- status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
+ status |= USB_PORT_STAT_OVERCURRENT;
if (temp & PORT_RESET)
- status |= 1 << USB_PORT_FEAT_RESET;
+ status |= USB_PORT_STAT_RESET;
if (temp & PORT_POWER)
- status |= 1 << USB_PORT_FEAT_POWER;
+ status |= USB_PORT_STAT_POWER;
xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
break;
@@ -298,7 +298,6 @@ error:
* Returns 0 if the status hasn't changed, or the number of bytes in buf.
* Ports are 0-indexed from the HCD point of view,
* and 1-indexed from the USB core point of view.
- * xHCI instances can have up to 127 ports, so FIXME if you see more than 15.
*
* Note that the status change bits will be cleared as soon as a port status
* change event is generated, so we use the saved status from that event.
@@ -315,14 +314,9 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
ports = HCS_MAX_PORTS(xhci->hcs_params1);
/* Initial status is no changes */
- buf[0] = 0;
+ retval = (ports + 8) / 8;
+ memset(buf, 0, retval);
status = 0;
- if (ports > 7) {
- buf[1] = 0;
- retval = 2;
- } else {
- retval = 1;
- }
spin_lock_irqsave(&xhci->lock, flags);
/* For each port, did anything change? If so, set that bit in buf. */
@@ -331,10 +325,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
NUM_PORT_REGS*i;
temp = xhci_readl(xhci, addr);
if (temp & (PORT_CSC | PORT_PEC | PORT_OCC)) {
- if (i < 7)
- buf[0] |= 1 << (i + 1);
- else
- buf[1] |= 1 << (i - 7);
+ buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
status = 1;
}
}
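
As an aside on the new status-bitmap indexing above: bit 0 of the buffer is reserved for the hub itself, so port i (0-based in the HCD) sets bit i + 1, landing in byte (i + 1) / 8. A standalone sketch of that arithmetic with made-up port numbers, for illustration only:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[16];
	unsigned int ports = 20;			/* assumed root hub port count */
	unsigned int changed[] = { 0, 6, 7, 15 };	/* 0-based ports with CSC/PEC/OCC set */
	unsigned int retval = (ports + 8) / 8;		/* bytes for the ports plus the hub change bit */
	unsigned int i, k;

	memset(buf, 0, retval);
	for (k = 0; k < sizeof(changed) / sizeof(changed[0]); k++) {
		i = changed[k];
		buf[(i + 1) / 8] |= 1 << (i + 1) % 8;	/* same expression as the driver */
	}
	for (i = 0; i < retval; i++)
		printf("buf[%u] = 0x%02x\n", i, buf[i]);
	return 0;
}
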
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d64f572..fd9e03a 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -41,13 +41,13 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
seg = kzalloc(sizeof *seg, flags);
if (!seg)
- return 0;
+ return NULL;
xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
if (!seg->trbs) {
kfree(seg);
- return 0;
+ return NULL;
}
xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
seg->trbs, (unsigned long long)dma);
@@ -159,7 +159,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
ring = kzalloc(sizeof *(ring), flags);
xhci_dbg(xhci, "Allocating ring at %p\n", ring);
if (!ring)
- return 0;
+ return NULL;
INIT_LIST_HEAD(&ring->td_list);
if (num_segs == 0)
@@ -196,7 +196,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
fail:
xhci_ring_free(xhci, ring);
- return 0;
+ return NULL;
}
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
@@ -247,7 +247,7 @@ static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
-struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
int type, gfp_t flags)
{
struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
@@ -265,7 +265,7 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
return ctx;
}
-void xhci_free_container_ctx(struct xhci_hcd *xhci,
+static void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
if (!ctx)
@@ -304,6 +304,422 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
+
+/***************** Streams structures manipulation *************************/
+
+void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+ unsigned int num_stream_ctxs,
+ struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
+{
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+ if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
+ pci_free_consistent(pdev,
+ sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+ stream_ctx, dma);
+ else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
+ return dma_pool_free(xhci->small_streams_pool,
+ stream_ctx, dma);
+ else
+ return dma_pool_free(xhci->medium_streams_pool,
+ stream_ctx, dma);
+}
+
+/*
+ * The stream context array for each endpoint with bulk streams enabled can
+ * vary in size, based on:
+ * - how many streams the endpoint supports,
+ * - the maximum primary stream array size the host controller supports,
+ * - and how many streams the device driver asks for.
+ *
+ * The stream context array must be a power of 2, and can be as small as
+ * 64 bytes or as large as 1MB.
+ */
+struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+ unsigned int num_stream_ctxs, dma_addr_t *dma,
+ gfp_t mem_flags)
+{
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+ if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
+ return pci_alloc_consistent(pdev,
+ sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+ dma);
+ else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
+ return dma_pool_alloc(xhci->small_streams_pool,
+ mem_flags, dma);
+ else
+ return dma_pool_alloc(xhci->medium_streams_pool,
+ mem_flags, dma);
+}
+
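
A minimal userspace sketch of the three allocation tiers picked above. The SMALL/MEDIUM values here are assumptions inferred from the 256-byte and 1KB dma_pool names created in xhci_mem_init() later in this patch, not quotes of the header definitions:

#include <stdio.h>

#define SMALL_STREAM_ARRAY_SIZE 256	/* assumed from the "256 byte" pool name */
#define MEDIUM_STREAM_ARRAY_SIZE 1024	/* assumed from the "1KB" pool name */

/* Mirror the branches above: which backing store a stream context
 * array with this many entries would come from.
 */
static const char *stream_ctx_backing(unsigned int num_stream_ctxs)
{
	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		return "pci_alloc_consistent";
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		return "small_streams_pool";
	else
		return "medium_streams_pool";
}

int main(void)
{
	unsigned int sizes[] = { 4, 64, 256, 512, 2048 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%4u ctx entries -> %s\n", sizes[i], stream_ctx_backing(sizes[i]));
	return 0;
}
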
+struct xhci_ring *xhci_dma_to_transfer_ring(
+ struct xhci_virt_ep *ep,
+ u64 address)
+{
+ if (ep->ep_state & EP_HAS_STREAMS)
+ return radix_tree_lookup(&ep->stream_info->trb_address_map,
+ address >> SEGMENT_SHIFT);
+ return ep->ring;
+}
+
+/* Only use this when you know stream_info is valid */
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+static struct xhci_ring *dma_to_stream_ring(
+ struct xhci_stream_info *stream_info,
+ u64 address)
+{
+ return radix_tree_lookup(&stream_info->trb_address_map,
+ address >> SEGMENT_SHIFT);
+}
+#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
+
+struct xhci_ring *xhci_stream_id_to_ring(
+ struct xhci_virt_device *dev,
+ unsigned int ep_index,
+ unsigned int stream_id)
+{
+ struct xhci_virt_ep *ep = &dev->eps[ep_index];
+
+ if (stream_id == 0)
+ return ep->ring;
+ if (!ep->stream_info)
+ return NULL;
+
+ if (stream_id > ep->stream_info->num_streams)
+ return NULL;
+ return ep->stream_info->stream_rings[stream_id];
+}
+
+struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id)
+{
+ struct xhci_virt_ep *ep;
+
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ /* Common case: no streams */
+ if (!(ep->ep_state & EP_HAS_STREAMS))
+ return ep->ring;
+
+ if (stream_id == 0) {
+ xhci_warn(xhci,
+ "WARN: Slot ID %u, ep index %u has streams, "
+ "but URB has no stream ID.\n",
+ slot_id, ep_index);
+ return NULL;
+ }
+
+ if (stream_id < ep->stream_info->num_streams)
+ return ep->stream_info->stream_rings[stream_id];
+
+ xhci_warn(xhci,
+ "WARN: Slot ID %u, ep index %u has "
+ "stream IDs 1 to %u allocated, "
+ "but stream ID %u is requested.\n",
+ slot_id, ep_index,
+ ep->stream_info->num_streams - 1,
+ stream_id);
+ return NULL;
+}
+
+/* Get the right ring for the given URB.
+ * If the endpoint supports streams, boundary check the URB's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+ struct urb *urb)
+{
+ return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
+ xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
+}
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+static int xhci_test_radix_tree(struct xhci_hcd *xhci,
+ unsigned int num_streams,
+ struct xhci_stream_info *stream_info)
+{
+ u32 cur_stream;
+ struct xhci_ring *cur_ring;
+ u64 addr;
+
+ for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+ struct xhci_ring *mapped_ring;
+ int trb_size = sizeof(union xhci_trb);
+
+ cur_ring = stream_info->stream_rings[cur_stream];
+ for (addr = cur_ring->first_seg->dma;
+ addr < cur_ring->first_seg->dma + SEGMENT_SIZE;
+ addr += trb_size) {
+ mapped_ring = dma_to_stream_ring(stream_info, addr);
+ if (cur_ring != mapped_ring) {
+ xhci_warn(xhci, "WARN: DMA address 0x%08llx "
+ "didn't map to stream ID %u; "
+ "mapped to ring %p\n",
+ (unsigned long long) addr,
+ cur_stream,
+ mapped_ring);
+ return -EINVAL;
+ }
+ }
+ /* One TRB after the end of the ring segment shouldn't return a
+ * pointer to the current ring (although it may be a part of a
+ * different ring).
+ */
+ mapped_ring = dma_to_stream_ring(stream_info, addr);
+ if (mapped_ring != cur_ring) {
+ /* One TRB before should also fail */
+ addr = cur_ring->first_seg->dma - trb_size;
+ mapped_ring = dma_to_stream_ring(stream_info, addr);
+ }
+ if (mapped_ring == cur_ring) {
+ xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
+ "mapped to valid stream ID %u; "
+ "mapped ring = %p\n",
+ (unsigned long long) addr,
+ cur_stream,
+ mapped_ring);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
+
+/*
+ * Change an endpoint's internal structure so it supports stream IDs. The
+ * number of requested streams includes stream 0, which cannot be used by device
+ * drivers.
+ *
+ * The number of stream contexts in the stream context array may be bigger than
+ * the number of streams the driver wants to use. This is because the number of
+ * stream context array entries must be a power of two.
+ *
+ * We need a radix tree for mapping physical addresses of TRBs to which stream
+ * ID they belong to. We need to do this because the host controller won't tell
+ * us which stream ring the TRB came from. We could store the stream ID in an
+ * event data TRB, but that doesn't help us for the cancellation case, since the
+ * endpoint may stop before it reaches that event data TRB.
+ *
+ * The radix tree maps the upper portion of the TRB DMA address to a ring
+ * segment that has the same upper portion of DMA addresses. For example, say I
+ * have segments of size 1KB, that are always 64-byte aligned. A segment may
+ * start at 0x10c91000 and end at 0x10c913f0. If I drop the low 10 bits, the
+ * key for that segment is 0x43244. I can use the DMA address of the TRB to
+ * pass the radix tree a key to get the right stream ID:
+ *
+ * 0x10c90fff >> 10 = 0x43243
+ * 0x10c912c0 >> 10 = 0x43244
+ * 0x10c91400 >> 10 = 0x43245
+ *
+ * Obviously, only those TRBs with DMA addresses that are within the segment
+ * will make the radix tree return the stream ID for that ring.
+ *
+ * Caveats for the radix tree:
+ *
+ * The radix tree uses an unsigned long as a key. On 32-bit systems, an
+ * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
+ * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
+ * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
+ * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
+ * extended systems (where the DMA address can be bigger than 32-bits),
+ * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
+ */
+struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
+ unsigned int num_stream_ctxs,
+ unsigned int num_streams, gfp_t mem_flags)
+{
+ struct xhci_stream_info *stream_info;
+ u32 cur_stream;
+ struct xhci_ring *cur_ring;
+ unsigned long key;
+ u64 addr;
+ int ret;
+
+ xhci_dbg(xhci, "Allocating %u streams and %u "
+ "stream context array entries.\n",
+ num_streams, num_stream_ctxs);
+ if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
+ xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
+ return NULL;
+ }
+ xhci->cmd_ring_reserved_trbs++;
+
+ stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
+ if (!stream_info)
+ goto cleanup_trbs;
+
+ stream_info->num_streams = num_streams;
+ stream_info->num_stream_ctxs = num_stream_ctxs;
+
+ /* Initialize the array of virtual pointers to stream rings. */
+ stream_info->stream_rings = kzalloc(
+ sizeof(struct xhci_ring *)*num_streams,
+ mem_flags);
+ if (!stream_info->stream_rings)
+ goto cleanup_info;
+
+ /* Initialize the array of DMA addresses for stream rings for the HW. */
+ stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
+ num_stream_ctxs, &stream_info->ctx_array_dma,
+ mem_flags);
+ if (!stream_info->stream_ctx_array)
+ goto cleanup_ctx;
+ memset(stream_info->stream_ctx_array, 0,
+ sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
+
+ /* Allocate everything needed to free the stream rings later */
+ stream_info->free_streams_command =
+ xhci_alloc_command(xhci, true, true, mem_flags);
+ if (!stream_info->free_streams_command)
+ goto cleanup_ctx;
+
+ INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
+
+ /* Allocate rings for all the streams that the driver will use,
+ * and add their segment DMA addresses to the radix tree.
+ * Stream 0 is reserved.
+ */
+ for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+ stream_info->stream_rings[cur_stream] =
+ xhci_ring_alloc(xhci, 1, true, mem_flags);
+ cur_ring = stream_info->stream_rings[cur_stream];
+ if (!cur_ring)
+ goto cleanup_rings;
+ cur_ring->stream_id = cur_stream;
+ /* Set deq ptr, cycle bit, and stream context type */
+ addr = cur_ring->first_seg->dma |
+ SCT_FOR_CTX(SCT_PRI_TR) |
+ cur_ring->cycle_state;
+ stream_info->stream_ctx_array[cur_stream].stream_ring = addr;
+ xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
+ cur_stream, (unsigned long long) addr);
+
+ key = (unsigned long)
+ (cur_ring->first_seg->dma >> SEGMENT_SHIFT);
+ ret = radix_tree_insert(&stream_info->trb_address_map,
+ key, cur_ring);
+ if (ret) {
+ xhci_ring_free(xhci, cur_ring);
+ stream_info->stream_rings[cur_stream] = NULL;
+ goto cleanup_rings;
+ }
+ }
+ /* Leave the other unused stream ring pointers in the stream context
+ * array initialized to zero. This will cause the xHC to give us an
+ * error if the device asks for a stream ID we don't have set up (if it
+ * was any other way, the host controller would assume the ring is
+ * "empty" and wait forever for data to be queued to that stream ID).
+ */
+#if XHCI_DEBUG
+ /* Do a little test on the radix tree to make sure it returns the
+ * correct values.
+ */
+ if (xhci_test_radix_tree(xhci, num_streams, stream_info))
+ goto cleanup_rings;
+#endif
+
+ return stream_info;
+
+cleanup_rings:
+ for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+ cur_ring = stream_info->stream_rings[cur_stream];
+ if (cur_ring) {
+ addr = cur_ring->first_seg->dma;
+ radix_tree_delete(&stream_info->trb_address_map,
+ addr >> SEGMENT_SHIFT);
+ xhci_ring_free(xhci, cur_ring);
+ stream_info->stream_rings[cur_stream] = NULL;
+ }
+ }
+ xhci_free_command(xhci, stream_info->free_streams_command);
+cleanup_ctx:
+ kfree(stream_info->stream_rings);
+cleanup_info:
+ kfree(stream_info);
+cleanup_trbs:
+ xhci->cmd_ring_reserved_trbs--;
+ return NULL;
+}
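
The worked example in the comment above can be reproduced with a tiny standalone sketch; the SEGMENT_SHIFT of 10 is assumed to match the 1KB-segment example rather than copied from xhci.h:

#include <stdio.h>
#include <stdint.h>

#define SEGMENT_SHIFT 10	/* assumed: 64 TRBs * 16 bytes = 1KB segments */

int main(void)
{
	/* DMA addresses from the worked example in the comment above */
	uint64_t addrs[] = { 0x10c90fffULL, 0x10c91000ULL, 0x10c912c0ULL, 0x10c91400ULL };
	unsigned int i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printf("TRB at 0x%08llx -> radix key 0x%05llx\n",
			(unsigned long long)addrs[i],
			(unsigned long long)(addrs[i] >> SEGMENT_SHIFT));
	return 0;
}

Only 0x10c90fff maps to key 0x43243, the two addresses inside the segment map to 0x43244, and the first TRB of the next segment maps to 0x43245, matching the comment.
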
+/*
+ * Sets the MaxPStreams field and the Linear Stream Array field.
+ * Sets the dequeue pointer to the stream context array.
+ */
+void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
+ struct xhci_ep_ctx *ep_ctx,
+ struct xhci_stream_info *stream_info)
+{
+ u32 max_primary_streams;
+ /* MaxPStreams is the number of stream context array entries, not the
+ * number we're actually using. Must be in 2^(MaxPStreams + 1) format.
+ * fls(0) = 0, fls(0x1) = 1, fls(0x2) = 2, fls(0x4) = 3, etc.
+ */
+ max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
+ xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
+ 1 << (max_primary_streams + 1));
+ ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
+ ep_ctx->ep_info |= EP_MAXPSTREAMS(max_primary_streams);
+ ep_ctx->ep_info |= EP_HAS_LSA;
+ ep_ctx->deq = stream_info->ctx_array_dma;
+}
+
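
A rough sketch of the MaxPStreams encoding applied above; fls_demo() is a stand-in for the kernel's fls() and the array sizes are arbitrary:

#include <stdio.h>

/* Stand-in for the kernel's fls(): 1-based index of the most
 * significant set bit, with fls(0) == 0.
 */
static int fls_demo(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int sizes[] = { 4, 8, 16, 256, 1024 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int n = sizes[i];
		unsigned int max_psa = fls_demo(n) - 2;	/* value written to MaxPStreams */

		printf("%4u ctx entries -> MaxPStreams %u -> 2^(%u + 1) = %u\n",
			n, max_psa, max_psa, 1u << (max_psa + 1));
	}
	return 0;
}
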
+/*
+ * Sets the MaxPStreams field and the Linear Stream Array field to 0.
+ * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
+ * not at the beginning of the ring).
+ */
+void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
+ struct xhci_ep_ctx *ep_ctx,
+ struct xhci_virt_ep *ep)
+{
+ dma_addr_t addr;
+ ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
+ ep_ctx->ep_info &= ~EP_HAS_LSA;
+ addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
+ ep_ctx->deq = addr | ep->ring->cycle_state;
+}
+
+/* Frees all stream contexts associated with the endpoint.
+ *
+ * Caller should fix the endpoint context streams fields.
+ */
+void xhci_free_stream_info(struct xhci_hcd *xhci,
+ struct xhci_stream_info *stream_info)
+{
+ int cur_stream;
+ struct xhci_ring *cur_ring;
+ dma_addr_t addr;
+
+ if (!stream_info)
+ return;
+
+ for (cur_stream = 1; cur_stream < stream_info->num_streams;
+ cur_stream++) {
+ cur_ring = stream_info->stream_rings[cur_stream];
+ if (cur_ring) {
+ addr = cur_ring->first_seg->dma;
+ radix_tree_delete(&stream_info->trb_address_map,
+ addr >> SEGMENT_SHIFT);
+ xhci_ring_free(xhci, cur_ring);
+ stream_info->stream_rings[cur_stream] = NULL;
+ }
+ }
+ xhci_free_command(xhci, stream_info->free_streams_command);
+ xhci->cmd_ring_reserved_trbs--;
+ if (stream_info->stream_ctx_array)
+ xhci_free_stream_ctx(xhci,
+ stream_info->num_stream_ctxs,
+ stream_info->stream_ctx_array,
+ stream_info->ctx_array_dma);
+
+ if (stream_info)
+ kfree(stream_info->stream_rings);
+ kfree(stream_info);
+}
+
+
+/***************** Device context manipulation *************************/
+
static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep)
{
@@ -328,9 +744,13 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
if (!dev)
return;
- for (i = 0; i < 31; ++i)
+ for (i = 0; i < 31; ++i) {
if (dev->eps[i].ring)
xhci_ring_free(xhci, dev->eps[i].ring);
+ if (dev->eps[i].stream_info)
+ xhci_free_stream_info(xhci,
+ dev->eps[i].stream_info);
+ }
if (dev->ring_cache) {
for (i = 0; i < dev->num_rings_cached; i++)
@@ -344,7 +764,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
xhci_free_container_ctx(xhci, dev->out_ctx);
kfree(xhci->devs[slot_id]);
- xhci->devs[slot_id] = 0;
+ xhci->devs[slot_id] = NULL;
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
@@ -590,9 +1010,9 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
- if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp)
+ if (udev->speed != USB_SPEED_SUPER)
return 0;
- return ep->ss_ep_comp->desc.bmAttributes;
+ return ep->ss_ep_comp.bmAttributes;
}
static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
@@ -641,13 +1061,8 @@ static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
usb_endpoint_xfer_bulk(&ep->desc))
return 0;
- if (udev->speed == USB_SPEED_SUPER) {
- if (ep->ss_ep_comp)
- return ep->ss_ep_comp->desc.wBytesPerInterval;
- xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
- /* Assume no bursts, no multiple opportunities to send. */
- return ep->desc.wMaxPacketSize;
- }
+ if (udev->speed == USB_SPEED_SUPER)
+ return ep->ss_ep_comp.wBytesPerInterval;
max_packet = ep->desc.wMaxPacketSize & 0x3ff;
max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
@@ -655,6 +1070,9 @@ static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
return max_packet * (max_burst + 1);
}
+/* Set up an endpoint with one ring segment. Do not allocate stream rings.
+ * Drivers will have to call usb_alloc_streams() to do that.
+ */
int xhci_endpoint_init(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_device *udev,
@@ -708,12 +1126,9 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
max_packet = ep->desc.wMaxPacketSize;
ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
/* dig out max burst from ep companion desc */
- if (!ep->ss_ep_comp) {
- xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
- max_packet = 0;
- } else {
- max_packet = ep->ss_ep_comp->desc.bMaxBurst;
- }
+ max_packet = ep->ss_ep_comp.bMaxBurst;
+ if (!max_packet)
+ xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
ep_ctx->ep_info2 |= MAX_BURST(max_packet);
break;
case USB_SPEED_HIGH:
@@ -1003,6 +1418,16 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
xhci->device_pool = NULL;
xhci_dbg(xhci, "Freed device context pool\n");
+ if (xhci->small_streams_pool)
+ dma_pool_destroy(xhci->small_streams_pool);
+ xhci->small_streams_pool = NULL;
+ xhci_dbg(xhci, "Freed small stream array pool\n");
+
+ if (xhci->medium_streams_pool)
+ dma_pool_destroy(xhci->medium_streams_pool);
+ xhci->medium_streams_pool = NULL;
+ xhci_dbg(xhci, "Freed medium stream array pool\n");
+
xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
if (xhci->dcbaa)
pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
@@ -1239,6 +1664,22 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
if (!xhci->segment_pool || !xhci->device_pool)
goto fail;
+ /* Linear stream context arrays don't have any boundary restrictions,
+ * and only need to be 16-byte aligned.
+ */
+ xhci->small_streams_pool =
+ dma_pool_create("xHCI 256 byte stream ctx arrays",
+ dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
+ xhci->medium_streams_pool =
+ dma_pool_create("xHCI 1KB stream ctx arrays",
+ dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
+ /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
+ * will be allocated with pci_alloc_consistent()
+ */
+
+ if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
+ goto fail;
+
/* Set up the command ring to have one segments for now. */
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
if (!xhci->cmd_ring)
@@ -1330,7 +1771,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
*/
init_completion(&xhci->addr_dev);
for (i = 0; i < MAX_HC_SLOTS; ++i)
- xhci->devs[i] = 0;
+ xhci->devs[i] = NULL;
if (scratchpad_alloc(xhci, flags))
goto fail;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 417d37a..edffd81 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -54,7 +54,7 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval;
- hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 1;
+ hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
xhci->cap_regs = hcd->regs;
xhci->op_regs = hcd->regs +
@@ -132,6 +132,8 @@ static const struct hc_driver xhci_pci_hc_driver = {
.urb_dequeue = xhci_urb_dequeue,
.alloc_dev = xhci_alloc_dev,
.free_dev = xhci_free_dev,
+ .alloc_streams = xhci_alloc_streams,
+ .free_streams = xhci_free_streams,
.add_endpoint = xhci_add_endpoint,
.drop_endpoint = xhci_drop_endpoint,
.endpoint_reset = xhci_endpoint_reset,
@@ -175,12 +177,12 @@ static struct pci_driver xhci_pci_driver = {
.shutdown = usb_hcd_pci_shutdown,
};
-int xhci_register_pci()
+int xhci_register_pci(void)
{
return pci_register_driver(&xhci_pci_driver);
}
-void xhci_unregister_pci()
+void xhci_unregister_pci(void)
{
pci_unregister_driver(&xhci_pci_driver);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 85d7e8f..36c858e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -112,6 +112,12 @@ static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
}
+static inline int enqueue_is_link_trb(struct xhci_ring *ring)
+{
+ struct xhci_link_trb *link = &ring->enqueue->link;
+ return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
+}
+
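
For reference, a standalone sketch of the masked type test used by enqueue_is_link_trb() and by the corrected comparisons later in this file. The bit positions (TRB type in bits 15:10, Link type 6, cycle bit in bit 0) are taken from the xHCI spec as an assumption, not copied from xhci.h:

#include <stdio.h>
#include <stdint.h>

#define TRB_TYPE_BITMASK 0xfc00u
#define TRB_TYPE(t) ((t) << 10)
#define TRB_LINK 6u

static int is_link_trb(uint32_t control)
{
	return (control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
}

int main(void)
{
	uint32_t trb_normal = TRB_TYPE(1) | 0x1;	/* Normal TRB with cycle bit set */
	uint32_t trb_link = TRB_TYPE(TRB_LINK) | 0x1;	/* Link TRB with cycle bit set */

	printf("0x%08x -> %s\n", trb_normal, is_link_trb(trb_normal) ? "link" : "not a link");
	printf("0x%08x -> %s\n", trb_link, is_link_trb(trb_link) ? "link" : "not a link");
	return 0;
}

The old comparisons of the form TRB_TYPE(control) == TRB_LINK shifted the whole control word up instead of masking out the type field, which is why those call sites are rewritten below.
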
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
* TRB is in a new segment. This does not skip over link TRBs, and it does not
* affect the ring dequeue or enqueue pointers.
@@ -193,20 +199,15 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
while (last_trb(xhci, ring, ring->enq_seg, next)) {
if (!consumer) {
if (ring != xhci->event_ring) {
- /* If we're not dealing with 0.95 hardware,
- * carry over the chain bit of the previous TRB
- * (which may mean the chain bit is cleared).
- */
- if (!xhci_link_trb_quirk(xhci)) {
- next->link.control &= ~TRB_CHAIN;
- next->link.control |= chain;
+ if (chain) {
+ next->link.control |= TRB_CHAIN;
+
+ /* Give this link TRB to the hardware */
+ wmb();
+ next->link.control ^= TRB_CYCLE;
+ } else {
+ break;
}
- /* Give this link TRB to the hardware */
- wmb();
- if (next->link.control & TRB_CYCLE)
- next->link.control &= (u32) ~TRB_CYCLE;
- else
- next->link.control |= (u32) TRB_CYCLE;
}
/* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
@@ -242,10 +243,34 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
int i;
union xhci_trb *enq = ring->enqueue;
struct xhci_segment *enq_seg = ring->enq_seg;
+ struct xhci_segment *cur_seg;
+ unsigned int left_on_ring;
+
+ /* If we are currently pointing to a link TRB, advance the
+ * enqueue pointer before checking for space */
+ while (last_trb(xhci, ring, enq_seg, enq)) {
+ enq_seg = enq_seg->next;
+ enq = enq_seg->trbs;
+ }
/* Check if ring is empty */
- if (enq == ring->dequeue)
+ if (enq == ring->dequeue) {
+ /* Can't use link trbs */
+ left_on_ring = TRBS_PER_SEGMENT - 1;
+ for (cur_seg = enq_seg->next; cur_seg != enq_seg;
+ cur_seg = cur_seg->next)
+ left_on_ring += TRBS_PER_SEGMENT - 1;
+
+ /* Always need one TRB free in the ring. */
+ left_on_ring -= 1;
+ if (num_trbs > left_on_ring) {
+ xhci_warn(xhci, "Not enough room on ring; "
+ "need %u TRBs, %u TRBs left\n",
+ num_trbs, left_on_ring);
+ return 0;
+ }
return 1;
+ }
/* Make sure there's an extra empty TRB available */
for (i = 0; i <= num_trbs; ++i) {
if (enq == ring->dequeue)
@@ -295,7 +320,8 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
static void ring_ep_doorbell(struct xhci_hcd *xhci,
unsigned int slot_id,
- unsigned int ep_index)
+ unsigned int ep_index,
+ unsigned int stream_id)
{
struct xhci_virt_ep *ep;
unsigned int ep_state;
@@ -306,11 +332,16 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
ep_state = ep->ep_state;
/* Don't ring the doorbell for this endpoint if there are pending
* cancellations because we don't want to interrupt processing.
+ * We don't want to restart any stream rings if there's a set dequeue
+ * pointer command pending because the device can choose to start any
+ * stream once the endpoint is on the HW schedule.
+ * FIXME - check all the stream rings for pending cancellations.
*/
if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
&& !(ep_state & EP_HALTED)) {
field = xhci_readl(xhci, db_addr) & DB_MASK;
- xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
+ field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
+ xhci_writel(xhci, field, db_addr);
/* Flush PCI posted writes - FIXME Matthew Wilcox says this
* isn't time-critical and we shouldn't make the CPU wait for
* the flush.
@@ -319,6 +350,31 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
}
}
+/* Ring the doorbell for any rings with pending URBs */
+static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+ unsigned int slot_id,
+ unsigned int ep_index)
+{
+ unsigned int stream_id;
+ struct xhci_virt_ep *ep;
+
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+
+ /* A ring has pending URBs if its TD list is not empty */
+ if (!(ep->ep_state & EP_HAS_STREAMS)) {
+ if (!(list_empty(&ep->ring->td_list)))
+ ring_ep_doorbell(xhci, slot_id, ep_index, 0);
+ return;
+ }
+
+ for (stream_id = 1; stream_id < ep->stream_info->num_streams;
+ stream_id++) {
+ struct xhci_stream_info *stream_info = ep->stream_info;
+ if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
+ ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
+ }
+}
+
/*
* Find the segment that trb is in. Start searching in start_seg.
* If we must move past a segment that has a link TRB with a toggle cycle state
@@ -334,13 +390,14 @@ static struct xhci_segment *find_trb_seg(
while (cur_seg->trbs > trb ||
&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
- if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK &&
+ if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
+ TRB_TYPE(TRB_LINK) &&
(generic_trb->field[3] & LINK_TOGGLE))
*cycle_state = ~(*cycle_state) & 0x1;
cur_seg = cur_seg->next;
if (cur_seg == start_seg)
/* Looped over the entire list. Oops! */
- return 0;
+ return NULL;
}
return cur_seg;
}
@@ -361,14 +418,23 @@ static struct xhci_segment *find_trb_seg(
*/
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
- struct xhci_td *cur_td, struct xhci_dequeue_state *state)
+ unsigned int stream_id, struct xhci_td *cur_td,
+ struct xhci_dequeue_state *state)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
- struct xhci_ring *ep_ring = dev->eps[ep_index].ring;
+ struct xhci_ring *ep_ring;
struct xhci_generic_trb *trb;
struct xhci_ep_ctx *ep_ctx;
dma_addr_t addr;
+ ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
+ ep_index, stream_id);
+ if (!ep_ring) {
+ xhci_warn(xhci, "WARN can't find new dequeue state "
+ "for invalid stream ID %u.\n",
+ stream_id);
+ return;
+ }
state->new_cycle_state = 0;
xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
@@ -390,7 +456,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
BUG();
trb = &state->new_deq_ptr->generic;
- if (TRB_TYPE(trb->field[3]) == TRB_LINK &&
+ if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
(trb->field[3] & LINK_TOGGLE))
state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
@@ -448,11 +514,13 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index, struct xhci_segment *deq_seg,
+ unsigned int ep_index, unsigned int stream_id,
+ struct xhci_segment *deq_seg,
union xhci_trb *deq_ptr, u32 cycle_state);
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id,
struct xhci_dequeue_state *deq_state)
{
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
@@ -464,7 +532,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
deq_state->new_deq_ptr,
(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
deq_state->new_cycle_state);
- queue_set_tr_deq(xhci, slot_id, ep_index,
+ queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
deq_state->new_deq_seg,
deq_state->new_deq_ptr,
(u32) deq_state->new_cycle_state);
@@ -523,7 +591,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
struct list_head *entry;
- struct xhci_td *cur_td = 0;
+ struct xhci_td *cur_td = NULL;
struct xhci_td *last_unlinked_td;
struct xhci_dequeue_state deq_state;
@@ -532,11 +600,10 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
ep = &xhci->devs[slot_id]->eps[ep_index];
- ep_ring = ep->ring;
if (list_empty(&ep->cancelled_td_list)) {
xhci_stop_watchdog_timer_in_irq(xhci, ep);
- ring_ep_doorbell(xhci, slot_id, ep_index);
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
return;
}
@@ -550,15 +617,36 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
cur_td->first_trb,
(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+ ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
+ if (!ep_ring) {
+ /* This shouldn't happen unless a driver is mucking
+ * with the stream ID after submission. This will
+ * leave the TD on the hardware ring, and the hardware
+ * will try to execute it, and may access a buffer
+ * that has already been freed. In the best case, the
+ * hardware will execute it, and the event handler will
+ * ignore the completion event for that TD, since it was
+ * removed from the td_list for that endpoint. In
+ * short, don't muck with the stream ID after
+ * submission.
+ */
+ xhci_warn(xhci, "WARN Cancelled URB %p "
+ "has invalid stream ID %u.\n",
+ cur_td->urb,
+ cur_td->urb->stream_id);
+ goto remove_finished_td;
+ }
/*
* If we stopped on the TD we need to cancel, then we have to
* move the xHC endpoint ring dequeue pointer past this TD.
*/
if (cur_td == ep->stopped_td)
- xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
- &deq_state);
+ xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
+ cur_td->urb->stream_id,
+ cur_td, &deq_state);
else
td_to_noop(xhci, ep_ring, cur_td);
+remove_finished_td:
/*
* The event handler won't see a completion for this TD anymore,
* so remove it from the endpoint ring's TD list. Keep it in
@@ -572,12 +660,16 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
xhci_queue_new_dequeue_state(xhci,
- slot_id, ep_index, &deq_state);
+ slot_id, ep_index,
+ ep->stopped_td->urb->stream_id,
+ &deq_state);
xhci_ring_cmd_db(xhci);
} else {
- /* Otherwise just ring the doorbell to restart the ring */
- ring_ep_doorbell(xhci, slot_id, ep_index);
+ /* Otherwise ring the doorbell(s) to restart queued transfers */
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;
/*
* Drop the lock and complete the URBs in the cancelled TD list.
@@ -734,6 +826,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
{
unsigned int slot_id;
unsigned int ep_index;
+ unsigned int stream_id;
struct xhci_ring *ep_ring;
struct xhci_virt_device *dev;
struct xhci_ep_ctx *ep_ctx;
@@ -741,8 +834,19 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+ stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]);
dev = xhci->devs[slot_id];
- ep_ring = dev->eps[ep_index].ring;
+
+ ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
+ if (!ep_ring) {
+ xhci_warn(xhci, "WARN Set TR deq ptr command for "
+ "freed stream ID %u\n",
+ stream_id);
+ /* XXX: Harmless??? */
+ dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
+ return;
+ }
+
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
@@ -787,7 +891,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
}
dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
- ring_ep_doorbell(xhci, slot_id, ep_index);
+ /* Restart any rings with pending URBs */
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void handle_reset_ep_completion(struct xhci_hcd *xhci,
@@ -796,11 +901,9 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
{
int slot_id;
unsigned int ep_index;
- struct xhci_ring *ep_ring;
slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
- ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
/* This command will only fail if the endpoint wasn't halted,
* but we don't care.
*/
@@ -818,9 +921,9 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
false);
xhci_ring_cmd_db(xhci);
} else {
- /* Clear our internal halted state and restart the ring */
+ /* Clear our internal halted state and restart the ring(s) */
xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
- ring_ep_doorbell(xhci, slot_id, ep_index);
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
}
@@ -897,16 +1000,19 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
* Configure endpoint commands can come from the USB core
* configuration or alt setting changes, or because the HW
* needed an extra configure endpoint command after a reset
- * endpoint command. In the latter case, the xHCI driver is
- * not waiting on the configure endpoint command.
+ * endpoint command or streams were being configured.
+ * If the command was for a halted endpoint, the xHCI driver
+ * is not waiting on the configure endpoint command.
*/
ctrl_ctx = xhci_get_input_control_ctx(xhci,
virt_dev->in_ctx);
/* Input ctx add_flags are the endpoint index plus one */
ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
/* A usb_set_interface() call directly after clearing a halted
- * condition may race on this quirky hardware.
- * Not worth worrying about, since this is prototype hardware.
+ * condition may race on this quirky hardware. Not worth
+ * worrying about, since this is prototype hardware. Not sure
+ * if this will work for streams, but streams support was
+ * untested on this prototype.
*/
if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
ep_index != (unsigned int) -1 &&
@@ -919,10 +1025,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Completed config ep cmd - "
"last ep index = %d, state = %d\n",
ep_index, ep_state);
- /* Clear our internal halted state and restart ring */
+ /* Clear internal halted state and restart ring(s) */
xhci->devs[slot_id]->eps[ep_index].ep_state &=
~EP_HALTED;
- ring_ep_doorbell(xhci, slot_id, ep_index);
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
break;
}
bandwidth_change:
@@ -1018,7 +1124,7 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
do {
if (start_dma == 0)
- return 0;
+ return NULL;
/* We may get an event for a Link TRB in the middle of a TD */
end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
@@ -1040,7 +1146,7 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
suspect_dma <= end_trb_dma))
return cur_seg;
}
- return 0;
+ return NULL;
} else {
/* Might still be somewhere in this segment */
if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
@@ -1050,19 +1156,27 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
} while (cur_seg != start_seg);
- return 0;
+ return NULL;
}
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id,
struct xhci_td *td, union xhci_trb *event_trb)
{
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
ep->ep_state |= EP_HALTED;
ep->stopped_td = td;
ep->stopped_trb = event_trb;
+ ep->stopped_stream = stream_id;
+
xhci_queue_reset_ep(xhci, slot_id, ep_index);
xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;
+ ep->stopped_stream = 0;
+
xhci_ring_cmd_db(xhci);
}
@@ -1119,11 +1233,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring;
unsigned int slot_id;
int ep_index;
- struct xhci_td *td = 0;
+ struct xhci_td *td = NULL;
dma_addr_t event_dma;
struct xhci_segment *event_seg;
union xhci_trb *event_trb;
- struct urb *urb = 0;
+ struct urb *urb = NULL;
int status = -EINPROGRESS;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
@@ -1140,10 +1254,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep_index = TRB_TO_EP_ID(event->flags) - 1;
xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
ep = &xdev->eps[ep_index];
- ep_ring = ep->ring;
+ ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
- xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
+ xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
+ "or incorrect stream ring\n");
return -ENODEV;
}
@@ -1274,7 +1389,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td->urb->actual_length = 0;
xhci_cleanup_halted_endpoint(xhci,
- slot_id, ep_index, td, event_trb);
+ slot_id, ep_index, 0, td, event_trb);
goto td_cleanup;
}
/*
@@ -1390,8 +1505,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
cur_trb != event_trb;
next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
- TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
+ if ((cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
+ (cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
td->urb->actual_length +=
TRB_LEN(cur_trb->generic.field[2]);
}
@@ -1423,6 +1540,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
ep->stopped_td = td;
ep->stopped_trb = event_trb;
+ ep->stopped_stream = ep_ring->stream_id;
} else if (xhci_requires_manual_halt_cleanup(xhci,
ep_ctx, trb_comp_code)) {
/* Other types of errors halt the endpoint, but the
@@ -1431,7 +1549,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* xHCI hardware manually.
*/
xhci_cleanup_halted_endpoint(xhci,
- slot_id, ep_index, td, event_trb);
+ slot_id, ep_index, ep_ring->stream_id, td, event_trb);
} else {
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
@@ -1621,20 +1739,66 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
xhci_err(xhci, "ERROR no room on ep ring\n");
return -ENOMEM;
}
+
+ if (enqueue_is_link_trb(ep_ring)) {
+ struct xhci_ring *ring = ep_ring;
+ union xhci_trb *next;
+
+ xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
+ next = ring->enqueue;
+
+ while (last_trb(xhci, ring, ring->enq_seg, next)) {
+
+ /* If we're not dealing with 0.95 hardware,
+ * clear the chain bit.
+ */
+ if (!xhci_link_trb_quirk(xhci))
+ next->link.control &= ~TRB_CHAIN;
+ else
+ next->link.control |= TRB_CHAIN;
+
+ wmb();
+ next->link.control ^= (u32) TRB_CYCLE;
+
+ /* Toggle the cycle bit after the last ring segment. */
+ if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
+ ring->cycle_state = (ring->cycle_state ? 0 : 1);
+ if (!in_interrupt()) {
+ xhci_dbg(xhci, "queue_trb: Toggle cycle "
+ "state for ring %p = %i\n",
+ ring, (unsigned int)ring->cycle_state);
+ }
+ }
+ ring->enq_seg = ring->enq_seg->next;
+ ring->enqueue = ring->enq_seg->trbs;
+ next = ring->enqueue;
+ }
+ }
+
return 0;
}
static int prepare_transfer(struct xhci_hcd *xhci,
struct xhci_virt_device *xdev,
unsigned int ep_index,
+ unsigned int stream_id,
unsigned int num_trbs,
struct urb *urb,
struct xhci_td **td,
gfp_t mem_flags)
{
int ret;
+ struct xhci_ring *ep_ring;
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
- ret = prepare_ring(xhci, xdev->eps[ep_index].ring,
+
+ ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
+ if (!ep_ring) {
+ xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
+ stream_id);
+ return -EINVAL;
+ }
+
+ ret = prepare_ring(xhci, ep_ring,
ep_ctx->ep_info & EP_STATE_MASK,
num_trbs, mem_flags);
if (ret)
@@ -1654,9 +1818,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
(*td)->urb = urb;
urb->hcpriv = (void *) (*td);
/* Add this TD to the tail of the endpoint ring's TD list */
- list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list);
- (*td)->start_seg = xdev->eps[ep_index].ring->enq_seg;
- (*td)->first_trb = xdev->eps[ep_index].ring->enqueue;
+ list_add_tail(&(*td)->td_list, &ep_ring->td_list);
+ (*td)->start_seg = ep_ring->enq_seg;
+ (*td)->first_trb = ep_ring->enqueue;
return 0;
}
@@ -1672,7 +1836,7 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
xhci_dbg(xhci, "count sg list trbs: \n");
num_trbs = 0;
- for_each_sg(urb->sg->sg, sg, num_sgs, i) {
+ for_each_sg(urb->sg, sg, num_sgs, i) {
unsigned int previous_total_trbs = num_trbs;
unsigned int len = sg_dma_len(sg);
@@ -1722,7 +1886,7 @@ static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
}
static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index, int start_cycle,
+ unsigned int ep_index, unsigned int stream_id, int start_cycle,
struct xhci_generic_trb *start_trb, struct xhci_td *td)
{
/*
@@ -1731,7 +1895,7 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
*/
wmb();
start_trb->field[3] |= start_cycle;
- ring_ep_doorbell(xhci, slot_id, ep_index);
+ ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
/*
@@ -1805,12 +1969,16 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct xhci_generic_trb *start_trb;
int start_cycle;
- ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+ ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ep_ring)
+ return -EINVAL;
+
num_trbs = count_sg_trbs_needed(xhci, urb);
num_sgs = urb->num_sgs;
trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
- ep_index, num_trbs, urb, &td, mem_flags);
+ ep_index, urb->stream_id,
+ num_trbs, urb, &td, mem_flags);
if (trb_buff_len < 0)
return trb_buff_len;
/*
@@ -1831,7 +1999,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
* the amount of memory allocated for this scatter-gather list.
* 3. TRBs buffers can't cross 64KB boundaries.
*/
- sg = urb->sg->sg;
+ sg = urb->sg;
addr = (u64) sg_dma_address(sg);
this_sg_len = sg_dma_len(sg);
trb_buff_len = TRB_MAX_BUFF_SIZE -
@@ -1919,7 +2087,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
} while (running_total < urb->transfer_buffer_length);
check_trb_math(urb, num_trbs, running_total);
- giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+ giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+ start_cycle, start_trb, td);
return 0;
}
@@ -1938,10 +2107,12 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
int running_total, trb_buff_len, ret;
u64 addr;
- if (urb->sg)
+ if (urb->num_sgs)
return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
- ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+ ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ep_ring)
+ return -EINVAL;
num_trbs = 0;
/* How much data is (potentially) left before the 64KB boundary? */
@@ -1968,7 +2139,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
(unsigned long long)urb->transfer_dma,
num_trbs);
- ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+ ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ ep_index, urb->stream_id,
num_trbs, urb, &td, mem_flags);
if (ret < 0)
return ret;
@@ -2038,7 +2210,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
} while (running_total < urb->transfer_buffer_length);
check_trb_math(urb, num_trbs, running_total);
- giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+ giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+ start_cycle, start_trb, td);
return 0;
}
@@ -2055,7 +2228,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
u32 field, length_field;
struct xhci_td *td;
- ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+ ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ep_ring)
+ return -EINVAL;
/*
* Need to copy setup packet into setup TRB, so we can't use the setup
@@ -2076,8 +2251,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
*/
if (urb->transfer_buffer_length > 0)
num_trbs++;
- ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
- urb, &td, mem_flags);
+ ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ ep_index, urb->stream_id,
+ num_trbs, urb, &td, mem_flags);
if (ret < 0)
return ret;
@@ -2132,7 +2308,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Event on completion */
field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
- giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+ giveback_first_trb(xhci, slot_id, ep_index, 0,
+ start_cycle, start_trb, td);
return 0;
}
@@ -2244,12 +2421,14 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
* This should not be used for endpoints that have streams enabled.
*/
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index, struct xhci_segment *deq_seg,
+ unsigned int ep_index, unsigned int stream_id,
+ struct xhci_segment *deq_seg,
union xhci_trb *deq_ptr, u32 cycle_state)
{
dma_addr_t addr;
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+ u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
u32 type = TRB_TYPE(TRB_SET_DEQ);
addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
@@ -2260,7 +2439,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
return 0;
}
return queue_command(xhci, lower_32_bits(addr) | cycle_state,
- upper_32_bits(addr), 0,
+ upper_32_bits(addr), trb_stream_id,
trb_slot_id | trb_ep_index | type, false);
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7e42772..40e0a0c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -21,6 +21,7 @@
*/
#include <linux/irq.h>
+#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
@@ -352,11 +353,7 @@ void xhci_event_ring_work(unsigned long arg)
if (!xhci->devs[i])
continue;
for (j = 0; j < 31; ++j) {
- struct xhci_ring *ring = xhci->devs[i]->eps[j].ring;
- if (!ring)
- continue;
- xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
- xhci_debug_segment(xhci, ring->deq_seg);
+ xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
}
}
@@ -726,8 +723,21 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
spin_lock_irqsave(&xhci->lock, flags);
if (xhci->xhc_state & XHCI_STATE_DYING)
goto dying;
- ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
- slot_id, ep_index);
+ if (xhci->devs[slot_id]->eps[ep_index].ep_state &
+ EP_GETTING_STREAMS) {
+ xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
+ "is transitioning to using streams.\n");
+ ret = -EINVAL;
+ } else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
+ EP_GETTING_NO_STREAMS) {
+ xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
+ "is transitioning to "
+ "not having streams.\n");
+ ret = -EINVAL;
+ } else {
+ ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
+ slot_id, ep_index);
+ }
spin_unlock_irqrestore(&xhci->lock, flags);
} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
spin_lock_irqsave(&xhci->lock, flags);
@@ -825,7 +835,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_debug_ring(xhci, xhci->event_ring);
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
- ep_ring = ep->ring;
+ ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ep_ring) {
+ ret = -EINVAL;
+ goto done;
+ }
+
xhci_dbg(xhci, "Endpoint ring:\n");
xhci_debug_ring(xhci, ep_ring);
td = (struct xhci_td *) urb->hcpriv;
@@ -1369,7 +1384,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
* or it will attempt to resend it on the next doorbell ring.
*/
xhci_find_new_dequeue_state(xhci, udev->slot_id,
- ep_index, ep->stopped_td,
+ ep_index, ep->stopped_stream, ep->stopped_td,
&deq_state);
/* HW with the reset endpoint quirk will use the saved dequeue state to
@@ -1378,10 +1393,12 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
xhci_dbg(xhci, "Queueing new dequeue state\n");
xhci_queue_new_dequeue_state(xhci, udev->slot_id,
- ep_index, &deq_state);
+ ep_index, ep->stopped_stream, &deq_state);
} else {
/* Better hope no one uses the input context between now and the
* reset endpoint completion!
+ * XXX: No idea how this hardware will react when stream rings
+ * are enabled.
*/
xhci_dbg(xhci, "Setting up input context for "
"configure endpoint command\n");
@@ -1438,12 +1455,391 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
kfree(virt_ep->stopped_td);
xhci_ring_cmd_db(xhci);
}
+ virt_ep->stopped_td = NULL;
+ virt_ep->stopped_trb = NULL;
+ virt_ep->stopped_stream = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
if (ret)
xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}
+static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
+ struct usb_device *udev, struct usb_host_endpoint *ep,
+ unsigned int slot_id)
+{
+ int ret;
+ unsigned int ep_index;
+ unsigned int ep_state;
+
+ if (!ep)
+ return -EINVAL;
+ ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, __func__);
+ if (ret <= 0)
+ return -EINVAL;
+ if (ep->ss_ep_comp.bmAttributes == 0) {
+ xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
+ " descriptor for ep 0x%x does not support streams\n",
+ ep->desc.bEndpointAddress);
+ return -EINVAL;
+ }
+
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+ if (ep_state & EP_HAS_STREAMS ||
+ ep_state & EP_GETTING_STREAMS) {
+ xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
+ "already has streams set up.\n",
+ ep->desc.bEndpointAddress);
+ xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
+ "dynamic stream context array reallocation.\n");
+ return -EINVAL;
+ }
+ if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
+ xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
+ "endpoint 0x%x; URBs are pending.\n",
+ ep->desc.bEndpointAddress);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
+ unsigned int *num_streams, unsigned int *num_stream_ctxs)
+{
+ unsigned int max_streams;
+
+ /* The stream context array size must be a power of two */
+ *num_stream_ctxs = roundup_pow_of_two(*num_streams);
+ /*
+ * Find out how many primary stream array entries the host controller
+ * supports. Later we may use secondary stream arrays (similar to 2nd
+ * level page entries), but that's an optional feature for xHCI host
+ * controllers. xHCs must support at least 4 stream IDs.
+ */
+ max_streams = HCC_MAX_PSA(xhci->hcc_params);
+ if (*num_stream_ctxs > max_streams) {
+ xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
+ max_streams);
+ *num_stream_ctxs = max_streams;
+ *num_streams = max_streams;
+ }
+}
+
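
A small sketch of how the requested stream count is rounded and clamped above; roundup_pow2() stands in for the kernel's roundup_pow_of_two(), and the HCC_MAX_PSA value of 16 is an arbitrary assumption:

#include <stdio.h>

/* Stand-in for the kernel's roundup_pow_of_two() for small values. */
static unsigned int roundup_pow2(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int max_streams = 16;	/* assumed HCC_MAX_PSA value */
	unsigned int requested[] = { 3, 5, 12, 100 };
	unsigned int i;

	for (i = 0; i < sizeof(requested) / sizeof(requested[0]); i++) {
		unsigned int num_streams = requested[i];
		unsigned int num_stream_ctxs = roundup_pow2(num_streams);

		if (num_stream_ctxs > max_streams) {
			num_stream_ctxs = max_streams;
			num_streams = max_streams;
		}
		printf("asked for %3u -> %3u streams, %3u ctx entries\n",
			requested[i], num_streams, num_stream_ctxs);
	}
	return 0;
}
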
+/* Returns an error code if one of the endpoints already has streams.
+ * This does not change any data structures, it only checks and gathers
+ * information.
+ */
+static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ unsigned int *num_streams, u32 *changed_ep_bitmask)
+{
+ unsigned int max_streams;
+ unsigned int endpoint_flag;
+ int i;
+ int ret;
+
+ for (i = 0; i < num_eps; i++) {
+ ret = xhci_check_streams_endpoint(xhci, udev,
+ eps[i], udev->slot_id);
+ if (ret < 0)
+ return ret;
+
+ max_streams = USB_SS_MAX_STREAMS(
+ eps[i]->ss_ep_comp.bmAttributes);
+ if (max_streams < (*num_streams - 1)) {
+ xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
+ eps[i]->desc.bEndpointAddress,
+ max_streams);
+ *num_streams = max_streams+1;
+ }
+
+ endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
+ if (*changed_ep_bitmask & endpoint_flag)
+ return -EINVAL;
+ *changed_ep_bitmask |= endpoint_flag;
+ }
+ return 0;
+}
+
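
To illustrate the per-endpoint limit consulted above: the SuperSpeed endpoint companion descriptor encodes MaxStreams as a power-of-two exponent in the low bits of bmAttributes, so, assuming the usual 1 << (bmAttributes & 0x1f) interpretation, a few sample values decode as follows:

#include <stdio.h>

/* Assumed to match the USB 3.0 companion descriptor layout: a bulk
 * endpoint supports 2^MaxStreams streams, with MaxStreams = 0
 * meaning streams are not supported at all.
 */
#define SS_MAX_STREAMS(bmAttributes) (1u << ((bmAttributes) & 0x1f))

int main(void)
{
	unsigned char bmAttributes[] = { 0x00, 0x02, 0x04, 0x10 };
	unsigned int i;

	for (i = 0; i < sizeof(bmAttributes); i++)
		printf("bmAttributes 0x%02x -> %u streams%s\n",
			bmAttributes[i], SS_MAX_STREAMS(bmAttributes[i]),
			bmAttributes[i] ? "" : " (streams not supported)");
	return 0;
}
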
+static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps)
+{
+ u32 changed_ep_bitmask = 0;
+ unsigned int slot_id;
+ unsigned int ep_index;
+ unsigned int ep_state;
+ int i;
+
+ slot_id = udev->slot_id;
+ if (!xhci->devs[slot_id])
+ return 0;
+
+ for (i = 0; i < num_eps; i++) {
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+ /* Are streams already being freed for the endpoint? */
+ if (ep_state & EP_GETTING_NO_STREAMS) {
+ xhci_warn(xhci, "WARN Can't disable streams for "
+ "endpoint 0x%x\n, "
+ "streams are being disabled already.",
+ eps[i]->desc.bEndpointAddress);
+ return 0;
+ }
+ /* Are there actually any streams to free? */
+ if (!(ep_state & EP_HAS_STREAMS) &&
+ !(ep_state & EP_GETTING_STREAMS)) {
+ xhci_warn(xhci, "WARN Can't disable streams for "
+ "endpoint 0x%x\n, "
+ "streams are already disabled!",
+ eps[i]->desc.bEndpointAddress);
+ xhci_warn(xhci, "WARN xhci_free_streams() called "
+ "with non-streams endpoint\n");
+ return 0;
+ }
+ changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
+ }
+ return changed_ep_bitmask;
+}
+
+/*
+ * The USB device drivers use this function (through the HCD interface in USB
+ * core) to prepare a set of bulk endpoints to use streams. Streams are used to
+ * coordinate mass storage command queueing across multiple endpoints (basically
+ * a stream ID == a task ID).
+ *
+ * Setting up streams involves allocating the same size stream context array
+ * for each endpoint and issuing a configure endpoint command for all endpoints.
+ *
+ * Don't allow the call to succeed if one endpoint only supports one stream
+ * (which means it doesn't support streams at all).
+ *
+ * Drivers may get less stream IDs than they asked for, if the host controller
+ * hardware or endpoints claim they can't support the number of requested
+ * stream IDs.
+ */
+int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ unsigned int num_streams, gfp_t mem_flags)
+{
+ int i, ret;
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *vdev;
+ struct xhci_command *config_cmd;
+ unsigned int ep_index;
+ unsigned int num_stream_ctxs;
+ unsigned long flags;
+ u32 changed_ep_bitmask = 0;
+
+ if (!eps)
+ return -EINVAL;
+
+ /* Add one to the number of streams requested to account for
+	 * stream 0, which is reserved for xHCI usage.
+ */
+ num_streams += 1;
+ xhci = hcd_to_xhci(hcd);
+ xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
+ num_streams);
+
+ config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
+ if (!config_cmd) {
+ xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
+ return -ENOMEM;
+ }
+
+ /* Check to make sure all endpoints are not already configured for
+ * streams. While we're at it, find the maximum number of streams that
+ * all the endpoints will support and check for duplicate endpoints.
+ */
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
+ num_eps, &num_streams, &changed_ep_bitmask);
+ if (ret < 0) {
+ xhci_free_command(xhci, config_cmd);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return ret;
+ }
+ if (num_streams <= 1) {
+ xhci_warn(xhci, "WARN: endpoints can't handle "
+ "more than one stream.\n");
+ xhci_free_command(xhci, config_cmd);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return -EINVAL;
+ }
+ vdev = xhci->devs[udev->slot_id];
+	/* Mark each endpoint as being in transition, so
+ * xhci_urb_enqueue() will reject all URBs.
+ */
+ for (i = 0; i < num_eps; i++) {
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* Setup internal data structures and allocate HW data structures for
+ * streams (but don't install the HW structures in the input context
+ * until we're sure all memory allocation succeeded).
+ */
+ xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
+ xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
+ num_stream_ctxs, num_streams);
+
+ for (i = 0; i < num_eps; i++) {
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
+ num_stream_ctxs,
+ num_streams, mem_flags);
+ if (!vdev->eps[ep_index].stream_info)
+ goto cleanup;
+ /* Set maxPstreams in endpoint context and update deq ptr to
+ * point to stream context array. FIXME
+ */
+ }
+
+ /* Set up the input context for a configure endpoint command. */
+ for (i = 0; i < num_eps; i++) {
+ struct xhci_ep_ctx *ep_ctx;
+
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
+
+ xhci_endpoint_copy(xhci, config_cmd->in_ctx,
+ vdev->out_ctx, ep_index);
+ xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
+ vdev->eps[ep_index].stream_info);
+ }
+ /* Tell the HW to drop its old copy of the endpoint context info
+ * and add the updated copy from the input context.
+ */
+ xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
+ vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
+
+ /* Issue and wait for the configure endpoint command */
+ ret = xhci_configure_endpoint(xhci, udev, config_cmd,
+ false, false);
+
+ /* xHC rejected the configure endpoint command for some reason, so we
+ * leave the old ring intact and free our internal streams data
+ * structure.
+ */
+ if (ret < 0)
+ goto cleanup;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ for (i = 0; i < num_eps; i++) {
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
+ xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
+ udev->slot_id, ep_index);
+ vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
+ }
+ xhci_free_command(xhci, config_cmd);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* Subtract 1 for stream 0, which drivers can't use */
+ return num_streams - 1;
+
+cleanup:
+ /* If it didn't work, free the streams! */
+ for (i = 0; i < num_eps; i++) {
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
+ vdev->eps[ep_index].stream_info = NULL;
+ /* FIXME Unset maxPstreams in endpoint context and
+		 * update deq ptr to point to normal stream ring.
+ */
+ vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
+ vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
+ xhci_endpoint_zero(xhci, vdev, eps[i]);
+ }
+ xhci_free_command(xhci, config_cmd);
+ return -ENOMEM;
+}
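
As a rough usage sketch of the path above: a class driver groups its bulk endpoints into one request and falls back to plain bulk URBs when streams are unavailable. This assumes the companion USB core patches in this series (usb_alloc_streams(), usb_free_streams() and urb->stream_id); the example_ names, endpoint pointers and counts are invented for illustration.

/* Hypothetical caller of the alloc/free hooks above, via the USB core
 * wrappers.  Not part of this patch.
 */
#include <linux/usb.h>

static int example_enable_streams(struct usb_interface *intf,
				  struct usb_host_endpoint *bulk_in,
				  struct usb_host_endpoint *bulk_out)
{
	struct usb_host_endpoint *eps[2] = { bulk_in, bulk_out };
	int streams;

	/* Ask for 16 stream IDs; the HCD may grant fewer, or none at all. */
	streams = usb_alloc_streams(intf, eps, 2, 16, GFP_KERNEL);
	if (streams <= 0)
		return streams;		/* fall back to ordinary bulk URBs */

	/* ... queue URBs with urb->stream_id set to 1..streams ... */

	usb_free_streams(intf, eps, 2, GFP_KERNEL);
	return 0;
}
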
+
+/* Transition the endpoint from using streams to being a "normal" endpoint
+ * without streams.
+ *
+ * Modify the endpoint context state, submit a configure endpoint command,
+ * and free all endpoint rings for streams if that completes successfully.
+ */
+int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ gfp_t mem_flags)
+{
+ int i, ret;
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *vdev;
+ struct xhci_command *command;
+ unsigned int ep_index;
+ unsigned long flags;
+ u32 changed_ep_bitmask;
+
+ xhci = hcd_to_xhci(hcd);
+ vdev = xhci->devs[udev->slot_id];
+
+ /* Set up a configure endpoint command to remove the streams rings */
+ spin_lock_irqsave(&xhci->lock, flags);
+ changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
+ udev, eps, num_eps);
+ if (changed_ep_bitmask == 0) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return -EINVAL;
+ }
+
+ /* Use the xhci_command structure from the first endpoint. We may have
+ * allocated too many, but the driver may call xhci_free_streams() for
+ * each endpoint it grouped into one call to xhci_alloc_streams().
+ */
+ ep_index = xhci_get_endpoint_index(&eps[0]->desc);
+ command = vdev->eps[ep_index].stream_info->free_streams_command;
+ for (i = 0; i < num_eps; i++) {
+ struct xhci_ep_ctx *ep_ctx;
+
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
+ xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
+ EP_GETTING_NO_STREAMS;
+
+ xhci_endpoint_copy(xhci, command->in_ctx,
+ vdev->out_ctx, ep_index);
+ xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
+ &vdev->eps[ep_index]);
+ }
+ xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
+ vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* Issue and wait for the configure endpoint command,
+ * which must succeed.
+ */
+ ret = xhci_configure_endpoint(xhci, udev, command,
+ false, true);
+
+ /* xHC rejected the configure endpoint command for some reason, so we
+ * leave the streams rings intact.
+ */
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ for (i = 0; i < num_eps; i++) {
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
+ vdev->eps[ep_index].stream_info = NULL;
+ /* FIXME Unset maxPstreams in endpoint context and
+		 * update deq ptr to point to normal stream ring.
+ */
+ vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
+ vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ return 0;
+}
+
/*
* This submits a Reset Device Command, which will set the device state to 0,
* set the device address to 0, and disable all the endpoints except the default
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index ea389e9..dada2fb 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -26,8 +26,8 @@
#include <linux/usb.h>
#include <linux/timer.h>
#include <linux/kernel.h>
+#include <linux/usb/hcd.h>
-#include "../core/hcd.h"
/* Code sharing between pci-quirks and xhci hcd */
#include "xhci-ext-caps.h"
@@ -117,7 +117,7 @@ struct xhci_cap_regs {
/* true: no secondary Stream ID Support */
#define HCC_NSS(p) ((p) & (1 << 7))
/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
-#define HCC_MAX_PSA (1 << ((((p) >> 12) & 0xf) + 1))
+#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
/* Extended Capabilities pointer from PCI base - section 5.3.6 */
#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
@@ -444,6 +444,7 @@ struct xhci_doorbell_array {
/* Endpoint Target - bits 0:7 */
#define EPI_TO_DB(p) (((p) + 1) & 0xff)
+#define STREAM_ID_TO_DB(p) (((p) & 0xffff) << 16)
/**
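
The new STREAM_ID_TO_DB() macro lets the ring code fold both the endpoint target and the stream ID into a single doorbell write. A hedged sketch of that composition is below; DB_MASK, db_addr and the xhci_readl()/xhci_writel() accessors are assumed from the existing driver and are not shown in this hunk.

/* Sketch only: ring the doorbell for (ep_index, stream_id).  DB_MASK and
 * db_addr are assumptions; EPI_TO_DB()/STREAM_ID_TO_DB() are defined above.
 */
u32 field = xhci_readl(xhci, db_addr) & DB_MASK;

field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
xhci_writel(xhci, field, db_addr);
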
@@ -585,6 +586,10 @@ struct xhci_ep_ctx {
/* Interval - period between requests to an endpoint - 125u increments. */
#define EP_INTERVAL(p) ((p & 0xff) << 16)
#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
+#define EP_MAXPSTREAMS_MASK (0x1f << 10)
+#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
+/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
+#define EP_HAS_LSA (1 << 15)
/* ep_info2 bitmasks */
/*
@@ -648,8 +653,50 @@ struct xhci_command {
/* add context bitmasks */
#define ADD_EP(x) (0x1 << x)
+struct xhci_stream_ctx {
+ /* 64-bit stream ring address, cycle state, and stream type */
+ u64 stream_ring;
+ /* offset 0x14 - 0x1f reserved for HC internal use */
+ u32 reserved[2];
+};
+
+/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */
+#define SCT_FOR_CTX(p) (((p) << 1) & 0x7)
+/* Secondary stream array type, dequeue pointer is to a transfer ring */
+#define SCT_SEC_TR 0
+/* Primary stream array type, dequeue pointer is to a transfer ring */
+#define SCT_PRI_TR 1
+/* Dequeue pointer is for a secondary stream array (SSA) with 8 entries */
+#define SCT_SSA_8 2
+#define SCT_SSA_16 3
+#define SCT_SSA_32 4
+#define SCT_SSA_64 5
+#define SCT_SSA_128 6
+#define SCT_SSA_256 7
+
+/* Assume no secondary streams for now */
+struct xhci_stream_info {
+ struct xhci_ring **stream_rings;
+ /* Number of streams, including stream 0 (which drivers can't use) */
+ unsigned int num_streams;
+ /* The stream context array may be bigger than
+ * the number of streams the driver asked for
+ */
+ struct xhci_stream_ctx *stream_ctx_array;
+ unsigned int num_stream_ctxs;
+ dma_addr_t ctx_array_dma;
+ /* For mapping physical TRB addresses to segments in stream rings */
+ struct radix_tree_root trb_address_map;
+ struct xhci_command *free_streams_command;
+};
+
+#define SMALL_STREAM_ARRAY_SIZE 256
+#define MEDIUM_STREAM_ARRAY_SIZE 1024
+
struct xhci_virt_ep {
struct xhci_ring *ring;
+ /* Related to endpoints that are configured to use stream IDs only */
+ struct xhci_stream_info *stream_info;
/* Temporary storage in case the configure endpoint command fails and we
* have to restore the device state to the previous state
*/
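
Tying the new types together: each primary stream context entry carries a transfer ring's base DMA address with the stream context type and cycle state folded into its low bits. The sketch below shows how the (not shown) allocation path might fill one entry, per SCT_FOR_CTX() and xHCI section 6.4.1; stream_info, cur_ring and cur_stream are assumed to come from xhci_alloc_stream_info().

/* Sketch: encode one primary stream ring into its stream context entry.
 * Illustration only; the surrounding allocation loop is not part of this hunk.
 */
u64 addr = cur_ring->first_seg->dma |
	   SCT_FOR_CTX(SCT_PRI_TR) |
	   cur_ring->cycle_state;

stream_info->stream_ctx_array[cur_stream].stream_ring = addr;
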
@@ -658,11 +705,17 @@ struct xhci_virt_ep {
#define SET_DEQ_PENDING (1 << 0)
#define EP_HALTED (1 << 1) /* For stall handling */
#define EP_HALT_PENDING (1 << 2) /* For URB cancellation */
+/* Transitioning the endpoint to using streams, don't enqueue URBs */
+#define EP_GETTING_STREAMS (1 << 3)
+#define EP_HAS_STREAMS (1 << 4)
+/* Transitioning the endpoint to not using streams, don't enqueue URBs */
+#define EP_GETTING_NO_STREAMS (1 << 5)
/* ---- Related to URB cancellation ---- */
struct list_head cancelled_td_list;
/* The TRB that was last reported in a stopped endpoint ring */
union xhci_trb *stopped_trb;
struct xhci_td *stopped_td;
+ unsigned int stopped_stream;
/* Watchdog timer for stop endpoint command to cancel URBs */
struct timer_list stop_cmd_timer;
int stop_cmds_pending;
@@ -710,14 +763,6 @@ struct xhci_device_context_array {
*/
-struct xhci_stream_ctx {
- /* 64-bit stream ring address, cycle state, and stream type */
- u64 stream_ring;
- /* offset 0x14 - 0x1f reserved for HC internal use */
- u32 reserved[2];
-};
-
-
struct xhci_transfer_event {
/* 64-bit buffer address, or immediate data */
u64 buffer;
@@ -828,6 +873,10 @@ struct xhci_event_cmd {
#define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1)
#define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16)
+/* Set TR Dequeue Pointer command TRB fields */
+#define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16))
+#define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16)
+
/* Port Status Change Event TRB fields */
/* Port ID - bits 31:24 */
@@ -952,6 +1001,10 @@ union xhci_trb {
/* Allow two commands + a link TRB, along with any reserved command TRBs */
#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
#define SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
+/* SEGMENT_SHIFT should be log2(SEGMENT_SIZE).
+ * Change this if you change TRBS_PER_SEGMENT!
+ */
+#define SEGMENT_SHIFT 10
/* TRB buffer pointers can't cross 64KB boundaries */
#define TRB_MAX_BUFF_SHIFT 16
#define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT)
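
Because SEGMENT_SHIFT must be kept in sync with TRBS_PER_SEGMENT by hand, a compile-time assertion is one way to catch a mismatch. This is only a suggestion, not part of the patch; it would live inside any early init function.

/* Possible guard (not in this patch): fail the build if the constants drift.
 * BUILD_BUG_ON() comes from <linux/kernel.h> and must sit inside a function,
 * e.g. the driver's mem-init path.
 */
BUILD_BUG_ON(SEGMENT_SIZE != (1 << SEGMENT_SHIFT));
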
@@ -993,6 +1046,7 @@ struct xhci_ring {
* if we own the TRB (if we are the consumer). See section 4.9.1.
*/
u32 cycle_state;
+ unsigned int stream_id;
};
struct xhci_erst_entry {
@@ -1088,6 +1142,8 @@ struct xhci_hcd {
/* DMA pools */
struct dma_pool *device_pool;
struct dma_pool *segment_pool;
+ struct dma_pool *small_streams_pool;
+ struct dma_pool *medium_streams_pool;
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
/* Poll the rings - for debugging */
@@ -1216,6 +1272,9 @@ void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
char *xhci_get_slot_state(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx);
+void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_virt_ep *ep);
/* xHCI memory management */
void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1242,6 +1301,29 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index);
+struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
+ unsigned int num_stream_ctxs,
+ unsigned int num_streams, gfp_t flags);
+void xhci_free_stream_info(struct xhci_hcd *xhci,
+ struct xhci_stream_info *stream_info);
+void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
+ struct xhci_ep_ctx *ep_ctx,
+ struct xhci_stream_info *stream_info);
+void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
+ struct xhci_ep_ctx *ep_ctx,
+ struct xhci_virt_ep *ep);
+struct xhci_ring *xhci_dma_to_transfer_ring(
+ struct xhci_virt_ep *ep,
+ u64 address);
+struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+ struct urb *urb);
+struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id);
+struct xhci_ring *xhci_stream_id_to_ring(
+ struct xhci_virt_device *dev,
+ unsigned int ep_index,
+ unsigned int stream_id);
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
bool allocate_in_ctx, bool allocate_completion,
gfp_t mem_flags);
@@ -1266,6 +1348,12 @@ int xhci_get_frame(struct usb_hcd *hcd);
irqreturn_t xhci_irq(struct usb_hcd *hcd);
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ unsigned int num_streams, gfp_t mem_flags);
+int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ gfp_t mem_flags);
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
@@ -1308,9 +1396,11 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
- struct xhci_td *cur_td, struct xhci_dequeue_state *state);
+ unsigned int stream_id, struct xhci_td *cur_td,
+ struct xhci_dequeue_state *state);
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id,
struct xhci_dequeue_state *deq_state);
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
struct usb_device *udev, unsigned int ep_index);
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 094f91c..1fa6ce3 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -259,7 +259,7 @@ static int appledisplay_probe(struct usb_interface *iface,
}
/* Allocate buffer for interrupt data */
- pdata->urbdata = usb_buffer_alloc(pdata->udev, ACD_URB_BUFFER_LEN,
+ pdata->urbdata = usb_alloc_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
GFP_KERNEL, &pdata->urb->transfer_dma);
if (!pdata->urbdata) {
retval = -ENOMEM;
@@ -316,7 +316,7 @@ error:
if (pdata->urb) {
usb_kill_urb(pdata->urb);
if (pdata->urbdata)
- usb_buffer_free(pdata->udev, ACD_URB_BUFFER_LEN,
+ usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
pdata->urbdata, pdata->urb->transfer_dma);
usb_free_urb(pdata->urb);
}
@@ -337,7 +337,7 @@ static void appledisplay_disconnect(struct usb_interface *iface)
usb_kill_urb(pdata->urb);
cancel_delayed_work(&pdata->work);
backlight_device_unregister(pdata->bd);
- usb_buffer_free(pdata->udev, ACD_URB_BUFFER_LEN,
+ usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
pdata->urbdata, pdata->urb->transfer_dma);
usb_free_urb(pdata->urb);
kfree(pdata->msgdata);
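
The appledisplay hunks above, like the other conversions in this series, are a pure rename: usb_alloc_coherent()/usb_free_coherent() take exactly the same arguments as the old usb_buffer_alloc()/usb_buffer_free(). For reference, a self-contained sketch of the general pattern; the example_ function, pipe and length are placeholders, not code from any of the drivers touched here.

/* Generic coherent-buffer URB pattern after the rename.  The buffer is
 * DMA-mapped at allocation time, so the URB sets URB_NO_TRANSFER_DMA_MAP
 * and passes transfer_dma straight to the HCD.
 */
#include <linux/usb.h>

static int example_submit(struct usb_device *udev, unsigned int pipe,
			  size_t len, usb_complete_t complete_fn, void *ctx)
{
	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
	void *buf;

	if (!urb)
		return -ENOMEM;
	buf = usb_alloc_coherent(udev, len, GFP_KERNEL, &urb->transfer_dma);
	if (!buf) {
		usb_free_urb(urb);
		return -ENOMEM;
	}
	usb_fill_bulk_urb(urb, udev, pipe, buf, len, complete_fn, ctx);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	/* completion handler: usb_free_coherent(udev, len, buf, urb->transfer_dma) */
	return usb_submit_urb(urb, GFP_KERNEL);
}
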
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 1edb6d3..82e1663 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -73,7 +73,7 @@ static struct list_head ftdi_static_list;
*/
#include "usb_u132.h"
#include <asm/io.h>
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
/* FIXME ohci.h is ONLY for internal use by the OHCI driver.
* If you're going to try stuff like this, you need to split
@@ -734,7 +734,7 @@ static void ftdi_elan_write_bulk_callback(struct urb *urb)
dev_err(&ftdi->udev->dev, "urb=%p write bulk status received: %"
"d\n", urb, status);
}
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
urb->transfer_buffer, urb->transfer_dma);
}
@@ -795,7 +795,7 @@ static int ftdi_elan_command_engine(struct usb_ftdi *ftdi)
total_size);
return -ENOMEM;
}
- buf = usb_buffer_alloc(ftdi->udev, total_size, GFP_KERNEL,
+ buf = usb_alloc_coherent(ftdi->udev, total_size, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
dev_err(&ftdi->udev->dev, "could not get a buffer to write %d c"
@@ -829,7 +829,7 @@ static int ftdi_elan_command_engine(struct usb_ftdi *ftdi)
dev_err(&ftdi->udev->dev, "failed %d to submit urb %p to write "
"%d commands totaling %d bytes to the Uxxx\n", retval,
urb, command_size, total_size);
- usb_buffer_free(ftdi->udev, total_size, buf, urb->transfer_dma);
+ usb_free_coherent(ftdi->udev, total_size, buf, urb->transfer_dma);
usb_free_urb(urb);
return retval;
}
@@ -1167,7 +1167,7 @@ static ssize_t ftdi_elan_write(struct file *file,
retval = -ENOMEM;
goto error_1;
}
- buf = usb_buffer_alloc(ftdi->udev, count, GFP_KERNEL,
+ buf = usb_alloc_coherent(ftdi->udev, count, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
retval = -ENOMEM;
@@ -1192,7 +1192,7 @@ static ssize_t ftdi_elan_write(struct file *file,
exit:
return count;
error_3:
- usb_buffer_free(ftdi->udev, count, buf, urb->transfer_dma);
+ usb_free_coherent(ftdi->udev, count, buf, urb->transfer_dma);
error_2:
usb_free_urb(urb);
error_1:
@@ -1968,7 +1968,7 @@ static int ftdi_elan_synchronize_flush(struct usb_ftdi *ftdi)
"ence\n");
return -ENOMEM;
}
- buf = usb_buffer_alloc(ftdi->udev, I, GFP_KERNEL, &urb->transfer_dma);
+ buf = usb_alloc_coherent(ftdi->udev, I, GFP_KERNEL, &urb->transfer_dma);
if (!buf) {
dev_err(&ftdi->udev->dev, "could not get a buffer for flush seq"
"uence\n");
@@ -1985,7 +1985,7 @@ static int ftdi_elan_synchronize_flush(struct usb_ftdi *ftdi)
if (retval) {
dev_err(&ftdi->udev->dev, "failed to submit urb containing the "
"flush sequence\n");
- usb_buffer_free(ftdi->udev, i, buf, urb->transfer_dma);
+ usb_free_coherent(ftdi->udev, i, buf, urb->transfer_dma);
usb_free_urb(urb);
return -ENOMEM;
}
@@ -2011,7 +2011,7 @@ static int ftdi_elan_synchronize_reset(struct usb_ftdi *ftdi)
"quence\n");
return -ENOMEM;
}
- buf = usb_buffer_alloc(ftdi->udev, I, GFP_KERNEL, &urb->transfer_dma);
+ buf = usb_alloc_coherent(ftdi->udev, I, GFP_KERNEL, &urb->transfer_dma);
if (!buf) {
dev_err(&ftdi->udev->dev, "could not get a buffer for the reset"
" sequence\n");
@@ -2030,7 +2030,7 @@ static int ftdi_elan_synchronize_reset(struct usb_ftdi *ftdi)
if (retval) {
dev_err(&ftdi->udev->dev, "failed to submit urb containing the "
"reset sequence\n");
- usb_buffer_free(ftdi->udev, i, buf, urb->transfer_dma);
+ usb_free_coherent(ftdi->udev, i, buf, urb->transfer_dma);
usb_free_urb(urb);
return -ENOMEM;
}
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index d3c8523..7dc9d3c 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -239,8 +239,8 @@ static void iowarrior_write_callback(struct urb *urb)
__func__, status);
}
/* free up our allocated buffer */
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
/* tell a waiting writer the interrupt-out-pipe is available again */
atomic_dec(&dev->write_busy);
wake_up_interruptible(&dev->write_wait);
@@ -421,8 +421,8 @@ static ssize_t iowarrior_write(struct file *file,
dbg("%s Unable to allocate urb ", __func__);
goto error_no_urb;
}
- buf = usb_buffer_alloc(dev->udev, dev->report_size,
- GFP_KERNEL, &int_out_urb->transfer_dma);
+ buf = usb_alloc_coherent(dev->udev, dev->report_size,
+ GFP_KERNEL, &int_out_urb->transfer_dma);
if (!buf) {
retval = -ENOMEM;
dbg("%s Unable to allocate buffer ", __func__);
@@ -459,8 +459,8 @@ static ssize_t iowarrior_write(struct file *file,
break;
}
error:
- usb_buffer_free(dev->udev, dev->report_size, buf,
- int_out_urb->transfer_dma);
+ usb_free_coherent(dev->udev, dev->report_size, buf,
+ int_out_urb->transfer_dma);
error_no_buffer:
usb_free_urb(int_out_urb);
error_no_urb:
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index aae95a0..30d9303 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -47,7 +47,6 @@
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/usb.h>
-#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include "sisusb.h"
@@ -2416,14 +2415,11 @@ sisusb_open(struct inode *inode, struct file *file)
struct usb_interface *interface;
int subminor = iminor(inode);
- lock_kernel();
if (!(interface = usb_find_interface(&sisusb_driver, subminor))) {
- unlock_kernel();
return -ENODEV;
}
if (!(sisusb = usb_get_intfdata(interface))) {
- unlock_kernel();
return -ENODEV;
}
@@ -2431,13 +2427,11 @@ sisusb_open(struct inode *inode, struct file *file)
if (!sisusb->present || !sisusb->ready) {
mutex_unlock(&sisusb->lock);
- unlock_kernel();
return -ENODEV;
}
if (sisusb->isopen) {
mutex_unlock(&sisusb->lock);
- unlock_kernel();
return -EBUSY;
}
@@ -2446,13 +2440,11 @@ sisusb_open(struct inode *inode, struct file *file)
if (sisusb_init_gfxdevice(sisusb, 0)) {
mutex_unlock(&sisusb->lock);
dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n");
- unlock_kernel();
return -EIO;
}
} else {
mutex_unlock(&sisusb->lock);
dev_err(&sisusb->sisusb_dev->dev, "Device not attached to USB 2.0 hub\n");
- unlock_kernel();
return -EIO;
}
}
@@ -2465,7 +2457,6 @@ sisusb_open(struct inode *inode, struct file *file)
file->private_data = sisusb;
mutex_unlock(&sisusb->lock);
- unlock_kernel();
return 0;
}
@@ -2974,13 +2965,12 @@ sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct sisusb_usb_data *sisusb;
struct sisusb_info x;
struct sisusb_command y;
- int retval = 0;
+ long retval = 0;
u32 __user *argp = (u32 __user *)arg;
if (!(sisusb = (struct sisusb_usb_data *)file->private_data))
return -ENODEV;
- lock_kernel();
mutex_lock(&sisusb->lock);
/* Sanity check */
@@ -3039,7 +3029,6 @@ sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err_out:
mutex_unlock(&sisusb->lock);
- unlock_kernel();
return retval;
}
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index b271b05..411e605 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -1187,9 +1187,9 @@ sisusbcon_do_font_op(struct sisusb_usb_data *sisusb, int set, int slot,
* And so is the hi_font_mask.
*/
for (i = 0; i < MAX_NR_CONSOLES; i++) {
- struct vc_data *c = vc_cons[i].d;
- if (c && c->vc_sw == &sisusb_con)
- c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
+ struct vc_data *d = vc_cons[i].d;
+ if (d && d->vc_sw == &sisusb_con)
+ d->vc_hi_font_mask = ch512 ? 0x0800 : 0;
}
sisusb->current_font_512 = ch512;
@@ -1249,7 +1249,7 @@ sisusbcon_do_font_op(struct sisusb_usb_data *sisusb, int set, int slot,
mutex_unlock(&sisusb->lock);
if (dorecalc && c) {
- int i, rows = c->vc_scan_lines / fh;
+ int rows = c->vc_scan_lines / fh;
/* Now adjust our consoles' size */
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 90aede9..7828c76 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -205,8 +205,8 @@ static void lcd_write_bulk_callback(struct urb *urb)
}
/* free up our allocated buffer */
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
up(&dev->limit_sem);
}
@@ -234,7 +234,7 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer, siz
goto err_no_buf;
}
- buf = usb_buffer_alloc(dev->udev, count, GFP_KERNEL, &urb->transfer_dma);
+ buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL, &urb->transfer_dma);
if (!buf) {
retval = -ENOMEM;
goto error;
@@ -268,7 +268,7 @@ exit:
error_unanchor:
usb_unanchor_urb(urb);
error:
- usb_buffer_free(dev->udev, count, buf, urb->transfer_dma);
+ usb_free_coherent(dev->udev, count, buf, urb->transfer_dma);
usb_free_urb(urb);
err_no_buf:
up(&dev->limit_sem);
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index a21cce6..16dffe9 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -202,7 +202,7 @@ static struct urb *simple_alloc_urb (
urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
if (usb_pipein (pipe))
urb->transfer_flags |= URB_SHORT_NOT_OK;
- urb->transfer_buffer = usb_buffer_alloc (udev, bytes, GFP_KERNEL,
+ urb->transfer_buffer = usb_alloc_coherent (udev, bytes, GFP_KERNEL,
&urb->transfer_dma);
if (!urb->transfer_buffer) {
usb_free_urb (urb);
@@ -272,8 +272,8 @@ static inline int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
static void simple_free_urb (struct urb *urb)
{
- usb_buffer_free (urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
usb_free_urb (urb);
}
@@ -977,15 +977,13 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
if (!u)
goto cleanup;
- reqp = usb_buffer_alloc (udev, sizeof *reqp, GFP_KERNEL,
- &u->setup_dma);
+ reqp = kmalloc(sizeof *reqp, GFP_KERNEL);
if (!reqp)
goto cleanup;
reqp->setup = req;
reqp->number = i % NUM_SUBCASES;
reqp->expected = expected;
u->setup_packet = (char *) &reqp->setup;
- u->transfer_flags |= URB_NO_SETUP_DMA_MAP;
u->context = &context;
u->complete = ctrl_complete;
@@ -1017,10 +1015,7 @@ cleanup:
if (!urb [i])
continue;
urb [i]->dev = udev;
- if (urb [i]->setup_packet)
- usb_buffer_free (udev, sizeof (struct usb_ctrlrequest),
- urb [i]->setup_packet,
- urb [i]->setup_dma);
+ kfree(urb[i]->setup_packet);
simple_free_urb (urb [i]);
}
kfree (urb);
@@ -1421,7 +1416,7 @@ static struct urb *iso_alloc_urb (
urb->number_of_packets = packets;
urb->transfer_buffer_length = bytes;
- urb->transfer_buffer = usb_buffer_alloc (udev, bytes, GFP_KERNEL,
+ urb->transfer_buffer = usb_alloc_coherent (udev, bytes, GFP_KERNEL,
&urb->transfer_dma);
if (!urb->transfer_buffer) {
usb_free_urb (urb);
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index ddf7f9a..e7fa364 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -416,13 +416,13 @@ static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
} else {
/* If IOMMU coalescing occurred, we cannot trust sg_page */
- if (urb->sg->nents != urb->num_sgs) {
+ if (urb->transfer_flags & URB_DMA_SG_COMBINED) {
*flag = 'D';
return length;
}
/* Copy up to the first non-addressable segment */
- for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) {
+ for_each_sg(urb->sg, sg, urb->num_sgs, i) {
if (length == 0 || PageHighMem(sg_page(sg)))
break;
this_len = min_t(unsigned int, sg->length, length);
diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
index e4af18b..812dc28 100644
--- a/drivers/usb/mon/mon_main.c
+++ b/drivers/usb/mon/mon_main.c
@@ -9,12 +9,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include "usb_mon.h"
-#include "../core/hcd.h"
+
static void mon_stop(struct mon_bus *mbus);
static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus);
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 4d0be13..a545d65 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -159,11 +159,9 @@ static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
if (src == NULL)
return 'Z'; /* '0' would be not as pretty. */
} else {
- struct scatterlist *sg = urb->sg->sg;
+ struct scatterlist *sg = urb->sg;
- /* If IOMMU coalescing occurred, we cannot trust sg_page */
- if (urb->sg->nents != urb->num_sgs ||
- PageHighMem(sg_page(sg)))
+ if (PageHighMem(sg_page(sg)))
return 'D';
/* For the text interface we copy only the first sg buffer */
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 07fe490..cfd38ed 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -38,6 +38,7 @@ config USB_MUSB_SOC
default y if ARCH_DAVINCI
default y if ARCH_OMAP2430
default y if ARCH_OMAP3
+ default y if ARCH_OMAP4
default y if (BF54x && !BF544)
default y if (BF52x && !BF522 && !BF523)
@@ -50,6 +51,9 @@ comment "OMAP 243x high speed USB support"
comment "OMAP 343x high speed USB support"
depends on USB_MUSB_HDRC && ARCH_OMAP3
+comment "OMAP 44xx high speed USB support"
+ depends on USB_MUSB_HDRC && ARCH_OMAP4
+
comment "Blackfin high speed USB Support"
depends on USB_MUSB_HDRC && ((BF54x && !BF544) || (BF52x && !BF522 && !BF523))
@@ -153,7 +157,7 @@ config MUSB_PIO_ONLY
config USB_INVENTRA_DMA
bool
depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
- default ARCH_OMAP2430 || ARCH_OMAP3 || BLACKFIN
+ default ARCH_OMAP2430 || ARCH_OMAP3 || BLACKFIN || ARCH_OMAP4
help
Enable DMA transfers using Mentor's engine.
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index 3a485da..9705f71 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -22,6 +22,10 @@ ifeq ($(CONFIG_ARCH_OMAP3430),y)
musb_hdrc-objs += omap2430.o
endif
+ifeq ($(CONFIG_ARCH_OMAP4),y)
+ musb_hdrc-objs += omap2430.o
+endif
+
ifeq ($(CONFIG_BF54x),y)
musb_hdrc-objs += blackfin.o
endif
@@ -38,6 +42,10 @@ ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y)
musb_hdrc-objs += musb_virthub.o musb_host.o
endif
+ifeq ($(CONFIG_DEBUG_FS),y)
+ musb_hdrc-objs += musb_debugfs.o
+endif
+
# the kconfig must guarantee that only one of the
# possible I/O schemes will be enabled at a time ...
# PIO only, or DMA (several potential schemes).
@@ -64,12 +72,6 @@ endif
################################################################################
-# FIXME remove all these extra "-DMUSB_* things, stick to CONFIG_*
-
-ifeq ($(CONFIG_USB_INVENTRA_MUSB_HAS_AHB_ID),y)
- EXTRA_CFLAGS += -DMUSB_AHB_ID
-endif
-
# Debugging
ifeq ($(CONFIG_USB_MUSB_DEBUG),y)
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index ec8d324..b611420 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -170,6 +170,13 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci)
retval = musb_interrupt(musb);
}
+	/* Start sampling the ID pin when the plug is removed from MUSB */
+ if (is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE
+ || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
+ mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
+ musb->a_wait_bcon = TIMER_DELAY;
+ }
+
spin_unlock_irqrestore(&musb->lock, flags);
return retval;
@@ -180,6 +187,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
struct musb *musb = (void *)_musb;
unsigned long flags;
u16 val;
+ static u8 toggle;
spin_lock_irqsave(&musb->lock, flags);
switch (musb->xceiv->state) {
@@ -187,10 +195,44 @@ static void musb_conn_timer_handler(unsigned long _musb)
case OTG_STATE_A_WAIT_BCON:
/* Start a new session */
val = musb_readw(musb->mregs, MUSB_DEVCTL);
+ val &= ~MUSB_DEVCTL_SESSION;
+ musb_writew(musb->mregs, MUSB_DEVCTL, val);
val |= MUSB_DEVCTL_SESSION;
musb_writew(musb->mregs, MUSB_DEVCTL, val);
+ /* Check if musb is host or peripheral. */
+ val = musb_readw(musb->mregs, MUSB_DEVCTL);
+
+ if (!(val & MUSB_DEVCTL_BDEVICE)) {
+ gpio_set_value(musb->config->gpio_vrsel, 1);
+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+ } else {
+ gpio_set_value(musb->config->gpio_vrsel, 0);
+ /* Ignore VBUSERROR and SUSPEND IRQ */
+ val = musb_readb(musb->mregs, MUSB_INTRUSBE);
+ val &= ~MUSB_INTR_VBUSERROR;
+ musb_writeb(musb->mregs, MUSB_INTRUSBE, val);
+ val = MUSB_INTR_SUSPEND | MUSB_INTR_VBUSERROR;
+ musb_writeb(musb->mregs, MUSB_INTRUSB, val);
+ if (is_otg_enabled(musb))
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ else
+ musb_writeb(musb->mregs, MUSB_POWER, MUSB_POWER_HSENAB);
+ }
+ mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
+ break;
+ case OTG_STATE_B_IDLE:
+
+ if (!is_peripheral_enabled(musb))
+ break;
+		/* Start a new session.  MUSB seems to need some time to
+		 * recognize the type of plug that was inserted.
+		 */
+ val = musb_readw(musb->mregs, MUSB_DEVCTL);
+ val |= MUSB_DEVCTL_SESSION;
+ musb_writew(musb->mregs, MUSB_DEVCTL, val);
val = musb_readw(musb->mregs, MUSB_DEVCTL);
+
if (!(val & MUSB_DEVCTL_BDEVICE)) {
gpio_set_value(musb->config->gpio_vrsel, 1);
musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
@@ -205,12 +247,27 @@ static void musb_conn_timer_handler(unsigned long _musb)
val = MUSB_INTR_SUSPEND | MUSB_INTR_VBUSERROR;
musb_writeb(musb->mregs, MUSB_INTRUSB, val);
- val = MUSB_POWER_HSENAB;
- musb_writeb(musb->mregs, MUSB_POWER, val);
+			/* Toggle the Soft Conn bit, so that we can respond to
+			 * the insertion of either an A-plug or a B-plug.
+			 */
+ if (toggle) {
+ val = musb_readb(musb->mregs, MUSB_POWER);
+ val &= ~MUSB_POWER_SOFTCONN;
+ musb_writeb(musb->mregs, MUSB_POWER, val);
+ toggle = 0;
+ } else {
+ val = musb_readb(musb->mregs, MUSB_POWER);
+ val |= MUSB_POWER_SOFTCONN;
+ musb_writeb(musb->mregs, MUSB_POWER, val);
+ toggle = 1;
+ }
+			/* The delay time is set to 1/4 second by default;
+			 * shorten it if faster A-plug detection is needed
+			 * in OTG mode.
+			 */
+ mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY / 4);
}
- mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
break;
-
default:
DBG(1, "%s state not handled\n", otg_state_string(musb));
break;
@@ -222,7 +279,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
void musb_platform_enable(struct musb *musb)
{
- if (is_host_enabled(musb)) {
+ if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
musb->a_wait_bcon = TIMER_DELAY;
}
@@ -232,16 +289,12 @@ void musb_platform_disable(struct musb *musb)
{
}
-static void bfin_vbus_power(struct musb *musb, int is_on, int sleeping)
-{
-}
-
static void bfin_set_vbus(struct musb *musb, int is_on)
{
- if (is_on)
- gpio_set_value(musb->config->gpio_vrsel, 1);
- else
- gpio_set_value(musb->config->gpio_vrsel, 0);
+ int value = musb->config->gpio_vrsel_active;
+ if (!is_on)
+ value = !value;
+ gpio_set_value(musb->config->gpio_vrsel, value);
DBG(1, "VBUS %s, devctl %02x "
/* otg %3x conf %08x prcm %08x */ "\n",
@@ -256,7 +309,7 @@ static int bfin_set_power(struct otg_transceiver *x, unsigned mA)
void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
{
- if (is_host_enabled(musb))
+ if (!is_otg_enabled(musb) && is_host_enabled(musb))
mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
}
@@ -270,7 +323,7 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
return -EIO;
}
-int __init musb_platform_init(struct musb *musb)
+int __init musb_platform_init(struct musb *musb, void *board_data)
{
/*
@@ -339,23 +392,10 @@ int __init musb_platform_init(struct musb *musb)
return 0;
}
-int musb_platform_suspend(struct musb *musb)
-{
- return 0;
-}
-
-int musb_platform_resume(struct musb *musb)
-{
- return 0;
-}
-
-
int musb_platform_exit(struct musb *musb)
{
- bfin_vbus_power(musb, 0 /*off*/, 1);
gpio_free(musb->config->gpio_vrsel);
- musb_platform_suspend(musb);
return 0;
}
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index ce2e16f..57624361 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -376,7 +376,7 @@ int musb_platform_set_mode(struct musb *musb, u8 mode)
return -EIO;
}
-int __init musb_platform_init(struct musb *musb)
+int __init musb_platform_init(struct musb *musb, void *board_data)
{
void __iomem *tibase = musb->ctrl_base;
u32 revision;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 705cc4a..fad70bc 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -149,6 +149,87 @@ static inline struct musb *dev_to_musb(struct device *dev)
/*-------------------------------------------------------------------------*/
+#ifndef CONFIG_BLACKFIN
+static int musb_ulpi_read(struct otg_transceiver *otg, u32 offset)
+{
+ void __iomem *addr = otg->io_priv;
+ int i = 0;
+ u8 r;
+ u8 power;
+
+ /* Make sure the transceiver is not in low power mode */
+ power = musb_readb(addr, MUSB_POWER);
+ power &= ~MUSB_POWER_SUSPENDM;
+ musb_writeb(addr, MUSB_POWER, power);
+
+ /* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
+ * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
+ */
+
+ musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
+ musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
+ MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
+
+ while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
+ & MUSB_ULPI_REG_CMPLT)) {
+ i++;
+ if (i == 10000) {
+ DBG(3, "ULPI read timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ }
+ r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
+ r &= ~MUSB_ULPI_REG_CMPLT;
+ musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
+
+ return musb_readb(addr, MUSB_ULPI_REG_DATA);
+}
+
+static int musb_ulpi_write(struct otg_transceiver *otg,
+ u32 offset, u32 data)
+{
+ void __iomem *addr = otg->io_priv;
+ int i = 0;
+ u8 r = 0;
+ u8 power;
+
+ /* Make sure the transceiver is not in low power mode */
+ power = musb_readb(addr, MUSB_POWER);
+ power &= ~MUSB_POWER_SUSPENDM;
+ musb_writeb(addr, MUSB_POWER, power);
+
+ musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
+ musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data);
+ musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
+
+ while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
+ & MUSB_ULPI_REG_CMPLT)) {
+ i++;
+ if (i == 10000) {
+ DBG(3, "ULPI write timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
+ r &= ~MUSB_ULPI_REG_CMPLT;
+ musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
+
+ return 0;
+}
+#else
+#define musb_ulpi_read(a, b) NULL
+#define musb_ulpi_write(a, b, c) NULL
+#endif
+
+static struct otg_io_access_ops musb_ulpi_access = {
+ .read = musb_ulpi_read,
+ .write = musb_ulpi_write,
+};
+
+/*-------------------------------------------------------------------------*/
+
#if !defined(CONFIG_USB_TUSB6010) && !defined(CONFIG_BLACKFIN)
/*
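
Once the probe path later in this file points musb->xceiv->io_ops at musb_ulpi_access, PHY registers can be reached through the transceiver rather than by poking the viewport registers directly. A minimal, hedged sketch follows; the 0x00 offset is the standard ULPI Vendor ID Low register, and the otg pointer is assumed to be already set up.

/* Sketch: read a ULPI register through the accessor ops wired up above.
 * Illustration only; a negative return propagates e.g. -ETIMEDOUT from
 * musb_ulpi_read().
 */
static int example_read_ulpi_vendor_low(struct otg_transceiver *otg)
{
	int val = otg->io_ops->read(otg, 0x00);	/* ULPI Vendor ID Low */

	if (val < 0)
		return val;
	return val & 0xff;
}
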
@@ -353,8 +434,7 @@ void musb_hnp_stop(struct musb *musb)
* which cause occasional OPT A "Did not receive reset after connect"
* errors.
*/
- musb->port1_status &=
- ~(1 << USB_PORT_FEAT_C_CONNECTION);
+ musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
}
#endif
@@ -530,8 +610,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb_writeb(mbase, MUSB_DEVCTL, devctl);
} else {
musb->port1_status |=
- (1 << USB_PORT_FEAT_OVER_CURRENT)
- | (1 << USB_PORT_FEAT_C_OVER_CURRENT);
+ USB_PORT_STAT_OVERCURRENT
+ | (USB_PORT_STAT_C_OVERCURRENT << 16);
}
break;
default:
@@ -986,7 +1066,8 @@ static void musb_shutdown(struct platform_device *pdev)
* more than selecting one of a bunch of predefined configurations.
*/
#if defined(CONFIG_USB_TUSB6010) || \
- defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
+ defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
+ || defined(CONFIG_ARCH_OMAP4)
static ushort __initdata fifo_mode = 4;
#else
static ushort __initdata fifo_mode = 2;
@@ -996,24 +1077,13 @@ static ushort __initdata fifo_mode = 2;
module_param(fifo_mode, ushort, 0);
MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
-
-enum fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed));
-enum buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed));
-
-struct fifo_cfg {
- u8 hw_ep_num;
- enum fifo_style style;
- enum buf_mode mode;
- u16 maxpacket;
-};
-
/*
* tables defining fifo_mode values. define more if you like.
* for host side, make sure both halves of ep1 are set up.
*/
/* mode 0 - fits in 2KB */
-static struct fifo_cfg __initdata mode_0_cfg[] = {
+static struct musb_fifo_cfg __initdata mode_0_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
@@ -1022,7 +1092,7 @@ static struct fifo_cfg __initdata mode_0_cfg[] = {
};
/* mode 1 - fits in 4KB */
-static struct fifo_cfg __initdata mode_1_cfg[] = {
+static struct musb_fifo_cfg __initdata mode_1_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
@@ -1031,7 +1101,7 @@ static struct fifo_cfg __initdata mode_1_cfg[] = {
};
/* mode 2 - fits in 4KB */
-static struct fifo_cfg __initdata mode_2_cfg[] = {
+static struct musb_fifo_cfg __initdata mode_2_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1041,7 +1111,7 @@ static struct fifo_cfg __initdata mode_2_cfg[] = {
};
/* mode 3 - fits in 4KB */
-static struct fifo_cfg __initdata mode_3_cfg[] = {
+static struct musb_fifo_cfg __initdata mode_3_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1051,7 +1121,7 @@ static struct fifo_cfg __initdata mode_3_cfg[] = {
};
/* mode 4 - fits in 16KB */
-static struct fifo_cfg __initdata mode_4_cfg[] = {
+static struct musb_fifo_cfg __initdata mode_4_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1082,7 +1152,7 @@ static struct fifo_cfg __initdata mode_4_cfg[] = {
};
/* mode 5 - fits in 8KB */
-static struct fifo_cfg __initdata mode_5_cfg[] = {
+static struct musb_fifo_cfg __initdata mode_5_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1120,7 +1190,7 @@ static struct fifo_cfg __initdata mode_5_cfg[] = {
*/
static int __init
fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
- const struct fifo_cfg *cfg, u16 offset)
+ const struct musb_fifo_cfg *cfg, u16 offset)
{
void __iomem *mbase = musb->mregs;
int size = 0;
@@ -1191,17 +1261,23 @@ fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
}
-static struct fifo_cfg __initdata ep0_cfg = {
+static struct musb_fifo_cfg __initdata ep0_cfg = {
.style = FIFO_RXTX, .maxpacket = 64,
};
static int __init ep_config_from_table(struct musb *musb)
{
- const struct fifo_cfg *cfg;
+ const struct musb_fifo_cfg *cfg;
unsigned i, n;
int offset;
struct musb_hw_ep *hw_ep = musb->endpoints;
+ if (musb->config->fifo_cfg) {
+ cfg = musb->config->fifo_cfg;
+ n = musb->config->fifo_cfg_size;
+ goto done;
+ }
+
switch (fifo_mode) {
default:
fifo_mode = 0;
@@ -1236,6 +1312,7 @@ static int __init ep_config_from_table(struct musb *musb)
musb_driver_name, fifo_mode);
+done:
offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
/* assert(offset > 0) */
@@ -1461,7 +1538,8 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) || \
+ defined(CONFIG_ARCH_OMAP4)
static irqreturn_t generic_interrupt(int irq, void *__hci)
{
@@ -1948,7 +2026,7 @@ bad_config:
* isp1504, non-OTG, etc) mostly hooking up through ULPI.
*/
musb->isr = generic_interrupt;
- status = musb_platform_init(musb);
+ status = musb_platform_init(musb, plat->board_data);
if (status < 0)
goto fail2;
@@ -1957,6 +2035,11 @@ bad_config:
goto fail3;
}
+ if (!musb->xceiv->io_ops) {
+ musb->xceiv->io_priv = musb->mregs;
+ musb->xceiv->io_ops = &musb_ulpi_access;
+ }
+
#ifndef CONFIG_MUSB_PIO_ONLY
if (use_dma && dev->dma_mask) {
struct dma_controller *c;
@@ -2057,10 +2140,14 @@ bad_config:
if (status < 0)
goto fail3;
+ status = musb_init_debugfs(musb);
+ if (status < 0)
+ goto fail4;
+
#ifdef CONFIG_SYSFS
status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
if (status)
- goto fail4;
+ goto fail5;
#endif
dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n",
@@ -2077,6 +2164,9 @@ bad_config:
return 0;
+fail5:
+ musb_exit_debugfs(musb);
+
fail4:
if (!is_otg_enabled(musb) && is_host_enabled(musb))
usb_remove_hcd(musb_to_hcd(musb));
@@ -2153,6 +2243,7 @@ static int __exit musb_remove(struct platform_device *pdev)
* - Peripheral mode: peripheral is deactivated (or never-activated)
* - OTG mode: both roles are deactivated (or never-activated)
*/
+ musb_exit_debugfs(musb);
musb_shutdown(pdev);
#ifdef CONFIG_USB_MUSB_HDRC_HCD
if (musb->board_mode == MUSB_HOST)
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index ac17b00..b22d02d 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -69,7 +69,7 @@ struct musb_ep;
#include "musb_regs.h"
#include "musb_gadget.h"
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "musb_host.h"
@@ -213,7 +213,8 @@ enum musb_g_ep0_state {
*/
#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
- || defined(CONFIG_ARCH_OMAP3430) || defined(CONFIG_BLACKFIN)
+ || defined(CONFIG_ARCH_OMAP3430) || defined(CONFIG_BLACKFIN) \
+ || defined(CONFIG_ARCH_OMAP4)
/* REVISIT indexed access seemed to
* misbehave (on DaVinci) for at least peripheral IN ...
*/
@@ -596,7 +597,8 @@ extern void musb_hnp_stop(struct musb *musb);
extern int musb_platform_set_mode(struct musb *musb, u8 musb_mode);
#if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN) || \
- defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
+ defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
+ defined(CONFIG_ARCH_OMAP4)
extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout);
#else
#define musb_platform_try_idle(x, y) do {} while (0)
@@ -608,7 +610,7 @@ extern int musb_platform_get_vbus_status(struct musb *musb);
#define musb_platform_get_vbus_status(x) 0
#endif
-extern int __init musb_platform_init(struct musb *musb);
+extern int __init musb_platform_init(struct musb *musb, void *board_data);
extern int musb_platform_exit(struct musb *musb);
#endif /* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
index 9fc1db4..d73afdb 100644
--- a/drivers/usb/musb/musb_debug.h
+++ b/drivers/usb/musb/musb_debug.h
@@ -59,4 +59,17 @@ static inline int _dbg_level(unsigned l)
extern const char *otg_state_string(struct musb *);
+#ifdef CONFIG_DEBUG_FS
+extern int musb_init_debugfs(struct musb *musb);
+extern void musb_exit_debugfs(struct musb *musb);
+#else
+static inline int musb_init_debugfs(struct musb *musb)
+{
+ return 0;
+}
+static inline void musb_exit_debugfs(struct musb *musb)
+{
+}
+#endif
+
#endif /* __MUSB_LINUX_DEBUG_H__ */
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
new file mode 100644
index 0000000..bba76af
--- /dev/null
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -0,0 +1,294 @@
+/*
+ * MUSB OTG driver debugfs support
+ *
+ * Copyright 2010 Nokia Corporation
+ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#ifdef CONFIG_ARM
+#include <mach/hardware.h>
+#include <mach/memory.h>
+#include <asm/mach-types.h>
+#endif
+
+#include <asm/uaccess.h>
+
+#include "musb_core.h"
+#include "musb_debug.h"
+
+#ifdef CONFIG_ARCH_DAVINCI
+#include "davinci.h"
+#endif
+
+struct musb_register_map {
+ char *name;
+ unsigned offset;
+ unsigned size;
+};
+
+static const struct musb_register_map musb_regmap[] = {
+ { "FAddr", 0x00, 8 },
+ { "Power", 0x01, 8 },
+ { "Frame", 0x0c, 16 },
+ { "Index", 0x0e, 8 },
+ { "Testmode", 0x0f, 8 },
+ { "TxMaxPp", 0x10, 16 },
+ { "TxCSRp", 0x12, 16 },
+ { "RxMaxPp", 0x14, 16 },
+ { "RxCSR", 0x16, 16 },
+ { "RxCount", 0x18, 16 },
+ { "ConfigData", 0x1f, 8 },
+ { "DevCtl", 0x60, 8 },
+ { "MISC", 0x61, 8 },
+ { "TxFIFOsz", 0x62, 8 },
+ { "RxFIFOsz", 0x63, 8 },
+ { "TxFIFOadd", 0x64, 16 },
+ { "RxFIFOadd", 0x66, 16 },
+ { "VControl", 0x68, 32 },
+ { "HWVers", 0x6C, 16 },
+ { "EPInfo", 0x78, 8 },
+ { "RAMInfo", 0x79, 8 },
+ { "LinkInfo", 0x7A, 8 },
+ { "VPLen", 0x7B, 8 },
+ { "HS_EOF1", 0x7C, 8 },
+ { "FS_EOF1", 0x7D, 8 },
+ { "LS_EOF1", 0x7E, 8 },
+ { "SOFT_RST", 0x7F, 8 },
+ { "DMA_CNTLch0", 0x204, 16 },
+ { "DMA_ADDRch0", 0x208, 16 },
+ { "DMA_COUNTch0", 0x20C, 16 },
+ { "DMA_CNTLch1", 0x214, 16 },
+ { "DMA_ADDRch1", 0x218, 16 },
+ { "DMA_COUNTch1", 0x21C, 16 },
+ { "DMA_CNTLch2", 0x224, 16 },
+ { "DMA_ADDRch2", 0x228, 16 },
+ { "DMA_COUNTch2", 0x22C, 16 },
+ { "DMA_CNTLch3", 0x234, 16 },
+ { "DMA_ADDRch3", 0x238, 16 },
+ { "DMA_COUNTch3", 0x23C, 16 },
+ { "DMA_CNTLch4", 0x244, 16 },
+ { "DMA_ADDRch4", 0x248, 16 },
+ { "DMA_COUNTch4", 0x24C, 16 },
+ { "DMA_CNTLch5", 0x254, 16 },
+ { "DMA_ADDRch5", 0x258, 16 },
+ { "DMA_COUNTch5", 0x25C, 16 },
+ { "DMA_CNTLch6", 0x264, 16 },
+ { "DMA_ADDRch6", 0x268, 16 },
+ { "DMA_COUNTch6", 0x26C, 16 },
+ { "DMA_CNTLch7", 0x274, 16 },
+ { "DMA_ADDRch7", 0x278, 16 },
+ { "DMA_COUNTch7", 0x27C, 16 },
+ { } /* Terminating Entry */
+};
+
+static struct dentry *musb_debugfs_root;
+
+static int musb_regdump_show(struct seq_file *s, void *unused)
+{
+ struct musb *musb = s->private;
+ unsigned i;
+
+ seq_printf(s, "MUSB (M)HDRC Register Dump\n");
+
+ for (i = 0; i < ARRAY_SIZE(musb_regmap); i++) {
+ switch (musb_regmap[i].size) {
+ case 8:
+ seq_printf(s, "%-12s: %02x\n", musb_regmap[i].name,
+ musb_readb(musb->mregs, musb_regmap[i].offset));
+ break;
+ case 16:
+ seq_printf(s, "%-12s: %04x\n", musb_regmap[i].name,
+ musb_readw(musb->mregs, musb_regmap[i].offset));
+ break;
+ case 32:
+ seq_printf(s, "%-12s: %08x\n", musb_regmap[i].name,
+ musb_readl(musb->mregs, musb_regmap[i].offset));
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int musb_regdump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, musb_regdump_show, inode->i_private);
+}
+
+static int musb_test_mode_show(struct seq_file *s, void *unused)
+{
+ struct musb *musb = s->private;
+ unsigned test;
+
+ test = musb_readb(musb->mregs, MUSB_TESTMODE);
+
+ if (test & MUSB_TEST_FORCE_HOST)
+ seq_printf(s, "force host\n");
+
+ if (test & MUSB_TEST_FIFO_ACCESS)
+ seq_printf(s, "fifo access\n");
+
+ if (test & MUSB_TEST_FORCE_FS)
+ seq_printf(s, "force full-speed\n");
+
+ if (test & MUSB_TEST_FORCE_HS)
+ seq_printf(s, "force high-speed\n");
+
+ if (test & MUSB_TEST_PACKET)
+ seq_printf(s, "test packet\n");
+
+ if (test & MUSB_TEST_K)
+ seq_printf(s, "test K\n");
+
+ if (test & MUSB_TEST_J)
+ seq_printf(s, "test J\n");
+
+ if (test & MUSB_TEST_SE0_NAK)
+ seq_printf(s, "test SE0 NAK\n");
+
+ return 0;
+}
+
+static const struct file_operations musb_regdump_fops = {
+ .open = musb_regdump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int musb_test_mode_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return single_open(file, musb_test_mode_show, inode->i_private);
+}
+
+static ssize_t musb_test_mode_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct musb *musb = file->private_data;
+ u8 test = 0;
+ char buf[18];
+
+ memset(buf, 0x00, sizeof(buf));
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "force host", 9))
+ test = MUSB_TEST_FORCE_HOST;
+
+ if (!strncmp(buf, "fifo access", 11))
+ test = MUSB_TEST_FIFO_ACCESS;
+
+ if (!strncmp(buf, "force full-speed", 15))
+ test = MUSB_TEST_FORCE_FS;
+
+ if (!strncmp(buf, "force high-speed", 15))
+ test = MUSB_TEST_FORCE_HS;
+
+ if (!strncmp(buf, "test packet", 10)) {
+ test = MUSB_TEST_PACKET;
+ musb_load_testpacket(musb);
+ }
+
+ if (!strncmp(buf, "test K", 6))
+ test = MUSB_TEST_K;
+
+ if (!strncmp(buf, "test J", 6))
+ test = MUSB_TEST_J;
+
+ if (!strncmp(buf, "test SE0 NAK", 12))
+ test = MUSB_TEST_SE0_NAK;
+
+ musb_writeb(musb->mregs, MUSB_TESTMODE, test);
+
+ return count;
+}
+
+static const struct file_operations musb_test_mode_fops = {
+ .open = musb_test_mode_open,
+ .write = musb_test_mode_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int __init musb_init_debugfs(struct musb *musb)
+{
+ struct dentry *root;
+ struct dentry *file;
+ int ret;
+
+ root = debugfs_create_dir("musb", NULL);
+ if (IS_ERR(root)) {
+ ret = PTR_ERR(root);
+ goto err0;
+ }
+
+ file = debugfs_create_file("regdump", S_IRUGO, root, musb,
+ &musb_regdump_fops);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err1;
+ }
+
+ file = debugfs_create_file("testmode", S_IRUGO | S_IWUSR,
+ root, musb, &musb_test_mode_fops);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err1;
+ }
+
+ musb_debugfs_root = root;
+
+ return 0;
+
+err1:
+ debugfs_remove_recursive(root);
+
+err0:
+ return ret;
+}
+
+void /* __init_or_exit */ musb_exit_debugfs(struct musb *musb)
+{
+ debugfs_remove_recursive(musb_debugfs_root);
+}
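
The debugfs support added above registers a "musb" directory containing two files: "regdump", a read-only dump generated by musb_regdump_show(), and "testmode", which reads and writes the TESTMODE register. A minimal user-space sketch of how those files might be exercised; the /sys/kernel/debug mount point and the error handling are assumptions of this sketch, not something the patch itself provides:

/* Hedged user-space sketch: drives the debugfs files created above.
 * Assumes debugfs is mounted at /sys/kernel/debug. */
#include <stdio.h>

int main(void)
{
	const char *testmode = "/sys/kernel/debug/musb/testmode";
	const char *regdump  = "/sys/kernel/debug/musb/regdump";
	char line[128];
	FILE *f;

	/* Request "test packet" mode; the driver loads the test packet and
	 * sets MUSB_TEST_PACKET in the TESTMODE register. */
	f = fopen(testmode, "w");
	if (!f) {
		perror("testmode");
		return 1;
	}
	fputs("test packet", f);
	fclose(f);

	/* Read back the register dump produced by musb_regdump_show(). */
	f = fopen(regdump, "r");
	if (!f) {
		perror("regdump");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}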
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 53d0645..21b9788 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -351,6 +351,31 @@ __acquires(musb->lock)
musb->test_mode_nr =
MUSB_TEST_PACKET;
break;
+
+ case 0xc0:
+ /* TEST_FORCE_HS */
+ pr_debug("TEST_FORCE_HS\n");
+ musb->test_mode_nr =
+ MUSB_TEST_FORCE_HS;
+ break;
+ case 0xc1:
+ /* TEST_FORCE_FS */
+ pr_debug("TEST_FORCE_FS\n");
+ musb->test_mode_nr =
+ MUSB_TEST_FORCE_FS;
+ break;
+ case 0xc2:
+ /* TEST_FIFO_ACCESS */
+ pr_debug("TEST_FIFO_ACCESS\n");
+ musb->test_mode_nr =
+ MUSB_TEST_FIFO_ACCESS;
+ break;
+ case 0xc3:
+ /* TEST_FORCE_HOST */
+ pr_debug("TEST_FORCE_HOST\n");
+ musb->test_mode_nr =
+ MUSB_TEST_FORCE_HOST;
+ break;
default:
goto stall;
}
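
The four new cases above accept non-standard test selectors (0xc0 through 0xc3) carried in the high byte of wIndex of a SET_FEATURE(TEST_MODE) control request and map them onto the MUSB_TEST_* bits programmed later. A hedged host-side sketch of issuing such a request with libusb-1.0; the vendor and product IDs are placeholders for whatever gadget runs on the MUSB controller, and 0xc3 selects the TEST_FORCE_HOST case handled above:

/* Illustrative only: sends SET_FEATURE(TEST_MODE) with a vendor test selector. */
#include <stdio.h>
#include <libusb-1.0/libusb.h>

int main(void)
{
	libusb_context *ctx;
	libusb_device_handle *dev;
	int ret;

	if (libusb_init(&ctx))
		return 1;

	/* Placeholder IDs; substitute the real gadget's VID/PID. */
	dev = libusb_open_device_with_vid_pid(ctx, 0x1234, 0x5678);
	if (!dev) {
		fprintf(stderr, "device not found\n");
		libusb_exit(ctx);
		return 1;
	}

	/* bmRequestType 0x00 (standard, device), bRequest 0x03 (SET_FEATURE),
	 * wValue 2 (TEST_MODE), test selector in the high byte of wIndex. */
	ret = libusb_control_transfer(dev, 0x00, 0x03, 0x0002, 0xc3 << 8,
				      NULL, 0, 1000);
	if (ret < 0)
		fprintf(stderr, "control transfer failed: %d\n", ret);

	libusb_close(dev);
	libusb_exit(ctx);
	return 0;
}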
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index fa55aac..2442675 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -75,6 +75,10 @@
/* MUSB ULPI VBUSCONTROL */
#define MUSB_ULPI_USE_EXTVBUS 0x01
#define MUSB_ULPI_USE_EXTVBUSIND 0x02
+/* ULPI_REG_CONTROL */
+#define MUSB_ULPI_REG_REQ (1 << 0)
+#define MUSB_ULPI_REG_CMPLT (1 << 1)
+#define MUSB_ULPI_RDN_WR (1 << 2)
/* TESTMODE */
#define MUSB_TEST_FORCE_HOST 0x80
@@ -251,6 +255,12 @@
/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
#define MUSB_HWVERS 0x6C /* 8 bit */
#define MUSB_ULPI_BUSCONTROL 0x70 /* 8 bit */
+#define MUSB_ULPI_INT_MASK 0x72 /* 8 bit */
+#define MUSB_ULPI_INT_SRC 0x73 /* 8 bit */
+#define MUSB_ULPI_REG_DATA 0x74 /* 8 bit */
+#define MUSB_ULPI_REG_ADDR 0x75 /* 8 bit */
+#define MUSB_ULPI_REG_CONTROL 0x76 /* 8 bit */
+#define MUSB_ULPI_RAW_DATA 0x77 /* 8 bit */
#define MUSB_EPINFO 0x78 /* 8 bit */
#define MUSB_RAMINFO 0x79 /* 8 bit */
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 7775e1c..92e85e0 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -183,8 +183,8 @@ static void musb_port_reset(struct musb *musb, bool do_reset)
void musb_root_disconnect(struct musb *musb)
{
- musb->port1_status = (1 << USB_PORT_FEAT_POWER)
- | (1 << USB_PORT_FEAT_C_CONNECTION);
+ musb->port1_status = USB_PORT_STAT_POWER
+ | (USB_PORT_STAT_C_CONNECTION << 16);
usb_hcd_poll_rh_status(musb_to_hcd(musb));
musb->is_active = 0;
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index 613f95a..f763d62 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -102,26 +102,16 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase,
static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
{
- u32 count = musb_readw(mbase,
+ return musb_readl(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
-
- count = count << 16;
-
- count |= musb_readw(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
-
- return count;
}
static inline void musb_write_hsdma_count(void __iomem *mbase,
u8 bchannel, u32 len)
{
- musb_writew(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW),
- ((u16)((u32) len & 0xFFFF)));
- musb_writew(mbase,
+ musb_writel(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH),
- ((u16)(((u32) len >> 16) & 0xFFFF)));
+ len);
}
#endif /* CONFIG_BLACKFIN */
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 8259263..e06d65e 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -32,17 +32,11 @@
#include <linux/clk.h>
#include <linux/io.h>
-#include <asm/mach-types.h>
-#include <mach/hardware.h>
#include <plat/mux.h>
#include "musb_core.h"
#include "omap2430.h"
-#ifdef CONFIG_ARCH_OMAP3430
-#define get_cpu_rev() 2
-#endif
-
static struct timer_list musb_idle_timer;
@@ -145,10 +139,6 @@ void musb_platform_enable(struct musb *musb)
void musb_platform_disable(struct musb *musb)
{
}
-static void omap_vbus_power(struct musb *musb, int is_on, int sleeping)
-{
-}
-
static void omap_set_vbus(struct musb *musb, int is_on)
{
u8 devctl;
@@ -199,9 +189,10 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
return 0;
}
-int __init musb_platform_init(struct musb *musb)
+int __init musb_platform_init(struct musb *musb, void *board_data)
{
u32 l;
+ struct omap_musb_board_data *data = board_data;
#if defined(CONFIG_ARCH_OMAP2430)
omap_cfg_reg(AE5_2430_USB0HS_STP);
@@ -235,7 +226,15 @@ int __init musb_platform_init(struct musb *musb)
musb_writel(musb->mregs, OTG_SYSCONFIG, l);
l = musb_readl(musb->mregs, OTG_INTERFSEL);
- l |= ULPI_12PIN;
+
+ if (data->interface_type == MUSB_INTERFACE_UTMI) {
+ /* OMAP4 uses Internal PHY GS70 which uses UTMI interface */
+ l &= ~ULPI_12PIN; /* Disable ULPI */
+ l |= UTMI_8BIT; /* Enable UTMI */
+ } else {
+ l |= ULPI_12PIN;
+ }
+
musb_writel(musb->mregs, OTG_INTERFSEL, l);
pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
@@ -246,8 +245,6 @@ int __init musb_platform_init(struct musb *musb)
musb_readl(musb->mregs, OTG_INTERFSEL),
musb_readl(musb->mregs, OTG_SIMENABLE));
- omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);
-
if (is_host_enabled(musb))
musb->board_set_vbus = omap_set_vbus;
@@ -272,7 +269,7 @@ void musb_platform_restore_context(struct musb *musb,
}
#endif
-int musb_platform_suspend(struct musb *musb)
+static int musb_platform_suspend(struct musb *musb)
{
u32 l;
@@ -327,8 +324,6 @@ static int musb_platform_resume(struct musb *musb)
int musb_platform_exit(struct musb *musb)
{
- omap_vbus_power(musb, 0 /*off*/, 1);
-
musb_platform_suspend(musb);
return 0;
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 60d3938..05c077f 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -1104,7 +1104,7 @@ err:
return -ENODEV;
}
-int __init musb_platform_init(struct musb *musb)
+int __init musb_platform_init(struct musb *musb, void *board_data)
{
struct platform_device *pdev;
struct resource *mem;
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
index 78a2097..4569694 100644
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -1654,7 +1654,7 @@ static int __init isp_init(void)
{
return i2c_add_driver(&isp1301_driver);
}
-module_init(isp_init);
+subsys_initcall(isp_init);
static void __exit isp_exit(void)
{
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 223cdf4..0e88885 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -33,6 +33,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
#include <linux/i2c/twl.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
@@ -41,81 +42,7 @@
/* Register defines */
-#define VENDOR_ID_LO 0x00
-#define VENDOR_ID_HI 0x01
-#define PRODUCT_ID_LO 0x02
-#define PRODUCT_ID_HI 0x03
-
-#define FUNC_CTRL 0x04
-#define FUNC_CTRL_SET 0x05
-#define FUNC_CTRL_CLR 0x06
-#define FUNC_CTRL_SUSPENDM (1 << 6)
-#define FUNC_CTRL_RESET (1 << 5)
-#define FUNC_CTRL_OPMODE_MASK (3 << 3) /* bits 3 and 4 */
-#define FUNC_CTRL_OPMODE_NORMAL (0 << 3)
-#define FUNC_CTRL_OPMODE_NONDRIVING (1 << 3)
-#define FUNC_CTRL_OPMODE_DISABLE_BIT_NRZI (2 << 3)
-#define FUNC_CTRL_TERMSELECT (1 << 2)
-#define FUNC_CTRL_XCVRSELECT_MASK (3 << 0) /* bits 0 and 1 */
-#define FUNC_CTRL_XCVRSELECT_HS (0 << 0)
-#define FUNC_CTRL_XCVRSELECT_FS (1 << 0)
-#define FUNC_CTRL_XCVRSELECT_LS (2 << 0)
-#define FUNC_CTRL_XCVRSELECT_FS4LS (3 << 0)
-
-#define IFC_CTRL 0x07
-#define IFC_CTRL_SET 0x08
-#define IFC_CTRL_CLR 0x09
-#define IFC_CTRL_INTERFACE_PROTECT_DISABLE (1 << 7)
-#define IFC_CTRL_AUTORESUME (1 << 4)
-#define IFC_CTRL_CLOCKSUSPENDM (1 << 3)
-#define IFC_CTRL_CARKITMODE (1 << 2)
-#define IFC_CTRL_FSLSSERIALMODE_3PIN (1 << 1)
-
-#define TWL4030_OTG_CTRL 0x0A
-#define TWL4030_OTG_CTRL_SET 0x0B
-#define TWL4030_OTG_CTRL_CLR 0x0C
-#define TWL4030_OTG_CTRL_DRVVBUS (1 << 5)
-#define TWL4030_OTG_CTRL_CHRGVBUS (1 << 4)
-#define TWL4030_OTG_CTRL_DISCHRGVBUS (1 << 3)
-#define TWL4030_OTG_CTRL_DMPULLDOWN (1 << 2)
-#define TWL4030_OTG_CTRL_DPPULLDOWN (1 << 1)
-#define TWL4030_OTG_CTRL_IDPULLUP (1 << 0)
-
-#define USB_INT_EN_RISE 0x0D
-#define USB_INT_EN_RISE_SET 0x0E
-#define USB_INT_EN_RISE_CLR 0x0F
-#define USB_INT_EN_FALL 0x10
-#define USB_INT_EN_FALL_SET 0x11
-#define USB_INT_EN_FALL_CLR 0x12
-#define USB_INT_STS 0x13
-#define USB_INT_LATCH 0x14
-#define USB_INT_IDGND (1 << 4)
-#define USB_INT_SESSEND (1 << 3)
-#define USB_INT_SESSVALID (1 << 2)
-#define USB_INT_VBUSVALID (1 << 1)
-#define USB_INT_HOSTDISCONNECT (1 << 0)
-
-#define CARKIT_CTRL 0x19
-#define CARKIT_CTRL_SET 0x1A
-#define CARKIT_CTRL_CLR 0x1B
-#define CARKIT_CTRL_MICEN (1 << 6)
-#define CARKIT_CTRL_SPKRIGHTEN (1 << 5)
-#define CARKIT_CTRL_SPKLEFTEN (1 << 4)
-#define CARKIT_CTRL_RXDEN (1 << 3)
-#define CARKIT_CTRL_TXDEN (1 << 2)
-#define CARKIT_CTRL_IDGNDDRV (1 << 1)
-#define CARKIT_CTRL_CARKITPWR (1 << 0)
-#define CARKIT_PLS_CTRL 0x22
-#define CARKIT_PLS_CTRL_SET 0x23
-#define CARKIT_PLS_CTRL_CLR 0x24
-#define CARKIT_PLS_CTRL_SPKRRIGHT_BIASEN (1 << 3)
-#define CARKIT_PLS_CTRL_SPKRLEFT_BIASEN (1 << 2)
-#define CARKIT_PLS_CTRL_RXPLSEN (1 << 1)
-#define CARKIT_PLS_CTRL_TXPLSEN (1 << 0)
-
#define MCPC_CTRL 0x30
-#define MCPC_CTRL_SET 0x31
-#define MCPC_CTRL_CLR 0x32
#define MCPC_CTRL_RTSOL (1 << 7)
#define MCPC_CTRL_EXTSWR (1 << 6)
#define MCPC_CTRL_EXTSWC (1 << 5)
@@ -125,8 +52,6 @@
#define MCPC_CTRL_HS_UART (1 << 0)
#define MCPC_IO_CTRL 0x33
-#define MCPC_IO_CTRL_SET 0x34
-#define MCPC_IO_CTRL_CLR 0x35
#define MCPC_IO_CTRL_MICBIASEN (1 << 5)
#define MCPC_IO_CTRL_CTS_NPU (1 << 4)
#define MCPC_IO_CTRL_RXD_PU (1 << 3)
@@ -135,19 +60,13 @@
#define MCPC_IO_CTRL_RTSTYP (1 << 0)
#define MCPC_CTRL2 0x36
-#define MCPC_CTRL2_SET 0x37
-#define MCPC_CTRL2_CLR 0x38
#define MCPC_CTRL2_MCPC_CK_EN (1 << 0)
#define OTHER_FUNC_CTRL 0x80
-#define OTHER_FUNC_CTRL_SET 0x81
-#define OTHER_FUNC_CTRL_CLR 0x82
#define OTHER_FUNC_CTRL_BDIS_ACON_EN (1 << 4)
#define OTHER_FUNC_CTRL_FIVEWIRE_MODE (1 << 2)
#define OTHER_IFC_CTRL 0x83
-#define OTHER_IFC_CTRL_SET 0x84
-#define OTHER_IFC_CTRL_CLR 0x85
#define OTHER_IFC_CTRL_OE_INT_EN (1 << 6)
#define OTHER_IFC_CTRL_CEA2011_MODE (1 << 5)
#define OTHER_IFC_CTRL_FSLSSERIALMODE_4PIN (1 << 4)
@@ -156,11 +75,7 @@
#define OTHER_IFC_CTRL_ALT_INT_REROUTE (1 << 0)
#define OTHER_INT_EN_RISE 0x86
-#define OTHER_INT_EN_RISE_SET 0x87
-#define OTHER_INT_EN_RISE_CLR 0x88
#define OTHER_INT_EN_FALL 0x89
-#define OTHER_INT_EN_FALL_SET 0x8A
-#define OTHER_INT_EN_FALL_CLR 0x8B
#define OTHER_INT_STS 0x8C
#define OTHER_INT_LATCH 0x8D
#define OTHER_INT_VB_SESS_VLD (1 << 7)
@@ -178,13 +93,9 @@
#define ID_RES_GND (1 << 0)
#define POWER_CTRL 0xAC
-#define POWER_CTRL_SET 0xAD
-#define POWER_CTRL_CLR 0xAE
#define POWER_CTRL_OTG_ENAB (1 << 5)
#define OTHER_IFC_CTRL2 0xAF
-#define OTHER_IFC_CTRL2_SET 0xB0
-#define OTHER_IFC_CTRL2_CLR 0xB1
#define OTHER_IFC_CTRL2_ULPI_STP_LOW (1 << 4)
#define OTHER_IFC_CTRL2_ULPI_TXEN_POL (1 << 3)
#define OTHER_IFC_CTRL2_ULPI_4PIN_2430 (1 << 2)
@@ -193,14 +104,10 @@
#define OTHER_IFC_CTRL2_USB_INT_OUTSEL_INT2N (1 << 0)
#define REG_CTRL_EN 0xB2
-#define REG_CTRL_EN_SET 0xB3
-#define REG_CTRL_EN_CLR 0xB4
#define REG_CTRL_ERROR 0xB5
#define ULPI_I2C_CONFLICT_INTEN (1 << 0)
#define OTHER_FUNC_CTRL2 0xB8
-#define OTHER_FUNC_CTRL2_SET 0xB9
-#define OTHER_FUNC_CTRL2_CLR 0xBA
#define OTHER_FUNC_CTRL2_VBAT_TIMER_EN (1 << 0)
/* following registers do not have separate _clr and _set registers */
@@ -328,13 +235,13 @@ static inline int twl4030_usb_read(struct twl4030_usb *twl, u8 address)
static inline int
twl4030_usb_set_bits(struct twl4030_usb *twl, u8 reg, u8 bits)
{
- return twl4030_usb_write(twl, reg + 1, bits);
+ return twl4030_usb_write(twl, ULPI_SET(reg), bits);
}
static inline int
twl4030_usb_clear_bits(struct twl4030_usb *twl, u8 reg, u8 bits)
{
- return twl4030_usb_write(twl, reg + 2, bits);
+ return twl4030_usb_write(twl, ULPI_CLR(reg), bits);
}
/*-------------------------------------------------------------------------*/
@@ -393,11 +300,12 @@ static void twl4030_usb_set_mode(struct twl4030_usb *twl, int mode)
switch (mode) {
case T2_USB_MODE_ULPI:
- twl4030_usb_clear_bits(twl, IFC_CTRL, IFC_CTRL_CARKITMODE);
+ twl4030_usb_clear_bits(twl, ULPI_IFC_CTRL,
+ ULPI_IFC_CTRL_CARKITMODE);
twl4030_usb_set_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
- twl4030_usb_clear_bits(twl, FUNC_CTRL,
- FUNC_CTRL_XCVRSELECT_MASK |
- FUNC_CTRL_OPMODE_MASK);
+ twl4030_usb_clear_bits(twl, ULPI_FUNC_CTRL,
+ ULPI_FUNC_CTRL_XCVRSEL_MASK |
+ ULPI_FUNC_CTRL_OPMODE_MASK);
break;
case -1:
/* FIXME: power on defaults */
diff --git a/drivers/usb/otg/ulpi.c b/drivers/usb/otg/ulpi.c
index 9010225..b1b3469 100644
--- a/drivers/usb/otg/ulpi.c
+++ b/drivers/usb/otg/ulpi.c
@@ -29,28 +29,6 @@
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
-/* ULPI register addresses */
-#define ULPI_VID_LOW 0x00 /* Vendor ID low */
-#define ULPI_VID_HIGH 0x01 /* Vendor ID high */
-#define ULPI_PID_LOW 0x02 /* Product ID low */
-#define ULPI_PID_HIGH 0x03 /* Product ID high */
-#define ULPI_ITFCTL 0x07 /* Interface Control */
-#define ULPI_OTGCTL 0x0A /* OTG Control */
-
-/* add to above register address to access Set/Clear functions */
-#define ULPI_REG_SET 0x01
-#define ULPI_REG_CLEAR 0x02
-
-/* ULPI OTG Control Register bits */
-#define ID_PULL_UP (1 << 0) /* enable ID Pull Up */
-#define DP_PULL_DOWN (1 << 1) /* enable DP Pull Down */
-#define DM_PULL_DOWN (1 << 2) /* enable DM Pull Down */
-#define DISCHRG_VBUS (1 << 3) /* Discharge Vbus */
-#define CHRG_VBUS (1 << 4) /* Charge Vbus */
-#define DRV_VBUS (1 << 5) /* Drive Vbus */
-#define DRV_VBUS_EXT (1 << 6) /* Drive Vbus external */
-#define USE_EXT_VBUS_IND (1 << 7) /* Use ext. Vbus indicator */
-
#define ULPI_ID(vendor, product) (((vendor) << 16) | (product))
#define TR_FLAG(flags, a, b) (((flags) & a) ? b : 0)
@@ -65,28 +43,28 @@ static int ulpi_set_flags(struct otg_transceiver *otg)
unsigned int flags = 0;
if (otg->flags & USB_OTG_PULLUP_ID)
- flags |= ID_PULL_UP;
+ flags |= ULPI_OTG_CTRL_ID_PULLUP;
if (otg->flags & USB_OTG_PULLDOWN_DM)
- flags |= DM_PULL_DOWN;
+ flags |= ULPI_OTG_CTRL_DM_PULLDOWN;
if (otg->flags & USB_OTG_PULLDOWN_DP)
- flags |= DP_PULL_DOWN;
+ flags |= ULPI_OTG_CTRL_DP_PULLDOWN;
if (otg->flags & USB_OTG_EXT_VBUS_INDICATOR)
- flags |= USE_EXT_VBUS_IND;
+ flags |= ULPI_OTG_CTRL_EXTVBUSIND;
- return otg_io_write(otg, flags, ULPI_OTGCTL + ULPI_REG_SET);
+ return otg_io_write(otg, flags, ULPI_SET(ULPI_OTG_CTRL));
}
static int ulpi_init(struct otg_transceiver *otg)
{
int i, vid, pid;
- vid = (otg_io_read(otg, ULPI_VID_HIGH) << 8) |
- otg_io_read(otg, ULPI_VID_LOW);
- pid = (otg_io_read(otg, ULPI_PID_HIGH) << 8) |
- otg_io_read(otg, ULPI_PID_LOW);
+ vid = (otg_io_read(otg, ULPI_VENDOR_ID_HIGH) << 8) |
+ otg_io_read(otg, ULPI_VENDOR_ID_LOW);
+ pid = (otg_io_read(otg, ULPI_PRODUCT_ID_HIGH) << 8) |
+ otg_io_read(otg, ULPI_PRODUCT_ID_LOW);
pr_info("ULPI transceiver vendor/product ID 0x%04x/0x%04x\n", vid, pid);
@@ -100,19 +78,19 @@ static int ulpi_init(struct otg_transceiver *otg)
static int ulpi_set_vbus(struct otg_transceiver *otg, bool on)
{
- unsigned int flags = otg_io_read(otg, ULPI_OTGCTL);
+ unsigned int flags = otg_io_read(otg, ULPI_OTG_CTRL);
- flags &= ~(DRV_VBUS | DRV_VBUS_EXT);
+ flags &= ~(ULPI_OTG_CTRL_DRVVBUS | ULPI_OTG_CTRL_DRVVBUS_EXT);
if (on) {
if (otg->flags & USB_OTG_DRV_VBUS)
- flags |= DRV_VBUS;
+ flags |= ULPI_OTG_CTRL_DRVVBUS;
if (otg->flags & USB_OTG_DRV_VBUS_EXT)
- flags |= DRV_VBUS_EXT;
+ flags |= ULPI_OTG_CTRL_DRVVBUS_EXT;
}
- return otg_io_write(otg, flags, ULPI_OTGCTL + ULPI_REG_SET);
+ return otg_io_write(otg, flags, ULPI_SET(ULPI_OTG_CTRL));
}
struct otg_transceiver *
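
Both the twl4030-usb and ulpi conversions above replace open-coded "reg + 1" and "reg + 2" offsets with the ULPI_SET() and ULPI_CLR() helpers from <linux/usb/ulpi.h>. The ULPI register map gives each control register three addresses: the base address for a plain write, base + 1 for a set-bits write, and base + 2 for a clear-bits write, so individual bits can be changed without a read-modify-write cycle. A standalone sketch of that convention; the macro bodies are restated locally as an assumption about what the header provides, and the toy register file only decodes the one register used here:

#include <stdio.h>
#include <stdint.h>

/* Assumed to mirror the helpers in <linux/usb/ulpi.h>. */
#define ULPI_SET(reg)	((reg) + 1)
#define ULPI_CLR(reg)	((reg) + 2)

#define ULPI_OTG_CTRL		0x0a	/* OTG Control register */
#define ULPI_OTG_CTRL_DRVVBUS	(1 << 5)

static uint8_t phy[256];	/* toy stand-in for the PHY register file */

static void phy_write(uint8_t addr, uint8_t val)
{
	/* Decode the set/clear aliases back onto the base register. */
	if (addr == ULPI_SET(ULPI_OTG_CTRL))
		phy[ULPI_OTG_CTRL] |= val;
	else if (addr == ULPI_CLR(ULPI_OTG_CTRL))
		phy[ULPI_OTG_CTRL] &= ~val;
	else
		phy[addr] = val;
}

int main(void)
{
	phy_write(ULPI_SET(ULPI_OTG_CTRL), ULPI_OTG_CTRL_DRVVBUS);
	printf("after set:   OTG_CTRL = 0x%02x\n", phy[ULPI_OTG_CTRL]);
	phy_write(ULPI_CLR(ULPI_OTG_CTRL), ULPI_OTG_CTRL_DRVVBUS);
	printf("after clear: OTG_CTRL = 0x%02x\n", phy[ULPI_OTG_CTRL]);
	return 0;
}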
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index a0ecb42..bd8aab0 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -425,6 +425,16 @@ config USB_SERIAL_MOS7720
To compile this driver as a module, choose M here: the
module will be called mos7720.
+config USB_SERIAL_MOS7715_PARPORT
+ bool "Support for parallel port on the Moschip 7715"
+ depends on USB_SERIAL_MOS7720
+ depends on PARPORT=y || PARPORT=USB_SERIAL_MOS7720
+ select PARPORT_NOT_PC
+ ---help---
+ Say Y if you have a Moschip 7715 device and would like to use
+ the parallel port it provides. The port will register with
+ the parport subsystem as a low-level driver.
+
config USB_SERIAL_MOS7840
tristate "USB Moschip 7840/7820 USB Serial Driver"
---help---
@@ -485,6 +495,7 @@ config USB_SERIAL_QCAUX
config USB_SERIAL_QUALCOMM
tristate "USB Qualcomm Serial modem"
+ select USB_SERIAL_WWAN
help
Say Y here if you have a Qualcomm USB modem device. These are
usually wireless cellular modems.
@@ -576,8 +587,12 @@ config USB_SERIAL_XIRCOM
To compile this driver as a module, choose M here: the
module will be called keyspan_pda.
+config USB_SERIAL_WWAN
+ tristate
+
config USB_SERIAL_OPTION
tristate "USB driver for GSM and CDMA modems"
+ select USB_SERIAL_WWAN
help
Say Y here if you have a GSM or CDMA modem that's connected to USB.
@@ -619,6 +634,14 @@ config USB_SERIAL_VIVOPAY_SERIAL
To compile this driver as a module, choose M here: the
module will be called vivopay-serial.
+config USB_SERIAL_ZIO
+ tristate "ZIO Motherboard USB serial interface driver"
+ help
+	  Say Y here if you want to use a ZIO Motherboard.
+
+ To compile this driver as a module, choose M here: the
+ module will be called zio.
+
config USB_SERIAL_DEBUG
tristate "USB Debugging Device"
help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 83c9e43..e54c728 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -52,9 +52,11 @@ obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o
obj-$(CONFIG_USB_SERIAL_SIERRAWIRELESS) += sierra.o
obj-$(CONFIG_USB_SERIAL_SPCP8X5) += spcp8x5.o
obj-$(CONFIG_USB_SERIAL_SYMBOL) += symbolserial.o
+obj-$(CONFIG_USB_SERIAL_WWAN) += usb_wwan.o
obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o
obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o
obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o
obj-$(CONFIG_USB_SERIAL_XIRCOM) += keyspan_pda.o
obj-$(CONFIG_USB_SERIAL_VIVOPAY_SERIAL) += vivopay-serial.o
+obj-$(CONFIG_USB_SERIAL_ZIO) += zio.o
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 4fd7af9..0db6ace 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -1,7 +1,9 @@
/*
* AIRcable USB Bluetooth Dongle Driver.
*
+ * Copyright (C) 2010 Johan Hovold <jhovold@gmail.com>
* Copyright (C) 2006 Manuel Francisco Naranjo (naranjo.manuel@gmail.com)
+ *
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
@@ -42,10 +44,10 @@
*
*/
+#include <asm/unaligned.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/tty_flip.h>
-#include <linux/circ_buf.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
@@ -55,16 +57,12 @@ static int debug;
#define AIRCABLE_VID 0x16CA
#define AIRCABLE_USB_PID 0x1502
-/* write buffer size defines */
-#define AIRCABLE_BUF_SIZE 2048
-
/* Protocol Stuff */
#define HCI_HEADER_LENGTH 0x4
#define TX_HEADER_0 0x20
#define TX_HEADER_1 0x29
#define RX_HEADER_0 0x00
#define RX_HEADER_1 0x20
-#define MAX_HCI_FRAMESIZE 60
#define HCI_COMPLETE_FRAME 64
/* rx_flags */
@@ -74,8 +72,8 @@ static int debug;
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.0b2"
-#define DRIVER_AUTHOR "Naranjo, Manuel Francisco <naranjo.manuel@gmail.com>"
+#define DRIVER_VERSION "v2.0"
+#define DRIVER_AUTHOR "Naranjo, Manuel Francisco <naranjo.manuel@gmail.com>, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "AIRcable USB Driver"
/* ID table that will be registered with USB core */
@@ -85,226 +83,21 @@ static const struct usb_device_id id_table[] = {
};
MODULE_DEVICE_TABLE(usb, id_table);
-
-/* Internal Structure */
-struct aircable_private {
- spinlock_t rx_lock; /* spinlock for the receive lines */
- struct circ_buf *tx_buf; /* write buffer */
- struct circ_buf *rx_buf; /* read buffer */
- int rx_flags; /* for throttilng */
- struct work_struct rx_work; /* work cue for the receiving line */
- struct usb_serial_port *port; /* USB port with which associated */
-};
-
-/* Private methods */
-
-/* Circular Buffer Methods, code from ti_usb_3410_5052 used */
-/*
- * serial_buf_clear
- *
- * Clear out all data in the circular buffer.
- */
-static void serial_buf_clear(struct circ_buf *cb)
-{
- cb->head = cb->tail = 0;
-}
-
-/*
- * serial_buf_alloc
- *
- * Allocate a circular buffer and all associated memory.
- */
-static struct circ_buf *serial_buf_alloc(void)
-{
- struct circ_buf *cb;
- cb = kmalloc(sizeof(struct circ_buf), GFP_KERNEL);
- if (cb == NULL)
- return NULL;
- cb->buf = kmalloc(AIRCABLE_BUF_SIZE, GFP_KERNEL);
- if (cb->buf == NULL) {
- kfree(cb);
- return NULL;
- }
- serial_buf_clear(cb);
- return cb;
-}
-
-/*
- * serial_buf_free
- *
- * Free the buffer and all associated memory.
- */
-static void serial_buf_free(struct circ_buf *cb)
-{
- kfree(cb->buf);
- kfree(cb);
-}
-
-/*
- * serial_buf_data_avail
- *
- * Return the number of bytes of data available in the circular
- * buffer.
- */
-static int serial_buf_data_avail(struct circ_buf *cb)
-{
- return CIRC_CNT(cb->head, cb->tail, AIRCABLE_BUF_SIZE);
-}
-
-/*
- * serial_buf_put
- *
- * Copy data data from a user buffer and put it into the circular buffer.
- * Restrict to the amount of space available.
- *
- * Return the number of bytes copied.
- */
-static int serial_buf_put(struct circ_buf *cb, const char *buf, int count)
-{
- int c, ret = 0;
- while (1) {
- c = CIRC_SPACE_TO_END(cb->head, cb->tail, AIRCABLE_BUF_SIZE);
- if (count < c)
- c = count;
- if (c <= 0)
- break;
- memcpy(cb->buf + cb->head, buf, c);
- cb->head = (cb->head + c) & (AIRCABLE_BUF_SIZE-1);
- buf += c;
- count -= c;
- ret = c;
- }
- return ret;
-}
-
-/*
- * serial_buf_get
- *
- * Get data from the circular buffer and copy to the given buffer.
- * Restrict to the amount of data available.
- *
- * Return the number of bytes copied.
- */
-static int serial_buf_get(struct circ_buf *cb, char *buf, int count)
-{
- int c, ret = 0;
- while (1) {
- c = CIRC_CNT_TO_END(cb->head, cb->tail, AIRCABLE_BUF_SIZE);
- if (count < c)
- c = count;
- if (c <= 0)
- break;
- memcpy(buf, cb->buf + cb->tail, c);
- cb->tail = (cb->tail + c) & (AIRCABLE_BUF_SIZE-1);
- buf += c;
- count -= c;
- ret = c;
- }
- return ret;
-}
-
-/* End of circula buffer methods */
-
-static void aircable_send(struct usb_serial_port *port)
+static int aircable_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size)
{
- int count, result;
- struct aircable_private *priv = usb_get_serial_port_data(port);
- unsigned char *buf;
- __le16 *dbuf;
- dbg("%s - port %d", __func__, port->number);
- if (port->write_urb_busy)
- return;
-
- count = min(serial_buf_data_avail(priv->tx_buf), MAX_HCI_FRAMESIZE);
- if (count == 0)
- return;
-
- buf = kzalloc(count + HCI_HEADER_LENGTH, GFP_ATOMIC);
- if (!buf) {
- dev_err(&port->dev, "%s- kzalloc(%d) failed.\n",
- __func__, count + HCI_HEADER_LENGTH);
- return;
- }
+ int count;
+ unsigned char *buf = dest;
+ count = kfifo_out_locked(&port->write_fifo, buf + HCI_HEADER_LENGTH,
+ size - HCI_HEADER_LENGTH, &port->lock);
buf[0] = TX_HEADER_0;
buf[1] = TX_HEADER_1;
- dbuf = (__le16 *)&buf[2];
- *dbuf = cpu_to_le16((u16)count);
- serial_buf_get(priv->tx_buf, buf + HCI_HEADER_LENGTH,
- MAX_HCI_FRAMESIZE);
-
- memcpy(port->write_urb->transfer_buffer, buf,
- count + HCI_HEADER_LENGTH);
-
- kfree(buf);
- port->write_urb_busy = 1;
- usb_serial_debug_data(debug, &port->dev, __func__,
- count + HCI_HEADER_LENGTH,
- port->write_urb->transfer_buffer);
- port->write_urb->transfer_buffer_length = count + HCI_HEADER_LENGTH;
- port->write_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
-
- if (result) {
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, result);
- port->write_urb_busy = 0;
- }
+ put_unaligned_le16(count, &buf[2]);
- schedule_work(&port->work);
+ return count + HCI_HEADER_LENGTH;
}
-static void aircable_read(struct work_struct *work)
-{
- struct aircable_private *priv =
- container_of(work, struct aircable_private, rx_work);
- struct usb_serial_port *port = priv->port;
- struct tty_struct *tty;
- unsigned char *data;
- int count;
- if (priv->rx_flags & THROTTLED) {
- if (priv->rx_flags & ACTUALLY_THROTTLED)
- schedule_work(&priv->rx_work);
- return;
- }
-
- /* By now I will flush data to the tty in packages of no more than
- * 64 bytes, to ensure I do not get throttled.
- * Ask USB mailing list for better aproach.
- */
- tty = tty_port_tty_get(&port->port);
-
- if (!tty) {
- schedule_work(&priv->rx_work);
- dev_err(&port->dev, "%s - No tty available\n", __func__);
- return ;
- }
-
- count = min(64, serial_buf_data_avail(priv->rx_buf));
-
- if (count <= 0)
- goto out; /* We have finished sending everything. */
-
- tty_prepare_flip_string(tty, &data, count);
- if (!data) {
- dev_err(&port->dev, "%s- kzalloc(%d) failed.",
- __func__, count);
- goto out;
- }
-
- serial_buf_get(priv->rx_buf, data, count);
-
- tty_flip_buffer_push(tty);
-
- if (serial_buf_data_avail(priv->rx_buf))
- schedule_work(&priv->rx_work);
-out:
- tty_kref_put(tty);
- return;
-}
-/* End of private methods */
-
static int aircable_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
@@ -330,247 +123,50 @@ static int aircable_probe(struct usb_serial *serial,
return 0;
}
-static int aircable_attach(struct usb_serial *serial)
-{
- struct usb_serial_port *port = serial->port[0];
- struct aircable_private *priv;
-
- priv = kzalloc(sizeof(struct aircable_private), GFP_KERNEL);
- if (!priv) {
- dev_err(&port->dev, "%s- kmalloc(%Zd) failed.\n", __func__,
- sizeof(struct aircable_private));
- return -ENOMEM;
- }
-
- /* Allocation of Circular Buffers */
- priv->tx_buf = serial_buf_alloc();
- if (priv->tx_buf == NULL) {
- kfree(priv);
- return -ENOMEM;
- }
-
- priv->rx_buf = serial_buf_alloc();
- if (priv->rx_buf == NULL) {
- kfree(priv->tx_buf);
- kfree(priv);
- return -ENOMEM;
- }
-
- priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
- priv->port = port;
- INIT_WORK(&priv->rx_work, aircable_read);
-
- usb_set_serial_port_data(serial->port[0], priv);
-
- return 0;
-}
-
-static void aircable_release(struct usb_serial *serial)
+static int aircable_process_packet(struct tty_struct *tty,
+ struct usb_serial_port *port, int has_headers,
+ char *packet, int len)
{
-
- struct usb_serial_port *port = serial->port[0];
- struct aircable_private *priv = usb_get_serial_port_data(port);
-
- dbg("%s", __func__);
-
- if (priv) {
- serial_buf_free(priv->tx_buf);
- serial_buf_free(priv->rx_buf);
- kfree(priv);
+ if (has_headers) {
+ len -= HCI_HEADER_LENGTH;
+ packet += HCI_HEADER_LENGTH;
}
-}
-
-static int aircable_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct aircable_private *priv = usb_get_serial_port_data(port);
- return serial_buf_data_avail(priv->tx_buf);
-}
-
-static int aircable_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *source, int count)
-{
- struct aircable_private *priv = usb_get_serial_port_data(port);
- int temp;
-
- dbg("%s - port %d, %d bytes", __func__, port->number, count);
-
- usb_serial_debug_data(debug, &port->dev, __func__, count, source);
-
- if (!count) {
- dbg("%s - write request of 0 bytes", __func__);
- return count;
+ if (len <= 0) {
+ dbg("%s - malformed packet", __func__);
+ return 0;
}
- temp = serial_buf_put(priv->tx_buf, source, count);
-
- aircable_send(port);
-
- if (count > AIRCABLE_BUF_SIZE)
- count = AIRCABLE_BUF_SIZE;
-
- return count;
+ tty_insert_flip_string(tty, packet, len);
+ return len;
}
-static void aircable_write_bulk_callback(struct urb *urb)
+static void aircable_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- int status = urb->status;
- int result;
-
- dbg("%s - urb status: %d", __func__ , status);
-
- /* This has been taken from cypress_m8.c cypress_write_int_callback */
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d",
- __func__, status);
- port->write_urb_busy = 0;
- return;
- default:
- /* error in the urb, so we have to resubmit it */
- dbg("%s - Overflow in write", __func__);
- dbg("%s - nonzero write bulk status received: %d",
- __func__, status);
- port->write_urb->transfer_buffer_length = 1;
- port->write_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev,
- "%s - failed resubmitting write urb, error %d\n",
- __func__, result);
- else
- return;
- }
-
- port->write_urb_busy = 0;
-
- aircable_send(port);
-}
-
-static void aircable_read_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct aircable_private *priv = usb_get_serial_port_data(port);
+ char *data = (char *)urb->transfer_buffer;
struct tty_struct *tty;
- unsigned long no_packages, remaining, package_length, i;
- int result, shift = 0;
- unsigned char *temp;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - urb status = %d", __func__, status);
- if (status == -EPROTO) {
- dbg("%s - caught -EPROTO, resubmitting the urb",
- __func__);
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- aircable_read_bulk_callback, port);
-
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- return;
- }
- dbg("%s - unable to handle the error, exiting.", __func__);
- return;
- }
-
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, urb->transfer_buffer);
+ int has_headers;
+ int count;
+ int len;
+ int i;
tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- if (urb->actual_length <= 2) {
- /* This is an incomplete package */
- serial_buf_put(priv->rx_buf, urb->transfer_buffer,
- urb->actual_length);
- } else {
- temp = urb->transfer_buffer;
- if (temp[0] == RX_HEADER_0)
- shift = HCI_HEADER_LENGTH;
-
- remaining = urb->actual_length;
- no_packages = urb->actual_length / (HCI_COMPLETE_FRAME);
-
- if (urb->actual_length % HCI_COMPLETE_FRAME != 0)
- no_packages++;
+ if (!tty)
+ return;
- for (i = 0; i < no_packages; i++) {
- if (remaining > (HCI_COMPLETE_FRAME))
- package_length = HCI_COMPLETE_FRAME;
- else
- package_length = remaining;
- remaining -= package_length;
+ has_headers = (urb->actual_length > 2 && data[0] == RX_HEADER_0);
- serial_buf_put(priv->rx_buf,
- urb->transfer_buffer + shift +
- (HCI_COMPLETE_FRAME) * (i),
- package_length - shift);
- }
- }
- aircable_read(&priv->rx_work);
+ count = 0;
+ for (i = 0; i < urb->actual_length; i += HCI_COMPLETE_FRAME) {
+ len = min_t(int, urb->actual_length - i, HCI_COMPLETE_FRAME);
+ count += aircable_process_packet(tty, port, has_headers,
+ &data[i], len);
}
- tty_kref_put(tty);
-
- /* Schedule the next read */
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- aircable_read_bulk_callback, port);
-
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result && result != -EPERM)
- dev_err(&urb->dev->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
-}
-
-/* Based on ftdi_sio.c throttle */
-static void aircable_throttle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct aircable_private *priv = usb_get_serial_port_data(port);
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irq(&priv->rx_lock);
- priv->rx_flags |= THROTTLED;
- spin_unlock_irq(&priv->rx_lock);
-}
-
-/* Based on ftdi_sio.c unthrottle */
-static void aircable_unthrottle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct aircable_private *priv = usb_get_serial_port_data(port);
- int actually_throttled;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irq(&priv->rx_lock);
- actually_throttled = priv->rx_flags & ACTUALLY_THROTTLED;
- priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
- spin_unlock_irq(&priv->rx_lock);
-
- if (actually_throttled)
- schedule_work(&priv->rx_work);
+ if (count)
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
}
static struct usb_driver aircable_driver = {
@@ -589,15 +185,12 @@ static struct usb_serial_driver aircable_device = {
.usb_driver = &aircable_driver,
.id_table = id_table,
.num_ports = 1,
- .attach = aircable_attach,
+ .bulk_out_size = HCI_COMPLETE_FRAME,
.probe = aircable_probe,
- .release = aircable_release,
- .write = aircable_write,
- .write_room = aircable_write_room,
- .write_bulk_callback = aircable_write_bulk_callback,
- .read_bulk_callback = aircable_read_bulk_callback,
- .throttle = aircable_throttle,
- .unthrottle = aircable_unthrottle,
+ .process_read_urb = aircable_process_read_urb,
+ .prepare_write_buffer = aircable_prepare_write_buffer,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
};
static int __init aircable_init(void)
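
The rewrite above moves aircable onto the generic USB-serial paths: aircable_prepare_write_buffer() pulls pending data out of port->write_fifo and prepends the four-byte header (TX_HEADER_0, TX_HEADER_1, then the payload length as an unaligned little-endian 16-bit value), while aircable_process_read_urb() walks each URB in HCI_COMPLETE_FRAME (64-byte) chunks and strips the header when one is present. A standalone sketch of the outgoing framing, reusing the constants defined earlier in the file; build_frame() is an illustrative helper, not part of the driver:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define TX_HEADER_0		0x20
#define TX_HEADER_1		0x29
#define HCI_HEADER_LENGTH	4

/* Build one outgoing frame the way aircable_prepare_write_buffer() does:
 * header first, then as much payload as fits in the buffer. */
static size_t build_frame(uint8_t *dest, size_t size,
			  const uint8_t *payload, size_t len)
{
	if (len > size - HCI_HEADER_LENGTH)
		len = size - HCI_HEADER_LENGTH;

	dest[0] = TX_HEADER_0;
	dest[1] = TX_HEADER_1;
	dest[2] = len & 0xff;		/* put_unaligned_le16(len, &dest[2]) */
	dest[3] = (len >> 8) & 0xff;
	memcpy(dest + HCI_HEADER_LENGTH, payload, len);

	return len + HCI_HEADER_LENGTH;
}

int main(void)
{
	uint8_t frame[64];	/* one HCI_COMPLETE_FRAME worth of data */
	const uint8_t msg[] = "hello";
	size_t n = build_frame(frame, sizeof(frame), msg, sizeof(msg) - 1);
	size_t i;

	for (i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");
	return 0;
}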
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 9b66bf1..4e41a2a 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -42,7 +42,7 @@ static int debug;
* Version information
*/
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v0.6"
#define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>"
#define DRIVER_DESC "USB ARK3116 serial/IrDA driver"
#define DRIVER_DEV_DESC "ARK3116 RS232/IrDA"
@@ -355,14 +355,11 @@ static void ark3116_close(struct usb_serial_port *port)
/* deactivate interrupts */
ark3116_write_reg(serial, UART_IER, 0);
- /* shutdown any bulk reads that might be going on */
- if (serial->num_bulk_out)
- usb_kill_urb(port->write_urb);
- if (serial->num_bulk_in)
- usb_kill_urb(port->read_urb);
+ usb_serial_generic_close(port);
if (serial->num_interrupt_in)
usb_kill_urb(port->interrupt_in_urb);
}
+
}
static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
@@ -675,87 +672,45 @@ static void ark3116_read_int_callback(struct urb *urb)
* error for the next block of data as well...
* For now, let's pretend this can't happen.
*/
-
-static void send_to_tty(struct tty_struct *tty,
- const unsigned char *chars,
- size_t size, char flag)
+static void ark3116_process_read_urb(struct urb *urb)
{
- if (size == 0)
- return;
- if (flag == TTY_NORMAL) {
- tty_insert_flip_string(tty, chars, size);
- } else {
- int i;
- for (i = 0; i < size; ++i)
- tty_insert_flip_char(tty, chars[i], flag);
- }
-}
-
-static void ark3116_read_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
+ struct usb_serial_port *port = urb->context;
struct ark3116_private *priv = usb_get_serial_port_data(port);
- const __u8 *data = urb->transfer_buffer;
- int status = urb->status;
struct tty_struct *tty;
+ unsigned char *data = urb->transfer_buffer;
+ char tty_flag = TTY_NORMAL;
unsigned long flags;
- int result;
- char flag;
__u32 lsr;
- switch (status) {
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d",
- __func__, status);
+ /* update line status */
+ spin_lock_irqsave(&priv->status_lock, flags);
+ lsr = priv->lsr;
+ priv->lsr &= ~UART_LSR_BRK_ERROR_BITS;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ if (!urb->actual_length)
return;
- default:
- dbg("%s - nonzero urb status received: %d",
- __func__, status);
- break;
- case 0: /* success */
- spin_lock_irqsave(&priv->status_lock, flags);
- lsr = priv->lsr;
- /* clear error bits */
- priv->lsr &= ~UART_LSR_BRK_ERROR_BITS;
- spin_unlock_irqrestore(&priv->status_lock, flags);
-
- if (unlikely(lsr & UART_LSR_BI))
- flag = TTY_BREAK;
- else if (unlikely(lsr & UART_LSR_PE))
- flag = TTY_PARITY;
- else if (unlikely(lsr & UART_LSR_FE))
- flag = TTY_FRAME;
- else
- flag = TTY_NORMAL;
-
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- /* overrun is special, not associated with a char */
- if (unlikely(lsr & UART_LSR_OE))
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- send_to_tty(tty, data, urb->actual_length, flag);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
+
+ if (lsr & UART_LSR_BRK_ERROR_BITS) {
+ if (lsr & UART_LSR_BI)
+ tty_flag = TTY_BREAK;
+ else if (lsr & UART_LSR_PE)
+ tty_flag = TTY_PARITY;
+ else if (lsr & UART_LSR_FE)
+ tty_flag = TTY_FRAME;
- /* Throttle the device if requested by tty */
- spin_lock_irqsave(&port->lock, flags);
- port->throttled = port->throttle_req;
- if (port->throttled) {
- spin_unlock_irqrestore(&port->lock, flags);
- return;
- } else
- spin_unlock_irqrestore(&port->lock, flags);
+ /* overrun is special, not associated with a char */
+ if (lsr & UART_LSR_OE)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
- /* Continue reading from device */
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev, "%s - failed resubmitting"
- " read urb, error %d\n", __func__, result);
+ tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
}
static struct usb_driver ark3116_driver = {
@@ -785,7 +740,7 @@ static struct usb_serial_driver ark3116_device = {
.close = ark3116_close,
.break_ctl = ark3116_break_ctl,
.read_int_callback = ark3116_read_int_callback,
- .read_bulk_callback = ark3116_read_bulk_callback,
+ .process_read_urb = ark3116_process_read_urb,
};
static int __init ark3116_init(void)
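
ark3116_process_read_urb() above (and belkin_sa_process_read_urb() in the belkin_sa changes further below) fold a cached line-status value into a single tty flag for the whole block of received bytes, with overrun reported separately as a NUL character flagged TTY_OVERRUN. A standalone sketch of that mapping; the UART_LSR_* values follow the conventional 16550 layout from include/linux/serial_reg.h and are restated here as an assumption:

#include <stdio.h>

/* Conventional 16550 line-status bits (assumed to match serial_reg.h). */
#define UART_LSR_OE	0x02	/* overrun error */
#define UART_LSR_PE	0x04	/* parity error */
#define UART_LSR_FE	0x08	/* framing error */
#define UART_LSR_BI	0x10	/* break interrupt */
#define UART_LSR_BRK_ERROR_BITS \
	(UART_LSR_BI | UART_LSR_FE | UART_LSR_PE | UART_LSR_OE)

enum flag { FLAG_NORMAL, FLAG_BREAK, FLAG_PARITY, FLAG_FRAME };

/* Break takes precedence over parity, which takes precedence over framing,
 * mirroring the if/else chain in the driver. */
static enum flag lsr_to_flag(unsigned int lsr)
{
	if (lsr & UART_LSR_BI)
		return FLAG_BREAK;
	if (lsr & UART_LSR_PE)
		return FLAG_PARITY;
	if (lsr & UART_LSR_FE)
		return FLAG_FRAME;
	return FLAG_NORMAL;
}

int main(void)
{
	unsigned int lsr = UART_LSR_PE | UART_LSR_OE;

	if (lsr & UART_LSR_BRK_ERROR_BITS)
		printf("block flag = %d (parity)\n", lsr_to_flag(lsr));
	if (lsr & UART_LSR_OE)
		printf("overrun reported separately as TTY_OVERRUN\n");
	return 0;
}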
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 1295e44..36df352 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2000 William Greathouse (wgreathouse@smva.com)
* Copyright (C) 2000-2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2010 Johan Hovold (jhovold@gmail.com)
*
* This program is largely derived from work by the linux-usb group
* and associated source files. Please see the usb/serial files for
@@ -84,7 +85,7 @@ static int debug;
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.2"
+#define DRIVER_VERSION "v1.3"
#define DRIVER_AUTHOR "William Greathouse <wgreathouse@smva.com>"
#define DRIVER_DESC "USB Belkin Serial converter driver"
@@ -95,6 +96,7 @@ static int belkin_sa_open(struct tty_struct *tty,
struct usb_serial_port *port);
static void belkin_sa_close(struct usb_serial_port *port);
static void belkin_sa_read_int_callback(struct urb *urb);
+static void belkin_sa_process_read_urb(struct urb *urb);
static void belkin_sa_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios * old);
static void belkin_sa_break_ctl(struct tty_struct *tty, int break_state);
@@ -112,7 +114,6 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(BELKIN_DOCKSTATION_VID, BELKIN_DOCKSTATION_PID) },
{ } /* Terminating entry */
};
-
MODULE_DEVICE_TABLE(usb, id_table_combined);
static struct usb_driver belkin_driver = {
@@ -120,7 +121,7 @@ static struct usb_driver belkin_driver = {
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table_combined,
- .no_dynamic_id = 1,
+ .no_dynamic_id = 1,
};
/* All of the device info needed for the serial converters */
@@ -136,7 +137,7 @@ static struct usb_serial_driver belkin_device = {
.open = belkin_sa_open,
.close = belkin_sa_close,
.read_int_callback = belkin_sa_read_int_callback,
- /* How we get the status info */
+ .process_read_urb = belkin_sa_process_read_urb,
.set_termios = belkin_sa_set_termios,
.break_ctl = belkin_sa_break_ctl,
.tiocmget = belkin_sa_tiocmget,
@@ -145,7 +146,6 @@ static struct usb_serial_driver belkin_device = {
.release = belkin_sa_release,
};
-
struct belkin_sa_private {
spinlock_t lock;
unsigned long control_state;
@@ -196,62 +196,43 @@ static int belkin_sa_startup(struct usb_serial *serial)
return 0;
}
-
static void belkin_sa_release(struct usb_serial *serial)
{
- struct belkin_sa_private *priv;
int i;
dbg("%s", __func__);
- for (i = 0; i < serial->num_ports; ++i) {
- /* My special items, the standard routines free my urbs */
- priv = usb_get_serial_port_data(serial->port[i]);
- kfree(priv);
- }
+ for (i = 0; i < serial->num_ports; ++i)
+ kfree(usb_get_serial_port_data(serial->port[i]));
}
-
-static int belkin_sa_open(struct tty_struct *tty,
+static int belkin_sa_open(struct tty_struct *tty,
struct usb_serial_port *port)
{
- int retval = 0;
+ int retval;
dbg("%s port %d", __func__, port->number);
- /*Start reading from the device*/
- /* TODO: Look at possibility of submitting multiple URBs to device to
- * enhance buffering. Win trace shows 16 initial read URBs.
- */
- port->read_urb->dev = port->serial->dev;
- retval = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (retval) {
- dev_err(&port->dev, "usb_submit_urb(read bulk) failed\n");
- goto exit;
- }
-
- port->interrupt_in_urb->dev = port->serial->dev;
retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (retval) {
- usb_kill_urb(port->read_urb);
dev_err(&port->dev, "usb_submit_urb(read int) failed\n");
+ return retval;
}
-exit:
- return retval;
-} /* belkin_sa_open */
+ retval = usb_serial_generic_open(tty, port);
+ if (retval)
+ usb_kill_urb(port->interrupt_in_urb);
+ return retval;
+}
static void belkin_sa_close(struct usb_serial_port *port)
{
dbg("%s port %d", __func__, port->number);
- /* shutdown our bulk reads and writes */
- usb_kill_urb(port->write_urb);
- usb_kill_urb(port->read_urb);
+ usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
-} /* belkin_sa_close */
-
+}
static void belkin_sa_read_int_callback(struct urb *urb)
{
@@ -310,31 +291,7 @@ static void belkin_sa_read_int_callback(struct urb *urb)
else
priv->control_state &= ~TIOCM_CD;
- /* Now to report any errors */
priv->last_lsr = data[BELKIN_SA_LSR_INDEX];
-#if 0
- /*
- * fill in the flip buffer here, but I do not know the relation
- * to the current/next receive buffer or characters. I need
- * to look in to this before committing any code.
- */
- if (priv->last_lsr & BELKIN_SA_LSR_ERR) {
- tty = tty_port_tty_get(&port->port);
- /* Overrun Error */
- if (priv->last_lsr & BELKIN_SA_LSR_OE) {
- }
- /* Parity Error */
- if (priv->last_lsr & BELKIN_SA_LSR_PE) {
- }
- /* Framing Error */
- if (priv->last_lsr & BELKIN_SA_LSR_FE) {
- }
- /* Break Indicator */
- if (priv->last_lsr & BELKIN_SA_LSR_BI) {
- }
- tty_kref_put(tty);
- }
-#endif
spin_unlock_irqrestore(&priv->lock, flags);
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
@@ -343,6 +300,53 @@ exit:
"result %d\n", __func__, retval);
}
+static void belkin_sa_process_read_urb(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+ struct belkin_sa_private *priv = usb_get_serial_port_data(port);
+ struct tty_struct *tty;
+ unsigned char *data = urb->transfer_buffer;
+ unsigned long flags;
+ unsigned char status;
+ char tty_flag;
+
+ /* Update line status */
+ tty_flag = TTY_NORMAL;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ status = priv->last_lsr;
+ priv->last_lsr &= ~BELKIN_SA_LSR_ERR;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (!urb->actual_length)
+ return;
+
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
+
+ if (status & BELKIN_SA_LSR_ERR) {
+ /* Break takes precedence over parity, which takes precedence
+ * over framing errors. */
+ if (status & BELKIN_SA_LSR_BI)
+ tty_flag = TTY_BREAK;
+ else if (status & BELKIN_SA_LSR_PE)
+ tty_flag = TTY_PARITY;
+ else if (status & BELKIN_SA_LSR_FE)
+ tty_flag = TTY_FRAME;
+ dev_dbg(&port->dev, "tty_flag = %d\n", tty_flag);
+
+ /* Overrun is special, not associated with a char. */
+ if (status & BELKIN_SA_LSR_OE)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
+
+ tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+}
+
static void belkin_sa_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
@@ -482,8 +486,7 @@ static void belkin_sa_set_termios(struct tty_struct *tty,
spin_lock_irqsave(&priv->lock, flags);
priv->control_state = control_state;
spin_unlock_irqrestore(&priv->lock, flags);
-} /* belkin_sa_set_termios */
-
+}
static void belkin_sa_break_ctl(struct tty_struct *tty, int break_state)
{
@@ -494,7 +497,6 @@ static void belkin_sa_break_ctl(struct tty_struct *tty, int break_state)
dev_err(&port->dev, "Set break_ctl %d\n", break_state);
}
-
static int belkin_sa_tiocmget(struct tty_struct *tty, struct file *file)
{
struct usb_serial_port *port = tty->driver_data;
@@ -511,7 +513,6 @@ static int belkin_sa_tiocmget(struct tty_struct *tty, struct file *file)
return control_state;
}
-
static int belkin_sa_tiocmset(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear)
{
@@ -583,7 +584,6 @@ failed_usb_serial_register:
return retval;
}
-
static void __exit belkin_sa_exit (void)
{
usb_deregister(&belkin_driver);
diff --git a/drivers/usb/serial/belkin_sa.h b/drivers/usb/serial/belkin_sa.h
index c66a673..c74b58a 100644
--- a/drivers/usb/serial/belkin_sa.h
+++ b/drivers/usb/serial/belkin_sa.h
@@ -8,10 +8,10 @@
* and associated source files. Please see the usb/serial files for
* individual credits and copyrights.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
@@ -66,7 +66,7 @@
#ifdef WHEN_I_LEARN_THIS
#define BELKIN_SA_SET_MAGIC_REQUEST 17 /* I don't know, possibly flush */
/* (always in Wininit sequence before flow control) */
-#define BELKIN_SA_RESET xx /* Reset the port */
+#define BELKIN_SA_RESET xx /* Reset the port */
#define BELKIN_SA_GET_MODEM_STATUS xx /* Force return of modem status register */
#endif
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 7e8e398..63f7cc4 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -305,10 +305,7 @@ static void ch341_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
- /* shutdown our urbs */
- dbg("%s - shutting down urbs", __func__);
- usb_kill_urb(port->write_urb);
- usb_kill_urb(port->read_urb);
+ usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
}
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index f347da2..1ee6b2a 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -66,7 +66,7 @@ static int usb_console_setup(struct console *co, char *options)
struct usb_serial_port *port;
int retval;
struct tty_struct *tty = NULL;
- struct ktermios *termios = NULL, dummy;
+ struct ktermios dummy;
dbg("%s", __func__);
@@ -141,15 +141,14 @@ static int usb_console_setup(struct console *co, char *options)
goto reset_open_count;
}
kref_init(&tty->kref);
- termios = kzalloc(sizeof(*termios), GFP_KERNEL);
- if (!termios) {
+ tty_port_tty_set(&port->port, tty);
+ tty->driver = usb_serial_tty_driver;
+ tty->index = co->index;
+ if (tty_init_termios(tty)) {
retval = -ENOMEM;
err("no more memory");
goto free_tty;
}
- memset(&dummy, 0, sizeof(struct ktermios));
- tty->termios = termios;
- tty_port_tty_set(&port->port, tty);
}
/* only call the device specific open if this
@@ -161,16 +160,16 @@ static int usb_console_setup(struct console *co, char *options)
if (retval) {
err("could not open USB console port");
- goto free_termios;
+ goto fail;
}
if (serial->type->set_termios) {
- termios->c_cflag = cflag;
- tty_termios_encode_baud_rate(termios, baud, baud);
+ tty->termios->c_cflag = cflag;
+ tty_termios_encode_baud_rate(tty->termios, baud, baud);
+ memset(&dummy, 0, sizeof(struct ktermios));
serial->type->set_termios(tty, port, &dummy);
tty_port_tty_set(&port->port, NULL);
- kfree(termios);
kfree(tty);
}
set_bit(ASYNCB_INITIALIZED, &port->port.flags);
@@ -180,14 +179,12 @@ static int usb_console_setup(struct console *co, char *options)
--port->port.count;
/* The console is special in terms of closing the device so
* indicate this port is now acting as a system console. */
- port->console = 1;
port->port.console = 1;
mutex_unlock(&serial->disc_mutex);
return retval;
- free_termios:
- kfree(termios);
+ fail:
tty_port_tty_set(&port->port, NULL);
free_tty:
kfree(tty);
@@ -217,7 +214,7 @@ static void usb_console_write(struct console *co,
dbg("%s - port %d, %d byte(s)", __func__, port->number, count);
- if (!port->console) {
+ if (!port->port.console) {
dbg("%s - port not opened", __func__);
return;
}
@@ -313,7 +310,7 @@ void usb_serial_console_exit(void)
{
if (usbcons_info.port) {
unregister_console(&usbcons);
- usbcons_info.port->console = 0;
+ usbcons_info.port->port.console = 0;
usbcons_info.port = NULL;
}
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index ec9b044..8b8c797 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -34,7 +34,6 @@
* Function Prototypes
*/
static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *);
-static void cp210x_cleanup(struct usb_serial_port *);
static void cp210x_close(struct usb_serial_port *);
static void cp210x_get_termios(struct tty_struct *,
struct usb_serial_port *port);
@@ -49,7 +48,6 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *,
unsigned int, unsigned int);
static void cp210x_break_ctl(struct tty_struct *, int);
static int cp210x_startup(struct usb_serial *);
-static void cp210x_disconnect(struct usb_serial *);
static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
static int cp210x_carrier_raised(struct usb_serial_port *p);
@@ -61,6 +59,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
+ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
{ USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
{ USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
@@ -72,9 +72,12 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x1601) }, /* Arkham Technology DS101 Adapter */
{ USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */
{ USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */
+ { USB_DEVICE(0x10C4, 0x8044) }, /* Cygnal Debug Adapter */
+ { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
{ USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
{ USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
{ USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
+ { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
{ USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
{ USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
{ USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
@@ -82,12 +85,15 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
{ USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
{ USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
+ { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
+ { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
+ { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
@@ -105,6 +111,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
{ USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
{ USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
{ USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
@@ -115,6 +122,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
+ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
@@ -138,6 +147,8 @@ static struct usb_serial_driver cp210x_device = {
.usb_driver = &cp210x_driver,
.id_table = id_table,
.num_ports = 1,
+ .bulk_in_size = 256,
+ .bulk_out_size = 256,
.open = cp210x_open,
.close = cp210x_close,
.break_ctl = cp210x_break_ctl,
@@ -145,7 +156,6 @@ static struct usb_serial_driver cp210x_device = {
.tiocmget = cp210x_tiocmget,
.tiocmset = cp210x_tiocmset,
.attach = cp210x_startup,
- .disconnect = cp210x_disconnect,
.dtr_rts = cp210x_dtr_rts,
.carrier_raised = cp210x_carrier_raised
};
@@ -370,7 +380,6 @@ static unsigned int cp210x_quantise_baudrate(unsigned int baud) {
static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- struct usb_serial *serial = port->serial;
int result;
dbg("%s - port %d", __func__, port->number);
@@ -381,49 +390,20 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
return -EPROTO;
}
- /* Start reading from the device */
- usb_fill_bulk_urb(port->read_urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- serial->type->read_bulk_callback,
- port);
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result) {
- dev_err(&port->dev, "%s - failed resubmitting read urb, "
- "error %d\n", __func__, result);
+ result = usb_serial_generic_open(tty, port);
+ if (result)
return result;
- }
/* Configure the termios structure */
cp210x_get_termios(tty, port);
return 0;
}
-static void cp210x_cleanup(struct usb_serial_port *port)
-{
- struct usb_serial *serial = port->serial;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (serial->dev) {
- /* shutdown any bulk reads that might be going on */
- if (serial->num_bulk_out)
- usb_kill_urb(port->write_urb);
- if (serial->num_bulk_in)
- usb_kill_urb(port->read_urb);
- }
-}
-
static void cp210x_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
- /* shutdown our urbs */
- dbg("%s - shutting down urbs", __func__);
- usb_kill_urb(port->write_urb);
- usb_kill_urb(port->read_urb);
+ usb_serial_generic_close(port);
mutex_lock(&port->serial->disc_mutex);
if (!port->serial->disconnected)
@@ -807,17 +787,6 @@ static int cp210x_startup(struct usb_serial *serial)
return 0;
}
-static void cp210x_disconnect(struct usb_serial *serial)
-{
- int i;
-
- dbg("%s", __func__);
-
- /* Stop reads and writes on all ports */
- for (i = 0; i < serial->num_ports; ++i)
- cp210x_cleanup(serial->port[i]);
-}
-
static int __init cp210x_init(void)
{
int retval;
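
The hunks above drop cp210x's private read URB handling, close-time URB killing and disconnect hook, and instead declare bulk_in_size/bulk_out_size so the usb-serial core allocates the transfer buffers and drives the generic open/close and read/write paths. As a rough sketch of that pattern (the example_* names and the 0x1234/0x5678 IDs are invented for illustration and are not part of this patch):

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static const struct usb_device_id example_ids[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* hypothetical device */
	{ }
};

static struct usb_driver example_driver = {
	.name		= "example",
	.probe		= usb_serial_probe,
	.disconnect	= usb_serial_disconnect,
	.id_table	= example_ids,
	.no_dynamic_id	= 1,
};

static struct usb_serial_driver example_device = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "example",
	},
	.usb_driver	= &example_driver,
	.id_table	= example_ids,
	.num_ports	= 1,
	/* The core sizes the bulk URB buffers from these fields ... */
	.bulk_in_size	= 256,
	.bulk_out_size	= 256,
	/*
	 * ... and with no open/close/write hooks the generic
	 * implementation submits the read urb and drains the
	 * write fifo on the driver's behalf.
	 */
};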
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index e23c779..f5d0674 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -64,6 +64,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial.h>
+#include <linux/kfifo.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
@@ -79,13 +80,12 @@ static int unstable_bauds;
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.09"
+#define DRIVER_VERSION "v1.10"
#define DRIVER_AUTHOR "Lonnie Mendez <dignome@gmail.com>, Neil Whelchel <koyama@firstlight.net>"
#define DRIVER_DESC "Cypress USB to Serial Driver"
/* write buffer size defines */
#define CYPRESS_BUF_SIZE 1024
-#define CYPRESS_CLOSING_WAIT (30*HZ)
static const struct usb_device_id id_table_earthmate[] = {
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
@@ -135,7 +135,7 @@ struct cypress_private {
int bytes_out; /* used for statistics */
int cmd_count; /* used for statistics */
int cmd_ctrl; /* always set this to 1 before issuing a command */
- struct cypress_buf *buf; /* write buffer */
+ struct kfifo write_fifo; /* write fifo */
int write_urb_in_use; /* write urb in use indicator */
int write_urb_interval; /* interval to use for write urb */
int read_urb_interval; /* interval to use for read urb */
@@ -157,14 +157,6 @@ struct cypress_private {
struct ktermios tmp_termios; /* stores the old termios settings */
};
-/* write buffer structure */
-struct cypress_buf {
- unsigned int buf_size;
- char *buf_buf;
- char *buf_get;
- char *buf_put;
-};
-
/* function prototypes for the Cypress USB to serial device */
static int cypress_earthmate_startup(struct usb_serial *serial);
static int cypress_hidcom_startup(struct usb_serial *serial);
@@ -190,17 +182,6 @@ static void cypress_unthrottle(struct tty_struct *tty);
static void cypress_set_dead(struct usb_serial_port *port);
static void cypress_read_int_callback(struct urb *urb);
static void cypress_write_int_callback(struct urb *urb);
-/* write buffer functions */
-static struct cypress_buf *cypress_buf_alloc(unsigned int size);
-static void cypress_buf_free(struct cypress_buf *cb);
-static void cypress_buf_clear(struct cypress_buf *cb);
-static unsigned int cypress_buf_data_avail(struct cypress_buf *cb);
-static unsigned int cypress_buf_space_avail(struct cypress_buf *cb);
-static unsigned int cypress_buf_put(struct cypress_buf *cb,
- const char *buf, unsigned int count);
-static unsigned int cypress_buf_get(struct cypress_buf *cb,
- char *buf, unsigned int count);
-
static struct usb_serial_driver cypress_earthmate_device = {
.driver = {
@@ -503,8 +484,7 @@ static int generic_startup(struct usb_serial *serial)
priv->comm_is_ok = !0;
spin_lock_init(&priv->lock);
- priv->buf = cypress_buf_alloc(CYPRESS_BUF_SIZE);
- if (priv->buf == NULL) {
+ if (kfifo_alloc(&priv->write_fifo, CYPRESS_BUF_SIZE, GFP_KERNEL)) {
kfree(priv);
return -ENOMEM;
}
@@ -627,7 +607,7 @@ static void cypress_release(struct usb_serial *serial)
priv = usb_get_serial_port_data(serial->port[0]);
if (priv) {
- cypress_buf_free(priv->buf);
+ kfifo_free(&priv->write_fifo);
kfree(priv);
}
}
@@ -704,6 +684,7 @@ static void cypress_dtr_rts(struct usb_serial_port *port, int on)
static void cypress_close(struct usb_serial_port *port)
{
struct cypress_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
dbg("%s - port %d", __func__, port->number);
@@ -713,12 +694,14 @@ static void cypress_close(struct usb_serial_port *port)
mutex_unlock(&port->serial->disc_mutex);
return;
}
- cypress_buf_clear(priv->buf);
+ spin_lock_irqsave(&priv->lock, flags);
+ kfifo_reset_out(&priv->write_fifo);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
dbg("%s - stopping urbs", __func__);
usb_kill_urb(port->interrupt_in_urb);
usb_kill_urb(port->interrupt_out_urb);
-
if (stats)
dev_info(&port->dev, "Statistics: %d Bytes In | %d Bytes Out | %d Commands Issued\n",
priv->bytes_in, priv->bytes_out, priv->cmd_count);
@@ -730,7 +713,6 @@ static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
struct cypress_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
dbg("%s - port %d, %d bytes", __func__, port->number, count);
@@ -745,9 +727,7 @@ static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port,
if (!count)
return count;
- spin_lock_irqsave(&priv->lock, flags);
- count = cypress_buf_put(priv->buf, buf, count);
- spin_unlock_irqrestore(&priv->lock, flags);
+ count = kfifo_in_locked(&priv->write_fifo, buf, count, &priv->lock);
finish:
cypress_send(port);
@@ -807,9 +787,10 @@ static void cypress_send(struct usb_serial_port *port)
} else
spin_unlock_irqrestore(&priv->lock, flags);
- count = cypress_buf_get(priv->buf, &port->interrupt_out_buffer[offset],
- port->interrupt_out_size-offset);
-
+ count = kfifo_out_locked(&priv->write_fifo,
+ &port->interrupt_out_buffer[offset],
+ port->interrupt_out_size - offset,
+ &priv->lock);
if (count == 0)
return;
@@ -875,7 +856,7 @@ static int cypress_write_room(struct tty_struct *tty)
dbg("%s - port %d", __func__, port->number);
spin_lock_irqsave(&priv->lock, flags);
- room = cypress_buf_space_avail(priv->buf);
+ room = kfifo_avail(&priv->write_fifo);
spin_unlock_irqrestore(&priv->lock, flags);
dbg("%s - returns %d", __func__, room);
@@ -1143,7 +1124,7 @@ static int cypress_chars_in_buffer(struct tty_struct *tty)
dbg("%s - port %d", __func__, port->number);
spin_lock_irqsave(&priv->lock, flags);
- chars = cypress_buf_data_avail(priv->buf);
+ chars = kfifo_len(&priv->write_fifo);
spin_unlock_irqrestore(&priv->lock, flags);
dbg("%s - returns %d", __func__, chars);
@@ -1309,7 +1290,7 @@ static void cypress_read_int_callback(struct urb *urb)
/* process read if there is data other than line status */
if (tty && bytes > i) {
tty_insert_flip_string_fixed_flag(tty, data + i,
- bytes - i, tty_flag);
+ tty_flag, bytes - i);
tty_flip_buffer_push(tty);
}
@@ -1397,193 +1378,6 @@ static void cypress_write_int_callback(struct urb *urb)
/*****************************************************************************
- * Write buffer functions - buffering code from pl2303 used
- *****************************************************************************/
-
-/*
- * cypress_buf_alloc
- *
- * Allocate a circular buffer and all associated memory.
- */
-
-static struct cypress_buf *cypress_buf_alloc(unsigned int size)
-{
-
- struct cypress_buf *cb;
-
-
- if (size == 0)
- return NULL;
-
- cb = kmalloc(sizeof(struct cypress_buf), GFP_KERNEL);
- if (cb == NULL)
- return NULL;
-
- cb->buf_buf = kmalloc(size, GFP_KERNEL);
- if (cb->buf_buf == NULL) {
- kfree(cb);
- return NULL;
- }
-
- cb->buf_size = size;
- cb->buf_get = cb->buf_put = cb->buf_buf;
-
- return cb;
-
-}
-
-
-/*
- * cypress_buf_free
- *
- * Free the buffer and all associated memory.
- */
-
-static void cypress_buf_free(struct cypress_buf *cb)
-{
- if (cb) {
- kfree(cb->buf_buf);
- kfree(cb);
- }
-}
-
-
-/*
- * cypress_buf_clear
- *
- * Clear out all data in the circular buffer.
- */
-
-static void cypress_buf_clear(struct cypress_buf *cb)
-{
- if (cb != NULL)
- cb->buf_get = cb->buf_put;
- /* equivalent to a get of all data available */
-}
-
-
-/*
- * cypress_buf_data_avail
- *
- * Return the number of bytes of data available in the circular
- * buffer.
- */
-
-static unsigned int cypress_buf_data_avail(struct cypress_buf *cb)
-{
- if (cb != NULL)
- return (cb->buf_size + cb->buf_put - cb->buf_get)
- % cb->buf_size;
- else
- return 0;
-}
-
-
-/*
- * cypress_buf_space_avail
- *
- * Return the number of bytes of space available in the circular
- * buffer.
- */
-
-static unsigned int cypress_buf_space_avail(struct cypress_buf *cb)
-{
- if (cb != NULL)
- return (cb->buf_size + cb->buf_get - cb->buf_put - 1)
- % cb->buf_size;
- else
- return 0;
-}
-
-
-/*
- * cypress_buf_put
- *
- * Copy data data from a user buffer and put it into the circular buffer.
- * Restrict to the amount of space available.
- *
- * Return the number of bytes copied.
- */
-
-static unsigned int cypress_buf_put(struct cypress_buf *cb, const char *buf,
- unsigned int count)
-{
-
- unsigned int len;
-
-
- if (cb == NULL)
- return 0;
-
- len = cypress_buf_space_avail(cb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = cb->buf_buf + cb->buf_size - cb->buf_put;
- if (count > len) {
- memcpy(cb->buf_put, buf, len);
- memcpy(cb->buf_buf, buf+len, count - len);
- cb->buf_put = cb->buf_buf + count - len;
- } else {
- memcpy(cb->buf_put, buf, count);
- if (count < len)
- cb->buf_put += count;
- else /* count == len */
- cb->buf_put = cb->buf_buf;
- }
-
- return count;
-
-}
-
-
-/*
- * cypress_buf_get
- *
- * Get data from the circular buffer and copy to the given buffer.
- * Restrict to the amount of data available.
- *
- * Return the number of bytes copied.
- */
-
-static unsigned int cypress_buf_get(struct cypress_buf *cb, char *buf,
- unsigned int count)
-{
-
- unsigned int len;
-
-
- if (cb == NULL)
- return 0;
-
- len = cypress_buf_data_avail(cb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = cb->buf_buf + cb->buf_size - cb->buf_get;
- if (count > len) {
- memcpy(buf, cb->buf_get, len);
- memcpy(buf+len, cb->buf_buf, count - len);
- cb->buf_get = cb->buf_buf + count - len;
- } else {
- memcpy(buf, cb->buf_get, count);
- if (count < len)
- cb->buf_get += count;
- else /* count == len */
- cb->buf_get = cb->buf_buf;
- }
-
- return count;
-
-}
-
-/*****************************************************************************
* Module functions
*****************************************************************************/
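
The cypress_m8 changes above replace the driver's hand-rolled circular write buffer with the in-kernel kfifo. A minimal sketch of the same calls used in those hunks (the example_* names and the 1024-byte size are made up for illustration):

#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct example_port {
	struct kfifo	write_fifo;
	spinlock_t	lock;
};

static int example_setup(struct example_port *p)
{
	spin_lock_init(&p->lock);
	/* kfifo_alloc() rounds the size up to a power of two and
	 * returns 0 on success. */
	return kfifo_alloc(&p->write_fifo, 1024, GFP_KERNEL);
}

static int example_queue(struct example_port *p,
			 const unsigned char *buf, int count)
{
	/* Copies at most count bytes, returns how many actually fit;
	 * the spinlock is taken internally. */
	return kfifo_in_locked(&p->write_fifo, buf, count, &p->lock);
}

static int example_drain(struct example_port *p,
			 unsigned char *dst, int size)
{
	/* Pulls up to size queued bytes back out, again under the lock. */
	return kfifo_out_locked(&p->write_fifo, dst, size, &p->lock);
}

static void example_teardown(struct example_port *p)
{
	kfifo_free(&p->write_fifo);
}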
diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h
index 1fd360e..67cf608 100644
--- a/drivers/usb/serial/cypress_m8.h
+++ b/drivers/usb/serial/cypress_m8.h
@@ -1,27 +1,32 @@
#ifndef CYPRESS_M8_H
#define CYPRESS_M8_H
-/* definitions and function prototypes used for the cypress USB to Serial controller */
+/*
+ * definitions and function prototypes used for the cypress USB to Serial
+ * controller
+ */
-/* For sending our feature buffer - controlling serial communication states */
-/* Linux HID has no support for serial devices so we do this through the driver */
-#define HID_REQ_GET_REPORT 0x01
-#define HID_REQ_SET_REPORT 0x09
+/*
+ * For sending our feature buffer - controlling serial communication states.
+ * Linux HID has no support for serial devices so we do this through the driver
+ */
+#define HID_REQ_GET_REPORT 0x01
+#define HID_REQ_SET_REPORT 0x09
/* List other cypress USB to Serial devices here, and add them to the id_table */
/* DeLorme Earthmate USB - a GPS device */
-#define VENDOR_ID_DELORME 0x1163
-#define PRODUCT_ID_EARTHMATEUSB 0x0100
-#define PRODUCT_ID_EARTHMATEUSB_LT20 0x0200
+#define VENDOR_ID_DELORME 0x1163
+#define PRODUCT_ID_EARTHMATEUSB 0x0100
+#define PRODUCT_ID_EARTHMATEUSB_LT20 0x0200
/* Cypress HID->COM RS232 Adapter */
-#define VENDOR_ID_CYPRESS 0x04b4
-#define PRODUCT_ID_CYPHIDCOM 0x5500
+#define VENDOR_ID_CYPRESS 0x04b4
+#define PRODUCT_ID_CYPHIDCOM 0x5500
/* Powercom UPS, chip CY7C63723 */
-#define VENDOR_ID_POWERCOM 0x0d9f
-#define PRODUCT_ID_UPS 0x0002
+#define VENDOR_ID_POWERCOM 0x0d9f
+#define PRODUCT_ID_UPS 0x0002
/* Nokia CA-42 USB to serial cable */
#define VENDOR_ID_DAZZLE 0x07d0
@@ -29,17 +34,17 @@
/* End of device listing */
/* Used for setting / requesting serial line settings */
-#define CYPRESS_SET_CONFIG 0x01
-#define CYPRESS_GET_CONFIG 0x02
+#define CYPRESS_SET_CONFIG 0x01
+#define CYPRESS_GET_CONFIG 0x02
/* Used for throttle control */
-#define THROTTLED 0x1
-#define ACTUALLY_THROTTLED 0x2
+#define THROTTLED 0x1
+#define ACTUALLY_THROTTLED 0x2
-/* chiptypes - used in case firmware differs from the generic form ... offering
- * different baud speeds/etc.
+/*
+ * chiptypes - used in case firmware differs from the generic form ... offering
+ * different baud speeds/etc.
*/
-
#define CT_EARTHMATE 0x01
#define CT_CYPHIDCOM 0x02
#define CT_CA42V2 0x03
@@ -50,15 +55,15 @@
/* these are sent / read at byte 0 of the input/output hid reports */
/* You can find these values defined in the CY4601 USB to Serial design notes */
-#define CONTROL_DTR 0x20 /* data terminal ready - flow control - host to device */
+#define CONTROL_DTR 0x20 /* data terminal ready - flow control - host to device */
#define UART_DSR 0x20 /* data set ready - flow control - device to host */
-#define CONTROL_RTS 0x10 /* request to send - flow control - host to device */
+#define CONTROL_RTS 0x10 /* request to send - flow control - host to device */
#define UART_CTS 0x10 /* clear to send - flow control - device to host */
-#define UART_RI 0x10 /* ring indicator - modem - device to host */
+#define UART_RI 0x10 /* ring indicator - modem - device to host */
#define UART_CD 0x40 /* carrier detect - modem - device to host */
-#define CYP_ERROR 0x08 /* received from input report - device to host */
+#define CYP_ERROR 0x08 /* received from input report - device to host */
/* Note - the below has nothing to do with the "feature report" reset */
-#define CONTROL_RESET 0x08 /* sent with output report - host to device */
+#define CONTROL_RESET 0x08 /* sent with output report - host to device */
/* End of RS-232 protocol definitions */
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 68b0aa5..3edda3e 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1703,8 +1703,8 @@ static int digi_read_inb_callback(struct urb *urb)
/* data length is len-1 (one byte of len is port_status) */
--len;
if (len > 0) {
- tty_insert_flip_string_fixed_flag(tty, data, len,
- flag);
+ tty_insert_flip_string_fixed_flag(tty, data, flag,
+ len);
tty_flip_buffer_push(tty);
}
}
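
Both this hunk and the cypress_m8 read callback above track the reordered tty_insert_flip_string_fixed_flag() arguments: the per-buffer flag now comes before the byte count. An illustrative caller using the new order (example_push is a made-up helper, not from this patch):

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Push received bytes with a single flag value (e.g. TTY_NORMAL,
 * TTY_PARITY) using the new argument order: tty, chars, flag, size. */
static void example_push(struct tty_struct *tty, const unsigned char *data,
			 size_t len, char flag)
{
	tty_insert_flip_string_fixed_flag(tty, data, flag, len);
	tty_flip_buffer_push(tty);
}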
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 5f740a1..504b558 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -13,44 +13,6 @@
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
- *
- * (07/16/2001) gb
- * remove unused code in empeg_close() (thanks to Oliver Neukum for
- * pointing this out) and rewrote empeg_set_termios().
- *
- * (05/30/2001) gkh
- * switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * (04/08/2001) gb
- * Identify version on module load.
- *
- * (01/22/2001) gb
- * Added write_room() and chars_in_buffer() support.
- *
- * (12/21/2000) gb
- * Moved termio stuff inside the port->active check.
- * Moved MOD_DEC_USE_COUNT to end of empeg_close().
- *
- * (12/03/2000) gb
- * Added tty->ldisc.set_termios(port, tty, NULL) to empeg_open().
- * This notifies the tty driver that the termios have changed.
- *
- * (11/13/2000) gb
- * Moved tty->low_latency = 1 from empeg_read_bulk_callback() to
- * empeg_open() (It only needs to be set once - Doh!)
- *
- * (11/11/2000) gb
- * Updated to work with id_table structure.
- *
- * (11/04/2000) gb
- * Forked this from visor.c, and hacked it up to work with an
- * Empeg ltd. empeg-car player. Constructive criticism welcomed.
- * I would like to say, 'Thank You' to Greg Kroah-Hartman for the
- * use of his code, and for his guidance, advice and patience. :)
- * A 'Thank You' is in order for John Ripley of Empeg ltd for his
- * advice, and patience too.
- *
*/
#include <linux/kernel.h>
@@ -71,7 +33,7 @@ static int debug;
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.2"
+#define DRIVER_VERSION "v1.3"
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Gary Brubaker <xavyer@ix.netcom.com>"
#define DRIVER_DESC "USB Empeg Mark I/II Driver"
@@ -79,19 +41,8 @@ static int debug;
#define EMPEG_PRODUCT_ID 0x0001
/* function prototypes for an empeg-car player */
-static int empeg_open(struct tty_struct *tty, struct usb_serial_port *port);
-static void empeg_close(struct usb_serial_port *port);
-static int empeg_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf,
- int count);
-static int empeg_write_room(struct tty_struct *tty);
-static int empeg_chars_in_buffer(struct tty_struct *tty);
-static void empeg_throttle(struct tty_struct *tty);
-static void empeg_unthrottle(struct tty_struct *tty);
static int empeg_startup(struct usb_serial *serial);
static void empeg_init_termios(struct tty_struct *tty);
-static void empeg_write_bulk_callback(struct urb *urb);
-static void empeg_read_bulk_callback(struct urb *urb);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(EMPEG_VENDOR_ID, EMPEG_PRODUCT_ID) },
@@ -105,7 +56,7 @@ static struct usb_driver empeg_driver = {
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
- .no_dynamic_id = 1,
+ .no_dynamic_id = 1,
};
static struct usb_serial_driver empeg_device = {
@@ -114,291 +65,16 @@ static struct usb_serial_driver empeg_device = {
.name = "empeg",
},
.id_table = id_table,
- .usb_driver = &empeg_driver,
+ .usb_driver = &empeg_driver,
.num_ports = 1,
- .open = empeg_open,
- .close = empeg_close,
- .throttle = empeg_throttle,
- .unthrottle = empeg_unthrottle,
+ .bulk_out_size = 256,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
.attach = empeg_startup,
.init_termios = empeg_init_termios,
- .write = empeg_write,
- .write_room = empeg_write_room,
- .chars_in_buffer = empeg_chars_in_buffer,
- .write_bulk_callback = empeg_write_bulk_callback,
- .read_bulk_callback = empeg_read_bulk_callback,
};
-#define NUM_URBS 16
-#define URB_TRANSFER_BUFFER_SIZE 4096
-
-static struct urb *write_urb_pool[NUM_URBS];
-static spinlock_t write_urb_pool_lock;
-static int bytes_in;
-static int bytes_out;
-
-/******************************************************************************
- * Empeg specific driver functions
- ******************************************************************************/
-static int empeg_open(struct tty_struct *tty,struct usb_serial_port *port)
-{
- struct usb_serial *serial = port->serial;
- int result = 0;
-
- dbg("%s - port %d", __func__, port->number);
-
- bytes_in = 0;
- bytes_out = 0;
-
- /* Start reading from the device */
- usb_fill_bulk_urb(
- port->read_urb,
- serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- empeg_read_bulk_callback,
- port);
-
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
-
- if (result)
- dev_err(&port->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, result);
-
- return result;
-}
-
-
-static void empeg_close(struct usb_serial_port *port)
-{
- dbg("%s - port %d", __func__, port->number);
-
- /* shutdown our bulk read */
- usb_kill_urb(port->read_urb);
- /* Uncomment the following line if you want to see some statistics in your syslog */
- /* dev_info (&port->dev, "Bytes In = %d Bytes Out = %d\n", bytes_in, bytes_out); */
-}
-
-
-static int empeg_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- struct usb_serial *serial = port->serial;
- struct urb *urb;
- const unsigned char *current_position = buf;
- unsigned long flags;
- int status;
- int i;
- int bytes_sent = 0;
- int transfer_size;
-
- dbg("%s - port %d", __func__, port->number);
-
- while (count > 0) {
- /* try to find a free urb in our list of them */
- urb = NULL;
-
- spin_lock_irqsave(&write_urb_pool_lock, flags);
-
- for (i = 0; i < NUM_URBS; ++i) {
- if (write_urb_pool[i]->status != -EINPROGRESS) {
- urb = write_urb_pool[i];
- break;
- }
- }
-
- spin_unlock_irqrestore(&write_urb_pool_lock, flags);
-
- if (urb == NULL) {
- dbg("%s - no more free urbs", __func__);
- goto exit;
- }
-
- if (urb->transfer_buffer == NULL) {
- urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_ATOMIC);
- if (urb->transfer_buffer == NULL) {
- dev_err(&port->dev,
- "%s no more kernel memory...\n",
- __func__);
- goto exit;
- }
- }
-
- transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
-
- memcpy(urb->transfer_buffer, current_position, transfer_size);
-
- usb_serial_debug_data(debug, &port->dev, __func__, transfer_size, urb->transfer_buffer);
-
- /* build up our urb */
- usb_fill_bulk_urb(
- urb,
- serial->dev,
- usb_sndbulkpipe(serial->dev,
- port->bulk_out_endpointAddress),
- urb->transfer_buffer,
- transfer_size,
- empeg_write_bulk_callback,
- port);
-
- /* send it down the pipe */
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed with status = %d\n", __func__, status);
- bytes_sent = status;
- break;
- }
-
- current_position += transfer_size;
- bytes_sent += transfer_size;
- count -= transfer_size;
- bytes_out += transfer_size;
-
- }
-exit:
- return bytes_sent;
-}
-
-
-static int empeg_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- unsigned long flags;
- int i;
- int room = 0;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&write_urb_pool_lock, flags);
- /* tally up the number of bytes available */
- for (i = 0; i < NUM_URBS; ++i) {
- if (write_urb_pool[i]->status != -EINPROGRESS)
- room += URB_TRANSFER_BUFFER_SIZE;
- }
- spin_unlock_irqrestore(&write_urb_pool_lock, flags);
- dbg("%s - returns %d", __func__, room);
- return room;
-
-}
-
-
-static int empeg_chars_in_buffer(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- unsigned long flags;
- int i;
- int chars = 0;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&write_urb_pool_lock, flags);
-
- /* tally up the number of bytes waiting */
- for (i = 0; i < NUM_URBS; ++i) {
- if (write_urb_pool[i]->status == -EINPROGRESS)
- chars += URB_TRANSFER_BUFFER_SIZE;
- }
-
- spin_unlock_irqrestore(&write_urb_pool_lock, flags);
- dbg("%s - returns %d", __func__, chars);
- return chars;
-}
-
-
-static void empeg_write_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - nonzero write bulk status received: %d",
- __func__, status);
- return;
- }
-
- usb_serial_port_softint(port);
-}
-
-
-static void empeg_read_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
- unsigned char *data = urb->transfer_buffer;
- int result;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - nonzero read bulk status received: %d",
- __func__, status);
- return;
- }
-
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
- tty = tty_port_tty_get(&port->port);
-
- if (urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
- bytes_in += urb->actual_length;
- }
- tty_kref_put(tty);
-
- /* Continue trying to always read */
- usb_fill_bulk_urb(
- port->read_urb,
- port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- empeg_read_bulk_callback,
- port);
-
- result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
-
- if (result)
- dev_err(&urb->dev->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
-
- return;
-
-}
-
-
-static void empeg_throttle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- dbg("%s - port %d", __func__, port->number);
- usb_kill_urb(port->read_urb);
-}
-
-
-static void empeg_unthrottle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- int result;
- dbg("%s - port %d", __func__, port->number);
-
- port->read_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result)
- dev_err(&port->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, result);
-}
-
-
-static int empeg_startup(struct usb_serial *serial)
+static int empeg_startup(struct usb_serial *serial)
{
int r;
@@ -414,10 +90,8 @@ static int empeg_startup(struct usb_serial *serial)
/* continue on with initialization */
return r;
-
}
-
static void empeg_init_termios(struct tty_struct *tty)
{
struct ktermios *termios = tty->termios;
@@ -462,77 +136,28 @@ static void empeg_init_termios(struct tty_struct *tty)
tty_encode_baud_rate(tty, 115200, 115200);
}
-
static int __init empeg_init(void)
{
- struct urb *urb;
- int i, retval;
-
- /* create our write urb pool and transfer buffers */
- spin_lock_init(&write_urb_pool_lock);
- for (i = 0; i < NUM_URBS; ++i) {
- urb = usb_alloc_urb(0, GFP_KERNEL);
- write_urb_pool[i] = urb;
- if (urb == NULL) {
- printk(KERN_ERR "empeg: No more urbs???\n");
- continue;
- }
-
- urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
- GFP_KERNEL);
- if (!urb->transfer_buffer) {
- printk(KERN_ERR "empeg: %s - out of memory for urb "
- "buffers.", __func__);
- continue;
- }
- }
+ int retval;
retval = usb_serial_register(&empeg_device);
if (retval)
- goto failed_usb_serial_register;
+ return retval;
retval = usb_register(&empeg_driver);
- if (retval)
- goto failed_usb_register;
-
+ if (retval) {
+ usb_serial_deregister(&empeg_device);
+ return retval;
+ }
printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
DRIVER_DESC "\n");
return 0;
-failed_usb_register:
- usb_serial_deregister(&empeg_device);
-failed_usb_serial_register:
- for (i = 0; i < NUM_URBS; ++i) {
- if (write_urb_pool[i]) {
- kfree(write_urb_pool[i]->transfer_buffer);
- usb_free_urb(write_urb_pool[i]);
- }
- }
- return retval;
}
-
static void __exit empeg_exit(void)
{
- int i;
- unsigned long flags;
-
usb_deregister(&empeg_driver);
usb_serial_deregister(&empeg_device);
-
- spin_lock_irqsave(&write_urb_pool_lock, flags);
-
- for (i = 0; i < NUM_URBS; ++i) {
- if (write_urb_pool[i]) {
- /* FIXME - uncomment the following usb_kill_urb call
- * when the host controllers get fixed to set urb->dev
- * = NULL after the urb is finished. Otherwise this
- * call oopses. */
- /* usb_kill_urb(write_urb_pool[i]); */
- kfree(write_urb_pool[i]->transfer_buffer);
- usb_free_urb(write_urb_pool[i]);
- }
- }
- spin_unlock_irqrestore(&write_urb_pool_lock, flags);
}
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1d7c4fa..050211a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1,6 +1,8 @@
/*
* USB FTDI SIO driver
*
+ * Copyright (C) 2009 - 2010
+ * Johan Hovold (jhovold@gmail.com)
* Copyright (C) 1999 - 2001
* Greg Kroah-Hartman (greg@kroah.com)
* Bill Ryder (bryder@sgi.com)
@@ -49,8 +51,8 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.5.0"
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr"
+#define DRIVER_VERSION "v1.6.0"
+#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "USB FTDI Serial Converters Driver"
static int debug;
@@ -59,7 +61,7 @@ static __u16 product;
struct ftdi_private {
struct kref kref;
- ftdi_chip_type_t chip_type;
+ enum ftdi_chip_type chip_type;
/* type of device, either SIO or FT8U232AM */
int baud_base; /* baud base clock for divisor setting */
int custom_divisor; /* custom_divisor kludge, this is for
@@ -69,10 +71,6 @@ struct ftdi_private {
/* the last data state set - needed for doing
* a break
*/
- int write_offset; /* This is the offset in the usb data block to
- * write the serial data - it varies between
- * devices
- */
int flags; /* some ASYNC_xxxx flags are supported */
unsigned long last_dtr_rts; /* saved modem control outputs */
wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
@@ -87,9 +85,6 @@ struct ftdi_private {
be enabled */
unsigned int latency; /* latency setting in use */
- spinlock_t tx_lock; /* spinlock for transmit state */
- unsigned long tx_outstanding_bytes;
- unsigned long tx_outstanding_urbs;
unsigned short max_packet_size;
struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() and change_speed() */
};
@@ -768,9 +763,6 @@ static const char *ftdi_chip_name[] = {
};
-/* Constants for read urb and write urb */
-#define BUFSZ 512
-
/* Used for TIOCMIWAIT */
#define FTDI_STATUS_B0_MASK (FTDI_RS0_CTS | FTDI_RS0_DSR | FTDI_RS0_RI | FTDI_RS0_RLSD)
#define FTDI_STATUS_B1_MASK (FTDI_RS_BI)
@@ -787,13 +779,9 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port);
static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port);
static void ftdi_close(struct usb_serial_port *port);
static void ftdi_dtr_rts(struct usb_serial_port *port, int on);
-static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count);
-static int ftdi_write_room(struct tty_struct *tty);
-static int ftdi_chars_in_buffer(struct tty_struct *tty);
-static void ftdi_write_bulk_callback(struct urb *urb);
-static void ftdi_read_bulk_callback(struct urb *urb);
-static void ftdi_process_read(struct usb_serial_port *port);
+static void ftdi_process_read_urb(struct urb *urb);
+static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size);
static void ftdi_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
static int ftdi_tiocmget(struct tty_struct *tty, struct file *file);
@@ -802,8 +790,6 @@ static int ftdi_tiocmset(struct tty_struct *tty, struct file *file,
static int ftdi_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
static void ftdi_break_ctl(struct tty_struct *tty, int break_state);
-static void ftdi_throttle(struct tty_struct *tty);
-static void ftdi_unthrottle(struct tty_struct *tty);
static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base);
static unsigned short int ftdi_232am_baud_to_divisor(int baud);
@@ -821,19 +807,18 @@ static struct usb_serial_driver ftdi_sio_device = {
.usb_driver = &ftdi_driver,
.id_table = id_table_combined,
.num_ports = 1,
+ .bulk_in_size = 512,
+ .bulk_out_size = 256,
.probe = ftdi_sio_probe,
.port_probe = ftdi_sio_port_probe,
.port_remove = ftdi_sio_port_remove,
.open = ftdi_open,
.close = ftdi_close,
.dtr_rts = ftdi_dtr_rts,
- .throttle = ftdi_throttle,
- .unthrottle = ftdi_unthrottle,
- .write = ftdi_write,
- .write_room = ftdi_write_room,
- .chars_in_buffer = ftdi_chars_in_buffer,
- .read_bulk_callback = ftdi_read_bulk_callback,
- .write_bulk_callback = ftdi_write_bulk_callback,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
+ .process_read_urb = ftdi_process_read_urb,
+ .prepare_write_buffer = ftdi_prepare_write_buffer,
.tiocmget = ftdi_tiocmget,
.tiocmset = ftdi_tiocmset,
.ioctl = ftdi_ioctl,
@@ -849,9 +834,6 @@ static struct usb_serial_driver ftdi_sio_device = {
#define HIGH 1
#define LOW 0
-/* number of outstanding urbs to prevent userspace DoS from happening */
-#define URB_UPPER_LIMIT 42
-
/*
* ***************************************************************************
* Utility functions
@@ -987,7 +969,7 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set,
static __u32 get_ftdi_divisor(struct tty_struct *tty,
struct usb_serial_port *port)
-{ /* get_ftdi_divisor */
+{
struct ftdi_private *priv = usb_get_serial_port_data(port);
__u32 div_value = 0;
int div_okay = 1;
@@ -1211,12 +1193,11 @@ static int get_serial_info(struct usb_serial_port *port,
if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
return -EFAULT;
return 0;
-} /* get_serial_info */
-
+}
static int set_serial_info(struct tty_struct *tty,
struct usb_serial_port *port, struct serial_struct __user *newinfo)
-{ /* set_serial_info */
+{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct serial_struct new_serial;
struct ftdi_private old_priv;
@@ -1279,8 +1260,7 @@ check_and_exit:
else
mutex_unlock(&priv->cfg_lock);
return 0;
-
-} /* set_serial_info */
+}
/* Determine type of FTDI chip based on USB config and descriptor. */
@@ -1294,7 +1274,6 @@ static void ftdi_determine_type(struct usb_serial_port *port)
/* Assume it is not the original SIO device for now. */
priv->baud_base = 48000000 / 2;
- priv->write_offset = 0;
version = le16_to_cpu(udev->descriptor.bcdDevice);
interfaces = udev->actconfig->desc.bNumInterfaces;
@@ -1336,7 +1315,6 @@ static void ftdi_determine_type(struct usb_serial_port *port)
/* Old device. Assume it's the original SIO. */
priv->chip_type = SIO;
priv->baud_base = 12000000 / 16;
- priv->write_offset = 1;
} else if (version < 0x400) {
/* Assume it's an FT8U232AM (or FT8U245AM) */
/* (It might be a BM because of the iSerialNumber bug,
@@ -1543,7 +1521,6 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
}
kref_init(&priv->kref);
- spin_lock_init(&priv->tx_lock);
mutex_init(&priv->cfg_lock);
init_waitqueue_head(&priv->delta_msr_wait);
@@ -1552,28 +1529,7 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
if (quirk && quirk->port_probe)
quirk->port_probe(priv);
- /* Increase the size of read buffers */
- kfree(port->bulk_in_buffer);
- port->bulk_in_buffer = kmalloc(BUFSZ, GFP_KERNEL);
- if (!port->bulk_in_buffer) {
- kfree(priv);
- return -ENOMEM;
- }
- if (port->read_urb) {
- port->read_urb->transfer_buffer = port->bulk_in_buffer;
- port->read_urb->transfer_buffer_length = BUFSZ;
- }
-
priv->port = port;
-
- /* Free port's existing write urb and transfer buffer. */
- if (port->write_urb) {
- usb_free_urb(port->write_urb);
- port->write_urb = NULL;
- }
- kfree(port->bulk_out_buffer);
- port->bulk_out_buffer = NULL;
-
usb_set_serial_port_data(port, priv);
ftdi_determine_type(port);
@@ -1594,7 +1550,7 @@ static void ftdi_USB_UIRT_setup(struct ftdi_private *priv)
priv->flags |= ASYNC_SPD_CUST;
priv->custom_divisor = 77;
priv->force_baud = 38400;
-} /* ftdi_USB_UIRT_setup */
+}
/* Setup for the HE-TIRA1 device, which requires hardwired
* baudrate (38400 gets mapped to 100000) and RTS-CTS enabled. */
@@ -1607,7 +1563,7 @@ static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv)
priv->custom_divisor = 240;
priv->force_baud = 38400;
priv->force_rtscts = 1;
-} /* ftdi_HE_TIRA1_setup */
+}
/*
* Module parameter to control latency timer for NDI FTDI-based USB devices.
@@ -1700,31 +1656,10 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
return 0;
}
-static int ftdi_submit_read_urb(struct usb_serial_port *port, gfp_t mem_flags)
-{
- struct urb *urb = port->read_urb;
- struct usb_serial *serial = port->serial;
- int result;
-
- usb_fill_bulk_urb(urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- urb->transfer_buffer,
- urb->transfer_buffer_length,
- ftdi_read_bulk_callback, port);
- result = usb_submit_urb(urb, mem_flags);
- if (result && result != -EPERM)
- dev_err(&port->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, result);
- return result;
-}
-
static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
-{ /* ftdi_open */
+{
struct usb_device *dev = port->serial->dev;
struct ftdi_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
int result;
dbg("%s", __func__);
@@ -1746,20 +1681,13 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
if (tty)
ftdi_set_termios(tty, port, tty->termios);
- /* Not throttled */
- spin_lock_irqsave(&port->lock, flags);
- port->throttled = 0;
- port->throttle_req = 0;
- spin_unlock_irqrestore(&port->lock, flags);
-
/* Start reading from the device */
- result = ftdi_submit_read_urb(port, GFP_KERNEL);
+ result = usb_serial_generic_open(tty, port);
if (!result)
kref_get(&priv->kref);
return result;
-} /* ftdi_open */
-
+}
static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
{
@@ -1789,22 +1717,16 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
 * usbserial:__serial_close only calls ftdi_close if the port is open

*
* This only gets called when it is the last close
- *
- *
*/
-
static void ftdi_close(struct usb_serial_port *port)
-{ /* ftdi_close */
+{
struct ftdi_private *priv = usb_get_serial_port_data(port);
dbg("%s", __func__);
- /* shutdown our bulk read */
- usb_kill_urb(port->read_urb);
+ usb_serial_generic_close(port);
kref_put(&priv->kref, ftdi_sio_priv_release);
-} /* ftdi_close */
-
-
+}
/* The SIO requires the first byte to have:
* B0 1
@@ -1813,211 +1735,39 @@ static void ftdi_close(struct usb_serial_port *port)
*
* The new devices do not require this byte
*/
-static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{ /* ftdi_write */
- struct ftdi_private *priv = usb_get_serial_port_data(port);
- struct urb *urb;
- unsigned char *buffer;
- int data_offset ; /* will be 1 for the SIO and 0 otherwise */
- int status;
- int transfer_size;
+static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size)
+{
+ struct ftdi_private *priv;
+ int count;
unsigned long flags;
- dbg("%s port %d, %d bytes", __func__, port->number, count);
-
- if (count == 0) {
- dbg("write request of 0 bytes");
- return 0;
- }
- spin_lock_irqsave(&priv->tx_lock, flags);
- if (priv->tx_outstanding_urbs > URB_UPPER_LIMIT) {
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- dbg("%s - write limit hit", __func__);
- return 0;
- }
- priv->tx_outstanding_urbs++;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
-
- data_offset = priv->write_offset;
- dbg("data_offset set to %d", data_offset);
-
- /* Determine total transfer size */
- transfer_size = count;
- if (data_offset > 0) {
- /* Original sio needs control bytes too... */
- transfer_size += (data_offset *
- ((count + (priv->max_packet_size - 1 - data_offset)) /
- (priv->max_packet_size - data_offset)));
- }
-
- buffer = kmalloc(transfer_size, GFP_ATOMIC);
- if (!buffer) {
- dev_err(&port->dev,
- "%s ran out of kernel memory for urb ...\n", __func__);
- count = -ENOMEM;
- goto error_no_buffer;
- }
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- dev_err(&port->dev, "%s - no more free urbs\n", __func__);
- count = -ENOMEM;
- goto error_no_urb;
- }
+ priv = usb_get_serial_port_data(port);
- /* Copy data */
- if (data_offset > 0) {
- /* Original sio requires control byte at start of
- each packet. */
- int user_pktsz = priv->max_packet_size - data_offset;
- int todo = count;
- unsigned char *first_byte = buffer;
- const unsigned char *current_position = buf;
-
- while (todo > 0) {
- if (user_pktsz > todo)
- user_pktsz = todo;
- /* Write the control byte at the front of the packet*/
- *first_byte = 1 | ((user_pktsz) << 2);
- /* Copy data for packet */
- memcpy(first_byte + data_offset,
- current_position, user_pktsz);
- first_byte += user_pktsz + data_offset;
- current_position += user_pktsz;
- todo -= user_pktsz;
+ if (priv->chip_type == SIO) {
+ unsigned char *buffer = dest;
+ int i, len, c;
+
+ count = 0;
+ spin_lock_irqsave(&port->lock, flags);
+ for (i = 0; i < size - 1; i += priv->max_packet_size) {
+ len = min_t(int, size - i, priv->max_packet_size) - 1;
+ c = kfifo_out(&port->write_fifo, &buffer[i + 1], len);
+ if (!c)
+ break;
+ buffer[i] = (c << 2) + 1;
+ count += c + 1;
}
+ spin_unlock_irqrestore(&port->lock, flags);
} else {
- /* No control byte required. */
- /* Copy in the data to send */
- memcpy(buffer, buf, count);
- }
-
- usb_serial_debug_data(debug, &port->dev, __func__,
- transfer_size, buffer);
-
- /* fill the buffer and send it */
- usb_fill_bulk_urb(urb, port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- buffer, transfer_size,
- ftdi_write_bulk_callback, port);
-
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, status);
- count = status;
- goto error;
- } else {
- spin_lock_irqsave(&priv->tx_lock, flags);
- priv->tx_outstanding_bytes += count;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+ count = kfifo_out_locked(&port->write_fifo, dest, size,
+ &port->lock);
}
- /* we are done with this urb, so let the host driver
- * really free it when it is finished with it */
- usb_free_urb(urb);
-
- dbg("%s write returning: %d", __func__, count);
- return count;
-error:
- usb_free_urb(urb);
-error_no_urb:
- kfree(buffer);
-error_no_buffer:
- spin_lock_irqsave(&priv->tx_lock, flags);
- priv->tx_outstanding_urbs--;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
return count;
-} /* ftdi_write */
-
-
-/* This function may get called when the device is closed */
-
-static void ftdi_write_bulk_callback(struct urb *urb)
-{
- unsigned long flags;
- struct usb_serial_port *port = urb->context;
- struct ftdi_private *priv;
- int data_offset; /* will be 1 for the SIO and 0 otherwise */
- unsigned long countback;
- int status = urb->status;
-
- /* free up the transfer buffer, as usb_free_urb() does not do this */
- kfree(urb->transfer_buffer);
-
- dbg("%s - port %d", __func__, port->number);
-
- priv = usb_get_serial_port_data(port);
- if (!priv) {
- dbg("%s - bad port private data pointer - exiting", __func__);
- return;
- }
- /* account for transferred data */
- countback = urb->transfer_buffer_length;
- data_offset = priv->write_offset;
- if (data_offset > 0) {
- /* Subtract the control bytes */
- countback -= (data_offset * DIV_ROUND_UP(countback, priv->max_packet_size));
- }
- spin_lock_irqsave(&priv->tx_lock, flags);
- --priv->tx_outstanding_urbs;
- priv->tx_outstanding_bytes -= countback;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
-
- if (status) {
- dbg("nonzero write bulk status received: %d", status);
- }
-
- usb_serial_port_softint(port);
-} /* ftdi_write_bulk_callback */
-
-
-static int ftdi_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct ftdi_private *priv = usb_get_serial_port_data(port);
- int room;
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&priv->tx_lock, flags);
- if (priv->tx_outstanding_urbs < URB_UPPER_LIMIT) {
- /*
- * We really can take anything the user throws at us
- * but let's pick a nice big number to tell the tty
- * layer that we have lots of free space
- */
- room = 2048;
- } else {
- room = 0;
- }
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- return room;
}
-static int ftdi_chars_in_buffer(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct ftdi_private *priv = usb_get_serial_port_data(port);
- int buffered;
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&priv->tx_lock, flags);
- buffered = (int)priv->tx_outstanding_bytes;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- if (buffered < 0) {
- dev_err(&port->dev, "%s outstanding tx bytes is negative!\n",
- __func__);
- buffered = 0;
- }
- return buffered;
-}
+#define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE)
static int ftdi_process_packet(struct tty_struct *tty,
struct usb_serial_port *port, struct ftdi_private *priv,
@@ -2045,28 +1795,21 @@ static int ftdi_process_packet(struct tty_struct *tty,
priv->prev_status = status;
}
- /*
- * Although the device uses a bitmask and hence can have multiple
- * errors on a packet - the order here sets the priority the error is
- * returned to the tty layer.
- */
flag = TTY_NORMAL;
- if (packet[1] & FTDI_RS_OE) {
- flag = TTY_OVERRUN;
- dbg("OVERRRUN error");
- }
- if (packet[1] & FTDI_RS_BI) {
- flag = TTY_BREAK;
- dbg("BREAK received");
- usb_serial_handle_break(port);
- }
- if (packet[1] & FTDI_RS_PE) {
- flag = TTY_PARITY;
- dbg("PARITY error");
- }
- if (packet[1] & FTDI_RS_FE) {
- flag = TTY_FRAME;
- dbg("FRAMING error");
+ if (packet[1] & FTDI_RS_ERR_MASK) {
+ /* Break takes precedence over parity, which takes precedence
+ * over framing errors */
+ if (packet[1] & FTDI_RS_BI) {
+ flag = TTY_BREAK;
+ usb_serial_handle_break(port);
+ } else if (packet[1] & FTDI_RS_PE) {
+ flag = TTY_PARITY;
+ } else if (packet[1] & FTDI_RS_FE) {
+ flag = TTY_FRAME;
+ }
+ /* Overrun is special, not associated with a char */
+ if (packet[1] & FTDI_RS_OE)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
len -= 2;
@@ -2074,20 +1817,21 @@ static int ftdi_process_packet(struct tty_struct *tty,
return 0; /* status only */
ch = packet + 2;
- if (!(port->console && port->sysrq) && flag == TTY_NORMAL)
- tty_insert_flip_string(tty, ch, len);
- else {
+ if (port->port.console && port->sysrq) {
for (i = 0; i < len; i++, ch++) {
if (!usb_serial_handle_sysrq_char(tty, port, *ch))
tty_insert_flip_char(tty, *ch, flag);
}
+ } else {
+ tty_insert_flip_string_fixed_flag(tty, ch, flag, len);
}
+
return len;
}
-static void ftdi_process_read(struct usb_serial_port *port)
+static void ftdi_process_read_urb(struct urb *urb)
{
- struct urb *urb = port->read_urb;
+ struct usb_serial_port *port = urb->context;
struct tty_struct *tty;
struct ftdi_private *priv = usb_get_serial_port_data(port);
char *data = (char *)urb->transfer_buffer;
@@ -2109,32 +1853,6 @@ static void ftdi_process_read(struct usb_serial_port *port)
tty_kref_put(tty);
}
-static void ftdi_read_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (urb->status) {
- dbg("%s - nonzero read bulk status received: %d",
- __func__, urb->status);
- return;
- }
-
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, urb->transfer_buffer);
- ftdi_process_read(port);
-
- spin_lock_irqsave(&port->lock, flags);
- port->throttled = port->throttle_req;
- if (!port->throttled) {
- spin_unlock_irqrestore(&port->lock, flags);
- ftdi_submit_read_urb(port, GFP_ATOMIC);
- } else
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
@@ -2165,15 +1883,13 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
}
-
/* old_termios contains the original termios settings and tty->termios contains
* the new setting to be used
* WARNING: set_termios calls this with old_termios in kernel space
*/
-
static void ftdi_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
-{ /* ftdi_termios */
+{
struct usb_device *dev = port->serial->dev;
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct ktermios *termios = tty->termios;
@@ -2401,7 +2117,6 @@ static int ftdi_tiocmset(struct tty_struct *tty, struct file *file,
return update_mctrl(port, set, clear);
}
-
static int ftdi_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -2470,35 +2185,6 @@ static int ftdi_ioctl(struct tty_struct *tty, struct file *file,
return -ENOIOCTLCMD;
}
-static void ftdi_throttle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&port->lock, flags);
- port->throttle_req = 1;
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-void ftdi_unthrottle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- int was_throttled;
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&port->lock, flags);
- was_throttled = port->throttled;
- port->throttled = port->throttle_req = 0;
- spin_unlock_irqrestore(&port->lock, flags);
-
- if (was_throttled)
- ftdi_submit_read_urb(port, GFP_KERNEL);
-}
-
static int __init ftdi_init(void)
{
int retval;
@@ -2529,15 +2215,12 @@ failed_sio_register:
return retval;
}
-
static void __exit ftdi_exit(void)
{
-
dbg("%s", __func__);
usb_deregister(&ftdi_driver);
usb_serial_deregister(&ftdi_sio_device);
-
}
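
For the original SIO chip, ftdi_prepare_write_buffer() above prefixes every max_packet_size chunk with a control byte whose bit 0 is set and whose bits 7..2 carry the payload length, exactly as described in the comment at the top of that hunk. A stand-alone illustration of that framing (plain C, not driver code; all names are invented):

#include <stdio.h>
#include <string.h>

/* Frame src into dest in chunks of max_packet_size, each chunk led by a
 * control byte: bit 0 = 1, bits 7..2 = number of payload bytes. */
static size_t sio_frame(unsigned char *dest, size_t dest_size,
			const unsigned char *src, size_t src_len,
			size_t max_packet_size)
{
	size_t in = 0, out = 0;

	while (in < src_len && out + 1 < dest_size) {
		size_t room = dest_size - out - 1;
		size_t chunk = max_packet_size - 1;	/* payload per packet */

		if (chunk > src_len - in)
			chunk = src_len - in;
		if (chunk > room)
			chunk = room;

		dest[out] = (unsigned char)((chunk << 2) | 1); /* control byte */
		memcpy(dest + out + 1, src + in, chunk);
		in += chunk;
		out += chunk + 1;
	}
	return out;
}

int main(void)
{
	unsigned char payload[] = "hello";
	unsigned char framed[16];
	size_t n = sio_frame(framed, sizeof(framed), payload, 5, 8);

	/* Prints: framed 6 bytes, first control byte 0x15 */
	printf("framed %zu bytes, first control byte 0x%02x\n", n, framed[0]);
	return 0;
}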
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index ff9bf80..213fe3d 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -23,14 +23,16 @@
*/
/* Commands */
-#define FTDI_SIO_RESET 0 /* Reset the port */
-#define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */
-#define FTDI_SIO_SET_FLOW_CTRL 2 /* Set flow control register */
-#define FTDI_SIO_SET_BAUD_RATE 3 /* Set baud rate */
-#define FTDI_SIO_SET_DATA 4 /* Set the data characteristics of the port */
-#define FTDI_SIO_GET_MODEM_STATUS 5 /* Retrieve current value of modem status register */
-#define FTDI_SIO_SET_EVENT_CHAR 6 /* Set the event character */
-#define FTDI_SIO_SET_ERROR_CHAR 7 /* Set the error character */
+#define FTDI_SIO_RESET 0 /* Reset the port */
+#define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */
+#define FTDI_SIO_SET_FLOW_CTRL 2 /* Set flow control register */
+#define FTDI_SIO_SET_BAUD_RATE 3 /* Set baud rate */
+#define FTDI_SIO_SET_DATA 4 /* Set the data characteristics of
+ the port */
+#define FTDI_SIO_GET_MODEM_STATUS 5 /* Retrieve current value of modem
+ status register */
+#define FTDI_SIO_SET_EVENT_CHAR 6 /* Set the event character */
+#define FTDI_SIO_SET_ERROR_CHAR 7 /* Set the error character */
#define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */
#define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */
@@ -52,7 +54,7 @@
*/
/* Port Identifier Table */
-#define PIT_DEFAULT 0 /* SIOA */
+#define PIT_DEFAULT 0 /* SIOA */
#define PIT_SIOA 1 /* SIOA */
/* The device this driver is tested with one has only one port */
#define PIT_SIOB 2 /* SIOB */
@@ -103,20 +105,21 @@
* wLength: 0
* Data: None
* The BaudDivisor values are calculated as follows:
- * - BaseClock is either 12000000 or 48000000 depending on the device. FIXME: I wish
- * I knew how to detect old chips to select proper base clock!
+ * - BaseClock is either 12000000 or 48000000 depending on the device.
+ * FIXME: I wish I knew how to detect old chips to select proper base clock!
* - BaudDivisor is a fixed point number encoded in a funny way.
* (--WRONG WAY OF THINKING--)
* BaudDivisor is a fixed point number encoded with following bit weighs:
* (-2)(-1)(13..0). It is a radical with a denominator of 4, so values
* end with 0.0 (00...), 0.25 (10...), 0.5 (01...), and 0.75 (11...).
* (--THE REALITY--)
- * The both-bits-set has quite different meaning from 0.75 - the chip designers
- * have decided it to mean 0.125 instead of 0.75.
+ * The both-bits-set combination has a quite different meaning from 0.75: the
+ * chip designers decided it means 0.125 instead of 0.75.
* This info looked up in FTDI application note "FT8U232 DEVICES \ Data Rates
* and Flow Control Consideration for USB to RS232".
* - BaudDivisor = (BaseClock / 16) / BaudRate, where the (=) operation should
- * automagically re-encode the resulting value to take fractions into consideration.
+ * automagically re-encode the resulting value to take fractions into
+ * consideration.
* As all values are integers, some bit twiddling is in order:
* BaudDivisor = (BaseClock / 16 / BaudRate) |
* (((BaseClock / 2 / BaudRate) & 4) ? 0x4000 // 0.5
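
As a worked instance of the formula quoted above (covering only the integer and 0.5-fraction cases visible in this excerpt; the 0.25/0.125 encodings continue past the quoted text; the base clock and baud rate are just sample values):

#include <stdio.h>

int main(void)
{
	unsigned int base = 48000000;
	unsigned int baud = 9600;
	unsigned int divisor;

	divisor = base / 16 / baud;		/* integer part: 312 */
	if ((base / 2 / baud) & 4)		/* 0.5 fraction present */
		divisor |= 0x4000;

	printf("divisor = 0x%04x\n", divisor);	/* 0x4138 for 9600 baud */
	return 0;
}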
@@ -146,7 +149,7 @@
* not supported by the FT8U232AM).
*/
-typedef enum {
+enum ftdi_chip_type {
SIO = 1,
FT8U232AM = 2,
FT232BM = 3,
@@ -154,37 +157,36 @@ typedef enum {
FT232RL = 5,
FT2232H = 6,
FT4232H = 7
-} ftdi_chip_type_t;
-
-typedef enum {
- ftdi_sio_b300 = 0,
- ftdi_sio_b600 = 1,
- ftdi_sio_b1200 = 2,
- ftdi_sio_b2400 = 3,
- ftdi_sio_b4800 = 4,
- ftdi_sio_b9600 = 5,
- ftdi_sio_b19200 = 6,
- ftdi_sio_b38400 = 7,
- ftdi_sio_b57600 = 8,
- ftdi_sio_b115200 = 9
-} FTDI_SIO_baudrate_t;
+};
+
+enum ftdi_sio_baudrate {
+ ftdi_sio_b300 = 0,
+ ftdi_sio_b600 = 1,
+ ftdi_sio_b1200 = 2,
+ ftdi_sio_b2400 = 3,
+ ftdi_sio_b4800 = 4,
+ ftdi_sio_b9600 = 5,
+ ftdi_sio_b19200 = 6,
+ ftdi_sio_b38400 = 7,
+ ftdi_sio_b57600 = 8,
+ ftdi_sio_b115200 = 9
+};
/*
- * The ftdi_8U232AM_xxMHz_byyy constants have been removed. The encoded divisor values
- * are calculated internally.
+ * The ftdi_8U232AM_xxMHz_byyy constants have been removed. The encoded divisor
+ * values are calculated internally.
*/
-
-#define FTDI_SIO_SET_DATA_REQUEST FTDI_SIO_SET_DATA
-#define FTDI_SIO_SET_DATA_REQUEST_TYPE 0x40
-#define FTDI_SIO_SET_DATA_PARITY_NONE (0x0 << 8)
-#define FTDI_SIO_SET_DATA_PARITY_ODD (0x1 << 8)
-#define FTDI_SIO_SET_DATA_PARITY_EVEN (0x2 << 8)
-#define FTDI_SIO_SET_DATA_PARITY_MARK (0x3 << 8)
-#define FTDI_SIO_SET_DATA_PARITY_SPACE (0x4 << 8)
-#define FTDI_SIO_SET_DATA_STOP_BITS_1 (0x0 << 11)
-#define FTDI_SIO_SET_DATA_STOP_BITS_15 (0x1 << 11)
-#define FTDI_SIO_SET_DATA_STOP_BITS_2 (0x2 << 11)
-#define FTDI_SIO_SET_BREAK (0x1 << 14)
+#define FTDI_SIO_SET_DATA_REQUEST FTDI_SIO_SET_DATA
+#define FTDI_SIO_SET_DATA_REQUEST_TYPE 0x40
+#define FTDI_SIO_SET_DATA_PARITY_NONE (0x0 << 8)
+#define FTDI_SIO_SET_DATA_PARITY_ODD (0x1 << 8)
+#define FTDI_SIO_SET_DATA_PARITY_EVEN (0x2 << 8)
+#define FTDI_SIO_SET_DATA_PARITY_MARK (0x3 << 8)
+#define FTDI_SIO_SET_DATA_PARITY_SPACE (0x4 << 8)
+#define FTDI_SIO_SET_DATA_STOP_BITS_1 (0x0 << 11)
+#define FTDI_SIO_SET_DATA_STOP_BITS_15 (0x1 << 11)
+#define FTDI_SIO_SET_DATA_STOP_BITS_2 (0x2 << 11)
+#define FTDI_SIO_SET_BREAK (0x1 << 14)
/* FTDI_SIO_SET_DATA */
/*
@@ -287,8 +289,8 @@ typedef enum {
*
* A value of zero in the hIndex field disables handshaking
*
- * If Xon/Xoff handshaking is specified, the hValue field should contain the XOFF character
- * and the lValue field contains the XON character.
+ * If Xon/Xoff handshaking is specified, the hValue field should contain the
+ * XOFF character and the lValue field contains the XON character.
*/
/*
@@ -373,7 +375,10 @@ typedef enum {
/* FTDI_SIO_SET_ERROR_CHAR */
-/* Set the parity error replacement character for the specified communications port */
+/*
+ * Set the parity error replacement character for the specified communications
+ * port
+ */
/*
* BmRequestType: 0100 0000b
@@ -496,9 +501,10 @@ typedef enum {
*
* IN Endpoint
*
- * The device reserves the first two bytes of data on this endpoint to contain the current
- * values of the modem and line status registers. In the absence of data, the device
- * generates a message consisting of these two status bytes every 40 ms
+ * The device reserves the first two bytes of data on this endpoint to contain
+ * the current values of the modem and line status registers. In the absence of
+ * data, the device generates a message consisting of these two status bytes
+ * every 40 ms
*
* Byte 0: Modem Status
*
@@ -530,21 +536,21 @@ typedef enum {
#define FTDI_RS0_RI (1 << 6)
#define FTDI_RS0_RLSD (1 << 7)
-#define FTDI_RS_DR 1
-#define FTDI_RS_OE (1<<1)
-#define FTDI_RS_PE (1<<2)
-#define FTDI_RS_FE (1<<3)
-#define FTDI_RS_BI (1<<4)
-#define FTDI_RS_THRE (1<<5)
-#define FTDI_RS_TEMT (1<<6)
-#define FTDI_RS_FIFO (1<<7)
+#define FTDI_RS_DR 1
+#define FTDI_RS_OE (1<<1)
+#define FTDI_RS_PE (1<<2)
+#define FTDI_RS_FE (1<<3)
+#define FTDI_RS_BI (1<<4)
+#define FTDI_RS_THRE (1<<5)
+#define FTDI_RS_TEMT (1<<6)
+#define FTDI_RS_FIFO (1<<7)
/*
* OUT Endpoint
*
- * This device reserves the first bytes of data on this endpoint contain the length
- * and port identifier of the message. For the FTDI USB Serial converter the port
- * identifier is always 1.
+ * This device reserves the first bytes of data on this endpoint to contain
+ * the length and port identifier of the message. For the FTDI USB Serial
+ * converter the port identifier is always 1.
*
* Byte 0: Line Status
*
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 75482cb..94d86c3 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -275,8 +275,8 @@
/*
* Hameg HO820 and HO870 interface (using VID 0x0403)
*/
-#define HAMEG_HO820_PID 0xed74
-#define HAMEG_HO870_PID 0xed71
+#define HAMEG_HO820_PID 0xed74
+#define HAMEG_HO870_PID 0xed71
/*
* MaxStream devices www.maxstream.net
@@ -289,14 +289,14 @@
* and Mike Studer (K6EEP) <k6eep@hamsoftware.org>.
* Ian Abbott <abbotti@mev.co.uk> added a few more from the driver INF file.
*/
-#define FTDI_MHAM_KW_PID 0xEEE8 /* USB-KW interface */
-#define FTDI_MHAM_YS_PID 0xEEE9 /* USB-YS interface */
-#define FTDI_MHAM_Y6_PID 0xEEEA /* USB-Y6 interface */
-#define FTDI_MHAM_Y8_PID 0xEEEB /* USB-Y8 interface */
-#define FTDI_MHAM_IC_PID 0xEEEC /* USB-IC interface */
-#define FTDI_MHAM_DB9_PID 0xEEED /* USB-DB9 interface */
-#define FTDI_MHAM_RS232_PID 0xEEEE /* USB-RS232 interface */
-#define FTDI_MHAM_Y9_PID 0xEEEF /* USB-Y9 interface */
+#define FTDI_MHAM_KW_PID 0xEEE8 /* USB-KW interface */
+#define FTDI_MHAM_YS_PID 0xEEE9 /* USB-YS interface */
+#define FTDI_MHAM_Y6_PID 0xEEEA /* USB-Y6 interface */
+#define FTDI_MHAM_Y8_PID 0xEEEB /* USB-Y8 interface */
+#define FTDI_MHAM_IC_PID 0xEEEC /* USB-IC interface */
+#define FTDI_MHAM_DB9_PID 0xEEED /* USB-DB9 interface */
+#define FTDI_MHAM_RS232_PID 0xEEEE /* USB-RS232 interface */
+#define FTDI_MHAM_Y9_PID 0xEEEF /* USB-Y9 interface */
/* Domintell products http://www.domintell.com */
#define FTDI_DOMINTELL_DGQG_PID 0xEF50 /* Master */
@@ -483,9 +483,9 @@
* Blackfin gnICE JTAG
* http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
*/
-#define ADI_VID 0x0456
-#define ADI_GNICE_PID 0xF000
-#define ADI_GNICEPLUS_PID 0xF001
+#define ADI_VID 0x0456
+#define ADI_GNICE_PID 0xF000
+#define ADI_GNICEPLUS_PID 0xF001
/*
* RATOC REX-USB60F
@@ -611,13 +611,13 @@
#define SEALEVEL_2802_7_PID 0X2872 /* SeaLINK+8/485 (2802) Port 7 */
#define SEALEVEL_2802_8_PID 0X2882 /* SeaLINK+8/485 (2802) Port 8 */
#define SEALEVEL_2803_1_PID 0X2813 /* SeaLINK+8 (2803) Port 1 */
-#define SEALEVEL_2803_2_PID 0X2823 /* SeaLINK+8 (2803) Port 2 */
-#define SEALEVEL_2803_3_PID 0X2833 /* SeaLINK+8 (2803) Port 3 */
-#define SEALEVEL_2803_4_PID 0X2843 /* SeaLINK+8 (2803) Port 4 */
-#define SEALEVEL_2803_5_PID 0X2853 /* SeaLINK+8 (2803) Port 5 */
-#define SEALEVEL_2803_6_PID 0X2863 /* SeaLINK+8 (2803) Port 6 */
-#define SEALEVEL_2803_7_PID 0X2873 /* SeaLINK+8 (2803) Port 7 */
-#define SEALEVEL_2803_8_PID 0X2883 /* SeaLINK+8 (2803) Port 8 */
+#define SEALEVEL_2803_2_PID 0X2823 /* SeaLINK+8 (2803) Port 2 */
+#define SEALEVEL_2803_3_PID 0X2833 /* SeaLINK+8 (2803) Port 3 */
+#define SEALEVEL_2803_4_PID 0X2843 /* SeaLINK+8 (2803) Port 4 */
+#define SEALEVEL_2803_5_PID 0X2853 /* SeaLINK+8 (2803) Port 5 */
+#define SEALEVEL_2803_6_PID 0X2863 /* SeaLINK+8 (2803) Port 6 */
+#define SEALEVEL_2803_7_PID 0X2873 /* SeaLINK+8 (2803) Port 7 */
+#define SEALEVEL_2803_8_PID 0X2883 /* SeaLINK+8 (2803) Port 8 */
/*
* JETI SPECTROMETER SPECBOS 1201
@@ -1013,7 +1013,7 @@
*/
#define EVOLUTION_VID 0xDEEE /* Vendor ID */
#define EVOLUTION_ER1_PID 0x0300 /* ER1 Control Module */
-#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/
+#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/
#define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/
#define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index f804acb..a817ced 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -1,6 +1,7 @@
/*
* USB Serial Converter Generic functions
*
+ * Copyright (C) 2010 Johan Hovold (jhovold@gmail.com)
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
*
* This program is free software; you can redistribute it and/or
@@ -12,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
@@ -117,7 +119,6 @@ void usb_serial_generic_deregister(void)
int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- struct usb_serial *serial = port->serial;
int result = 0;
unsigned long flags;
@@ -130,23 +131,8 @@ int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port
spin_unlock_irqrestore(&port->lock, flags);
/* if we have a bulk endpoint, start reading from it */
- if (port->bulk_in_size) {
- /* Start reading from the device */
- usb_fill_bulk_urb(port->read_urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- ((serial->type->read_bulk_callback) ?
- serial->type->read_bulk_callback :
- usb_serial_generic_read_bulk_callback),
- port);
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result)
- dev_err(&port->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- }
+ if (port->bulk_in_size)
+ result = usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
return result;
}
@@ -155,13 +141,22 @@ EXPORT_SYMBOL_GPL(usb_serial_generic_open);
static void generic_cleanup(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
+ unsigned long flags;
+ int i;
dbg("%s - port %d", __func__, port->number);
if (serial->dev) {
/* shutdown any bulk transfers that might be going on */
- if (port->bulk_out_size)
+ if (port->bulk_out_size) {
usb_kill_urb(port->write_urb);
+ for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
+ usb_kill_urb(port->write_urbs[i]);
+
+ spin_lock_irqsave(&port->lock, flags);
+ kfifo_reset_out(&port->write_fifo);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
if (port->bulk_in_size)
usb_kill_urb(port->read_urb);
}
@@ -172,146 +167,68 @@ void usb_serial_generic_close(struct usb_serial_port *port)
dbg("%s - port %d", __func__, port->number);
generic_cleanup(port);
}
+EXPORT_SYMBOL_GPL(usb_serial_generic_close);
-static int usb_serial_multi_urb_write(struct tty_struct *tty,
- struct usb_serial_port *port, const unsigned char *buf, int count)
+int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size)
{
- unsigned long flags;
- struct urb *urb;
- unsigned char *buffer;
- int status;
- int towrite;
- int bwrite = 0;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (count == 0)
- dbg("%s - write request of 0 bytes", __func__);
-
- while (count > 0) {
- towrite = (count > port->bulk_out_size) ?
- port->bulk_out_size : count;
- spin_lock_irqsave(&port->lock, flags);
- if (port->urbs_in_flight >
- port->serial->type->max_in_flight_urbs) {
- spin_unlock_irqrestore(&port->lock, flags);
- dbg("%s - write limit hit", __func__);
- return bwrite;
- }
- port->tx_bytes_flight += towrite;
- port->urbs_in_flight++;
- spin_unlock_irqrestore(&port->lock, flags);
-
- buffer = kmalloc(towrite, GFP_ATOMIC);
- if (!buffer) {
- dev_err(&port->dev,
- "%s ran out of kernel memory for urb ...\n", __func__);
- goto error_no_buffer;
- }
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- dev_err(&port->dev, "%s - no more free urbs\n",
- __func__);
- goto error_no_urb;
- }
-
- /* Copy data */
- memcpy(buffer, buf + bwrite, towrite);
- usb_serial_debug_data(debug, &port->dev, __func__,
- towrite, buffer);
- /* fill the buffer and send it */
- usb_fill_bulk_urb(urb, port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- buffer, towrite,
- usb_serial_generic_write_bulk_callback, port);
-
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, status);
- goto error;
- }
-
- /* This urb is the responsibility of the host driver now */
- usb_free_urb(urb);
- dbg("%s write: %d", __func__, towrite);
- count -= towrite;
- bwrite += towrite;
- }
- return bwrite;
-
-error:
- usb_free_urb(urb);
-error_no_urb:
- kfree(buffer);
-error_no_buffer:
- spin_lock_irqsave(&port->lock, flags);
- port->urbs_in_flight--;
- port->tx_bytes_flight -= towrite;
- spin_unlock_irqrestore(&port->lock, flags);
- return bwrite;
+ return kfifo_out_locked(&port->write_fifo, dest, size, &port->lock);
}
/**
* usb_serial_generic_write_start - kick off an URB write
* @port: Pointer to the &struct usb_serial_port data
*
- * Returns the number of bytes queued on success. This will be zero if there
- * was nothing to send. Otherwise, it returns a negative errno value
+ * Returns zero on success, or a negative errno value
*/
static int usb_serial_generic_write_start(struct usb_serial_port *port)
{
- struct usb_serial *serial = port->serial;
- unsigned char *data;
- int result;
- int count;
+ struct urb *urb;
+ int count, result;
unsigned long flags;
- bool start_io;
+ int i;
- /* Atomically determine whether we can and need to start a USB
- * operation. */
+ if (test_and_set_bit_lock(USB_SERIAL_WRITE_BUSY, &port->flags))
+ return 0;
+retry:
spin_lock_irqsave(&port->lock, flags);
- if (port->write_urb_busy)
- start_io = false;
- else {
- start_io = (kfifo_len(&port->write_fifo) != 0);
- port->write_urb_busy = start_io;
+ if (!port->write_urbs_free || !kfifo_len(&port->write_fifo)) {
+ clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return 0;
}
+ i = (int)find_first_bit(&port->write_urbs_free,
+ ARRAY_SIZE(port->write_urbs));
spin_unlock_irqrestore(&port->lock, flags);
- if (!start_io)
- return 0;
-
- data = port->write_urb->transfer_buffer;
- count = kfifo_out_locked(&port->write_fifo, data, port->bulk_out_size, &port->lock);
- usb_serial_debug_data(debug, &port->dev, __func__, count, data);
-
- /* set up our urb */
- usb_fill_bulk_urb(port->write_urb, serial->dev,
- usb_sndbulkpipe(serial->dev,
- port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer, count,
- ((serial->type->write_bulk_callback) ?
- serial->type->write_bulk_callback :
- usb_serial_generic_write_bulk_callback),
- port);
-
- /* send the data out the bulk port */
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
+ urb = port->write_urbs[i];
+ count = port->serial->type->prepare_write_buffer(port,
+ urb->transfer_buffer,
+ port->bulk_out_size);
+ urb->transfer_buffer_length = count;
+ usb_serial_debug_data(debug, &port->dev, __func__, count,
+ urb->transfer_buffer);
+ result = usb_submit_urb(urb, GFP_ATOMIC);
if (result) {
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
+ dev_err(&port->dev, "%s - error submitting urb: %d\n",
__func__, result);
- /* don't have to grab the lock here, as we will
- retry if != 0 */
- port->write_urb_busy = 0;
- } else
- result = count;
+ clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
+ return result;
+ }
+ clear_bit(i, &port->write_urbs_free);
- return result;
+ spin_lock_irqsave(&port->lock, flags);
+ port->tx_bytes += count;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /* Try sending off another urb, unless in irq context (in which case
+ * there will be no free urb). */
+ if (!in_irq())
+ goto retry;
+
+ clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
+
+ return 0;
}
/**
@@ -328,7 +245,6 @@ static int usb_serial_generic_write_start(struct usb_serial_port *port)
int usb_serial_generic_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count)
{
- struct usb_serial *serial = port->serial;
int result;
dbg("%s - port %d", __func__, port->number);
@@ -337,31 +253,23 @@ int usb_serial_generic_write(struct tty_struct *tty,
if (!port->bulk_out_size)
return -ENODEV;
- if (count == 0) {
- dbg("%s - write request of 0 bytes", __func__);
+ if (!count)
return 0;
- }
-
- if (serial->type->max_in_flight_urbs)
- return usb_serial_multi_urb_write(tty, port,
- buf, count);
count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
result = usb_serial_generic_write_start(port);
+ if (result)
+ return result;
- if (result >= 0)
- result = count;
-
- return result;
+ return count;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_write);
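With the fifo-backed path, a sub-driver that needs to prepend its own packet header no longer has to duplicate the whole write machinery; it can supply its own prepare_write_buffer. A hypothetical sketch (the function name and one-byte header layout are illustrative only, not taken from this patch):

#include <linux/kfifo.h>
#include <linux/usb/serial.h>

/* Reserve byte 0 of each bulk-out packet for a one-byte length header
 * and fill the remainder from the generic write fifo. */
static int example_prepare_write_buffer(struct usb_serial_port *port,
					void *dest, size_t size)
{
	u8 *buf = dest;
	int count;

	count = kfifo_out_locked(&port->write_fifo, buf + 1, size - 1,
				 &port->lock);
	buf[0] = count;			/* illustrative header byte */

	return count + 1;		/* bytes to place in the urb */
}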
int usb_serial_generic_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct usb_serial *serial = port->serial;
unsigned long flags;
- int room = 0;
+ int room;
dbg("%s - port %d", __func__, port->number);
@@ -369,14 +277,7 @@ int usb_serial_generic_write_room(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&port->lock, flags);
- if (serial->type->max_in_flight_urbs) {
- if (port->urbs_in_flight < serial->type->max_in_flight_urbs)
- room = port->bulk_out_size *
- (serial->type->max_in_flight_urbs -
- port->urbs_in_flight);
- } else {
- room = kfifo_avail(&port->write_fifo);
- }
+ room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
dbg("%s - returns %d", __func__, room);
@@ -386,7 +287,6 @@ int usb_serial_generic_write_room(struct tty_struct *tty)
int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct usb_serial *serial = port->serial;
unsigned long flags;
int chars;
@@ -396,61 +296,47 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&port->lock, flags);
- if (serial->type->max_in_flight_urbs)
- chars = port->tx_bytes_flight;
- else
- chars = kfifo_len(&port->write_fifo);
+ chars = kfifo_len(&port->write_fifo) + port->tx_bytes;
spin_unlock_irqrestore(&port->lock, flags);
dbg("%s - returns %d", __func__, chars);
return chars;
}
-
-void usb_serial_generic_resubmit_read_urb(struct usb_serial_port *port,
- gfp_t mem_flags)
+int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
+ gfp_t mem_flags)
{
- struct urb *urb = port->read_urb;
- struct usb_serial *serial = port->serial;
int result;
- /* Continue reading from device */
- usb_fill_bulk_urb(urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- urb->transfer_buffer,
- urb->transfer_buffer_length,
- ((serial->type->read_bulk_callback) ?
- serial->type->read_bulk_callback :
- usb_serial_generic_read_bulk_callback), port);
-
- result = usb_submit_urb(urb, mem_flags);
+ result = usb_submit_urb(port->read_urb, mem_flags);
if (result && result != -EPERM) {
- dev_err(&port->dev,
- "%s - failed resubmitting read urb, error %d\n",
+ dev_err(&port->dev, "%s - error submitting urb: %d\n",
__func__, result);
}
+ return result;
}
-EXPORT_SYMBOL_GPL(usb_serial_generic_resubmit_read_urb);
+EXPORT_SYMBOL_GPL(usb_serial_generic_submit_read_urb);
-/* Push data to tty layer and resubmit the bulk read URB */
-static void flush_and_resubmit_read_urb(struct usb_serial_port *port)
+void usb_serial_generic_process_read_urb(struct urb *urb)
{
- struct urb *urb = port->read_urb;
- struct tty_struct *tty = tty_port_tty_get(&port->port);
+ struct usb_serial_port *port = urb->context;
+ struct tty_struct *tty;
char *ch = (char *)urb->transfer_buffer;
int i;
+ if (!urb->actual_length)
+ return;
+
+ tty = tty_port_tty_get(&port->port);
if (!tty)
- goto done;
+ return;
/* The per-character mucking around with the sysrq path is too slow for
stuff like 3G modems, so short-circuit it in the 99.9999999% of cases
where the USB serial is not a console anyway */
- if (!port->console || !port->sysrq)
+ if (!port->port.console || !port->sysrq)
tty_insert_flip_string(tty, ch, urb->actual_length);
else {
- /* Push data to tty */
for (i = 0; i < urb->actual_length; i++, ch++) {
if (!usb_serial_handle_sysrq_char(tty, port, *ch))
tty_insert_flip_char(tty, *ch, TTY_NORMAL);
@@ -458,9 +344,8 @@ static void flush_and_resubmit_read_urb(struct usb_serial_port *port)
}
tty_flip_buffer_push(tty);
tty_kref_put(tty);
-done:
- usb_serial_generic_resubmit_read_urb(port, GFP_ATOMIC);
}
+EXPORT_SYMBOL_GPL(usb_serial_generic_process_read_urb);
void usb_serial_generic_read_bulk_callback(struct urb *urb)
{
@@ -479,13 +364,14 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
usb_serial_debug_data(debug, &port->dev, __func__,
urb->actual_length, data);
+ port->serial->type->process_read_urb(urb);
/* Throttle the device if requested by tty */
spin_lock_irqsave(&port->lock, flags);
port->throttled = port->throttle_req;
if (!port->throttled) {
spin_unlock_irqrestore(&port->lock, flags);
- flush_and_resubmit_read_urb(port);
+ usb_serial_generic_submit_read_urb(port, GFP_ATOMIC);
} else
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -496,30 +382,29 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
unsigned long flags;
struct usb_serial_port *port = urb->context;
int status = urb->status;
+ int i;
dbg("%s - port %d", __func__, port->number);
- if (port->serial->type->max_in_flight_urbs) {
- kfree(urb->transfer_buffer);
+ for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
+ if (port->write_urbs[i] == urb)
+ break;
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->tx_bytes -= urb->transfer_buffer_length;
+ set_bit(i, &port->write_urbs_free);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (status) {
+ dbg("%s - non-zero urb status: %d", __func__, status);
spin_lock_irqsave(&port->lock, flags);
- --port->urbs_in_flight;
- port->tx_bytes_flight -= urb->transfer_buffer_length;
- if (port->urbs_in_flight < 0)
- port->urbs_in_flight = 0;
+ kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
} else {
- port->write_urb_busy = 0;
-
- if (status)
- kfifo_reset_out(&port->write_fifo);
- else
- usb_serial_generic_write_start(port);
+ usb_serial_generic_write_start(port);
}
- if (status)
- dbg("%s - non-zero urb status: %d", __func__, status);
-
usb_serial_port_softint(port);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_write_bulk_callback);
@@ -537,31 +422,31 @@ void usb_serial_generic_throttle(struct tty_struct *tty)
port->throttle_req = 1;
spin_unlock_irqrestore(&port->lock, flags);
}
+EXPORT_SYMBOL_GPL(usb_serial_generic_throttle);
void usb_serial_generic_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
int was_throttled;
- unsigned long flags;
dbg("%s - port %d", __func__, port->number);
/* Clear the throttle flags */
- spin_lock_irqsave(&port->lock, flags);
+ spin_lock_irq(&port->lock);
was_throttled = port->throttled;
port->throttled = port->throttle_req = 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ spin_unlock_irq(&port->lock);
- if (was_throttled) {
- /* Resume reading from device */
- flush_and_resubmit_read_urb(port);
- }
+ if (was_throttled)
+ usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
}
+EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle);
+#ifdef CONFIG_MAGIC_SYSRQ
int usb_serial_handle_sysrq_char(struct tty_struct *tty,
struct usb_serial_port *port, unsigned int ch)
{
- if (port->sysrq && port->console) {
+ if (port->sysrq && port->port.console) {
if (ch && time_before(jiffies, port->sysrq)) {
handle_sysrq(ch, tty);
port->sysrq = 0;
@@ -571,6 +456,13 @@ int usb_serial_handle_sysrq_char(struct tty_struct *tty,
}
return 0;
}
+#else
+int usb_serial_handle_sysrq_char(struct tty_struct *tty,
+ struct usb_serial_port *port, unsigned int ch)
+{
+ return 0;
+}
+#endif
EXPORT_SYMBOL_GPL(usb_serial_handle_sysrq_char);
int usb_serial_handle_break(struct usb_serial_port *port)
@@ -600,7 +492,7 @@ int usb_serial_generic_resume(struct usb_serial *serial)
c++;
}
- if (port->write_urb) {
+ if (port->bulk_out_size) {
r = usb_serial_generic_write_start(port);
if (r < 0)
c++;
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 3ef8df0..76e6fb3 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -3020,7 +3020,7 @@ static int edge_startup(struct usb_serial *serial)
/* set up our port private structures */
for (i = 0; i < serial->num_ports; ++i) {
- edge_port = kmalloc(sizeof(struct edgeport_port), GFP_KERNEL);
+ edge_port = kzalloc(sizeof(struct edgeport_port), GFP_KERNEL);
if (edge_port == NULL) {
dev_err(&serial->dev->dev, "%s - Out of memory\n",
__func__);
@@ -3033,7 +3033,6 @@ static int edge_startup(struct usb_serial *serial)
kfree(edge_serial);
return -ENOMEM;
}
- memset(edge_port, 0, sizeof(struct edgeport_port));
spin_lock_init(&edge_port->ep_lock);
edge_port->port = serial->port[i];
usb_set_serial_port_data(serial->port[i], edge_port);
diff --git a/drivers/usb/serial/io_edgeport.h b/drivers/usb/serial/io_edgeport.h
index cb201c1..dced7ec 100644
--- a/drivers/usb/serial/io_edgeport.h
+++ b/drivers/usb/serial/io_edgeport.h
@@ -34,15 +34,15 @@
-/* The following table is used to map the USBx port number to
+/* The following table is used to map the USBx port number to
* the device serial number (or physical USB path), */
#define MAX_EDGEPORTS 64
struct comMapper {
char SerialNumber[MAX_SERIALNUMBER_LEN+1]; /* Serial number/usb path */
- int numPorts; /* Number of ports */
- int Original[MAX_RS232_PORTS]; /* Port numbers set by IOCTL */
- int Port[MAX_RS232_PORTS]; /* Actual used port numbers */
+ int numPorts; /* Number of ports */
+ int Original[MAX_RS232_PORTS]; /* Port numbers set by IOCTL */
+ int Port[MAX_RS232_PORTS]; /* Actual used port numbers */
};
@@ -51,7 +51,7 @@ struct comMapper {
/* /proc/edgeport Interface
 * This interface uses the read/write/lseek interface to talk to the edgeport
 * driver; the following read functions are supported: */
-#define PROC_GET_MAPPING_TO_PATH 1
+#define PROC_GET_MAPPING_TO_PATH 1
#define PROC_GET_COM_ENTRY 2
#define PROC_GET_EDGE_MANUF_DESCRIPTOR 3
#define PROC_GET_BOOT_DESCRIPTOR 4
@@ -64,7 +64,7 @@ struct comMapper {
/* the following write functions are supported: */
-#define PROC_SET_COM_MAPPING 1
+#define PROC_SET_COM_MAPPING 1
#define PROC_SET_COM_ENTRY 2
@@ -97,8 +97,8 @@ struct edgeport_product_info {
__u8 BoardRev; /* PCB revision level (chg only if s/w visible) */
__u8 BootMajorVersion; /* Boot Firmware version: xx. */
- __u8 BootMinorVersion; /* yy. */
- __le16 BootBuildNumber; /* zzzz (LE format) */
+ __u8 BootMinorVersion; /* yy. */
+ __le16 BootBuildNumber; /* zzzz (LE format) */
__u8 FirmwareMajorVersion; /* Operational Firmware version:xx. */
__u8 FirmwareMinorVersion; /* yy. */
diff --git a/drivers/usb/serial/io_ionsp.h b/drivers/usb/serial/io_ionsp.h
index 092e03d..5cc591b 100644
--- a/drivers/usb/serial/io_ionsp.h
+++ b/drivers/usb/serial/io_ionsp.h
@@ -89,10 +89,10 @@ All 16-bit fields are sent in little-endian (Intel) format.
//
struct int_status_pkt {
- __u16 RxBytesAvail; // Additional bytes available to
- // be read from Bulk IN pipe
- __u16 TxCredits[ MAX_RS232_PORTS ]; // Additional space available in
- // given port's TxBuffer
+ __u16 RxBytesAvail; // Additional bytes available to
+ // be read from Bulk IN pipe
+ __u16 TxCredits[MAX_RS232_PORTS]; // Additional space available in
+ // given port's TxBuffer
};
@@ -115,24 +115,24 @@ struct int_status_pkt {
#define IOSP_CMD_STAT_BIT 0x80 // If set, this is command/status header
#define IS_CMD_STAT_HDR(Byte1) ((Byte1) & IOSP_CMD_STAT_BIT)
-#define IS_DATA_HDR(Byte1) (! IS_CMD_STAT_HDR(Byte1))
+#define IS_DATA_HDR(Byte1) (!IS_CMD_STAT_HDR(Byte1))
#define IOSP_GET_HDR_PORT(Byte1) ((__u8) ((Byte1) & IOSP_PORT_MASK))
-#define IOSP_GET_HDR_DATA_LEN(Byte1, Byte2) ((__u16) ( ((__u16)((Byte1) & 0x78)) << 5) | (Byte2))
+#define IOSP_GET_HDR_DATA_LEN(Byte1, Byte2) ((__u16) (((__u16)((Byte1) & 0x78)) << 5) | (Byte2))
#define IOSP_GET_STATUS_CODE(Byte1) ((__u8) (((Byte1) & 0x78) >> 3))
//
// These macros build the 1st and 2nd bytes for a data header
//
-#define IOSP_BUILD_DATA_HDR1(Port, Len) ((__u8) (((Port) | ((__u8) (((__u16) (Len)) >> 5) & 0x78 ))))
+#define IOSP_BUILD_DATA_HDR1(Port, Len) ((__u8) (((Port) | ((__u8) (((__u16) (Len)) >> 5) & 0x78))))
#define IOSP_BUILD_DATA_HDR2(Port, Len) ((__u8) (Len))
//
// These macros build the 1st and 2nd bytes for a command header
//
-#define IOSP_BUILD_CMD_HDR1(Port, Cmd) ((__u8) ( IOSP_CMD_STAT_BIT | (Port) | ((__u8) ((Cmd) << 3)) ))
+#define IOSP_BUILD_CMD_HDR1(Port, Cmd) ((__u8) (IOSP_CMD_STAT_BIT | (Port) | ((__u8) ((Cmd) << 3))))
//--------------------------------------------------------------
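A self-contained round-trip check of the data-header macros above (a sketch: __u8/__u16 replaced with stdint types, and IOSP_PORT_MASK assumed to cover the low three bits as implied by IOSP_GET_HDR_PORT):

#include <stdio.h>
#include <stdint.h>

#define IOSP_PORT_MASK 0x07	/* assumed value */

#define IOSP_GET_HDR_PORT(Byte1) ((uint8_t)((Byte1) & IOSP_PORT_MASK))
#define IOSP_GET_HDR_DATA_LEN(Byte1, Byte2) \
	((uint16_t)(((uint16_t)((Byte1) & 0x78)) << 5) | (Byte2))
#define IOSP_BUILD_DATA_HDR1(Port, Len) \
	((uint8_t)(((Port) | ((uint8_t)(((uint16_t)(Len)) >> 5) & 0x78))))
#define IOSP_BUILD_DATA_HDR2(Port, Len) ((uint8_t)(Len))

int main(void)
{
	uint8_t b1 = IOSP_BUILD_DATA_HDR1(2, 300);
	uint8_t b2 = IOSP_BUILD_DATA_HDR2(2, 300);

	/* expect "port=2 len=300" back out */
	printf("port=%d len=%d\n",
	       IOSP_GET_HDR_PORT(b1), IOSP_GET_HDR_DATA_LEN(b1, b2));
	return 0;
}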
@@ -194,24 +194,25 @@ struct int_status_pkt {
// Define macros to simplify building of IOSP cmds
//
-#define MAKE_CMD_WRITE_REG(ppBuf, pLen, Port, Reg, Val) \
- do { \
- (*(ppBuf))[0] = IOSP_BUILD_CMD_HDR1( (Port), IOSP_WRITE_UART_REG(Reg) ); \
- (*(ppBuf))[1] = (Val); \
- \
- *ppBuf += 2; \
- *pLen += 2; \
- } while (0)
+#define MAKE_CMD_WRITE_REG(ppBuf, pLen, Port, Reg, Val) \
+do { \
+ (*(ppBuf))[0] = IOSP_BUILD_CMD_HDR1((Port), \
+ IOSP_WRITE_UART_REG(Reg)); \
+ (*(ppBuf))[1] = (Val); \
+ \
+ *ppBuf += 2; \
+ *pLen += 2; \
+} while (0)
-#define MAKE_CMD_EXT_CMD(ppBuf, pLen, Port, ExtCmd, Param) \
- do { \
- (*(ppBuf))[0] = IOSP_BUILD_CMD_HDR1( (Port), IOSP_EXT_CMD ); \
- (*(ppBuf))[1] = (ExtCmd); \
- (*(ppBuf))[2] = (Param); \
- \
- *ppBuf += 3; \
- *pLen += 3; \
- } while (0)
+#define MAKE_CMD_EXT_CMD(ppBuf, pLen, Port, ExtCmd, Param) \
+do { \
+ (*(ppBuf))[0] = IOSP_BUILD_CMD_HDR1((Port), IOSP_EXT_CMD); \
+ (*(ppBuf))[1] = (ExtCmd); \
+ (*(ppBuf))[2] = (Param); \
+ \
+ *ppBuf += 3; \
+ *pLen += 3; \
+} while (0)
@@ -310,16 +311,16 @@ struct int_status_pkt {
//
// IOSP_CMD_RX_CHECK_REQ
//
-// This command is used to assist in the implementation of the
-// IOCTL_SERIAL_PURGE Windows IOCTL.
-// This IOSP command tries to place a marker at the end of the RX
-// queue in the Edgeport. If the Edgeport RX queue is full then
-// the Check will be discarded.
-// It is up to the device driver to timeout waiting for the
-// RX_CHECK_RSP. If a RX_CHECK_RSP is received, the driver is
-// sure that all data has been received from the edgeport and
+// This command is used to assist in the implementation of the
+// IOCTL_SERIAL_PURGE Windows IOCTL.
+// This IOSP command tries to place a marker at the end of the RX
+// queue in the Edgeport. If the Edgeport RX queue is full then
+// the Check will be discarded.
+// It is up to the device driver to timeout waiting for the
+// RX_CHECK_RSP. If a RX_CHECK_RSP is received, the driver is
+// sure that all data has been received from the edgeport and
// may now purge any internal RX buffers.
-// Note tat the sequence numbers may be used to detect lost
+// Note that the sequence numbers may be used to detect lost
// CHECK_REQs.
// Example for Port 0
@@ -341,7 +342,7 @@ struct int_status_pkt {
//
// 1ssssPPP P1P1P1P1 [ P2P2P2P2P2 ]...
//
-// ssss: 00-07 2-byte status. ssss identifies which UART register
+// ssss: 00-07 2-byte status. ssss identifies which UART register
// has changed value, and the new value is in P1.
// Note that the ssss values do not correspond to the
// 16554 register numbers given in 16554.H. Instead,
@@ -383,14 +384,14 @@ struct int_status_pkt {
// returns this in order to report
// changes in modem status lines
// (CTS, DSR, RI, CD)
-//
+//
// 0x02 // Available for future expansion
-// 0x03 //
-// 0x04 //
-// 0x05 //
-// 0x06 //
-// 0x07 //
+// 0x03 //
+// 0x04 //
+// 0x05 //
+// 0x06 //
+// 0x07 //
/****************************************************
@@ -400,7 +401,7 @@ struct int_status_pkt {
#define IOSP_STATUS_LSR_DATA 0x08 // P1 is new value of LSR register (same as STATUS_LSR)
// P2 is errored character read from
-// RxFIFO after LSR reported an error.
+// RxFIFO after LSR reported an error.
#define IOSP_EXT_STATUS 0x09 // P1 is status/response code, param in P2.
@@ -408,7 +409,7 @@ struct int_status_pkt {
// Response Codes (P1 values) for 3-byte status messages
#define IOSP_EXT_STATUS_CHASE_RSP 0 // Reply to CHASE_PORT cmd. P2 is outcome:
-#define IOSP_EXT_STATUS_CHASE_PASS 0 // P2 = 0: All Tx data drained successfully
+#define IOSP_EXT_STATUS_CHASE_PASS 0 // P2 = 0: All Tx data drained successfully
#define IOSP_EXT_STATUS_CHASE_FAIL 1 // P2 = 1: Timed out (stuck due to flow
// control from remote device).
@@ -446,9 +447,9 @@ struct int_status_pkt {
// Macros to parse status messages
//
-#define IOSP_GET_STATUS_LEN(code) ( (code) < 8 ? 2 : ((code) < 0x0A ? 3 : 4) )
+#define IOSP_GET_STATUS_LEN(code) ((code) < 8 ? 2 : ((code) < 0x0A ? 3 : 4))
-#define IOSP_STATUS_IS_2BYTE(code) ( (code) < 0x08 )
-#define IOSP_STATUS_IS_3BYTE(code) ( ((code) >= 0x08) && ((code) <= 0x0B) )
-#define IOSP_STATUS_IS_4BYTE(code) ( ((code) >= 0x0C) && ((code) <= 0x0D) )
+#define IOSP_STATUS_IS_2BYTE(code) ((code) < 0x08)
+#define IOSP_STATUS_IS_3BYTE(code) (((code) >= 0x08) && ((code) <= 0x0B))
+#define IOSP_STATUS_IS_4BYTE(code) (((code) >= 0x0C) && ((code) <= 0x0D))
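A quick self-contained check of the status-length macro above (sketch only):

#include <stdio.h>

#define IOSP_GET_STATUS_LEN(code) ((code) < 8 ? 2 : ((code) < 0x0A ? 3 : 4))

int main(void)
{
	/* a 2-byte, a 3-byte and a 4-byte status code respectively */
	printf("%d %d %d\n", IOSP_GET_STATUS_LEN(0x05),
	       IOSP_GET_STATUS_LEN(0x08), IOSP_GET_STATUS_LEN(0x0C));
	/* prints "2 3 4" */
	return 0;
}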
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index aa876f7..0fca265 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -36,6 +36,7 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/serial.h>
+#include <linux/kfifo.h>
#include <linux/ioctl.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
@@ -56,10 +57,6 @@
#define EPROM_PAGE_SIZE 64
-struct edgeport_uart_buf_desc {
- __u32 count; /* Number of bytes currently in buffer */
-};
-
/* different hardware types */
#define HARDWARE_TYPE_930 0
#define HARDWARE_TYPE_TIUMP 1
@@ -87,14 +84,6 @@ struct product_info {
__u8 hardware_type; /* Type of hardware */
} __attribute__((packed));
-/* circular buffer */
-struct edge_buf {
- unsigned int buf_size;
- char *buf_buf;
- char *buf_get;
- char *buf_put;
-};
-
struct edgeport_port {
__u16 uart_base;
__u16 dma_address;
@@ -108,7 +97,6 @@ struct edgeport_port {
int baud_rate;
int close_pending;
int lsr_event;
- struct edgeport_uart_buf_desc tx;
struct async_icount icount;
wait_queue_head_t delta_msr_wait; /* for handling sleeping while
waiting for msr change to
@@ -119,7 +107,7 @@ struct edgeport_port {
spinlock_t ep_lock;
int ep_read_urb_state;
int ep_write_urb_in_use;
- struct edge_buf *ep_out_buf;
+ struct kfifo write_fifo;
};
struct edgeport_serial {
@@ -249,17 +237,6 @@ static void edge_send(struct tty_struct *tty);
static int edge_create_sysfs_attrs(struct usb_serial_port *port);
static int edge_remove_sysfs_attrs(struct usb_serial_port *port);
-/* circular buffer */
-static struct edge_buf *edge_buf_alloc(unsigned int size);
-static void edge_buf_free(struct edge_buf *eb);
-static void edge_buf_clear(struct edge_buf *eb);
-static unsigned int edge_buf_data_avail(struct edge_buf *eb);
-static unsigned int edge_buf_space_avail(struct edge_buf *eb);
-static unsigned int edge_buf_put(struct edge_buf *eb, const char *buf,
- unsigned int count);
-static unsigned int edge_buf_get(struct edge_buf *eb, char *buf,
- unsigned int count);
-
static int ti_vread_sync(struct usb_device *dev, __u8 request,
__u16 value, __u16 index, u8 *data, int size)
@@ -590,7 +567,7 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
add_wait_queue(&tty->write_wait, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- if (edge_buf_data_avail(port->ep_out_buf) == 0
+ if (kfifo_len(&port->write_fifo) == 0
|| timeout == 0 || signal_pending(current)
|| !usb_get_intfdata(port->port->serial->interface))
/* disconnect */
@@ -602,7 +579,7 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
set_current_state(TASK_RUNNING);
remove_wait_queue(&tty->write_wait, &wait);
if (flush)
- edge_buf_clear(port->ep_out_buf);
+ kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&port->ep_lock, flags);
tty_kref_put(tty);
@@ -2089,7 +2066,6 @@ static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
- unsigned long flags;
dbg("%s - port %d", __func__, port->number);
@@ -2103,10 +2079,8 @@ static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
if (edge_port->close_pending == 1)
return -ENODEV;
- spin_lock_irqsave(&edge_port->ep_lock, flags);
- count = edge_buf_put(edge_port->ep_out_buf, data, count);
- spin_unlock_irqrestore(&edge_port->ep_lock, flags);
-
+ count = kfifo_in_locked(&edge_port->write_fifo, data, count,
+ &edge_port->ep_lock);
edge_send(tty);
return count;
@@ -2129,7 +2103,7 @@ static void edge_send(struct tty_struct *tty)
return;
}
- count = edge_buf_get(edge_port->ep_out_buf,
+ count = kfifo_out(&edge_port->write_fifo,
port->write_urb->transfer_buffer,
port->bulk_out_size);
@@ -2185,7 +2159,7 @@ static int edge_write_room(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
- room = edge_buf_space_avail(edge_port->ep_out_buf);
+ room = kfifo_avail(&edge_port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dbg("%s - returns %d", __func__, room);
@@ -2207,7 +2181,7 @@ static int edge_chars_in_buffer(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
- chars = edge_buf_data_avail(edge_port->ep_out_buf);
+ chars = kfifo_len(&edge_port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dbg("%s - returns %d", __func__, chars);
@@ -2664,8 +2638,8 @@ static int edge_startup(struct usb_serial *serial)
goto cleanup;
}
spin_lock_init(&edge_port->ep_lock);
- edge_port->ep_out_buf = edge_buf_alloc(EDGE_OUT_BUF_SIZE);
- if (edge_port->ep_out_buf == NULL) {
+ if (kfifo_alloc(&edge_port->write_fifo, EDGE_OUT_BUF_SIZE,
+ GFP_KERNEL)) {
dev_err(&serial->dev->dev, "%s - Out of memory\n",
__func__);
kfree(edge_port);
@@ -2682,7 +2656,7 @@ static int edge_startup(struct usb_serial *serial)
cleanup:
for (--i; i >= 0; --i) {
edge_port = usb_get_serial_port_data(serial->port[i]);
- edge_buf_free(edge_port->ep_out_buf);
+ kfifo_free(&edge_port->write_fifo);
kfree(edge_port);
usb_set_serial_port_data(serial->port[i], NULL);
}
@@ -2713,7 +2687,7 @@ static void edge_release(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i) {
edge_port = usb_get_serial_port_data(serial->port[i]);
- edge_buf_free(edge_port->ep_out_buf);
+ kfifo_free(&edge_port->write_fifo);
kfree(edge_port);
}
kfree(usb_get_serial_data(serial));
@@ -2763,182 +2737,6 @@ static int edge_remove_sysfs_attrs(struct usb_serial_port *port)
}
-/* Circular Buffer */
-
-/*
- * edge_buf_alloc
- *
- * Allocate a circular buffer and all associated memory.
- */
-
-static struct edge_buf *edge_buf_alloc(unsigned int size)
-{
- struct edge_buf *eb;
-
-
- if (size == 0)
- return NULL;
-
- eb = kmalloc(sizeof(struct edge_buf), GFP_KERNEL);
- if (eb == NULL)
- return NULL;
-
- eb->buf_buf = kmalloc(size, GFP_KERNEL);
- if (eb->buf_buf == NULL) {
- kfree(eb);
- return NULL;
- }
-
- eb->buf_size = size;
- eb->buf_get = eb->buf_put = eb->buf_buf;
-
- return eb;
-}
-
-
-/*
- * edge_buf_free
- *
- * Free the buffer and all associated memory.
- */
-
-static void edge_buf_free(struct edge_buf *eb)
-{
- if (eb) {
- kfree(eb->buf_buf);
- kfree(eb);
- }
-}
-
-
-/*
- * edge_buf_clear
- *
- * Clear out all data in the circular buffer.
- */
-
-static void edge_buf_clear(struct edge_buf *eb)
-{
- if (eb != NULL)
- eb->buf_get = eb->buf_put;
- /* equivalent to a get of all data available */
-}
-
-
-/*
- * edge_buf_data_avail
- *
- * Return the number of bytes of data available in the circular
- * buffer.
- */
-
-static unsigned int edge_buf_data_avail(struct edge_buf *eb)
-{
- if (eb == NULL)
- return 0;
- return ((eb->buf_size + eb->buf_put - eb->buf_get) % eb->buf_size);
-}
-
-
-/*
- * edge_buf_space_avail
- *
- * Return the number of bytes of space available in the circular
- * buffer.
- */
-
-static unsigned int edge_buf_space_avail(struct edge_buf *eb)
-{
- if (eb == NULL)
- return 0;
- return ((eb->buf_size + eb->buf_get - eb->buf_put - 1) % eb->buf_size);
-}
-
-
-/*
- * edge_buf_put
- *
- * Copy data data from a user buffer and put it into the circular buffer.
- * Restrict to the amount of space available.
- *
- * Return the number of bytes copied.
- */
-
-static unsigned int edge_buf_put(struct edge_buf *eb, const char *buf,
- unsigned int count)
-{
- unsigned int len;
-
-
- if (eb == NULL)
- return 0;
-
- len = edge_buf_space_avail(eb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = eb->buf_buf + eb->buf_size - eb->buf_put;
- if (count > len) {
- memcpy(eb->buf_put, buf, len);
- memcpy(eb->buf_buf, buf+len, count - len);
- eb->buf_put = eb->buf_buf + count - len;
- } else {
- memcpy(eb->buf_put, buf, count);
- if (count < len)
- eb->buf_put += count;
- else /* count == len */
- eb->buf_put = eb->buf_buf;
- }
-
- return count;
-}
-
-
-/*
- * edge_buf_get
- *
- * Get data from the circular buffer and copy to the given buffer.
- * Restrict to the amount of data available.
- *
- * Return the number of bytes copied.
- */
-
-static unsigned int edge_buf_get(struct edge_buf *eb, char *buf,
- unsigned int count)
-{
- unsigned int len;
-
-
- if (eb == NULL)
- return 0;
-
- len = edge_buf_data_avail(eb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = eb->buf_buf + eb->buf_size - eb->buf_get;
- if (count > len) {
- memcpy(buf, eb->buf_get, len);
- memcpy(buf+len, eb->buf_buf, count - len);
- eb->buf_get = eb->buf_buf + count - len;
- } else {
- memcpy(buf, eb->buf_get, count);
- if (count < len)
- eb->buf_get += count;
- else /* count == len */
- eb->buf_get = eb->buf_buf;
- }
-
- return count;
-}
-
-
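For reference, the removed helpers map onto the stock kfifo API roughly one-to-one, as used in the hunks above (kernel context assumed; note that kfifo_alloc rounds the size up to a power of two, so the available space can differ slightly from the old size-minus-one ring buffer):

/*
 *   edge_buf_alloc(size)      ->  kfifo_alloc(&fifo, size, GFP_KERNEL)
 *   edge_buf_free(eb)         ->  kfifo_free(&fifo)
 *   edge_buf_clear(eb)        ->  kfifo_reset_out(&fifo)
 *   edge_buf_data_avail(eb)   ->  kfifo_len(&fifo)
 *   edge_buf_space_avail(eb)  ->  kfifo_avail(&fifo)
 *   edge_buf_put(eb, buf, n)  ->  kfifo_in_locked(&fifo, buf, n, &lock)
 *   edge_buf_get(eb, buf, n)  ->  kfifo_out(&fifo, buf, n)
 */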
static struct usb_serial_driver edgeport_1port_device = {
.driver = {
.owner = THIS_MODULE,
diff --git a/drivers/usb/serial/io_ti.h b/drivers/usb/serial/io_ti.h
index cab84f2..1bd67b2 100644
--- a/drivers/usb/serial/io_ti.h
+++ b/drivers/usb/serial/io_ti.h
@@ -1,4 +1,4 @@
-/*****************************************************************************
+/*****************************************************************************
*
* Copyright (C) 1997-2002 Inside Out Networks, Inc.
*
@@ -22,10 +22,10 @@
#define DTK_ADDR_SPACE_I2C_TYPE_II 0x82 /* Addr is placed in I2C area */
#define DTK_ADDR_SPACE_I2C_TYPE_III 0x83 /* Addr is placed in I2C area */
-// UART Defines
-#define UMPMEM_BASE_UART1 0xFFA0 /* UMP UART1 base address */
-#define UMPMEM_BASE_UART2 0xFFB0 /* UMP UART2 base address */
-#define UMPMEM_OFFS_UART_LSR 0x05 /* UMP UART LSR register offset */
+/* UART Defines */
+#define UMPMEM_BASE_UART1 0xFFA0 /* UMP UART1 base address */
+#define UMPMEM_BASE_UART2 0xFFB0 /* UMP UART2 base address */
+#define UMPMEM_OFFS_UART_LSR 0x05 /* UMP UART LSR register offset */
/* Bits per character */
#define UMP_UART_CHAR5BITS 0x00
@@ -54,7 +54,7 @@
#define UMP_UART_LSR_RX_MASK 0x10
#define UMP_UART_LSR_TX_MASK 0x20
-#define UMP_UART_LSR_DATA_MASK ( LSR_PAR_ERR | LSR_FRM_ERR | LSR_BREAK )
+#define UMP_UART_LSR_DATA_MASK (LSR_PAR_ERR | LSR_FRM_ERR | LSR_BREAK)
/* Port Settings Constants) */
#define UMP_MASK_UART_FLAGS_RTS_FLOW 0x0001
@@ -79,50 +79,57 @@
#define UMP_PORT_DIR_OUT 0x01
#define UMP_PORT_DIR_IN 0x02
-// Address of Port 0
-#define UMPM_UART1_PORT 0x03
-
-// Commands
-#define UMPC_SET_CONFIG 0x05
-#define UMPC_OPEN_PORT 0x06
-#define UMPC_CLOSE_PORT 0x07
-#define UMPC_START_PORT 0x08
-#define UMPC_STOP_PORT 0x09
-#define UMPC_TEST_PORT 0x0A
-#define UMPC_PURGE_PORT 0x0B
-
-#define UMPC_COMPLETE_READ 0x80 // Force the Firmware to complete the current Read
-#define UMPC_HARDWARE_RESET 0x81 // Force UMP back into BOOT Mode
-#define UMPC_COPY_DNLD_TO_I2C 0x82 // Copy current download image to type 0xf2 record in 16k I2C
- // firmware will change 0xff record to type 2 record when complete
+/* Address of Port 0 */
+#define UMPM_UART1_PORT 0x03
+
+/* Commands */
+#define UMPC_SET_CONFIG 0x05
+#define UMPC_OPEN_PORT 0x06
+#define UMPC_CLOSE_PORT 0x07
+#define UMPC_START_PORT 0x08
+#define UMPC_STOP_PORT 0x09
+#define UMPC_TEST_PORT 0x0A
+#define UMPC_PURGE_PORT 0x0B
+
+/* Force the Firmware to complete the current Read */
+#define UMPC_COMPLETE_READ 0x80
+/* Force UMP back into BOOT Mode */
+#define UMPC_HARDWARE_RESET 0x81
+/*
+ * Copy current download image to type 0xf2 record in 16k I2C;
+ * firmware will change the 0xff record to a type 2 record when complete
+ */
+#define UMPC_COPY_DNLD_TO_I2C 0x82
- // Special function register commands
- // wIndex is register address
- // wValue is MSB/LSB mask/data
-#define UMPC_WRITE_SFR 0x83 // Write SFR Register
+/*
+ * Special function register commands
+ * wIndex is register address
+ * wValue is MSB/LSB mask/data
+ */
+#define UMPC_WRITE_SFR 0x83 /* Write SFR Register */
- // wIndex is register address
-#define UMPC_READ_SFR 0x84 // Read SRF Register
+/* wIndex is register address */
+#define UMPC_READ_SFR 0x84 /* Read SFR Register */
- // Set or Clear DTR (wValue bit 0 Set/Clear) wIndex ModuleID (port)
+/* Set or Clear DTR (wValue bit 0 Set/Clear) wIndex ModuleID (port) */
#define UMPC_SET_CLR_DTR 0x85
- // Set or Clear RTS (wValue bit 0 Set/Clear) wIndex ModuleID (port)
+/* Set or Clear RTS (wValue bit 0 Set/Clear) wIndex ModuleID (port) */
#define UMPC_SET_CLR_RTS 0x86
- // Set or Clear LOOPBACK (wValue bit 0 Set/Clear) wIndex ModuleID (port)
+/* Set or Clear LOOPBACK (wValue bit 0 Set/Clear) wIndex ModuleID (port) */
#define UMPC_SET_CLR_LOOPBACK 0x87
- // Set or Clear BREAK (wValue bit 0 Set/Clear) wIndex ModuleID (port)
+/* Set or Clear BREAK (wValue bit 0 Set/Clear) wIndex ModuleID (port) */
#define UMPC_SET_CLR_BREAK 0x88
- // Read MSR wIndex ModuleID (port)
+/* Read MSR wIndex ModuleID (port) */
#define UMPC_READ_MSR 0x89
- /* Toolkit commands */
- /* Read-write group */
-#define UMPC_MEMORY_READ 0x92
-#define UMPC_MEMORY_WRITE 0x93
+/* Toolkit commands */
+/* Read-write group */
+#define UMPC_MEMORY_READ 0x92
+#define UMPC_MEMORY_WRITE 0x93
/*
* UMP DMA Definitions
@@ -130,8 +137,7 @@
#define UMPD_OEDB1_ADDRESS 0xFF08
#define UMPD_OEDB2_ADDRESS 0xFF10
-struct out_endpoint_desc_block
-{
+struct out_endpoint_desc_block {
__u8 Configuration;
__u8 XBufAddr;
__u8 XByteCount;
@@ -147,8 +153,8 @@ struct out_endpoint_desc_block
* TYPE DEFINITIONS
* Structures for Firmware commands
*/
-struct ump_uart_config /* UART settings */
-{
+/* UART settings */
+struct ump_uart_config {
__u16 wBaudRate; /* Baud rate */
__u16 wFlags; /* Bitmap mask of flags */
__u8 bDataBits; /* 5..8 - data bits per character */
@@ -165,8 +171,8 @@ struct ump_uart_config /* UART settings */
* TYPE DEFINITIONS
* Structures for USB interrupts
*/
-struct ump_interrupt /* Interrupt packet structure */
-{
+/* Interrupt packet structure */
+struct ump_interrupt {
__u8 bICode; /* Interrupt code (interrupt num) */
__u8 bIInfo; /* Interrupt information */
} __attribute__((packed));
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index 8e1a491..51f83fb 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -26,7 +26,7 @@
//
// Definitions of USB product IDs
-//
+//
#define USB_VENDOR_ID_ION 0x1608 // Our VID
#define USB_VENDOR_ID_TI 0x0451 // TI VID
@@ -54,7 +54,7 @@
// Product IDs - assigned to match middle digit of serial number (No longer true)
#define ION_DEVICE_ID_80251_NETCHIP 0x020 // This bit is set in the PID if this edgeport hardware$
- // is based on the 80251+Netchip.
+ // is based on the 80251+Netchip.
#define ION_DEVICE_ID_GENERATION_1 0x00 // Value for 930 based edgeports
#define ION_DEVICE_ID_GENERATION_2 0x01 // Value for 80251+Netchip.
@@ -134,7 +134,7 @@
#define ION_DEVICE_ID_TI_EDGEPORT_416 0x0212 // Edgeport/416
#define ION_DEVICE_ID_TI_EDGEPORT_1 0x0215 // Edgeport/1 RS232
#define ION_DEVICE_ID_TI_EDGEPORT_42 0x0217 // Edgeport/42 4 hub 2 RS232
-#define ION_DEVICE_ID_TI_EDGEPORT_22I 0x021A // Edgeport/22I is an Edgeport/4 with ports 1&2 RS422 and ports 3&4 RS232
+#define ION_DEVICE_ID_TI_EDGEPORT_22I 0x021A // Edgeport/22I is an Edgeport/4 with ports 1&2 RS422 and ports 3&4 RS232
#define ION_DEVICE_ID_TI_EDGEPORT_2C 0x021B // Edgeport/2c RS232
#define ION_DEVICE_ID_TI_EDGEPORT_221C 0x021C // Edgeport/221c is a TI based Edgeport/2 with lucent chip and
// 2 external hub ports - Large I2C
@@ -142,7 +142,7 @@
// 2 external hub ports - Large I2C
#define ION_DEVICE_ID_TI_EDGEPORT_21C 0x021E // Edgeport/21c is a TI based Edgeport/2 with lucent chip
-// Generation 3 devices -- 3410 based edgport/1 (256 byte I2C)
+// Generation 3 devices -- 3410 based edgeport/1 (256 byte I2C)
#define ION_DEVICE_ID_TI_TI3410_EDGEPORT_1 0x0240 // Edgeport/1 RS232
#define ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I 0x0241 // Edgeport/1i- RS422 model
@@ -176,7 +176,7 @@
// Default to /P function
#define ION_DEVICE_ID_PLUS_PWR_HP4CD 0x30C // 5052 Plus Power HubPort/4CD+ (for Dell)
-#define ION_DEVICE_ID_PLUS_PWR_HP4C 0x30D // 5052 Plus Power HubPort/4C+
+#define ION_DEVICE_ID_PLUS_PWR_HP4C 0x30D // 5052 Plus Power HubPort/4C+
#define ION_DEVICE_ID_PLUS_PWR_PCI 0x30E // 3410 Plus Power PCI Host Controller 4 port
@@ -217,17 +217,17 @@
#define ION_DEVICE_ID_MT4X56USB 0x1403 // OEM device
-#define GENERATION_ID_FROM_USB_PRODUCT_ID( ProductId ) \
- ( (__u16) ((ProductId >> 8) & (ION_GENERATION_MASK)) )
+#define GENERATION_ID_FROM_USB_PRODUCT_ID(ProductId) \
+ ((__u16) ((ProductId >> 8) & (ION_GENERATION_MASK)))
-#define MAKE_USB_PRODUCT_ID( OemId, DeviceId ) \
- ( (__u16) (((OemId) << 10) || (DeviceId)) )
+#define MAKE_USB_PRODUCT_ID(OemId, DeviceId) \
+ ((__u16) (((OemId) << 10) | (DeviceId)))
-#define DEVICE_ID_FROM_USB_PRODUCT_ID( ProductId ) \
- ( (__u16) ((ProductId) & (EDGEPORT_DEVICE_ID_MASK)) )
+#define DEVICE_ID_FROM_USB_PRODUCT_ID(ProductId) \
+ ((__u16) ((ProductId) & (EDGEPORT_DEVICE_ID_MASK)))
-#define OEM_ID_FROM_USB_PRODUCT_ID( ProductId ) \
- ( (__u16) (((ProductId) >> 10) & 0x3F) )
+#define OEM_ID_FROM_USB_PRODUCT_ID(ProductId) \
+ ((__u16) (((ProductId) >> 10) & 0x3F))
//
// Definitions of parameters for download code. Note that these are
@@ -237,7 +237,7 @@
// TxCredits value below which driver won't bother sending (to prevent too many small writes).
// Send only if above 25%
-#define EDGE_FW_GET_TX_CREDITS_SEND_THRESHOLD(InitialCredit, MaxPacketSize) (max( ((InitialCredit) / 4), (MaxPacketSize) ))
+#define EDGE_FW_GET_TX_CREDITS_SEND_THRESHOLD(InitialCredit, MaxPacketSize) (max(((InitialCredit) / 4), (MaxPacketSize)))
#define EDGE_FW_BULK_MAX_PACKET_SIZE 64 // Max Packet Size for Bulk In Endpoint (EP1)
#define EDGE_FW_BULK_READ_BUFFER_SIZE 1024 // Size to use for Bulk reads
@@ -263,7 +263,7 @@
// wValue = 16-bit address
// wIndex = unused (though we could put segment 00: or FF: here)
// wLength = # bytes to read/write (max 64)
-//
+//
#define USB_REQUEST_ION_RESET_DEVICE 0 // Warm reboot Edgeport, retaining USB address
#define USB_REQUEST_ION_GET_EPIC_DESC 1 // Get Edgeport Compatibility Descriptor
@@ -278,7 +278,7 @@
#define USB_REQUEST_ION_ENABLE_SUSPEND 9 // Enable/Disable suspend feature
// (wValue != 0: Enable; wValue = 0: Disable)
-#define USB_REQUEST_ION_SEND_IOSP 10 // Send an IOSP command to the edgeport over the control pipe
+#define USB_REQUEST_ION_SEND_IOSP 10 // Send an IOSP command to the edgeport over the control pipe
#define USB_REQUEST_ION_RECV_IOSP 11 // Receive an IOSP command from the edgeport over the control pipe
@@ -301,8 +301,7 @@
// this is a "real" Edgeport.
//
-struct edge_compatibility_bits
-{
+struct edge_compatibility_bits {
// This __u32 defines which Vendor-specific commands/functionality
// the device supports on the default EP0 pipe.
@@ -334,24 +333,22 @@ struct edge_compatibility_bits
__u32 TrueEdgeport : 1; // 0001 Set if device is a 'real' Edgeport
// (Used only by driver, NEVER set by an EPiC device)
__u32 GenUnused : 31; // Available for future expansion, must be 0
-
};
#define EDGE_COMPATIBILITY_MASK0 0x0001
#define EDGE_COMPATIBILITY_MASK1 0x3FFF
#define EDGE_COMPATIBILITY_MASK2 0x0001
-struct edge_compatibility_descriptor
-{
+struct edge_compatibility_descriptor {
__u8 Length; // Descriptor Length (per USB spec)
__u8 DescType; // Descriptor Type (per USB spec, =DEVICE type)
__u8 EpicVer; // Version of EPiC spec supported
- // (Currently must be 1)
+ // (Currently must be 1)
__u8 NumPorts; // Number of serial ports supported
__u8 iDownloadFile; // Index of string containing download code filename
- // 0=no download, FF=download compiled into driver.
- __u8 Unused[ 3 ]; // Available for future expansion, must be 0
- // (Currently must be 0).
+ // 0=no download, FF=download compiled into driver.
+ __u8 Unused[3]; // Available for future expansion, must be 0
+ // (Currently must be 0).
__u8 MajorVersion; // Firmware version: xx.
__u8 MinorVersion; // yy.
__le16 BuildNumber; // zzzz (LE format)
@@ -359,9 +356,7 @@ struct edge_compatibility_descriptor
// The following structure contains __u32s, with each bit
// specifying whether the EPiC device supports the given
// command or functionality.
-
struct edge_compatibility_bits Supports;
-
};
// Values for iDownloadFile
@@ -391,8 +386,8 @@ struct edge_compatibility_descriptor
// Define the max block size that may be read or written
// in a read/write RAM/ROM command.
-#define MAX_SIZE_REQ_ION_READ_MEM ( (__u16) 64 )
-#define MAX_SIZE_REQ_ION_WRITE_MEM ( (__u16) 64 )
+#define MAX_SIZE_REQ_ION_READ_MEM ((__u16)64)
+#define MAX_SIZE_REQ_ION_WRITE_MEM ((__u16)64)
//
@@ -545,7 +540,7 @@ struct edge_boot_descriptor {
__u8 MajorVersion; // C6 Firmware version: xx.
__u8 MinorVersion; // C7 yy.
__le16 BuildNumber; // C8 zzzz (LE format)
-
+
__u16 EnumRootDescTable; // CA Root of ROM-based descriptor table
__u8 NumDescTypes; // CC Number of supported descriptor types
@@ -597,41 +592,36 @@ struct edge_boot_descriptor {
#define I2C_DESC_TYPE_ION 0 // Not defined by TI
-struct ti_i2c_desc
-{
+struct ti_i2c_desc {
__u8 Type; // Type of descriptor
__u16 Size; // Size of data only not including header
__u8 CheckSum; // Checksum (8 bit sum of data only)
__u8 Data[0]; // Data starts here
-}__attribute__((packed));
+} __attribute__((packed));
// for 5152 devices only (type 2 record)
// for 3410 the version is stored in the WATCHPORT_FIRMWARE_VERSION descriptor
-struct ti_i2c_firmware_rec
-{
+struct ti_i2c_firmware_rec {
__u8 Ver_Major; // Firmware Major version number
__u8 Ver_Minor; // Firmware Minor version number
__u8 Data[0]; // Download starts here
-}__attribute__((packed));
+} __attribute__((packed));
-struct watchport_firmware_version
-{
+struct watchport_firmware_version {
// Added 2 bytes for version number
__u8 Version_Major; // Download Version (for Watchport)
__u8 Version_Minor;
-}__attribute__((packed));
+} __attribute__((packed));
// Structure of header of download image in fw_down.h
-struct ti_i2c_image_header
-{
+struct ti_i2c_image_header {
__le16 Length;
__u8 CheckSum;
-}__attribute__((packed));
+} __attribute__((packed));
-struct ti_basic_descriptor
-{
+struct ti_basic_descriptor {
__u8 Power; // Self powered
// bit 7: 1 - power switching supported
// 0 - power switching not supported
@@ -663,9 +653,9 @@ struct ti_basic_descriptor
#define TI_I2C_SIZE_MASK 0x1f // 5 bits
#define TI_GET_I2C_SIZE(x) ((((x) & TI_I2C_SIZE_MASK)+1)*256)
-#define TI_MAX_I2C_SIZE ( 16 * 1024 )
+#define TI_MAX_I2C_SIZE (16 * 1024)
-#define TI_MANUF_VERSION_0 0
+#define TI_MANUF_VERSION_0 0
// IonConig2 flags
#define TI_CONFIG2_RS232 0x01
@@ -676,8 +666,7 @@ struct ti_basic_descriptor
#define TI_CONFIG2_WATCHPORT 0x10
-struct edge_ti_manuf_descriptor
-{
+struct edge_ti_manuf_descriptor {
__u8 IonConfig; // Config byte for ION manufacturing use
__u8 IonConfig2; // Expansion
__u8 Version; // Version
@@ -688,7 +677,7 @@ struct edge_ti_manuf_descriptor
__u8 HubConfig2; // Used to configure the Hub
__u8 TotalPorts; // Total Number of Com Ports for the entire device (All UMPs)
__u8 Reserved; // Reserved
-}__attribute__((packed));
+} __attribute__((packed));
#endif // if !defined(_USBVEND_H)
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 3fea929..28913fa 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -56,7 +56,6 @@
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
-#include "ipaq.h"
#define KP_RETRIES 100
@@ -64,7 +63,7 @@
* Version Information
*/
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v1.0"
#define DRIVER_AUTHOR "Ganesh Varadarajan <ganesh@veritas.com>"
#define DRIVER_DESC "USB PocketPC PDA driver"
@@ -76,20 +75,8 @@ static int initial_wait;
/* Function prototypes for an ipaq */
static int ipaq_open(struct tty_struct *tty,
struct usb_serial_port *port);
-static void ipaq_close(struct usb_serial_port *port);
static int ipaq_calc_num_ports(struct usb_serial *serial);
static int ipaq_startup(struct usb_serial *serial);
-static int ipaq_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count);
-static int ipaq_write_bulk(struct usb_serial_port *port,
- const unsigned char *buf, int count);
-static void ipaq_write_gather(struct usb_serial_port *port);
-static void ipaq_read_bulk_callback(struct urb *urb);
-static void ipaq_write_bulk_callback(struct urb *urb);
-static int ipaq_write_room(struct tty_struct *tty);
-static int ipaq_chars_in_buffer(struct tty_struct *tty);
-static void ipaq_destroy_lists(struct usb_serial_port *port);
-
static struct usb_device_id ipaq_id_table [] = {
/* The first entry is a placeholder for the insmod-specified device */
@@ -558,7 +545,7 @@ static struct usb_driver ipaq_driver = {
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = ipaq_id_table,
- .no_dynamic_id = 1,
+ .no_dynamic_id = 1,
};
@@ -569,91 +556,24 @@ static struct usb_serial_driver ipaq_device = {
.name = "ipaq",
},
.description = "PocketPC PDA",
- .usb_driver = &ipaq_driver,
+ .usb_driver = &ipaq_driver,
.id_table = ipaq_id_table,
+ .bulk_in_size = 256,
+ .bulk_out_size = 256,
.open = ipaq_open,
- .close = ipaq_close,
.attach = ipaq_startup,
.calc_num_ports = ipaq_calc_num_ports,
- .write = ipaq_write,
- .write_room = ipaq_write_room,
- .chars_in_buffer = ipaq_chars_in_buffer,
- .read_bulk_callback = ipaq_read_bulk_callback,
- .write_bulk_callback = ipaq_write_bulk_callback,
};
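With the generic bulk paths in place, a minimal single-port driver definition shrinks to the fields the core cannot infer. A hypothetical sketch (identifiers marked "assumed" are placeholders, not from this patch):

static struct usb_serial_driver example_device = {
	.driver = {
		.owner		= THIS_MODULE,
		.name		= "example",
	},
	.description	= "Example PDA",
	.id_table	= example_id_table,	/* assumed to exist */
	.num_ports	= 1,
	.bulk_in_size	= 256,		/* read buffer, as for ipaq above */
	.bulk_out_size	= 256,		/* write fifo/urb buffer size */
	.open		= example_open,		/* assumed to exist */
};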
-static spinlock_t write_list_lock;
-static int bytes_in;
-static int bytes_out;
-
static int ipaq_open(struct tty_struct *tty,
struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
- struct ipaq_private *priv;
- struct ipaq_packet *pkt;
- int i, result = 0;
+ int result = 0;
int retries = connect_retries;
dbg("%s - port %d", __func__, port->number);
- bytes_in = 0;
- bytes_out = 0;
- priv = kmalloc(sizeof(struct ipaq_private), GFP_KERNEL);
- if (priv == NULL) {
- dev_err(&port->dev, "%s - Out of memory\n", __func__);
- return -ENOMEM;
- }
- usb_set_serial_port_data(port, priv);
- priv->active = 0;
- priv->queue_len = 0;
- priv->free_len = 0;
- INIT_LIST_HEAD(&priv->queue);
- INIT_LIST_HEAD(&priv->freelist);
-
- for (i = 0; i < URBDATA_QUEUE_MAX / PACKET_SIZE; i++) {
- pkt = kmalloc(sizeof(struct ipaq_packet), GFP_KERNEL);
- if (pkt == NULL)
- goto enomem;
-
- pkt->data = kmalloc(PACKET_SIZE, GFP_KERNEL);
- if (pkt->data == NULL) {
- kfree(pkt);
- goto enomem;
- }
- pkt->len = 0;
- pkt->written = 0;
- INIT_LIST_HEAD(&pkt->list);
- list_add(&pkt->list, &priv->freelist);
- priv->free_len += PACKET_SIZE;
- }
-
- /*
- * Lose the small buffers usbserial provides. Make larger ones.
- */
-
- kfree(port->bulk_in_buffer);
- kfree(port->bulk_out_buffer);
- /* make sure the generic serial code knows */
- port->bulk_out_buffer = NULL;
-
- port->bulk_in_buffer = kmalloc(URBDATA_SIZE, GFP_KERNEL);
- if (port->bulk_in_buffer == NULL)
- goto enomem;
-
- port->bulk_out_buffer = kmalloc(URBDATA_SIZE, GFP_KERNEL);
- if (port->bulk_out_buffer == NULL) {
- /* the buffer is useless, free it */
- kfree(port->bulk_in_buffer);
- port->bulk_in_buffer = NULL;
- goto enomem;
- }
- port->read_urb->transfer_buffer = port->bulk_in_buffer;
- port->write_urb->transfer_buffer = port->bulk_out_buffer;
- port->read_urb->transfer_buffer_length = URBDATA_SIZE;
- port->bulk_out_size = port->write_urb->transfer_buffer_length
- = URBDATA_SIZE;
-
msleep(1000*initial_wait);
/*
@@ -663,7 +583,6 @@ static int ipaq_open(struct tty_struct *tty,
* through. Since this has a reasonably high failure rate, we retry
* several times.
*/
-
while (retries--) {
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21,
@@ -673,269 +592,15 @@ static int ipaq_open(struct tty_struct *tty,
msleep(1000);
}
-
if (!retries && result) {
- dev_err(&port->dev, "%s - failed doing control urb, error %d\n", __func__, result);
- goto error;
- }
-
- /* Start reading from the device */
- usb_fill_bulk_urb(port->read_urb, serial->dev,
- usb_rcvbulkpipe(serial->dev, port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- ipaq_read_bulk_callback, port);
-
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result) {
- dev_err(&port->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, result);
- goto error;
- }
-
- return 0;
-
-enomem:
- result = -ENOMEM;
- dev_err(&port->dev, "%s - Out of memory\n", __func__);
-error:
- ipaq_destroy_lists(port);
- kfree(priv);
- return result;
-}
-
-
-static void ipaq_close(struct usb_serial_port *port)
-{
- struct ipaq_private *priv = usb_get_serial_port_data(port);
-
- dbg("%s - port %d", __func__, port->number);
-
- /*
- * shut down bulk read and write
- */
- usb_kill_urb(port->write_urb);
- usb_kill_urb(port->read_urb);
- ipaq_destroy_lists(port);
- kfree(priv);
- usb_set_serial_port_data(port, NULL);
-
- /* Uncomment the following line if you want to see some statistics
- * in your syslog */
- /* info ("Bytes In = %d Bytes Out = %d", bytes_in, bytes_out); */
-}
-
-static void ipaq_read_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
- unsigned char *data = urb->transfer_buffer;
- int result;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - nonzero read bulk status received: %d",
- __func__, status);
- return;
- }
-
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
-
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
- bytes_in += urb->actual_length;
- }
- tty_kref_put(tty);
-
- /* Continue trying to always read */
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev, port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- ipaq_read_bulk_callback, port);
- result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- if (result)
- dev_err(&port->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- return;
-}
-
-static int ipaq_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- const unsigned char *current_position = buf;
- int bytes_sent = 0;
- int transfer_size;
-
- dbg("%s - port %d", __func__, port->number);
-
- while (count > 0) {
- transfer_size = min(count, PACKET_SIZE);
- if (ipaq_write_bulk(port, current_position, transfer_size))
- break;
- current_position += transfer_size;
- bytes_sent += transfer_size;
- count -= transfer_size;
- bytes_out += transfer_size;
+ dev_err(&port->dev, "%s - failed doing control urb, error %d\n",
+ __func__, result);
+ return result;
}
- return bytes_sent;
+ return usb_serial_generic_open(tty, port);
}
-static int ipaq_write_bulk(struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- struct ipaq_private *priv = usb_get_serial_port_data(port);
- struct ipaq_packet *pkt = NULL;
- int result = 0;
- unsigned long flags;
-
- if (priv->free_len <= 0) {
- dbg("%s - we're stuffed", __func__);
- return -EAGAIN;
- }
-
- spin_lock_irqsave(&write_list_lock, flags);
- if (!list_empty(&priv->freelist)) {
- pkt = list_entry(priv->freelist.next, struct ipaq_packet, list);
- list_del(&pkt->list);
- priv->free_len -= PACKET_SIZE;
- }
- spin_unlock_irqrestore(&write_list_lock, flags);
- if (pkt == NULL) {
- dbg("%s - we're stuffed", __func__);
- return -EAGAIN;
- }
-
- memcpy(pkt->data, buf, count);
- usb_serial_debug_data(debug, &port->dev, __func__, count, pkt->data);
-
- pkt->len = count;
- pkt->written = 0;
- spin_lock_irqsave(&write_list_lock, flags);
- list_add_tail(&pkt->list, &priv->queue);
- priv->queue_len += count;
- if (priv->active == 0) {
- priv->active = 1;
- ipaq_write_gather(port);
- spin_unlock_irqrestore(&write_list_lock, flags);
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result)
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, result);
- } else {
- spin_unlock_irqrestore(&write_list_lock, flags);
- }
- return result;
-}
-
-static void ipaq_write_gather(struct usb_serial_port *port)
-{
- struct ipaq_private *priv = usb_get_serial_port_data(port);
- struct usb_serial *serial = port->serial;
- int count, room;
- struct ipaq_packet *pkt, *tmp;
- struct urb *urb = port->write_urb;
-
- room = URBDATA_SIZE;
- list_for_each_entry_safe(pkt, tmp, &priv->queue, list) {
- count = min(room, (int)(pkt->len - pkt->written));
- memcpy(urb->transfer_buffer + (URBDATA_SIZE - room),
- pkt->data + pkt->written, count);
- room -= count;
- pkt->written += count;
- priv->queue_len -= count;
- if (pkt->written == pkt->len) {
- list_move(&pkt->list, &priv->freelist);
- priv->free_len += PACKET_SIZE;
- }
- if (room == 0)
- break;
- }
-
- count = URBDATA_SIZE - room;
- usb_fill_bulk_urb(port->write_urb, serial->dev,
- usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer, count,
- ipaq_write_bulk_callback, port);
- return;
-}
-
-static void ipaq_write_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct ipaq_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
- int result;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - nonzero write bulk status received: %d",
- __func__, status);
- return;
- }
-
- spin_lock_irqsave(&write_list_lock, flags);
- if (!list_empty(&priv->queue)) {
- ipaq_write_gather(port);
- spin_unlock_irqrestore(&write_list_lock, flags);
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result)
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, result);
- } else {
- priv->active = 0;
- spin_unlock_irqrestore(&write_list_lock, flags);
- }
-
- usb_serial_port_softint(port);
-}
-
-static int ipaq_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct ipaq_private *priv = usb_get_serial_port_data(port);
-
- dbg("%s - freelen %d", __func__, priv->free_len);
- return priv->free_len;
-}
-
-static int ipaq_chars_in_buffer(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct ipaq_private *priv = usb_get_serial_port_data(port);
-
- dbg("%s - queuelen %d", __func__, priv->queue_len);
- return priv->queue_len;
-}
-
-static void ipaq_destroy_lists(struct usb_serial_port *port)
-{
- struct ipaq_private *priv = usb_get_serial_port_data(port);
- struct ipaq_packet *pkt, *tmp;
-
- list_for_each_entry_safe(pkt, tmp, &priv->queue, list) {
- kfree(pkt->data);
- kfree(pkt);
- }
- list_for_each_entry_safe(pkt, tmp, &priv->freelist, list) {
- kfree(pkt->data);
- kfree(pkt);
- }
-}
-
-
static int ipaq_calc_num_ports(struct usb_serial *serial)
{
/*
@@ -994,7 +659,6 @@ static int ipaq_startup(struct usb_serial *serial)
static int __init ipaq_init(void)
{
int retval;
- spin_lock_init(&write_list_lock);
retval = usb_serial_register(&ipaq_device);
if (retval)
goto failed_usb_serial_register;
@@ -1015,7 +679,6 @@ failed_usb_serial_register:
return retval;
}
-
static void __exit ipaq_exit(void)
{
usb_deregister(&ipaq_driver);
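The ipaq conversion above follows the shape used throughout this patch: declare the wanted endpoint buffer sizes in the usb_serial_driver and let usb_serial_generic_open() allocate and submit the read URBs. A condensed sketch of that pattern, with kernel includes omitted and all names purely illustrative:

static int example_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	int result;

	/* device-specific setup (vendor control requests etc.) goes here */

	/* hand read-URB submission off to the usb-serial core */
	result = usb_serial_generic_open(tty, port);
	if (result)
		dev_err(&port->dev, "open failed: %d\n", result);
	return result;
}

static struct usb_serial_driver example_device = {
	.driver		= { .owner = THIS_MODULE, .name = "example" },
	.num_ports	= 1,
	.bulk_in_size	= 256,	/* overrides the endpoint-derived default */
	.bulk_out_size	= 256,
	.open		= example_open,
};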
diff --git a/drivers/usb/serial/ipaq.h b/drivers/usb/serial/ipaq.h
deleted file mode 100644
index 2b90359..0000000
--- a/drivers/usb/serial/ipaq.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * USB Compaq iPAQ driver
- *
- * Copyright (C) 2001 - 2002
- * Ganesh Varadarajan <ganesh@veritas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-#ifndef __LINUX_USB_SERIAL_IPAQ_H
-#define __LINUX_USB_SERIAL_IPAQ_H
-
-/*
- * Since we can't queue our bulk write urbs (don't know why - it just
- * doesn't work), we can send down only one write urb at a time. The simplistic
- * approach taken by the generic usbserial driver will work, but it's not good
- * for performance. Therefore, we buffer upto URBDATA_QUEUE_MAX bytes of write
- * requests coming from the line discipline. This is done by chaining them
- * in lists of struct ipaq_packet, each packet holding a maximum of
- * PACKET_SIZE bytes.
- *
- * ipaq_write() can be called from bottom half context; hence we can't
- * allocate memory for packets there. So we initialize a pool of packets at
- * the first open and maintain a freelist.
- *
- * The value of PACKET_SIZE was empirically determined by
- * checking the maximum write sizes sent down by the ppp ldisc.
- * URBDATA_QUEUE_MAX is set to 64K, which is the maximum TCP window size.
- */
-
-struct ipaq_packet {
- char *data;
- size_t len;
- size_t written;
- struct list_head list;
-};
-
-struct ipaq_private {
- int active;
- int queue_len;
- int free_len;
- struct list_head queue;
- struct list_head freelist;
-};
-
-#define URBDATA_SIZE 4096
-#define URBDATA_QUEUE_MAX (64 * 1024)
-#define PACKET_SIZE 256
-
-#endif
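The deleted header documented a hand-rolled packet pool for buffering writes; the generic usb-serial layer now provides a per-port write FIFO instead. A minimal sketch of the replacement write path, assuming the port->write_fifo and prepare_write_buffer hooks used elsewhere in this patch (simplified, not the core's actual code):

static int example_prepare_write_buffer(struct usb_serial_port *port,
					void *dest, size_t size)
{
	/* drain up to 'size' queued bytes into the outgoing URB buffer */
	return kfifo_out_locked(&port->write_fifo, dest, size, &port->lock);
}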
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index e1d0784..ca77e88 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -34,7 +34,6 @@
* DCD, DTR, RTS, CTS which are currently faked.
* It's good enough for PPP at this point. It's based off all kinds of
* code found in usb/serial and usb/class
- *
*/
#include <linux/kernel.h>
@@ -52,7 +51,7 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.3"
+#define DRIVER_VERSION "v0.4"
#define DRIVER_AUTHOR "Roelf Diedericks"
#define DRIVER_DESC "IPWireless tty driver"
@@ -65,8 +64,6 @@
/* Message sizes */
#define EVENT_BUFFER_SIZE 0xFF
#define CHAR2INT16(c1, c0) (((u32)((c1) & 0xff) << 8) + (u32)((c0) & 0xff))
-#define NUM_BULK_URBS 24
-#define NUM_CONTROL_URBS 16
/* vendor/product pairs that are known to work with this driver */
#define IPW_VID 0x0bc3
@@ -151,47 +148,6 @@ static struct usb_driver usb_ipw_driver = {
static int debug;
-static void ipw_read_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- unsigned char *data = urb->transfer_buffer;
- struct tty_struct *tty;
- int result;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - nonzero read bulk status received: %d",
- __func__, status);
- return;
- }
-
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
-
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
- }
- tty_kref_put(tty);
-
- /* Continue trying to always read */
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- ipw_read_bulk_callback, port);
- result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- if (result)
- dev_err(&port->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- return;
-}
-
static int ipw_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_device *dev = port->serial->dev;
@@ -229,15 +185,7 @@ static int ipw_open(struct tty_struct *tty, struct usb_serial_port *port)
/*--2: Start reading from the device */
dbg("%s: setting up bulk read callback", __func__);
- usb_fill_bulk_urb(port->read_urb, dev,
- usb_rcvbulkpipe(dev, port->bulk_in_endpointAddress),
- port->bulk_in_buffer,
- port->bulk_in_size,
- ipw_read_bulk_callback, port);
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result < 0)
- dbg("%s - usb_submit_urb(read bulk) failed with status %d",
- __func__, result);
+ usb_serial_generic_open(tty, port);
/*--3: Tell the modem to open the floodgates on the rx bulk channel */
dbg("%s:asking modem for RxRead (RXBULK_ON)", __func__);
@@ -267,35 +215,6 @@ static int ipw_open(struct tty_struct *tty, struct usb_serial_port *port)
dev_err(&port->dev,
"initial flowcontrol failed (error = %d)\n", result);
-
- /*--5: raise the dtr */
- dbg("%s:raising dtr", __func__);
- result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- IPW_SIO_SET_PIN,
- USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
- IPW_PIN_SETDTR,
- 0,
- NULL,
- 0,
- 200000);
- if (result < 0)
- dev_err(&port->dev,
- "setting dtr failed (error = %d)\n", result);
-
- /*--6: raise the rts */
- dbg("%s:raising rts", __func__);
- result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- IPW_SIO_SET_PIN,
- USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
- IPW_PIN_SETRTS,
- 0,
- NULL,
- 0,
- 200000);
- if (result < 0)
- dev_err(&port->dev,
- "setting dtr failed (error = %d)\n", result);
-
kfree(buf_flow_init);
return 0;
}
@@ -305,8 +224,8 @@ static void ipw_dtr_rts(struct usb_serial_port *port, int on)
struct usb_device *dev = port->serial->dev;
int result;
- /*--1: drop the dtr */
- dbg("%s:dropping dtr", __func__);
+ dbg("%s: on = %d", __func__, on);
+
result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
IPW_SIO_SET_PIN,
USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
@@ -316,22 +235,20 @@ static void ipw_dtr_rts(struct usb_serial_port *port, int on)
0,
200000);
if (result < 0)
- dev_err(&port->dev, "dropping dtr failed (error = %d)\n",
+ dev_err(&port->dev, "setting dtr failed (error = %d)\n",
result);
- /*--2: drop the rts */
- dbg("%s:dropping rts", __func__);
result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
IPW_SIO_SET_PIN, USB_TYPE_VENDOR |
- USB_RECIP_INTERFACE | USB_DIR_OUT,
+ USB_RECIP_INTERFACE | USB_DIR_OUT,
on ? IPW_PIN_SETRTS : IPW_PIN_CLRRTS,
0,
NULL,
0,
200000);
if (result < 0)
- dev_err(&port->dev,
- "dropping rts failed (error = %d)\n", result);
+ dev_err(&port->dev, "setting rts failed (error = %d)\n",
+ result);
}
static void ipw_close(struct usb_serial_port *port)
@@ -368,83 +285,7 @@ static void ipw_close(struct usb_serial_port *port)
dev_err(&port->dev,
"Disabling bulk RxRead failed (error = %d)\n", result);
- /* shutdown any in-flight urbs that we know about */
- usb_kill_urb(port->read_urb);
- usb_kill_urb(port->write_urb);
-}
-
-static void ipw_write_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- int status = urb->status;
-
- dbg("%s", __func__);
-
- port->write_urb_busy = 0;
-
- if (status)
- dbg("%s - nonzero write bulk status received: %d",
- __func__, status);
-
- usb_serial_port_softint(port);
-}
-
-static int ipw_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- struct usb_device *dev = port->serial->dev;
- int ret;
-
- dbg("%s: TOP: count=%d, in_interrupt=%ld", __func__,
- count, in_interrupt());
-
- if (count == 0) {
- dbg("%s - write request of 0 bytes", __func__);
- return 0;
- }
-
- spin_lock_bh(&port->lock);
- if (port->write_urb_busy) {
- spin_unlock_bh(&port->lock);
- dbg("%s - already writing", __func__);
- return 0;
- }
- port->write_urb_busy = 1;
- spin_unlock_bh(&port->lock);
-
- count = min(count, port->bulk_out_size);
- memcpy(port->bulk_out_buffer, buf, count);
-
- dbg("%s count now:%d", __func__, count);
-
- usb_fill_bulk_urb(port->write_urb, dev,
- usb_sndbulkpipe(dev, port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer,
- count,
- ipw_write_bulk_callback,
- port);
-
- ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (ret != 0) {
- port->write_urb_busy = 0;
- dbg("%s - usb_submit_urb(write bulk) failed with error = %d",
- __func__, ret);
- return ret;
- }
-
- dbg("%s returning %d", __func__, count);
- return count;
-}
-
-static int ipw_probe(struct usb_serial_port *port)
-{
- return 0;
-}
-
-static int ipw_disconnect(struct usb_serial_port *port)
-{
- usb_set_serial_port_data(port, NULL);
- return 0;
+ usb_serial_generic_close(port);
}
static struct usb_serial_driver ipw_device = {
@@ -453,17 +294,12 @@ static struct usb_serial_driver ipw_device = {
.name = "ipw",
},
.description = "IPWireless converter",
- .usb_driver = &usb_ipw_driver,
+ .usb_driver = &usb_ipw_driver,
.id_table = usb_ipw_ids,
.num_ports = 1,
.open = ipw_open,
.close = ipw_close,
.dtr_rts = ipw_dtr_rts,
- .port_probe = ipw_probe,
- .port_remove = ipw_disconnect,
- .write = ipw_write,
- .write_bulk_callback = ipw_write_bulk_callback,
- .read_bulk_callback = ipw_read_bulk_callback,
};
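With ipw_read_bulk_callback() gone, the generic open path submits the read URBs and pushes completed data to the tty. Roughly what the removed callback did by hand, as a simplified sketch with error handling and URB refilling details omitted:

static void example_read_bulk_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct tty_struct *tty;

	if (urb->status)
		return;		/* the real code also handles unlink and errors */

	tty = tty_port_tty_get(&port->port);
	if (tty && urb->actual_length) {
		tty_insert_flip_string(tty, urb->transfer_buffer,
				       urb->actual_length);
		tty_flip_buffer_push(tty);
	}
	tty_kref_put(tty);

	usb_submit_urb(urb, GFP_ATOMIC);	/* keep reading */
}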
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 4a0f519..ccbce40 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2001-2002 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2002 Gary Brubaker (xavyer@ix.netcom.com)
+ * Copyright (C) 2010 Johan Hovold (jhovold@gmail.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -72,8 +73,8 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.4"
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>"
+#define DRIVER_VERSION "v0.5"
+#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "USB IR Dongle driver"
static int debug;
@@ -87,11 +88,9 @@ static int xbof = -1;
static int ir_startup (struct usb_serial *serial);
static int ir_open(struct tty_struct *tty, struct usb_serial_port *port);
-static void ir_close(struct usb_serial_port *port);
-static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count);
-static void ir_write_bulk_callback (struct urb *urb);
-static void ir_read_bulk_callback (struct urb *urb);
+static int ir_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size);
+static void ir_process_read_urb(struct urb *urb);
static void ir_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios);
@@ -130,10 +129,8 @@ static struct usb_serial_driver ir_device = {
.set_termios = ir_set_termios,
.attach = ir_startup,
.open = ir_open,
- .close = ir_close,
- .write = ir_write,
- .write_bulk_callback = ir_write_bulk_callback,
- .read_bulk_callback = ir_read_bulk_callback,
+ .prepare_write_buffer = ir_prepare_write_buffer,
+ .process_read_urb = ir_process_read_urb,
};
static inline void irda_usb_dump_class_desc(struct usb_irda_cs_descriptor *desc)
@@ -198,7 +195,6 @@ error:
return NULL;
}
-
static u8 ir_xbof_change(u8 xbof)
{
u8 result;
@@ -237,7 +233,6 @@ static u8 ir_xbof_change(u8 xbof)
return(result);
}
-
static int ir_startup(struct usb_serial *serial)
{
struct usb_irda_cs_descriptor *irda_desc;
@@ -297,83 +292,22 @@ static int ir_startup(struct usb_serial *serial)
static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- char *buffer;
- int result = 0;
+ int i;
dbg("%s - port %d", __func__, port->number);
- if (buffer_size) {
- /* override the default buffer sizes */
- buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!buffer) {
- dev_err(&port->dev, "%s - out of memory.\n", __func__);
- return -ENOMEM;
- }
- kfree(port->read_urb->transfer_buffer);
- port->read_urb->transfer_buffer = buffer;
- port->read_urb->transfer_buffer_length = buffer_size;
-
- buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!buffer) {
- dev_err(&port->dev, "%s - out of memory.\n", __func__);
- return -ENOMEM;
- }
- kfree(port->write_urb->transfer_buffer);
- port->write_urb->transfer_buffer = buffer;
- port->write_urb->transfer_buffer_length = buffer_size;
- port->bulk_out_size = buffer_size;
- }
+ for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
+ port->write_urbs[i]->transfer_flags = URB_ZERO_PACKET;
/* Start reading from the device */
- usb_fill_bulk_urb(
- port->read_urb,
- port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- ir_read_bulk_callback,
- port);
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result)
- dev_err(&port->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, result);
-
- return result;
+ return usb_serial_generic_open(tty, port);
}
-static void ir_close(struct usb_serial_port *port)
+static int ir_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size)
{
- dbg("%s - port %d", __func__, port->number);
-
- /* shutdown our bulk read */
- usb_kill_urb(port->read_urb);
-}
-
-static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- unsigned char *transfer_buffer;
- int result;
- int transfer_size;
-
- dbg("%s - port = %d, count = %d", __func__, port->number, count);
-
- if (count == 0)
- return 0;
-
- spin_lock_bh(&port->lock);
- if (port->write_urb_busy) {
- spin_unlock_bh(&port->lock);
- dbg("%s - already writing", __func__);
- return 0;
- }
- port->write_urb_busy = 1;
- spin_unlock_bh(&port->lock);
-
- transfer_buffer = port->write_urb->transfer_buffer;
- transfer_size = min(count, port->bulk_out_size - 1);
+ unsigned char *buf = dest;
+ int count;
/*
* The first byte of the packet we send to the device contains an
@@ -382,119 +316,57 @@ static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
*
* See section 5.4.2.2 of the USB IrDA spec.
*/
- *transfer_buffer = ir_xbof | ir_baud;
- ++transfer_buffer;
-
- memcpy(transfer_buffer, buf, transfer_size);
+ *buf = ir_xbof | ir_baud;
- usb_fill_bulk_urb(
- port->write_urb,
- port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer,
- transfer_size + 1,
- ir_write_bulk_callback,
- port);
-
- port->write_urb->transfer_flags = URB_ZERO_PACKET;
-
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result) {
- port->write_urb_busy = 0;
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, result);
- } else
- result = transfer_size;
-
- return result;
+ count = kfifo_out_locked(&port->write_fifo, buf + 1, size - 1,
+ &port->lock);
+ return count + 1;
}
-static void ir_write_bulk_callback(struct urb *urb)
+static void ir_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
+ unsigned char *data = urb->transfer_buffer;
+ struct tty_struct *tty;
- port->write_urb_busy = 0;
- if (status) {
- dbg("%s - nonzero write bulk status received: %d",
- __func__, status);
+ if (!urb->actual_length)
return;
- }
+ /*
+ * The first byte of the packet we get from the device
+ * contains a busy indicator and baud rate change.
+ * See section 5.4.1.2 of the USB IrDA spec.
+ */
+ if (*data & 0x0f)
+ ir_baud = *data & 0x0f;
- usb_serial_debug_data(
- debug,
- &port->dev,
- __func__,
- urb->actual_length,
- urb->transfer_buffer);
+ if (urb->actual_length == 1)
+ return;
- usb_serial_port_softint(port);
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
+ tty_insert_flip_string(tty, data + 1, urb->actual_length - 1);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
}
-static void ir_read_bulk_callback(struct urb *urb)
+static void ir_set_termios_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
- unsigned char *data = urb->transfer_buffer;
- int result;
int status = urb->status;
dbg("%s - port %d", __func__, port->number);
- switch (status) {
- case 0: /* Successful */
- /*
- * The first byte of the packet we get from the device
- * contains a busy indicator and baud rate change.
- * See section 5.4.1.2 of the USB IrDA spec.
- */
- if ((*data & 0x0f) > 0)
- ir_baud = *data & 0x0f;
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
- tty = tty_port_tty_get(&port->port);
- tty_insert_flip_string(tty, data+1, urb->actual_length - 1);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
-
- /*
- * No break here.
- * We want to resubmit the urb so we can read
- * again.
- */
-
- case -EPROTO: /* taking inspiration from pl2303.c */
- /* Continue trying to always read */
- usb_fill_bulk_urb(
- port->read_urb,
- port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- ir_read_bulk_callback,
- port);
-
- result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- if (result)
- dev_err(&port->dev, "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- break ;
- default:
- dbg("%s - nonzero read bulk status received: %d",
- __func__, status);
- break ;
- }
- return;
+ kfree(urb->transfer_buffer);
+
+ if (status)
+ dbg("%s - non-zero urb status: %d", __func__, status);
}
static void ir_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
+ struct urb *urb;
unsigned char *transfer_buffer;
int result;
speed_t baud;
@@ -548,41 +420,63 @@ static void ir_set_termios(struct tty_struct *tty,
else
ir_xbof = ir_xbof_change(xbof) ;
- /* FIXME need to check to see if our write urb is busy right
- * now, or use a urb pool.
- *
+ /* Only speed changes are supported */
+ tty_termios_copy_hw(tty->termios, old_termios);
+ tty_encode_baud_rate(tty, baud, baud);
+
+ /*
* send the baud change out on an "empty" data packet
*/
- transfer_buffer = port->write_urb->transfer_buffer;
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ dev_err(&port->dev, "%s - no more urbs\n", __func__);
+ return;
+ }
+ transfer_buffer = kmalloc(1, GFP_KERNEL);
+ if (!transfer_buffer) {
+ dev_err(&port->dev, "%s - out of memory\n", __func__);
+ goto err_buf;
+ }
+
*transfer_buffer = ir_xbof | ir_baud;
usb_fill_bulk_urb(
- port->write_urb,
+ urb,
port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer,
+ transfer_buffer,
1,
- ir_write_bulk_callback,
+ ir_set_termios_callback,
port);
- port->write_urb->transfer_flags = URB_ZERO_PACKET;
+ urb->transfer_flags = URB_ZERO_PACKET;
- result = usb_submit_urb(port->write_urb, GFP_KERNEL);
- if (result)
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, result);
+ result = usb_submit_urb(urb, GFP_KERNEL);
+ if (result) {
+ dev_err(&port->dev, "%s - failed to submit urb: %d\n",
+ __func__, result);
+ goto err_subm;
+ }
- /* Only speed changes are supported */
- tty_termios_copy_hw(tty->termios, old_termios);
- tty_encode_baud_rate(tty, baud, baud);
+ usb_free_urb(urb);
+
+ return;
+err_subm:
+ kfree(transfer_buffer);
+err_buf:
+ usb_free_urb(urb);
}
static int __init ir_init(void)
{
int retval;
+ if (buffer_size) {
+ ir_device.bulk_in_size = buffer_size;
+ ir_device.bulk_out_size = buffer_size;
+ }
+
retval = usb_serial_register(&ir_device);
if (retval)
goto failed_usb_serial_register;
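ir_prepare_write_buffer() and ir_process_read_urb() both hinge on a single header byte defined by the USB IrDA bridge spec: the low nibble is a baud-rate code, the remaining bits carry the xbof count outbound or a busy indication inbound. A tiny illustration of that byte (helper names are made up, values follow the driver code above):

static inline unsigned char ir_make_header(unsigned char ir_xbof,
					   unsigned char ir_baud)
{
	return ir_xbof | ir_baud;	/* prepended to every OUT packet */
}

static inline unsigned char ir_header_baud(unsigned char hdr)
{
	return hdr & 0x0f;		/* 0 means "baud rate unchanged" */
}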
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 43f13cf..74551cb 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1044,34 +1044,6 @@ static int iuu_open(struct tty_struct *tty, struct usb_serial_port *port)
if (buf == NULL)
return -ENOMEM;
- /* fixup the endpoint buffer size */
- kfree(port->bulk_out_buffer);
- port->bulk_out_buffer = kmalloc(512, GFP_KERNEL);
- port->bulk_out_size = 512;
- kfree(port->bulk_in_buffer);
- port->bulk_in_buffer = kmalloc(512, GFP_KERNEL);
- port->bulk_in_size = 512;
-
- if (!port->bulk_out_buffer || !port->bulk_in_buffer) {
- kfree(port->bulk_out_buffer);
- kfree(port->bulk_in_buffer);
- kfree(buf);
- return -ENOMEM;
- }
-
- usb_fill_bulk_urb(port->write_urb, port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- port->bulk_out_buffer, 512,
- NULL, NULL);
-
-
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->bulk_in_buffer, 512,
- NULL, NULL);
-
priv->poll = 0;
/* initialize writebuf */
@@ -1277,6 +1249,8 @@ static struct usb_serial_driver iuu_device = {
},
.id_table = id_table,
.num_ports = 1,
+ .bulk_in_size = 512,
+ .bulk_out_size = 512,
.port_probe = iuu_create_sysfs_attrs,
.port_remove = iuu_remove_sysfs_attrs,
.open = iuu_open,
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 8eef91b..cdbe8bf 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -1,6 +1,7 @@
/*
* KLSI KL5KUSB105 chip RS232 converter driver
*
+ * Copyright (C) 2010 Johan Hovold <jhovold@gmail.com>
* Copyright (C) 2001 Utz-Uwe Haus <haus@uuhaus.de>
*
* This program is free software; you can redistribute it and/or modify
@@ -34,17 +35,6 @@
* implement handshaking or decide that we do not support it
*/
-/* History:
- * 0.3a - implemented pools of write URBs
- * 0.3 - alpha version for public testing
- * 0.2 - TIOCMGET works, so autopilot(1) can be used!
- * 0.1 - can be used to do pilot-xfer -p /dev/ttyUSB0 -l
- *
- * The driver skeleton is mainly based on mct_u232.c and various other
- * pieces of code shamelessly copied from the drivers/usb/serial/ directory.
- */
-
-
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -64,8 +54,8 @@ static int debug;
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.3a"
-#define DRIVER_AUTHOR "Utz-Uwe Haus <haus@uuhaus.de>"
+#define DRIVER_VERSION "v0.4"
+#define DRIVER_AUTHOR "Utz-Uwe Haus <haus@uuhaus.de>, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "KLSI KL5KUSB105 chipset USB->Serial Converter driver"
@@ -73,23 +63,17 @@ static int debug;
* Function prototypes
*/
static int klsi_105_startup(struct usb_serial *serial);
-static void klsi_105_disconnect(struct usb_serial *serial);
static void klsi_105_release(struct usb_serial *serial);
static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port);
static void klsi_105_close(struct usb_serial_port *port);
-static int klsi_105_write(struct tty_struct *tty,
- struct usb_serial_port *port, const unsigned char *buf, int count);
-static void klsi_105_write_bulk_callback(struct urb *urb);
-static int klsi_105_chars_in_buffer(struct tty_struct *tty);
-static int klsi_105_write_room(struct tty_struct *tty);
-static void klsi_105_read_bulk_callback(struct urb *urb);
static void klsi_105_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
-static void klsi_105_throttle(struct tty_struct *tty);
-static void klsi_105_unthrottle(struct tty_struct *tty);
static int klsi_105_tiocmget(struct tty_struct *tty, struct file *file);
static int klsi_105_tiocmset(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
+static void klsi_105_process_read_urb(struct urb *urb);
+static int klsi_105_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size);
/*
* All of the device info needed for the KLSI converters.
@@ -107,7 +91,7 @@ static struct usb_driver kl5kusb105d_driver = {
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
- .no_dynamic_id = 1,
+ .no_dynamic_id = 1,
};
static struct usb_serial_driver kl5kusb105d_device = {
@@ -115,26 +99,23 @@ static struct usb_serial_driver kl5kusb105d_device = {
.owner = THIS_MODULE,
.name = "kl5kusb105d",
},
- .description = "KL5KUSB105D / PalmConnect",
- .usb_driver = &kl5kusb105d_driver,
- .id_table = id_table,
- .num_ports = 1,
- .open = klsi_105_open,
- .close = klsi_105_close,
- .write = klsi_105_write,
- .write_bulk_callback = klsi_105_write_bulk_callback,
- .chars_in_buffer = klsi_105_chars_in_buffer,
- .write_room = klsi_105_write_room,
- .read_bulk_callback = klsi_105_read_bulk_callback,
- .set_termios = klsi_105_set_termios,
- /*.break_ctl = klsi_105_break_ctl,*/
- .tiocmget = klsi_105_tiocmget,
- .tiocmset = klsi_105_tiocmset,
- .attach = klsi_105_startup,
- .disconnect = klsi_105_disconnect,
- .release = klsi_105_release,
- .throttle = klsi_105_throttle,
- .unthrottle = klsi_105_unthrottle,
+ .description = "KL5KUSB105D / PalmConnect",
+ .usb_driver = &kl5kusb105d_driver,
+ .id_table = id_table,
+ .num_ports = 1,
+ .bulk_out_size = 64,
+ .open = klsi_105_open,
+ .close = klsi_105_close,
+ .set_termios = klsi_105_set_termios,
+ /*.break_ctl = klsi_105_break_ctl,*/
+ .tiocmget = klsi_105_tiocmget,
+ .tiocmset = klsi_105_tiocmset,
+ .attach = klsi_105_startup,
+ .release = klsi_105_release,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
+ .process_read_urb = klsi_105_process_read_urb,
+ .prepare_write_buffer = klsi_105_prepare_write_buffer,
};
struct klsi_105_port_settings {
@@ -145,18 +126,11 @@ struct klsi_105_port_settings {
__u8 unknown2;
} __attribute__ ((packed));
-/* we implement a pool of NUM_URBS urbs per usb_serial */
-#define NUM_URBS 1
-#define URB_TRANSFER_BUFFER_SIZE 64
struct klsi_105_private {
struct klsi_105_port_settings cfg;
struct ktermios termios;
unsigned long line_state; /* modem line settings */
- /* write pool */
- struct urb *write_urb_pool[NUM_URBS];
spinlock_t lock;
- unsigned long bytes_in;
- unsigned long bytes_out;
};
@@ -189,7 +163,7 @@ static int klsi_105_chg_port_settings(struct usb_serial_port *port,
settings->pktlen, settings->baudrate, settings->databits,
settings->unknown1, settings->unknown2);
return rc;
-} /* klsi_105_chg_port_settings */
+}
/* translate a 16-bit status value from the device to linux's TIO bits */
static unsigned long klsi_105_status2linestate(const __u16 status)
@@ -202,6 +176,7 @@ static unsigned long klsi_105_status2linestate(const __u16 status)
return res;
}
+
/*
* Read line control via vendor command and return result through
* *line_state_p
@@ -258,7 +233,7 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
static int klsi_105_startup(struct usb_serial *serial)
{
struct klsi_105_private *priv;
- int i, j;
+ int i;
/* check if we support the product id (see keyspan.c)
* FIXME
@@ -282,29 +257,9 @@ static int klsi_105_startup(struct usb_serial *serial)
priv->line_state = 0;
- priv->bytes_in = 0;
- priv->bytes_out = 0;
usb_set_serial_port_data(serial->port[i], priv);
spin_lock_init(&priv->lock);
- for (j = 0; j < NUM_URBS; j++) {
- struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
-
- priv->write_urb_pool[j] = urb;
- if (urb == NULL) {
- dev_err(&serial->dev->dev, "No more urbs???\n");
- goto err_cleanup;
- }
-
- urb->transfer_buffer =
- kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
- if (!urb->transfer_buffer) {
- dev_err(&serial->dev->dev,
- "%s - out of memory for urb buffers.\n",
- __func__);
- goto err_cleanup;
- }
- }
/* priv->termios is left uninitialized until port opening */
init_waitqueue_head(&serial->port[i]->write_wait);
@@ -315,44 +270,11 @@ static int klsi_105_startup(struct usb_serial *serial)
err_cleanup:
for (; i >= 0; i--) {
priv = usb_get_serial_port_data(serial->port[i]);
- for (j = 0; j < NUM_URBS; j++) {
- if (priv->write_urb_pool[j]) {
- kfree(priv->write_urb_pool[j]->transfer_buffer);
- usb_free_urb(priv->write_urb_pool[j]);
- }
- }
+ kfree(priv);
usb_set_serial_port_data(serial->port[i], NULL);
}
return -ENOMEM;
-} /* klsi_105_startup */
-
-
-static void klsi_105_disconnect(struct usb_serial *serial)
-{
- int i;
-
- dbg("%s", __func__);
-
- /* stop reads and writes on all ports */
- for (i = 0; i < serial->num_ports; ++i) {
- struct klsi_105_private *priv =
- usb_get_serial_port_data(serial->port[i]);
-
- if (priv) {
- /* kill our write urb pool */
- int j;
- struct urb **write_urbs = priv->write_urb_pool;
-
- for (j = 0; j < NUM_URBS; j++) {
- if (write_urbs[j]) {
- usb_kill_urb(write_urbs[j]);
- usb_free_urb(write_urbs[j]);
- }
- }
- }
- }
-} /* klsi_105_disconnect */
-
+}
static void klsi_105_release(struct usb_serial *serial)
{
@@ -360,13 +282,9 @@ static void klsi_105_release(struct usb_serial *serial)
dbg("%s", __func__);
- for (i = 0; i < serial->num_ports; ++i) {
- struct klsi_105_private *priv =
- usb_get_serial_port_data(serial->port[i]);
-
- kfree(priv);
- }
-} /* klsi_105_release */
+ for (i = 0; i < serial->num_ports; ++i)
+ kfree(usb_get_serial_port_data(serial->port[i]));
+}
static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
{
@@ -416,18 +334,8 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
spin_unlock_irqrestore(&priv->lock, flags);
/* READ_ON and urb submission */
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- klsi_105_read_bulk_callback,
- port);
-
- rc = usb_submit_urb(port->read_urb, GFP_KERNEL);
+ rc = usb_serial_generic_open(tty, port);
if (rc) {
- dev_err(&port->dev, "%s - failed submitting read urb, "
- "error %d\n", __func__, rc);
retval = rc;
goto exit;
}
@@ -460,12 +368,10 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
exit:
kfree(cfg);
return retval;
-} /* klsi_105_open */
-
+}
static void klsi_105_close(struct usb_serial_port *port)
{
- struct klsi_105_private *priv = usb_get_serial_port_data(port);
int rc;
dbg("%s port %d", __func__, port->number);
@@ -488,239 +394,62 @@ static void klsi_105_close(struct usb_serial_port *port)
mutex_unlock(&port->serial->disc_mutex);
/* shutdown our bulk reads and writes */
- usb_kill_urb(port->write_urb);
- usb_kill_urb(port->read_urb);
- /* unlink our write pool */
- /* FIXME */
+ usb_serial_generic_close(port);
+
/* wgg - do I need this? I think so. */
usb_kill_urb(port->interrupt_in_urb);
- dev_info(&port->serial->dev->dev,
- "port stats: %ld bytes in, %ld bytes out\n",
- priv->bytes_in, priv->bytes_out);
-} /* klsi_105_close */
-
+}
/* We need to write a complete 64-byte data block and encode the
* number actually sent in the first double-byte, LSB-order. That
* leaves at most 62 bytes of payload.
*/
-#define KLSI_105_DATA_OFFSET 2 /* in the bulk urb data block */
-
-
-static int klsi_105_write(struct tty_struct *tty,
- struct usb_serial_port *port, const unsigned char *buf, int count)
+#define KLSI_HDR_LEN 2
+static int klsi_105_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size)
{
- struct klsi_105_private *priv = usb_get_serial_port_data(port);
- int result, size;
- int bytes_sent = 0;
-
- dbg("%s - port %d", __func__, port->number);
-
- while (count > 0) {
- /* try to find a free urb (write 0 bytes if none) */
- struct urb *urb = NULL;
- unsigned long flags;
- int i;
- /* since the pool is per-port we might not need
- the spin lock !? */
- spin_lock_irqsave(&priv->lock, flags);
- for (i = 0; i < NUM_URBS; i++) {
- if (priv->write_urb_pool[i]->status != -EINPROGRESS) {
- urb = priv->write_urb_pool[i];
- dbg("%s - using pool URB %d", __func__, i);
- break;
- }
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (urb == NULL) {
- dbg("%s - no more free urbs", __func__);
- goto exit;
- }
-
- if (urb->transfer_buffer == NULL) {
- urb->transfer_buffer =
- kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_ATOMIC);
- if (urb->transfer_buffer == NULL) {
- dev_err(&port->dev,
- "%s - no more kernel memory...\n",
- __func__);
- goto exit;
- }
- }
-
- size = min(count, port->bulk_out_size - KLSI_105_DATA_OFFSET);
- size = min(size, URB_TRANSFER_BUFFER_SIZE -
- KLSI_105_DATA_OFFSET);
-
- memcpy(urb->transfer_buffer + KLSI_105_DATA_OFFSET, buf, size);
-
- /* write payload size into transfer buffer */
- ((__u8 *)urb->transfer_buffer)[0] = (__u8) (size & 0xFF);
- ((__u8 *)urb->transfer_buffer)[1] = (__u8) ((size & 0xFF00)>>8);
-
- /* set up our urb */
- usb_fill_bulk_urb(urb, port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- urb->transfer_buffer,
- URB_TRANSFER_BUFFER_SIZE,
- klsi_105_write_bulk_callback,
- port);
-
- /* send the data out the bulk port */
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result) {
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, result);
- goto exit;
- }
- buf += size;
- bytes_sent += size;
- count -= size;
- }
-exit:
- /* lockless, but it's for debug info only... */
- priv->bytes_out += bytes_sent;
-
- return bytes_sent; /* that's how much we wrote */
-} /* klsi_105_write */
-
-static void klsi_105_write_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - nonzero write bulk status received: %d", __func__,
- status);
- return;
- }
+ unsigned char *buf = dest;
+ int count;
- usb_serial_port_softint(port);
-} /* klsi_105_write_bulk_completion_callback */
+ count = kfifo_out_locked(&port->write_fifo, buf + KLSI_HDR_LEN, size,
+ &port->lock);
+ put_unaligned_le16(count, buf);
-
-/* return number of characters currently in the writing process */
-static int klsi_105_chars_in_buffer(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- int chars = 0;
- int i;
- unsigned long flags;
- struct klsi_105_private *priv = usb_get_serial_port_data(port);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for (i = 0; i < NUM_URBS; ++i) {
- if (priv->write_urb_pool[i]->status == -EINPROGRESS)
- chars += URB_TRANSFER_BUFFER_SIZE;
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- dbg("%s - returns %d", __func__, chars);
- return chars;
+ return count + KLSI_HDR_LEN;
}
-static int klsi_105_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- unsigned long flags;
- int i;
- int room = 0;
- struct klsi_105_private *priv = usb_get_serial_port_data(port);
-
- spin_lock_irqsave(&priv->lock, flags);
- for (i = 0; i < NUM_URBS; ++i) {
- if (priv->write_urb_pool[i]->status != -EINPROGRESS)
- room += URB_TRANSFER_BUFFER_SIZE;
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- dbg("%s - returns %d", __func__, room);
- return room;
-}
-
-
-
-static void klsi_105_read_bulk_callback(struct urb *urb)
+/* The data received is preceded by a length double-byte in LSB-first order.
+ */
+static void klsi_105_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- struct klsi_105_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
- int rc;
- int status = urb->status;
+ struct tty_struct *tty;
+ unsigned len;
- dbg("%s - port %d", __func__, port->number);
+ /* empty urbs seem to happen, we ignore them */
+ if (!urb->actual_length)
+ return;
- /* The urb might have been killed. */
- if (status) {
- dbg("%s - nonzero read bulk status received: %d", __func__,
- status);
+ if (urb->actual_length <= KLSI_HDR_LEN) {
+ dbg("%s - malformed packet", __func__);
return;
}
- /* The data received is again preceded by a length double-byte in LSB-
- * first order (see klsi_105_write() )
- */
- if (urb->actual_length == 0) {
- /* empty urbs seem to happen, we ignore them */
- /* dbg("%s - emtpy URB", __func__); */
- ;
- } else if (urb->actual_length <= 2) {
- dbg("%s - size %d URB not understood", __func__,
- urb->actual_length);
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
- } else {
- int bytes_sent = ((__u8 *) data)[0] +
- ((unsigned int) ((__u8 *) data)[1] << 8);
- tty = tty_port_tty_get(&port->port);
- /* we should immediately resubmit the URB, before attempting
- * to pass the data on to the tty layer. But that needs locking
- * against re-entry an then mixed-up data because of
- * intermixed tty_flip_buffer_push()s
- * FIXME
- */
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
-
- if (bytes_sent + 2 > urb->actual_length) {
- dbg("%s - trying to read more data than available"
- " (%d vs. %d)", __func__,
- bytes_sent+2, urb->actual_length);
- /* cap at implied limit */
- bytes_sent = urb->actual_length - 2;
- }
-
- tty_insert_flip_string(tty, data + 2, bytes_sent);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
- /* again lockless, but debug info only */
- priv->bytes_in += bytes_sent;
+ len = get_unaligned_le16(data);
+ if (len > urb->actual_length - KLSI_HDR_LEN) {
+ dbg("%s - packet length mismatch", __func__);
+ len = urb->actual_length - KLSI_HDR_LEN;
}
- /* Continue trying to always read */
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- klsi_105_read_bulk_callback,
- port);
- rc = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- if (rc)
- dev_err(&port->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, rc);
-} /* klsi_105_read_bulk_callback */
+ tty_insert_flip_string(tty, data + KLSI_HDR_LEN, len);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+}
static void klsi_105_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
@@ -887,8 +616,7 @@ static void klsi_105_set_termios(struct tty_struct *tty,
klsi_105_chg_port_settings(port, cfg);
err:
kfree(cfg);
-} /* klsi_105_set_termios */
-
+}
#if 0
static void mct_u232_break_ctl(struct tty_struct *tty, int break_state)
@@ -906,7 +634,7 @@ static void mct_u232_break_ctl(struct tty_struct *tty, int break_state)
lcr |= MCT_U232_SET_BREAK;
mct_u232_set_line_ctrl(serial, lcr);
-} /* mct_u232_break_ctl */
+}
#endif
static int klsi_105_tiocmget(struct tty_struct *tty, struct file *file)
@@ -962,29 +690,6 @@ static int klsi_105_tiocmset(struct tty_struct *tty, struct file *file,
return retval;
}
-static void klsi_105_throttle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- dbg("%s - port %d", __func__, port->number);
- usb_kill_urb(port->read_urb);
-}
-
-static void klsi_105_unthrottle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- int result;
-
- dbg("%s - port %d", __func__, port->number);
-
- port->read_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result)
- dev_err(&port->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, result);
-}
-
-
static int __init klsi_105_init(void)
{
@@ -1005,7 +710,6 @@ failed_usb_serial_register:
return retval;
}
-
static void __exit klsi_105_exit(void)
{
usb_deregister(&kl5kusb105d_driver);
@@ -1023,5 +727,3 @@ MODULE_LICENSE("GPL");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "enable extensive debugging messages");
-
-/* vim: set sts=8 ts=8 sw=8: */
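The KLSI framing handled above, with its two-byte little-endian length header, can be illustrated outside the kernel. A stand-alone sketch of packing and unpacking one block (plain C, not driver code; KLSI_HDR_LEN is assumed to be 2 as in the patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t klsi_pack(uint8_t *block, const uint8_t *data, size_t len)
{
	block[0] = len & 0xff;		/* payload length, LSB first */
	block[1] = (len >> 8) & 0xff;
	memcpy(block + 2, data, len);
	return len + 2;
}

static size_t klsi_unpack(const uint8_t *block, size_t block_len,
			  const uint8_t **payload)
{
	size_t len;

	if (block_len <= 2)
		return 0;		/* malformed, as in the driver */
	len = block[0] | (block[1] << 8);
	if (len > block_len - 2)	/* cap at what is actually there */
		len = block_len - 2;
	*payload = block + 2;
	return len;
}

int main(void)
{
	uint8_t block[64];
	const uint8_t *payload;
	size_t n = klsi_pack(block, (const uint8_t *)"hello", 5);

	printf("packed %zu bytes, got %zu payload bytes back\n",
	       n, klsi_unpack(block, n, &payload));
	return 0;
}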
diff --git a/drivers/usb/serial/kl5kusb105.h b/drivers/usb/serial/kl5kusb105.h
index 1231d9e..22a90ba 100644
--- a/drivers/usb/serial/kl5kusb105.h
+++ b/drivers/usb/serial/kl5kusb105.h
@@ -17,16 +17,16 @@
/* baud rates */
enum {
- kl5kusb105a_sio_b115200 = 0,
- kl5kusb105a_sio_b57600 = 1,
- kl5kusb105a_sio_b38400 = 2,
- kl5kusb105a_sio_b19200 = 4,
- kl5kusb105a_sio_b14400 = 5,
- kl5kusb105a_sio_b9600 = 6,
- kl5kusb105a_sio_b4800 = 8, /* unchecked */
- kl5kusb105a_sio_b2400 = 9, /* unchecked */
- kl5kusb105a_sio_b1200 = 0xa, /* unchecked */
- kl5kusb105a_sio_b600 = 0xb /* unchecked */
+ kl5kusb105a_sio_b115200 = 0,
+ kl5kusb105a_sio_b57600 = 1,
+ kl5kusb105a_sio_b38400 = 2,
+ kl5kusb105a_sio_b19200 = 4,
+ kl5kusb105a_sio_b14400 = 5,
+ kl5kusb105a_sio_b9600 = 6,
+ kl5kusb105a_sio_b4800 = 8, /* unchecked */
+ kl5kusb105a_sio_b2400 = 9, /* unchecked */
+ kl5kusb105a_sio_b1200 = 0xa, /* unchecked */
+ kl5kusb105a_sio_b600 = 0xb /* unchecked */
};
/* data bits */
@@ -53,17 +53,16 @@ enum {
#define KL5KUSB105A_CTS ((1<<5) | (1<<4))
#define KL5KUSB105A_WANTS_TO_SEND 0x30
-//#define KL5KUSB105A_DTR /* Data Terminal Ready */
-//#define KL5KUSB105A_CTS /* Clear To Send */
-//#define KL5KUSB105A_CD /* Carrier Detect */
-//#define KL5KUSB105A_DSR /* Data Set Ready */
-//#define KL5KUSB105A_RxD /* Receive pin */
-
-//#define KL5KUSB105A_LE
-//#define KL5KUSB105A_RTS
-//#define KL5KUSB105A_ST
-//#define KL5KUSB105A_SR
-//#define KL5KUSB105A_RI /* Ring Indicator */
-
-/* vim: set ts=8 sts=8: */
-
+#if 0
+#define KL5KUSB105A_DTR /* Data Terminal Ready */
+#define KL5KUSB105A_CTS /* Clear To Send */
+#define KL5KUSB105A_CD /* Carrier Detect */
+#define KL5KUSB105A_DSR /* Data Set Ready */
+#define KL5KUSB105A_RxD /* Receive pin */
+
+#define KL5KUSB105A_LE
+#define KL5KUSB105A_RTS
+#define KL5KUSB105A_ST
+#define KL5KUSB105A_SR
+#define KL5KUSB105A_RI /* Ring Indicator */
+#endif
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index c113a2a..bd5bd85 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -345,7 +345,8 @@ static void kobil_close(struct usb_serial_port *port)
/* FIXME: Add rts/dtr methods */
if (port->write_urb) {
- usb_kill_urb(port->write_urb);
+ usb_poison_urb(port->write_urb);
+ kfree(port->write_urb->transfer_buffer);
usb_free_urb(port->write_urb);
port->write_urb = NULL;
}
diff --git a/drivers/usb/serial/kobil_sct.h b/drivers/usb/serial/kobil_sct.h
index a51fbb5..be207f7 100644
--- a/drivers/usb/serial/kobil_sct.h
+++ b/drivers/usb/serial/kobil_sct.h
@@ -23,38 +23,55 @@
#define SUSBCR_SSL_SETDTR 0x0004
#define SUSBCR_SSL_CLRDTR 0x0010
-#define SUSBCR_SSL_PURGE_TXABORT 0x0100 // Kill the pending/current writes to the comm port.
-#define SUSBCR_SSL_PURGE_RXABORT 0x0200 // Kill the pending/current reads to the comm port.
-#define SUSBCR_SSL_PURGE_TXCLEAR 0x0400 // Kill the transmit queue if there.
-#define SUSBCR_SSL_PURGE_RXCLEAR 0x0800 // Kill the typeahead buffer if there.
+/* Kill the pending/current writes to the comm port. */
+#define SUSBCR_SSL_PURGE_TXABORT 0x0100
+/* Kill the pending/current reads to the comm port. */
+#define SUSBCR_SSL_PURGE_RXABORT 0x0200
+/* Kill the transmit queue if there. */
+#define SUSBCR_SSL_PURGE_TXCLEAR 0x0400
+/* Kill the typeahead buffer if there. */
+#define SUSBCR_SSL_PURGE_RXCLEAR 0x0800
#define SUSBCRequest_GetStatusLineState 4
-#define SUSBCR_GSL_RXCHAR 0x0001 // Any Character received
-#define SUSBCR_GSL_TXEMPTY 0x0004 // Transmitt Queue Empty
-#define SUSBCR_GSL_CTS 0x0008 // CTS changed state
-#define SUSBCR_GSL_DSR 0x0010 // DSR changed state
-#define SUSBCR_GSL_RLSD 0x0020 // RLSD changed state
-#define SUSBCR_GSL_BREAK 0x0040 // BREAK received
-#define SUSBCR_GSL_ERR 0x0080 // Line status error occurred
-#define SUSBCR_GSL_RING 0x0100 // Ring signal detected
+/* Any Character received */
+#define SUSBCR_GSL_RXCHAR 0x0001
+/* Transmitt Queue Empty */
+#define SUSBCR_GSL_TXEMPTY 0x0004
+/* CTS changed state */
+#define SUSBCR_GSL_CTS 0x0008
+/* DSR changed state */
+#define SUSBCR_GSL_DSR 0x0010
+/* RLSD changed state */
+#define SUSBCR_GSL_RLSD 0x0020
+/* BREAK received */
+#define SUSBCR_GSL_BREAK 0x0040
+/* Line status error occurred */
+#define SUSBCR_GSL_ERR 0x0080
+/* Ring signal detected */
+#define SUSBCR_GSL_RING 0x0100
#define SUSBCRequest_Misc 8
-#define SUSBCR_MSC_ResetReader 0x0001 // use a predefined reset sequence
-#define SUSBCR_MSC_ResetAllQueues 0x0002 // use a predefined sequence to reset the internal queues
+/* use a predefined reset sequence */
+#define SUSBCR_MSC_ResetReader 0x0001
+/* use a predefined sequence to reset the internal queues */
+#define SUSBCR_MSC_ResetAllQueues 0x0002
#define SUSBCRequest_GetMisc 0x10
-#define SUSBCR_MSC_GetFWVersion 0x0001 /* get the firmware version from device,
- coded like this 0xHHLLBBPP
- with HH = Firmware Version High Byte
- LL = Firmware Version Low Byte
- BB = Build Number
- PP = Further Attributes
- */
-
-#define SUSBCR_MSC_GetHWVersion 0x0002 /* get the hardware version from device
- coded like this 0xHHLLPPRR
- with HH = Software Version High Byte
- LL = Software Version Low Byte
- PP = Further Attributes
- RR = Reserved for the hardware ID
- */
+
+/*
+ * get the firmware version from device, coded like this 0xHHLLBBPP with
+ * HH = Firmware Version High Byte
+ * LL = Firmware Version Low Byte
+ * BB = Build Number
+ * PP = Further Attributes
+ */
+#define SUSBCR_MSC_GetFWVersion 0x0001
+
+/*
+ * get the hardware version from device coded like this 0xHHLLPPRR with
+ * HH = Software Version High Byte
+ * LL = Software Version Low Byte
+ * PP = Further Attributes
+ * RR = Reserved for the hardware ID
+ */
+#define SUSBCR_MSC_GetHWVersion 0x0002
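+
The reworked comment gives the 0xHHLLBBPP layout of the firmware-version word. A stand-alone example of decoding it (the sample value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fw = 0x01020304;	/* hypothetical device reply */

	printf("firmware %u.%u, build %u, attributes 0x%02x\n",
	       (unsigned)((fw >> 24) & 0xff),	/* HH: version high byte */
	       (unsigned)((fw >> 16) & 0xff),	/* LL: version low byte  */
	       (unsigned)((fw >> 8) & 0xff),	/* BB: build number      */
	       (unsigned)(fw & 0xff));		/* PP: further attributes */
	return 0;
}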
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 2849f8c..7aa01b9 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -549,12 +549,9 @@ static void mct_u232_close(struct usb_serial_port *port)
{
dbg("%s port %d", __func__, port->number);
- if (port->serial->dev) {
- /* shutdown our urbs */
- usb_kill_urb(port->write_urb);
- usb_kill_urb(port->read_urb);
+ usb_serial_generic_close(port);
+ if (port->serial->dev)
usb_kill_urb(port->interrupt_in_urb);
- }
} /* mct_u232_close */
diff --git a/drivers/usb/serial/mct_u232.h b/drivers/usb/serial/mct_u232.h
index 7417d5c..3a3f5e6 100644
--- a/drivers/usb/serial/mct_u232.h
+++ b/drivers/usb/serial/mct_u232.h
@@ -42,36 +42,44 @@
#define MCT_U232_SET_REQUEST_TYPE 0x40
#define MCT_U232_GET_REQUEST_TYPE 0xc0
-#define MCT_U232_GET_MODEM_STAT_REQUEST 2 /* Get Modem Status Register (MSR) */
-#define MCT_U232_GET_MODEM_STAT_SIZE 1
+/* Get Modem Status Register (MSR) */
+#define MCT_U232_GET_MODEM_STAT_REQUEST 2
+#define MCT_U232_GET_MODEM_STAT_SIZE 1
-#define MCT_U232_GET_LINE_CTRL_REQUEST 6 /* Get Line Control Register (LCR) */
-#define MCT_U232_GET_LINE_CTRL_SIZE 1 /* ... not used by this driver */
+/* Get Line Control Register (LCR) */
+/* ... not used by this driver */
+#define MCT_U232_GET_LINE_CTRL_REQUEST 6
+#define MCT_U232_GET_LINE_CTRL_SIZE 1
-#define MCT_U232_SET_BAUD_RATE_REQUEST 5 /* Set Baud Rate Divisor */
-#define MCT_U232_SET_BAUD_RATE_SIZE 4
+/* Set Baud Rate Divisor */
+#define MCT_U232_SET_BAUD_RATE_REQUEST 5
+#define MCT_U232_SET_BAUD_RATE_SIZE 4
-#define MCT_U232_SET_LINE_CTRL_REQUEST 7 /* Set Line Control Register (LCR) */
-#define MCT_U232_SET_LINE_CTRL_SIZE 1
+/* Set Line Control Register (LCR) */
+#define MCT_U232_SET_LINE_CTRL_REQUEST 7
+#define MCT_U232_SET_LINE_CTRL_SIZE 1
-#define MCT_U232_SET_MODEM_CTRL_REQUEST 10 /* Set Modem Control Register (MCR) */
-#define MCT_U232_SET_MODEM_CTRL_SIZE 1
+/* Set Modem Control Register (MCR) */
+#define MCT_U232_SET_MODEM_CTRL_REQUEST 10
+#define MCT_U232_SET_MODEM_CTRL_SIZE 1
-/* This USB device request code is not well understood. It is transmitted by
- the MCT-supplied Windows driver whenever the baud rate changes.
-*/
-#define MCT_U232_SET_UNKNOWN1_REQUEST 11 /* Unknown functionality */
-#define MCT_U232_SET_UNKNOWN1_SIZE 1
+/*
+ * This USB device request code is not well understood. It is transmitted by
+ * the MCT-supplied Windows driver whenever the baud rate changes.
+ */
+#define MCT_U232_SET_UNKNOWN1_REQUEST 11 /* Unknown functionality */
+#define MCT_U232_SET_UNKNOWN1_SIZE 1
-/* This USB device request code appears to control whether CTS is required
- during transmission.
-
- Sending a zero byte allows data transmission to a device which is not
- asserting CTS. Sending a '1' byte will cause transmission to be deferred
- until the device asserts CTS.
-*/
-#define MCT_U232_SET_CTS_REQUEST 12
-#define MCT_U232_SET_CTS_SIZE 1
+/*
+ * This USB device request code appears to control whether CTS is required
+ * during transmission.
+ *
+ * Sending a zero byte allows data transmission to a device which is not
+ * asserting CTS. Sending a '1' byte will cause transmission to be deferred
+ * until the device asserts CTS.
+ */
+#define MCT_U232_SET_CTS_REQUEST 12
+#define MCT_U232_SET_CTS_SIZE 1
#define MCT_U232_MAX_SIZE 4 /* of MCT_XXX_SIZE */
@@ -81,7 +89,8 @@
* and "Intel solution". They are the regular MCT and "Sitecom" for us.
* This is pointless to document in the header, see the code for the bits.
*/
-static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value, speed_t *result);
+static int mct_u232_calculate_baud_rate(struct usb_serial *serial,
+ speed_t value, speed_t *result);
/*
* Line Control Register (LCR)
@@ -125,16 +134,16 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
/*
* Line Status Register (LSR)
*/
-#define MCT_U232_LSR_INDEX 1 /* data[index] */
-#define MCT_U232_LSR_ERR 0x80 /* OE | PE | FE | BI */
-#define MCT_U232_LSR_TEMT 0x40 /* transmit register empty */
-#define MCT_U232_LSR_THRE 0x20 /* transmit holding register empty */
-#define MCT_U232_LSR_BI 0x10 /* break indicator */
-#define MCT_U232_LSR_FE 0x08 /* framing error */
-#define MCT_U232_LSR_OE 0x02 /* overrun error */
-#define MCT_U232_LSR_PE 0x04 /* parity error */
-#define MCT_U232_LSR_OE 0x02 /* overrun error */
-#define MCT_U232_LSR_DR 0x01 /* receive data ready */
+#define MCT_U232_LSR_INDEX 1 /* data[index] */
+#define MCT_U232_LSR_ERR 0x80 /* OE | PE | FE | BI */
+#define MCT_U232_LSR_TEMT 0x40 /* transmit register empty */
+#define MCT_U232_LSR_THRE 0x20 /* transmit holding register empty */
+#define MCT_U232_LSR_BI 0x10 /* break indicator */
+#define MCT_U232_LSR_FE 0x08 /* framing error */
+#define MCT_U232_LSR_OE 0x02 /* overrun error */
+#define MCT_U232_LSR_PE 0x04 /* parity error */
+#define MCT_U232_LSR_OE 0x02 /* overrun error */
+#define MCT_U232_LSR_DR 0x01 /* receive data ready */
/* -----------------------------------------------------------------------------
@@ -143,10 +152,10 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
*
* The technical details of the device have been acquired be using "SniffUSB"
* and the vendor-supplied device driver (version 2.3A) under Windows98. To
- * identify the USB vendor-specific requests and to assign them to terminal
+ * identify the USB vendor-specific requests and to assign them to terminal
* settings (flow control, baud rate, etc.) the program "SerialSettings" from
* William G. Greathouse has been proven to be very useful. I also used the
- * Win98 "HyperTerminal" and "usb-robot" on Linux for testing. The results and
+ * Win98 "HyperTerminal" and "usb-robot" on Linux for testing. The results and
* observations are summarized below:
*
* The USB requests seem to be directly mapped to the registers of a 8250,
@@ -186,33 +195,33 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
* Data: LCR (see below)
*
* Bit 7: Divisor Latch Access Bit (DLAB). When set, access to the data
- * transmit/receive register (THR/RBR) and the Interrupt Enable Register
- * (IER) is disabled. Any access to these ports is now redirected to the
- * Divisor Latch Registers. Setting this bit, loading the Divisor
- * Registers, and clearing DLAB should be done with interrupts disabled.
+ * transmit/receive register (THR/RBR) and the Interrupt Enable Register
+ * (IER) is disabled. Any access to these ports is now redirected to the
+ * Divisor Latch Registers. Setting this bit, loading the Divisor
+ * Registers, and clearing DLAB should be done with interrupts disabled.
* Bit 6: Set Break. When set to "1", the transmitter begins to transmit
- * continuous Spacing until this bit is set to "0". This overrides any
- * bits of characters that are being transmitted.
+ * continuous Spacing until this bit is set to "0". This overrides any
+ * bits of characters that are being transmitted.
* Bit 5: Stick Parity. When parity is enabled, setting this bit causes parity
- * to always be "1" or "0", based on the value of Bit 4.
+ * to always be "1" or "0", based on the value of Bit 4.
* Bit 4: Even Parity Select (EPS). When parity is enabled and Bit 5 is "0",
- * setting this bit causes even parity to be transmitted and expected.
- * Otherwise, odd parity is used.
+ * setting this bit causes even parity to be transmitted and expected.
+ * Otherwise, odd parity is used.
* Bit 3: Parity Enable (PEN). When set to "1", a parity bit is inserted
- * between the last bit of the data and the Stop Bit. The UART will also
- * expect parity to be present in the received data.
+ * between the last bit of the data and the Stop Bit. The UART will also
+ * expect parity to be present in the received data.
* Bit 2: Number of Stop Bits (STB). If set to "1" and using 5-bit data words,
- * 1.5 Stop Bits are transmitted and expected in each data word. For
- * 6, 7 and 8-bit data words, 2 Stop Bits are transmitted and expected.
- * When this bit is set to "0", one Stop Bit is used on each data word.
+ * 1.5 Stop Bits are transmitted and expected in each data word. For
+ * 6, 7 and 8-bit data words, 2 Stop Bits are transmitted and expected.
+ * When this bit is set to "0", one Stop Bit is used on each data word.
* Bit 1: Word Length Select Bit #1 (WLSB1)
* Bit 0: Word Length Select Bit #0 (WLSB0)
- * Together these bits specify the number of bits in each data word.
- * 1 0 Word Length
- * 0 0 5 Data Bits
- * 0 1 6 Data Bits
- * 1 0 7 Data Bits
- * 1 1 8 Data Bits
+ * Together these bits specify the number of bits in each data word.
+ * 1 0 Word Length
+ * 0 0 5 Data Bits
+ * 0 1 6 Data Bits
+ * 1 0 7 Data Bits
+ * 1 1 8 Data Bits
*
* SniffUSB observations: Bit 7 seems not to be used. There seem to be two bugs
* in the Win98 driver: the break does not work (bit 6 is not asserted) and the
@@ -234,20 +243,20 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
* Bit 6: Reserved, always 0.
* Bit 5: Reserved, always 0.
* Bit 4: Loop-Back Enable. When set to "1", the UART transmitter and receiver
- * are internally connected together to allow diagnostic operations. In
- * addition, the UART modem control outputs are connected to the UART
- * modem control inputs. CTS is connected to RTS, DTR is connected to
- * DSR, OUT1 is connected to RI, and OUT 2 is connected to DCD.
+ * are internally connected together to allow diagnostic operations. In
+ * addition, the UART modem control outputs are connected to the UART
+ * modem control inputs. CTS is connected to RTS, DTR is connected to
+ * DSR, OUT1 is connected to RI, and OUT 2 is connected to DCD.
* Bit 3: OUT 2. An auxiliary output that the host processor may set high or
- * low. In the IBM PC serial adapter (and most clones), OUT 2 is used
- * to tri-state (disable) the interrupt signal from the
- * 8250/16450/16550 UART.
+ * low. In the IBM PC serial adapter (and most clones), OUT 2 is used
+ * to tri-state (disable) the interrupt signal from the
+ * 8250/16450/16550 UART.
* Bit 2: OUT 1. An auxiliary output that the host processor may set high or
- * low. This output is not used on the IBM PC serial adapter.
+ * low. This output is not used on the IBM PC serial adapter.
* Bit 1: Request to Send (RTS). When set to "1", the output of the UART -RTS
- * line is Low (Active).
+ * line is Low (Active).
* Bit 0: Data Terminal Ready (DTR). When set to "1", the output of the UART
- * -DTR line is Low (Active).
+ * -DTR line is Low (Active).
*
* SniffUSB observations: Bit 2 and 4 seem not to be used but bit 3 has been
* seen _always_ set.
@@ -264,22 +273,22 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
* Data: MSR (see below)
*
* Bit 7: Data Carrier Detect (CD). Reflects the state of the DCD line on the
- * UART.
+ * UART.
* Bit 6: Ring Indicator (RI). Reflects the state of the RI line on the UART.
* Bit 5: Data Set Ready (DSR). Reflects the state of the DSR line on the UART.
* Bit 4: Clear To Send (CTS). Reflects the state of the CTS line on the UART.
* Bit 3: Delta Data Carrier Detect (DDCD). Set to "1" if the -DCD line has
- * changed state one more more times since the last time the MSR was
- * read by the host.
+ * changed state one or more times since the last time the MSR was
+ * read by the host.
* Bit 2: Trailing Edge Ring Indicator (TERI). Set to "1" if the -RI line has
- * had a low to high transition since the last time the MSR was read by
- * the host.
+ * had a low to high transition since the last time the MSR was read by
+ * the host.
* Bit 1: Delta Data Set Ready (DDSR). Set to "1" if the -DSR line has changed
- * state one more more times since the last time the MSR was read by the
- * host.
+ * state one or more times since the last time the MSR was read by the
+ * host.
* Bit 0: Delta Clear To Send (DCTS). Set to "1" if the -CTS line has changed
- * state one more times since the last time the MSR was read by the
- * host.
+ * state one or more times since the last time the MSR was read by the
+ * host.
*
* SniffUSB observations: the MSR is also returned as first byte on the
* interrupt-in endpoint 0x83 to signal changes of modem status lines. The USB
@@ -290,31 +299,34 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
* --------------------------
*
* Bit 7 Error in Receiver FIFO. On the 8250/16450 UART, this bit is zero.
- * This bit is set to "1" when any of the bytes in the FIFO have one or
- * more of the following error conditions: PE, FE, or BI.
+ * This bit is set to "1" when any of the bytes in the FIFO have one
+ * or more of the following error conditions: PE, FE, or BI.
* Bit 6 Transmitter Empty (TEMT). When set to "1", there are no words
- * remaining in the transmit FIFO or the transmit shift register. The
- * transmitter is completely idle.
- * Bit 5 Transmitter Holding Register Empty (THRE). When set to "1", the FIFO
- * (or holding register) now has room for at least one additional word
- * to transmit. The transmitter may still be transmitting when this bit
- * is set to "1".
+ * remaining in the transmit FIFO or the transmit shift register. The
+ * transmitter is completely idle.
+ * Bit 5 Transmitter Holding Register Empty (THRE). When set to "1", the
+ * FIFO (or holding register) now has room for at least one additional
+ * word to transmit. The transmitter may still be transmitting when
+ * this bit is set to "1".
* Bit 4 Break Interrupt (BI). The receiver has detected a Break signal.
- * Bit 3 Framing Error (FE). A Start Bit was detected but the Stop Bit did not
- * appear at the expected time. The received word is probably garbled.
- * Bit 2 Parity Error (PE). The parity bit was incorrect for the word received.
- * Bit 1 Overrun Error (OE). A new word was received and there was no room in
- * the receive buffer. The newly-arrived word in the shift register is
- * discarded. On 8250/16450 UARTs, the word in the holding register is
- * discarded and the newly- arrived word is put in the holding register.
+ * Bit 3 Framing Error (FE). A Start Bit was detected but the Stop Bit did
+ * not appear at the expected time. The received word is probably
+ * garbled.
+ * Bit 2 Parity Error (PE). The parity bit was incorrect for the word
+ * received.
+ * Bit 1 Overrun Error (OE). A new word was received and there was no room
+ * in the receive buffer. The newly-arrived word in the shift register
+ * is discarded. On 8250/16450 UARTs, the word in the holding register
+ * is discarded and the newly- arrived word is put in the holding
+ * register.
* Bit 0 Data Ready (DR). One or more words are in the receive FIFO that the
- * host may read. A word must be completely received and moved from the
- * shift register into the FIFO (or holding register for 8250/16450
- * designs) before this bit is set.
+ * host may read. A word must be completely received and moved from
+ * the shift register into the FIFO (or holding register for
+ * 8250/16450 designs) before this bit is set.
*
- * SniffUSB observations: the LSR is returned as second byte on the interrupt-in
- * endpoint 0x83 to signal error conditions. Such errors have been seen with
- * minicom/zmodem transfers (CRC errors).
+ * SniffUSB observations: the LSR is returned as second byte on the
+ * interrupt-in endpoint 0x83 to signal error conditions. Such errors have
+ * been seen with minicom/zmodem transfers (CRC errors).
*
*
* Unknown #1
@@ -364,16 +376,16 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
* --------------
*
* SniffUSB observations: the bulk-out endpoint 0x1 and interrupt-in endpoint
- * 0x81 is used to transmit and receive characters. The second interrupt-in
- * endpoint 0x83 signals exceptional conditions like modem line changes and
+ * 0x81 are used to transmit and receive characters. The second interrupt-in
+ * endpoint 0x83 signals exceptional conditions like modem line changes and
* errors. The first byte returned is the MSR and the second byte the LSR.
*
*
* Other observations
* ------------------
*
- * Queued bulk transfers like used in visor.c did not work.
- *
+ * Queued bulk transfers like used in visor.c did not work.
+ *
*
* Properties of the USB device used (as found in /var/log/messages)
* -----------------------------------------------------------------
@@ -411,26 +423,26 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
* bInterface Class:SubClass:Protocol = 00:00:00
* iInterface = 00
* Endpoint:
- * bLength = 7
- * bDescriptorType = 05
- * bEndpointAddress = 81 (in)
- * bmAttributes = 03 (Interrupt)
- * wMaxPacketSize = 0040
- * bInterval = 02
+ * bLength = 7
+ * bDescriptorType = 05
+ * bEndpointAddress = 81 (in)
+ * bmAttributes = 03 (Interrupt)
+ * wMaxPacketSize = 0040
+ * bInterval = 02
* Endpoint:
- * bLength = 7
- * bDescriptorType = 05
- * bEndpointAddress = 01 (out)
- * bmAttributes = 02 (Bulk)
- * wMaxPacketSize = 0040
- * bInterval = 00
+ * bLength = 7
+ * bDescriptorType = 05
+ * bEndpointAddress = 01 (out)
+ * bmAttributes = 02 (Bulk)
+ * wMaxPacketSize = 0040
+ * bInterval = 00
* Endpoint:
- * bLength = 7
- * bDescriptorType = 05
- * bEndpointAddress = 83 (in)
- * bmAttributes = 03 (Interrupt)
- * wMaxPacketSize = 0002
- * bInterval = 02
+ * bLength = 7
+ * bDescriptorType = 05
+ * bEndpointAddress = 83 (in)
+ * bmAttributes = 03 (Interrupt)
+ * wMaxPacketSize = 0002
+ * bInterval = 02
*
*
* Hardware details (added by Martin Hamilton, 2001/12/06)
@@ -440,7 +452,7 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
* adaptor, which turns out to simply be a re-badged U232-P9. We
* know this because there is a sticky label on the circuit board
* which says "U232-P9" ;-)
- *
+ *
* The circuit board inside the adaptor contains a Philips PDIUSBD12
* USB endpoint chip and a Philips P87C52UBAA microcontroller with
* embedded UART. Exhaustive documentation for these is available at:
@@ -449,7 +461,7 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
* http://www.semiconductors.philips.com/pip/pdiusbd12
*
* Thanks to Julian Highfield for the pointer to the Philips database.
- *
+ *
*/
#endif /* __LINUX_USB_SERIAL_MCT_U232_H */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 0d47f2c..30922a7 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -34,21 +34,18 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
-
+#include <linux/parport.h>
/*
* Version Information
*/
-#define DRIVER_VERSION "1.0.0.4F"
+#define DRIVER_VERSION "2.1"
#define DRIVER_AUTHOR "Aspire Communications pvt Ltd."
#define DRIVER_DESC "Moschip USB Serial Driver"
/* default urb timeout */
#define MOS_WDR_TIMEOUT (HZ * 5)
-#define MOS_PORT1 0x0200
-#define MOS_PORT2 0x0300
-#define MOS_VENREG 0x0000
#define MOS_MAX_PORT 0x02
#define MOS_WRITE 0x0E
#define MOS_READ 0x0D
@@ -63,7 +60,7 @@
#define NUM_URBS 16 /* URB Count */
#define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */
-/* This structure holds all of the local port information */
+/* This structure holds all of the local serial port information */
struct moschip_port {
__u8 shadowLCR; /* last LCR value received */
__u8 shadowMCR; /* last MCR value received */
@@ -74,11 +71,6 @@ struct moschip_port {
struct urb *write_urb_pool[NUM_URBS];
};
-/* This structure holds all of the individual serial device information */
-struct moschip_serial {
- int interrupt_started;
-};
-
static int debug;
static struct usb_serial_driver moschip7720_2port_driver;
@@ -94,6 +86,658 @@ static const struct usb_device_id moschip_port_id_table[] = {
};
MODULE_DEVICE_TABLE(usb, moschip_port_id_table);
+#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
+
+/* initial values for parport regs */
+#define DCR_INIT_VAL 0x0c /* SLCTIN, nINIT */
+#define ECR_INIT_VAL 0x00 /* SPP mode */
+
+struct urbtracker {
+ struct mos7715_parport *mos_parport;
+ struct list_head urblist_entry;
+ struct kref ref_count;
+ struct urb *urb;
+};
+
+enum mos7715_pp_modes {
+ SPP = 0<<5,
+ PS2 = 1<<5, /* moschip calls this 'NIBBLE' mode */
+ PPF = 2<<5, /* moschip calls this 'CB-FIFO' mode */
+};
+
+struct mos7715_parport {
+ struct parport *pp; /* back to containing struct */
+ struct kref ref_count; /* to instance of this struct */
+ struct list_head deferred_urbs; /* list deferred async urbs */
+ struct list_head active_urbs; /* list async urbs in flight */
+ spinlock_t listlock; /* protects list access */
+ bool msg_pending; /* usb sync call pending */
+ struct completion syncmsg_compl; /* usb sync call completed */
+ struct tasklet_struct urb_tasklet; /* for sending deferred urbs */
+ struct usb_serial *serial; /* back to containing struct */
+ __u8 shadowECR; /* parallel port regs... */
+ __u8 shadowDCR;
+ atomic_t shadowDSR; /* updated in int-in callback */
+};
+
+/* lock guards against dereferencing NULL ptr in parport ops callbacks */
+static DEFINE_SPINLOCK(release_lock);
+
+#endif /* CONFIG_USB_SERIAL_MOS7715_PARPORT */
+
+static const unsigned int dummy; /* for clarity in register access fns */
+
+enum mos_regs {
+ THR, /* serial port regs */
+ RHR,
+ IER,
+ FCR,
+ ISR,
+ LCR,
+ MCR,
+ LSR,
+ MSR,
+ SPR,
+ DLL,
+ DLM,
+ DPR, /* parallel port regs */
+ DSR,
+ DCR,
+ ECR,
+ SP1_REG, /* device control regs */
+ SP2_REG, /* serial port 2 (7720 only) */
+ PP_REG,
+ SP_CONTROL_REG,
+};
+
+/*
+ * Return the correct value for the wIndex field of the setup packet
+ * for a control endpoint message. See the 7715 datasheet.
+ */
+static inline __u16 get_reg_index(enum mos_regs reg)
+{
+ static const __u16 mos7715_index_lookup_table[] = {
+ 0x00, /* THR */
+ 0x00, /* RHR */
+ 0x01, /* IER */
+ 0x02, /* FCR */
+ 0x02, /* ISR */
+ 0x03, /* LCR */
+ 0x04, /* MCR */
+ 0x05, /* LSR */
+ 0x06, /* MSR */
+ 0x07, /* SPR */
+ 0x00, /* DLL */
+ 0x01, /* DLM */
+ 0x00, /* DPR */
+ 0x01, /* DSR */
+ 0x02, /* DCR */
+ 0x0a, /* ECR */
+ 0x01, /* SP1_REG */
+ 0x02, /* SP2_REG (7720 only) */
+ 0x04, /* PP_REG (7715 only) */
+ 0x08, /* SP_CONTROL_REG */
+ };
+ return mos7715_index_lookup_table[reg];
+}
+
+/*
+ * Return the correct value for the upper byte of the wValue field of
+ * the setup packet for a control endpoint message.
+ */
+static inline __u16 get_reg_value(enum mos_regs reg,
+ unsigned int serial_portnum)
+{
+ if (reg >= SP1_REG) /* control reg */
+ return 0x0000;
+
+ else if (reg >= DPR) /* parallel port reg (7715 only) */
+ return 0x0100;
+
+ else /* serial port reg */
+ return (serial_portnum + 2) << 8;
+}
+
+/*
+ * Write data byte to the specified device register. The data is embedded in
+ * the value field of the setup packet. serial_portnum is ignored for registers
+ * not specific to a particular serial port.
+ */
+static int write_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
+ enum mos_regs reg, __u8 data)
+{
+ struct usb_device *usbdev = serial->dev;
+ unsigned int pipe = usb_sndctrlpipe(usbdev, 0);
+ __u8 request = (__u8)0x0e;
+ __u8 requesttype = (__u8)0x40;
+ __u16 index = get_reg_index(reg);
+ __u16 value = get_reg_value(reg, serial_portnum) + data;
+ int status = usb_control_msg(usbdev, pipe, request, requesttype, value,
+ index, NULL, 0, MOS_WDR_TIMEOUT);
+ if (status < 0)
+ dev_err(&usbdev->dev,
+ "mos7720: usb_control_msg() failed: %d", status);
+ return status;
+}
+
+/*
+ * Read data byte from the specified device register. The data returned by
+ * the device arrives in the single-byte data stage of the control transfer.
+ * serial_portnum is ignored for registers that are not specific to a
+ * particular serial port.
+ */
+static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
+ enum mos_regs reg, __u8 *data)
+{
+ struct usb_device *usbdev = serial->dev;
+ unsigned int pipe = usb_rcvctrlpipe(usbdev, 0);
+ __u8 request = (__u8)0x0d;
+ __u8 requesttype = (__u8)0xc0;
+ __u16 index = get_reg_index(reg);
+ __u16 value = get_reg_value(reg, serial_portnum);
+ int status = usb_control_msg(usbdev, pipe, request, requesttype, value,
+ index, data, 1, MOS_WDR_TIMEOUT);
+ if (status < 0)
+ dev_err(&usbdev->dev,
+ "mos7720: usb_control_msg() failed: %d", status);
+ return status;
+}
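/*
 * Hypothetical usage sketch of the register accessors above: writing 0x03 to
 * the LCR of serial port 0 goes out as one vendor control transfer with
 * bRequest = 0x0e, wIndex = get_reg_index(LCR) = 0x03 and
 * wValue = get_reg_value(LCR, 0) + data = 0x0200 + 0x03 = 0x0203.
 */
static void __maybe_unused example_mos_reg_access(struct usb_serial *serial)
{
	__u8 lsr;

	write_mos_reg(serial, 0, LCR, 0x03);	/* 8 data bits, no parity, 1 stop */
	read_mos_reg(serial, 0, LSR, &lsr);	/* read back line status of port 0 */
}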
+
+#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
+
+static inline int mos7715_change_mode(struct mos7715_parport *mos_parport,
+ enum mos7715_pp_modes mode)
+{
+ mos_parport->shadowECR = mode;
+ write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR);
+ return 0;
+}
+
+static void destroy_mos_parport(struct kref *kref)
+{
+ struct mos7715_parport *mos_parport =
+ container_of(kref, struct mos7715_parport, ref_count);
+
+ dbg("%s called", __func__);
+ kfree(mos_parport);
+}
+
+static void destroy_urbtracker(struct kref *kref)
+{
+ struct urbtracker *urbtrack =
+ container_of(kref, struct urbtracker, ref_count);
+ struct mos7715_parport *mos_parport = urbtrack->mos_parport;
+ dbg("%s called", __func__);
+ usb_free_urb(urbtrack->urb);
+ kfree(urbtrack);
+ kref_put(&mos_parport->ref_count, destroy_mos_parport);
+}
+
+/*
+ * This runs as a tasklet when submission of an urb from a non-blocking
+ * parallel port callback had to be deferred because the disconnect mutex
+ * could not be obtained at the time.
+ */
+static void send_deferred_urbs(unsigned long _mos_parport)
+{
+ int ret_val;
+ unsigned long flags;
+ struct mos7715_parport *mos_parport = (void *)_mos_parport;
+ struct urbtracker *urbtrack;
+ struct list_head *cursor, *next;
+
+ dbg("%s called", __func__);
+
+ /* if release function ran, game over */
+ if (unlikely(mos_parport->serial == NULL))
+ return;
+
+ /* try again to get the mutex */
+ if (!mutex_trylock(&mos_parport->serial->disc_mutex)) {
+ dbg("%s: rescheduling tasklet", __func__);
+ tasklet_schedule(&mos_parport->urb_tasklet);
+ return;
+ }
+
+ /* if device disconnected, game over */
+ if (unlikely(mos_parport->serial->disconnected)) {
+ mutex_unlock(&mos_parport->serial->disc_mutex);
+ return;
+ }
+
+ spin_lock_irqsave(&mos_parport->listlock, flags);
+ if (list_empty(&mos_parport->deferred_urbs)) {
+ spin_unlock_irqrestore(&mos_parport->listlock, flags);
+ mutex_unlock(&mos_parport->serial->disc_mutex);
+ dbg("%s: deferred_urbs list empty", __func__);
+ return;
+ }
+
+ /* move contents of deferred_urbs list to active_urbs list and submit */
+ list_for_each_safe(cursor, next, &mos_parport->deferred_urbs)
+ list_move_tail(cursor, &mos_parport->active_urbs);
+ list_for_each_entry(urbtrack, &mos_parport->active_urbs,
+ urblist_entry) {
+ ret_val = usb_submit_urb(urbtrack->urb, GFP_ATOMIC);
+ dbg("%s: urb submitted", __func__);
+ if (ret_val) {
+ dev_err(&mos_parport->serial->dev->dev,
+ "usb_submit_urb() failed: %d", ret_val);
+ list_del(&urbtrack->urblist_entry);
+ kref_put(&urbtrack->ref_count, destroy_urbtracker);
+ }
+ }
+ spin_unlock_irqrestore(&mos_parport->listlock, flags);
+ mutex_unlock(&mos_parport->serial->disc_mutex);
+}
+
+/* callback for parallel port control urbs submitted asynchronously */
+static void async_complete(struct urb *urb)
+{
+ struct urbtracker *urbtrack = urb->context;
+ int status = urb->status;
+ dbg("%s called", __func__);
+ if (unlikely(status))
+ dbg("%s - nonzero urb status received: %d", __func__, status);
+
+ /* remove the urbtracker from the active_urbs list */
+ spin_lock(&urbtrack->mos_parport->listlock);
+ list_del(&urbtrack->urblist_entry);
+ spin_unlock(&urbtrack->mos_parport->listlock);
+ kref_put(&urbtrack->ref_count, destroy_urbtracker);
+}
+
+static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
+ enum mos_regs reg, __u8 data)
+{
+ struct urbtracker *urbtrack;
+ int ret_val;
+ unsigned long flags;
+ struct usb_ctrlrequest setup;
+ struct usb_serial *serial = mos_parport->serial;
+ struct usb_device *usbdev = serial->dev;
+ dbg("%s called", __func__);
+
+ /* create and initialize the control urb and containing urbtracker */
+ urbtrack = kmalloc(sizeof(struct urbtracker), GFP_ATOMIC);
+ if (urbtrack == NULL) {
+ dev_err(&usbdev->dev, "out of memory");
+ return -ENOMEM;
+ }
+ kref_get(&mos_parport->ref_count);
+ urbtrack->mos_parport = mos_parport;
+ urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (urbtrack->urb == NULL) {
+ dev_err(&usbdev->dev, "out of urbs");
+ kfree(urbtrack);
+ return -ENOMEM;
+ }
+ setup.bRequestType = (__u8)0x40;
+ setup.bRequest = (__u8)0x0e;
+ setup.wValue = get_reg_value(reg, dummy);
+ setup.wIndex = get_reg_index(reg);
+ setup.wLength = 0;
+ usb_fill_control_urb(urbtrack->urb, usbdev,
+ usb_sndctrlpipe(usbdev, 0),
+ (unsigned char *)&setup,
+ NULL, 0, async_complete, urbtrack);
+ kref_init(&urbtrack->ref_count);
+ INIT_LIST_HEAD(&urbtrack->urblist_entry);
+
+ /*
+ * get the disconnect mutex, or add tracker to the deferred_urbs list
+ * and schedule a tasklet to try again later
+ */
+ if (!mutex_trylock(&serial->disc_mutex)) {
+ spin_lock_irqsave(&mos_parport->listlock, flags);
+ list_add_tail(&urbtrack->urblist_entry,
+ &mos_parport->deferred_urbs);
+ spin_unlock_irqrestore(&mos_parport->listlock, flags);
+ tasklet_schedule(&mos_parport->urb_tasklet);
+ dbg("tasklet scheduled");
+ return 0;
+ }
+
+ /* bail if device disconnected */
+ if (serial->disconnected) {
+ kref_put(&urbtrack->ref_count, destroy_urbtracker);
+ mutex_unlock(&serial->disc_mutex);
+ return -ENODEV;
+ }
+
+ /* add the tracker to the active_urbs list and submit */
+ spin_lock_irqsave(&mos_parport->listlock, flags);
+ list_add_tail(&urbtrack->urblist_entry, &mos_parport->active_urbs);
+ spin_unlock_irqrestore(&mos_parport->listlock, flags);
+ ret_val = usb_submit_urb(urbtrack->urb, GFP_ATOMIC);
+ mutex_unlock(&serial->disc_mutex);
+ if (ret_val) {
+ dev_err(&usbdev->dev,
+ "%s: submit_urb() failed: %d", __func__, ret_val);
+ spin_lock_irqsave(&mos_parport->listlock, flags);
+ list_del(&urbtrack->urblist_entry);
+ spin_unlock_irqrestore(&mos_parport->listlock, flags);
+ kref_put(&urbtrack->ref_count, destroy_urbtracker);
+ return ret_val;
+ }
+ return 0;
+}
+
+/*
+ * This is the common top part of all parallel port callback operations that
+ * send synchronous messages to the device. This implements convoluted locking
+ * that avoids two scenarios: (1) a port operation is called after usbserial
+ * has called our release function, at which point struct mos7715_parport has
+ * been destroyed, and (2) the device has been disconnected, but usbserial has
+ * not called the release function yet because someone has a serial port open.
+ * The shared release_lock prevents the first, and the mutex and disconnected
+ * flag maintained by usbserial cover the second. We also use the msg_pending
+ * flag to ensure that all synchronous usb message calls have completed before
+ * our release function can return.
+ */
+static int parport_prologue(struct parport *pp)
+{
+ struct mos7715_parport *mos_parport;
+
+ spin_lock(&release_lock);
+ mos_parport = pp->private_data;
+ if (unlikely(mos_parport == NULL)) {
+ /* release fn called, port struct destroyed */
+ spin_unlock(&release_lock);
+ return -1;
+ }
+ mos_parport->msg_pending = true; /* synch usb call pending */
+ INIT_COMPLETION(mos_parport->syncmsg_compl);
+ spin_unlock(&release_lock);
+
+ mutex_lock(&mos_parport->serial->disc_mutex);
+ if (mos_parport->serial->disconnected) {
+ /* device disconnected */
+ mutex_unlock(&mos_parport->serial->disc_mutex);
+ mos_parport->msg_pending = false;
+ complete(&mos_parport->syncmsg_compl);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * This is the the common bottom part of all parallel port functions that send
+ * synchronous messages to the device.
+ */
+static inline void parport_epilogue(struct parport *pp)
+{
+ struct mos7715_parport *mos_parport = pp->private_data;
+ mutex_unlock(&mos_parport->serial->disc_mutex);
+ mos_parport->msg_pending = false;
+ complete(&mos_parport->syncmsg_compl);
+}
+
+static void parport_mos7715_write_data(struct parport *pp, unsigned char d)
+{
+ struct mos7715_parport *mos_parport = pp->private_data;
+ dbg("%s called: %2.2x", __func__, d);
+ if (parport_prologue(pp) < 0)
+ return;
+ mos7715_change_mode(mos_parport, SPP);
+ write_mos_reg(mos_parport->serial, dummy, DPR, (__u8)d);
+ parport_epilogue(pp);
+}
+
+static unsigned char parport_mos7715_read_data(struct parport *pp)
+{
+ struct mos7715_parport *mos_parport = pp->private_data;
+ unsigned char d;
+ dbg("%s called", __func__);
+ if (parport_prologue(pp) < 0)
+ return 0;
+ read_mos_reg(mos_parport->serial, dummy, DPR, &d);
+ parport_epilogue(pp);
+ return d;
+}
+
+static void parport_mos7715_write_control(struct parport *pp, unsigned char d)
+{
+ struct mos7715_parport *mos_parport = pp->private_data;
+ __u8 data;
+ dbg("%s called: %2.2x", __func__, d);
+ if (parport_prologue(pp) < 0)
+ return;
+ data = ((__u8)d & 0x0f) | (mos_parport->shadowDCR & 0xf0);
+ write_mos_reg(mos_parport->serial, dummy, DCR, data);
+ mos_parport->shadowDCR = data;
+ parport_epilogue(pp);
+}
+
+static unsigned char parport_mos7715_read_control(struct parport *pp)
+{
+ struct mos7715_parport *mos_parport = pp->private_data;
+ __u8 dcr;
+ dbg("%s called", __func__);
+ spin_lock(&release_lock);
+ mos_parport = pp->private_data;
+ if (unlikely(mos_parport == NULL)) {
+ spin_unlock(&release_lock);
+ return 0;
+ }
+ dcr = mos_parport->shadowDCR & 0x0f;
+ spin_unlock(&release_lock);
+ return dcr;
+}
+
+static unsigned char parport_mos7715_frob_control(struct parport *pp,
+ unsigned char mask,
+ unsigned char val)
+{
+ struct mos7715_parport *mos_parport = pp->private_data;
+ __u8 dcr;
+ dbg("%s called", __func__);
+ mask &= 0x0f;
+ val &= 0x0f;
+ if (parport_prologue(pp) < 0)
+ return 0;
+ mos_parport->shadowDCR = (mos_parport->shadowDCR & (~mask)) ^ val;
+ write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+ dcr = mos_parport->shadowDCR & 0x0f;
+ parport_epilogue(pp);
+ return dcr;
+}
+
+static unsigned char parport_mos7715_read_status(struct parport *pp)
+{
+ unsigned char status;
+ struct mos7715_parport *mos_parport = pp->private_data;
+ dbg("%s called", __func__);
+ spin_lock(&release_lock);
+ mos_parport = pp->private_data;
+ if (unlikely(mos_parport == NULL)) { /* release called */
+ spin_unlock(&release_lock);
+ return 0;
+ }
+ status = atomic_read(&mos_parport->shadowDSR) & 0xf8;
+ spin_unlock(&release_lock);
+ return status;
+}
+
+static void parport_mos7715_enable_irq(struct parport *pp)
+{
+ dbg("%s called", __func__);
+}
+static void parport_mos7715_disable_irq(struct parport *pp)
+{
+ dbg("%s called", __func__);
+}
+
+static void parport_mos7715_data_forward(struct parport *pp)
+{
+ struct mos7715_parport *mos_parport = pp->private_data;
+ dbg("%s called", __func__);
+ if (parport_prologue(pp) < 0)
+ return;
+ mos7715_change_mode(mos_parport, PS2);
+ mos_parport->shadowDCR &= ~0x20;
+ write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+ parport_epilogue(pp);
+}
+
+static void parport_mos7715_data_reverse(struct parport *pp)
+{
+ struct mos7715_parport *mos_parport = pp->private_data;
+ dbg("%s called", __func__);
+ if (parport_prologue(pp) < 0)
+ return;
+ mos7715_change_mode(mos_parport, PS2);
+ mos_parport->shadowDCR |= 0x20;
+ write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+ parport_epilogue(pp);
+}
+
+static void parport_mos7715_init_state(struct pardevice *dev,
+ struct parport_state *s)
+{
+ dbg("%s called", __func__);
+ s->u.pc.ctr = DCR_INIT_VAL;
+ s->u.pc.ecr = ECR_INIT_VAL;
+}
+
+/* N.B. Parport core code requires that this function not block */
+static void parport_mos7715_save_state(struct parport *pp,
+ struct parport_state *s)
+{
+ struct mos7715_parport *mos_parport;
+ dbg("%s called", __func__);
+ spin_lock(&release_lock);
+ mos_parport = pp->private_data;
+ if (unlikely(mos_parport == NULL)) { /* release called */
+ spin_unlock(&release_lock);
+ return;
+ }
+ s->u.pc.ctr = mos_parport->shadowDCR;
+ s->u.pc.ecr = mos_parport->shadowECR;
+ spin_unlock(&release_lock);
+}
+
+/* N.B. Parport core code requires that this function not block */
+static void parport_mos7715_restore_state(struct parport *pp,
+ struct parport_state *s)
+{
+ struct mos7715_parport *mos_parport;
+ dbg("%s called", __func__);
+ spin_lock(&release_lock);
+ mos_parport = pp->private_data;
+ if (unlikely(mos_parport == NULL)) { /* release called */
+ spin_unlock(&release_lock);
+ return;
+ }
+ write_parport_reg_nonblock(mos_parport, DCR, mos_parport->shadowDCR);
+ write_parport_reg_nonblock(mos_parport, ECR, mos_parport->shadowECR);
+ spin_unlock(&release_lock);
+}
+
+static size_t parport_mos7715_write_compat(struct parport *pp,
+ const void *buffer,
+ size_t len, int flags)
+{
+ int retval;
+ struct mos7715_parport *mos_parport = pp->private_data;
+ int actual_len;
+ dbg("%s called: %u chars", __func__, (unsigned int)len);
+ if (parport_prologue(pp) < 0)
+ return 0;
+ mos7715_change_mode(mos_parport, PPF);
+ retval = usb_bulk_msg(mos_parport->serial->dev,
+ usb_sndbulkpipe(mos_parport->serial->dev, 2),
+ (void *)buffer, len, &actual_len,
+ MOS_WDR_TIMEOUT);
+ parport_epilogue(pp);
+ if (retval) {
+ dev_err(&mos_parport->serial->dev->dev,
+ "mos7720: usb_bulk_msg() failed: %d", retval);
+ return 0;
+ }
+ return actual_len;
+}
+
+static struct parport_operations parport_mos7715_ops = {
+ .owner = THIS_MODULE,
+ .write_data = parport_mos7715_write_data,
+ .read_data = parport_mos7715_read_data,
+
+ .write_control = parport_mos7715_write_control,
+ .read_control = parport_mos7715_read_control,
+ .frob_control = parport_mos7715_frob_control,
+
+ .read_status = parport_mos7715_read_status,
+
+ .enable_irq = parport_mos7715_enable_irq,
+ .disable_irq = parport_mos7715_disable_irq,
+
+ .data_forward = parport_mos7715_data_forward,
+ .data_reverse = parport_mos7715_data_reverse,
+
+ .init_state = parport_mos7715_init_state,
+ .save_state = parport_mos7715_save_state,
+ .restore_state = parport_mos7715_restore_state,
+
+ .compat_write_data = parport_mos7715_write_compat,
+
+ .nibble_read_data = parport_ieee1284_read_nibble,
+ .byte_read_data = parport_ieee1284_read_byte,
+};
+
+/*
+ * Allocate and initialize parallel port control struct, initialize
+ * the parallel port hardware device, and register with the parport subsystem.
+ */
+static int mos7715_parport_init(struct usb_serial *serial)
+{
+ struct mos7715_parport *mos_parport;
+
+ /* allocate and initialize parallel port control struct */
+ mos_parport = kzalloc(sizeof(struct mos7715_parport), GFP_KERNEL);
+ if (mos_parport == NULL) {
+ dbg("mos7715_parport_init: kzalloc failed");
+ return -ENOMEM;
+ }
+ mos_parport->msg_pending = false;
+ kref_init(&mos_parport->ref_count);
+ spin_lock_init(&mos_parport->listlock);
+ INIT_LIST_HEAD(&mos_parport->active_urbs);
+ INIT_LIST_HEAD(&mos_parport->deferred_urbs);
+ usb_set_serial_data(serial, mos_parport); /* hijack private pointer */
+ mos_parport->serial = serial;
+ tasklet_init(&mos_parport->urb_tasklet, send_deferred_urbs,
+ (unsigned long) mos_parport);
+ init_completion(&mos_parport->syncmsg_compl);
+
+ /* cycle parallel port reset bit */
+ write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x80);
+ write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x00);
+
+ /* initialize device registers */
+ mos_parport->shadowDCR = DCR_INIT_VAL;
+ write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+ mos_parport->shadowECR = ECR_INIT_VAL;
+ write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR);
+
+ /* register with parport core */
+ mos_parport->pp = parport_register_port(0, PARPORT_IRQ_NONE,
+ PARPORT_DMA_NONE,
+ &parport_mos7715_ops);
+ if (mos_parport->pp == NULL) {
+ dev_err(&serial->interface->dev,
+ "Could not register parport\n");
+ kref_put(&mos_parport->ref_count, destroy_mos_parport);
+ return -EIO;
+ }
+ mos_parport->pp->private_data = mos_parport;
+ mos_parport->pp->modes = PARPORT_MODE_COMPAT | PARPORT_MODE_PCSPP;
+ mos_parport->pp->dev = &serial->interface->dev;
+ parport_announce_port(mos_parport->pp);
+
+ return 0;
+}
+#endif /* CONFIG_USB_SERIAL_MOS7715_PARPORT */
/*
* mos7720_interrupt_callback
@@ -109,8 +753,6 @@ static void mos7720_interrupt_callback(struct urb *urb)
__u8 sp1;
__u8 sp2;
- dbg(" : Entering");
-
switch (status) {
case 0:
/* success */
@@ -161,7 +803,7 @@ static void mos7720_interrupt_callback(struct urb *urb)
dbg("Serial Port 1: Receiver time out");
break;
case SERIAL_IIR_MS:
- dbg("Serial Port 1: Modem status change");
+ /* dbg("Serial Port 1: Modem status change"); */
break;
}
@@ -174,7 +816,7 @@ static void mos7720_interrupt_callback(struct urb *urb)
dbg("Serial Port 2: Receiver time out");
break;
case SERIAL_IIR_MS:
- dbg("Serial Port 2: Modem status change");
+ /* dbg("Serial Port 2: Modem status change"); */
break;
}
}
@@ -208,6 +850,7 @@ static void mos7715_interrupt_callback(struct urb *urb)
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
+ case -ENODEV:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d", __func__,
status);
@@ -243,11 +886,21 @@ static void mos7715_interrupt_callback(struct urb *urb)
dbg("Serial Port: Receiver time out");
break;
case SERIAL_IIR_MS:
- dbg("Serial Port: Modem status change");
+ /* dbg("Serial Port: Modem status change"); */
break;
}
}
+#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
+ { /* update local copy of DSR reg */
+ struct usb_serial_port *port = urb->context;
+ struct mos7715_parport *mos_parport = port->serial->private;
+ if (unlikely(mos_parport == NULL))
+ return;
+ atomic_set(&mos_parport->shadowDSR, data[2]);
+ }
+#endif
+
exit:
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
@@ -267,7 +920,6 @@ static void mos7720_bulk_in_callback(struct urb *urb)
int retval;
unsigned char *data ;
struct usb_serial_port *port;
- struct moschip_port *mos7720_port;
struct tty_struct *tty;
int status = urb->status;
@@ -276,13 +928,7 @@ static void mos7720_bulk_in_callback(struct urb *urb)
return;
}
- mos7720_port = urb->context;
- if (!mos7720_port) {
- dbg("NULL mos7720_port pointer");
- return ;
- }
-
- port = mos7720_port->port;
+ port = urb->context;
dbg("Entering...%s", __func__);
@@ -332,8 +978,6 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
return ;
}
- dbg("Entering .........");
-
tty = tty_port_tty_get(&mos7720_port->port->port);
if (tty && mos7720_port->open)
@@ -342,56 +986,6 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
}
/*
- * send_mos_cmd
- * this function will be used for sending command to device
- */
-static int send_mos_cmd(struct usb_serial *serial, __u8 request, __u16 value,
- __u16 index, u8 *data)
-{
- int status;
- u8 *buf;
- u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
-
- if (value < MOS_MAX_PORT) {
- if (product == MOSCHIP_DEVICE_ID_7715)
- value = 0x0200; /* identifies the 7715's serial port */
- else
- value = value*0x100+0x200;
- } else {
- value = 0x0000;
- if ((product == MOSCHIP_DEVICE_ID_7715) &&
- (index != 0x08)) {
- dbg("serial->product== MOSCHIP_DEVICE_ID_7715");
- /* index = 0x01 ; */
- }
- }
-
- if (request == MOS_WRITE) {
- value = value + *data;
- status = usb_control_msg(serial->dev,
- usb_sndctrlpipe(serial->dev, 0), MOS_WRITE,
- 0x40, value, index, NULL, 0, MOS_WDR_TIMEOUT);
- } else {
- buf = kmalloc(1, GFP_KERNEL);
- if (!buf) {
- status = -ENOMEM;
- goto out;
- }
- status = usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0), MOS_READ,
- 0xc0, value, index, buf, 1, MOS_WDR_TIMEOUT);
- *data = *buf;
- kfree(buf);
- }
-out:
- if (status < 0)
- dbg("Command Write failed Value %x index %x", value, index);
-
- return status;
-}
-
-
-/*
* mos77xx_probe
* this function installs the appropriate read interrupt endpoint callback
* depending on whether the device is a 7720 or 7715, thus avoiding costly
@@ -424,11 +1018,10 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
struct usb_serial *serial;
struct usb_serial_port *port0;
struct urb *urb;
- struct moschip_serial *mos7720_serial;
struct moschip_port *mos7720_port;
int response;
int port_number;
- char data;
+ __u8 data;
int allocated_urbs = 0;
int j;
@@ -440,11 +1033,6 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
port0 = serial->port[0];
- mos7720_serial = usb_get_serial_data(serial);
-
- if (mos7720_serial == NULL || port0 == NULL)
- return -ENODEV;
-
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
@@ -489,103 +1077,36 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
* 0x08 : SP1/2 Control Reg
*/
port_number = port->number - port->serial->minor;
- send_mos_cmd(port->serial, MOS_READ, port_number, UART_LSR, &data);
+ read_mos_reg(serial, port_number, LSR, &data);
+
dbg("SS::%p LSR:%x", mos7720_port, data);
dbg("Check:Sending Command ..........");
- data = 0x02;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT, 0x01, &data);
- data = 0x02;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT, 0x02, &data);
+ write_mos_reg(serial, dummy, SP1_REG, 0x02);
+ write_mos_reg(serial, dummy, SP2_REG, 0x02);
- data = 0x00;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x01, &data);
- data = 0x00;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x02, &data);
+ write_mos_reg(serial, port_number, IER, 0x00);
+ write_mos_reg(serial, port_number, FCR, 0x00);
- data = 0xCF;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x02, &data);
- data = 0x03;
- mos7720_port->shadowLCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x03, &data);
- data = 0x0b;
- mos7720_port->shadowMCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x04, &data);
- data = 0x0b;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x04, &data);
-
- data = 0x00;
- send_mos_cmd(serial, MOS_READ, MOS_MAX_PORT, 0x08, &data);
- data = 0x00;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT, 0x08, &data);
-
-/* data = 0x00;
- send_mos_cmd(serial, MOS_READ, MOS_MAX_PORT, port_number + 1, &data);
- data = 0x03;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT, port_number + 1, &data);
- data = 0x00;
- send_mos_cmd(port->serial, MOS_WRITE, MOS_MAX_PORT,
- port_number + 1, &data);
-*/
- data = 0x00;
- send_mos_cmd(serial, MOS_READ, MOS_MAX_PORT, 0x08, &data);
+ write_mos_reg(serial, port_number, FCR, 0xcf);
+ mos7720_port->shadowLCR = 0x03;
+ write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+ mos7720_port->shadowMCR = 0x0b;
+ write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
+ write_mos_reg(serial, port_number, SP_CONTROL_REG, 0x00);
+ read_mos_reg(serial, dummy, SP_CONTROL_REG, &data);
data = data | (port->number - port->serial->minor + 1);
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT, 0x08, &data);
+ write_mos_reg(serial, dummy, SP_CONTROL_REG, data);
+ mos7720_port->shadowLCR = 0x83;
+ write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, THR, 0x0c);
+ write_mos_reg(serial, port_number, IER, 0x00);
+ mos7720_port->shadowLCR = 0x03;
+ write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, IER, 0x0c);
- data = 0x83;
- mos7720_port->shadowLCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x03, &data);
- data = 0x0c;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x00, &data);
- data = 0x00;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x01, &data);
- data = 0x03;
- mos7720_port->shadowLCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x03, &data);
- data = 0x0c;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x01, &data);
- data = 0x0c;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x01, &data);
-
- /* see if we've set up our endpoint info yet *
- * (can't set it up in mos7720_startup as the *
- * structures were not set up at that time.) */
- if (!mos7720_serial->interrupt_started) {
- dbg("Interrupt buffer NULL !!!");
-
- /* not set up yet, so do it now */
- mos7720_serial->interrupt_started = 1;
-
- dbg("To Submit URB !!!");
-
- /* set up our interrupt urb */
- usb_fill_int_urb(port0->interrupt_in_urb, serial->dev,
- usb_rcvintpipe(serial->dev,
- port->interrupt_in_endpointAddress),
- port0->interrupt_in_buffer,
- port0->interrupt_in_urb->transfer_buffer_length,
- mos7720_interrupt_callback, mos7720_port,
- port0->interrupt_in_urb->interval);
-
- /* start interrupt read for this mos7720 this interrupt *
- * will continue as long as the mos7720 is connected */
- dbg("Submit URB over !!!");
- response = usb_submit_urb(port0->interrupt_in_urb, GFP_KERNEL);
- if (response)
- dev_err(&port->dev,
- "%s - Error %d submitting control urb\n",
- __func__, response);
- }
-
- /* set up our bulk in urb */
- usb_fill_bulk_urb(port->read_urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- port->bulk_in_buffer,
- port->read_urb->transfer_buffer_length,
- mos7720_bulk_in_callback, mos7720_port);
response = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (response)
dev_err(&port->dev, "%s - Error %d submitting read urb\n",
@@ -640,7 +1161,6 @@ static void mos7720_close(struct usb_serial_port *port)
{
struct usb_serial *serial;
struct moschip_port *mos7720_port;
- char data;
int j;
dbg("mos7720_close:entering...");
@@ -673,13 +1193,10 @@ static void mos7720_close(struct usb_serial_port *port)
/* these commands must not be issued if the device has
* been disconnected */
if (!serial->disconnected) {
- data = 0x00;
- send_mos_cmd(serial, MOS_WRITE,
- port->number - port->serial->minor, 0x04, &data);
-
- data = 0x00;
- send_mos_cmd(serial, MOS_WRITE,
- port->number - port->serial->minor, 0x01, &data);
+ write_mos_reg(serial, port->number - port->serial->minor,
+ MCR, 0x00);
+ write_mos_reg(serial, port->number - port->serial->minor,
+ IER, 0x00);
}
mutex_unlock(&serial->disc_mutex);
mos7720_port->open = 0;
@@ -708,8 +1225,8 @@ static void mos7720_break(struct tty_struct *tty, int break_state)
data = mos7720_port->shadowLCR & ~UART_LCR_SBC;
mos7720_port->shadowLCR = data;
- send_mos_cmd(serial, MOS_WRITE, port->number - port->serial->minor,
- 0x03, &data);
+ write_mos_reg(serial, port->number - port->serial->minor,
+ LCR, mos7720_port->shadowLCR);
return;
}
@@ -854,9 +1371,8 @@ static void mos7720_throttle(struct tty_struct *tty)
/* if we are implementing RTS/CTS, toggle that line */
if (tty->termios->c_cflag & CRTSCTS) {
mos7720_port->shadowMCR &= ~UART_MCR_RTS;
- status = send_mos_cmd(port->serial, MOS_WRITE,
- port->number - port->serial->minor,
- UART_MCR, &mos7720_port->shadowMCR);
+ write_mos_reg(port->serial, port->number - port->serial->minor,
+ MCR, mos7720_port->shadowMCR);
if (status != 0)
return;
}
@@ -889,22 +1405,21 @@ static void mos7720_unthrottle(struct tty_struct *tty)
/* if we are implementing RTS/CTS, toggle that line */
if (tty->termios->c_cflag & CRTSCTS) {
mos7720_port->shadowMCR |= UART_MCR_RTS;
- status = send_mos_cmd(port->serial, MOS_WRITE,
- port->number - port->serial->minor,
- UART_MCR, &mos7720_port->shadowMCR);
+ write_mos_reg(port->serial, port->number - port->serial->minor,
+ MCR, mos7720_port->shadowMCR);
if (status != 0)
return;
}
}
+/* FIXME: this function does not work */
static int set_higher_rates(struct moschip_port *mos7720_port,
unsigned int baud)
{
- unsigned char data;
struct usb_serial_port *port;
struct usb_serial *serial;
int port_number;
-
+ enum mos_regs sp_reg;
if (mos7720_port == NULL)
return -EINVAL;
@@ -917,58 +1432,35 @@ static int set_higher_rates(struct moschip_port *mos7720_port,
dbg("Sending Setting Commands ..........");
port_number = port->number - port->serial->minor;
- data = 0x000;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x01, &data);
- data = 0x000;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x02, &data);
- data = 0x0CF;
- send_mos_cmd(serial, MOS_WRITE, port->number, 0x02, &data);
- data = 0x00b;
- mos7720_port->shadowMCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x04, &data);
- data = 0x00b;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x04, &data);
-
- data = 0x000;
- send_mos_cmd(serial, MOS_READ, MOS_MAX_PORT, 0x08, &data);
- data = 0x000;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT, 0x08, &data);
-
+ write_mos_reg(serial, port_number, IER, 0x00);
+ write_mos_reg(serial, port_number, FCR, 0x00);
+ write_mos_reg(serial, port_number, FCR, 0xcf);
+ mos7720_port->shadowMCR = 0x0b;
+ write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
+ write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x00);
/***********************************************
* Set for higher rates *
***********************************************/
-
- data = baud * 0x10;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT, port_number + 1, &data);
-
- data = 0x003;
- send_mos_cmd(serial, MOS_READ, MOS_MAX_PORT, 0x08, &data);
- data = 0x003;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT, 0x08, &data);
-
- data = 0x02b;
- mos7720_port->shadowMCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x04, &data);
- data = 0x02b;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x04, &data);
+ /* writing baud rate verbatim into uart clock field clearly not right */
+ if (port_number == 0)
+ sp_reg = SP1_REG;
+ else
+ sp_reg = SP2_REG;
+ write_mos_reg(serial, dummy, sp_reg, baud * 0x10);
+ write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x03);
+ mos7720_port->shadowMCR = 0x2b;
+ write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
/***********************************************
* Set DLL/DLM
***********************************************/
-
- data = mos7720_port->shadowLCR | UART_LCR_DLAB;
- mos7720_port->shadowLCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x03, &data);
-
- data = 0x001; /* DLL */
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x00, &data);
- data = 0x000; /* DLM */
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x01, &data);
-
- data = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
- mos7720_port->shadowLCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x03, &data);
+ mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB;
+ write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, DLL, 0x01);
+ write_mos_reg(serial, port_number, DLM, 0x00);
+ mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
+ write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
return 0;
}
@@ -1056,7 +1548,6 @@ static int send_cmd_write_baud_rate(struct moschip_port *mos7720_port,
struct usb_serial *serial;
int divisor;
int status;
- unsigned char data;
unsigned char number;
if (mos7720_port == NULL)
@@ -1078,21 +1569,16 @@ static int send_cmd_write_baud_rate(struct moschip_port *mos7720_port,
}
/* Enable access to divisor latch */
- data = mos7720_port->shadowLCR | UART_LCR_DLAB;
- mos7720_port->shadowLCR = data;
- send_mos_cmd(serial, MOS_WRITE, number, UART_LCR, &data);
+ mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB;
+ write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR);
/* Write the divisor */
- data = ((unsigned char)(divisor & 0xff));
- send_mos_cmd(serial, MOS_WRITE, number, 0x00, &data);
-
- data = ((unsigned char)((divisor & 0xff00) >> 8));
- send_mos_cmd(serial, MOS_WRITE, number, 0x01, &data);
+ write_mos_reg(serial, number, DLL, (__u8)(divisor & 0xff));
+ write_mos_reg(serial, number, DLM, (__u8)((divisor & 0xff00) >> 8));
/* Disable access to divisor latch */
- data = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
- mos7720_port->shadowLCR = data;
- send_mos_cmd(serial, MOS_WRITE, number, 0x03, &data);
+ mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
+ write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR);
return status;
}
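/*
 * Illustrative sketch only: the divisor written to DLL/DLM above is the usual
 * 16x-oversampled UART divisor.  Assuming a classic 1.8432 MHz 8250-style
 * clock (115200 baud maximum), it would be computed roughly as below; the
 * real divisor calculation for the 7720/7715 clocking may differ.
 */
static int __maybe_unused example_calc_divisor(int baud)
{
	if (baud <= 0)
		return -EINVAL;
	return 115200 / baud;	/* e.g. 9600 baud -> divisor 12 */
}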
@@ -1117,7 +1603,6 @@ static void change_port_settings(struct tty_struct *tty,
__u8 lStop;
int status;
int port_number;
- char data;
if (mos7720_port == NULL)
return ;
@@ -1196,30 +1681,19 @@ static void change_port_settings(struct tty_struct *tty,
/* Update the LCR with the correct value */
mos7720_port->shadowLCR &=
- ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
+ ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
mos7720_port->shadowLCR |= (lData | lParity | lStop);
/* Disable Interrupts */
- data = 0x00;
- send_mos_cmd(serial, MOS_WRITE, port->number - port->serial->minor,
- UART_IER, &data);
-
- data = 0x00;
- send_mos_cmd(serial, MOS_WRITE, port_number, UART_FCR, &data);
-
- data = 0xcf;
- send_mos_cmd(serial, MOS_WRITE, port_number, UART_FCR, &data);
+ write_mos_reg(serial, port_number, IER, 0x00);
+ write_mos_reg(serial, port_number, FCR, 0x00);
+ write_mos_reg(serial, port_number, FCR, 0xcf);
/* Send the updated LCR value to the mos7720 */
- data = mos7720_port->shadowLCR;
- send_mos_cmd(serial, MOS_WRITE, port_number, UART_LCR, &data);
-
- data = 0x00b;
- mos7720_port->shadowMCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x04, &data);
- data = 0x00b;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x04, &data);
+ write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+ mos7720_port->shadowMCR = 0x0b;
+ write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
/* set up the MCR register and send it to the mos7720 */
mos7720_port->shadowMCR = UART_MCR_OUT2;
@@ -1230,21 +1704,15 @@ static void change_port_settings(struct tty_struct *tty,
mos7720_port->shadowMCR |= (UART_MCR_XONANY);
/* To set hardware flow control to the specified *
* serial port, in SP1/2_CONTROL_REG */
- if (port->number) {
- data = 0x001;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT,
- 0x08, &data);
- } else {
- data = 0x002;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT,
- 0x08, &data);
- }
- } else {
+ if (port->number)
+ write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01);
+ else
+ write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02);
+
+ } else
mos7720_port->shadowMCR &= ~(UART_MCR_XONANY);
- }
- data = mos7720_port->shadowMCR;
- send_mos_cmd(serial, MOS_WRITE, port_number, UART_MCR, &data);
+ write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
/* Determine divisor based on baud rate */
baud = tty_get_baud_rate(tty);
@@ -1257,8 +1725,7 @@ static void change_port_settings(struct tty_struct *tty,
if (baud >= 230400) {
set_higher_rates(mos7720_port, baud);
/* Enable Interrupts */
- data = 0x0c;
- send_mos_cmd(serial, MOS_WRITE, port_number, UART_IER, &data);
+ write_mos_reg(serial, port_number, IER, 0x0c);
return;
}
@@ -1269,8 +1736,7 @@ static void change_port_settings(struct tty_struct *tty,
if (cflag & CBAUD)
tty_encode_baud_rate(tty, baud, baud);
/* Enable Interrupts */
- data = 0x0c;
- send_mos_cmd(serial, MOS_WRITE, port_number, UART_IER, &data);
+ write_mos_reg(serial, port_number, IER, 0x0c);
if (port->read_urb->status != -EINPROGRESS) {
port->read_urb->dev = serial->dev;
@@ -1308,7 +1774,7 @@ static void mos7720_set_termios(struct tty_struct *tty,
return;
}
- dbg("setting termios - ASPIRE");
+ dbg("%s\n", "setting termios - ASPIRE");
cflag = tty->termios->c_cflag;
@@ -1326,7 +1792,7 @@ static void mos7720_set_termios(struct tty_struct *tty,
change_port_settings(tty, mos7720_port, old_termios);
if (!port->read_urb) {
- dbg("URB KILLED !!!!!");
+ dbg("%s", "URB KILLED !!!!!");
return;
}
@@ -1361,8 +1827,7 @@ static int get_lsr_info(struct tty_struct *tty,
count = mos7720_chars_in_buffer(tty);
if (count == 0) {
- send_mos_cmd(port->serial, MOS_READ, port_number,
- UART_LSR, &data);
+ read_mos_reg(port->serial, port_number, LSR, &data);
if ((data & (UART_LSR_TEMT | UART_LSR_THRE))
== (UART_LSR_TEMT | UART_LSR_THRE)) {
dbg("%s -- Empty", __func__);
@@ -1400,13 +1865,11 @@ static int mos7720_tiocmget(struct tty_struct *tty, struct file *file)
}
static int mos7720_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear)
+ unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7720_port = usb_get_serial_port_data(port);
unsigned int mcr ;
- unsigned char lmcr;
-
dbg("%s - port %d", __func__, port->number);
dbg("he was at tiocmget");
@@ -1427,10 +1890,8 @@ static int mos7720_tiocmset(struct tty_struct *tty, struct file *file,
mcr &= ~UART_MCR_LOOP;
mos7720_port->shadowMCR = mcr;
- lmcr = mos7720_port->shadowMCR;
-
- send_mos_cmd(port->serial, MOS_WRITE,
- port->number - port->serial->minor, UART_MCR, &lmcr);
+ write_mos_reg(port->serial, port->number - port->serial->minor,
+ MCR, mos7720_port->shadowMCR);
return 0;
}
@@ -1440,7 +1901,6 @@ static int set_modem_info(struct moschip_port *mos7720_port, unsigned int cmd,
{
unsigned int mcr ;
unsigned int arg;
- unsigned char data;
struct usb_serial_port *port;
@@ -1475,10 +1935,8 @@ static int set_modem_info(struct moschip_port *mos7720_port, unsigned int cmd,
}
mos7720_port->shadowMCR = mcr;
-
- data = mos7720_port->shadowMCR;
- send_mos_cmd(port->serial, MOS_WRITE,
- port->number - port->serial->minor, UART_MCR, &data);
+ write_mos_reg(port->serial, port->number - port->serial->minor,
+ MCR, mos7720_port->shadowMCR);
return 0;
}
@@ -1590,12 +2048,12 @@ static int mos7720_ioctl(struct tty_struct *tty, struct file *file,
static int mos7720_startup(struct usb_serial *serial)
{
- struct moschip_serial *mos7720_serial;
struct moschip_port *mos7720_port;
struct usb_device *dev;
int i;
char data;
u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+ int ret_val;
dbg("%s: Entering ..........", __func__);
@@ -1606,15 +2064,6 @@ static int mos7720_startup(struct usb_serial *serial)
dev = serial->dev;
- /* create our private serial structure */
- mos7720_serial = kzalloc(sizeof(struct moschip_serial), GFP_KERNEL);
- if (mos7720_serial == NULL) {
- dev_err(&dev->dev, "%s - Out of memory\n", __func__);
- return -ENOMEM;
- }
-
- usb_set_serial_data(serial, mos7720_serial);
-
/*
* The 7715 uses the first bulk in/out endpoint pair for the parallel
* port, and the second for the serial port. Because the usbserial core
@@ -1638,16 +2087,12 @@ static int mos7720_startup(struct usb_serial *serial)
serial->port[1]->interrupt_in_buffer = NULL;
}
- /* we set up the pointers to the endpoints in the mos7720_open *
- * function, as the structures aren't created yet. */
- /* set up port private structures */
+ /* set up serial port private structures */
for (i = 0; i < serial->num_ports; ++i) {
mos7720_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
if (mos7720_port == NULL) {
dev_err(&dev->dev, "%s - Out of memory\n", __func__);
- usb_set_serial_data(serial, NULL);
- kfree(mos7720_serial);
return -ENOMEM;
}
@@ -1669,12 +2114,22 @@ static int mos7720_startup(struct usb_serial *serial)
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
(__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5*HZ);
- /* LSR For Port 1 */
- send_mos_cmd(serial, MOS_READ, 0x00, UART_LSR, &data);
- dbg("LSR:%x", data);
+ /* start the interrupt urb */
+ ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
+ if (ret_val)
+ dev_err(&dev->dev,
+ "%s - Error %d submitting control urb\n",
+ __func__, ret_val);
- /* LSR For Port 2 */
- send_mos_cmd(serial, MOS_READ, 0x01, UART_LSR, &data);
+#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
+ if (product == MOSCHIP_DEVICE_ID_7715) {
+ ret_val = mos7715_parport_init(serial);
+ if (ret_val < 0)
+ return ret_val;
+ }
+#endif
+ /* LSR For Port 1 */
+ read_mos_reg(serial, 0, LSR, &data);
dbg("LSR:%x", data);
return 0;
@@ -1684,12 +2139,47 @@ static void mos7720_release(struct usb_serial *serial)
{
int i;
+#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
+ /* close the parallel port */
+
+ if (le16_to_cpu(serial->dev->descriptor.idProduct)
+ == MOSCHIP_DEVICE_ID_7715) {
+ struct urbtracker *urbtrack;
+ unsigned long flags;
+ struct mos7715_parport *mos_parport =
+ usb_get_serial_data(serial);
+
+ /* prevent NULL ptr dereference in port callbacks */
+ spin_lock(&release_lock);
+ mos_parport->pp->private_data = NULL;
+ spin_unlock(&release_lock);
+
+ /* wait for synchronous usb calls to return */
+ if (mos_parport->msg_pending)
+ wait_for_completion_timeout(&mos_parport->syncmsg_compl,
+ MOS_WDR_TIMEOUT);
+
+ parport_remove_port(mos_parport->pp);
+ usb_set_serial_data(serial, NULL);
+ mos_parport->serial = NULL;
+
+ /* if tasklet currently scheduled, wait for it to complete */
+ tasklet_kill(&mos_parport->urb_tasklet);
+
+ /* unlink any urbs sent by the tasklet */
+ spin_lock_irqsave(&mos_parport->listlock, flags);
+ list_for_each_entry(urbtrack,
+ &mos_parport->active_urbs,
+ urblist_entry)
+ usb_unlink_urb(urbtrack->urb);
+ spin_unlock_irqrestore(&mos_parport->listlock, flags);
+
+ kref_put(&mos_parport->ref_count, destroy_mos_parport);
+ }
+#endif
/* free private structure allocated for serial port */
for (i = 0; i < serial->num_ports; ++i)
kfree(usb_get_serial_port_data(serial->port[i]));
-
- /* free private structure allocated for serial device */
- kfree(usb_get_serial_data(serial));
}
static struct usb_driver usb_driver = {
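The mos7720.c hunks above clear mos_parport->pp->private_data under release_lock before tearing the parallel port down ("prevent NULL ptr dereference in port callbacks"). A minimal sketch of the matching guard that a parport callback would then need is shown below; the function name and the elided body are illustrative, not quoted from the driver, and only the locking pattern is taken from the diff.

static void mos7715_write_parport_data(struct parport *pp, unsigned char d)
{
	struct mos7715_parport *mos_parport;

	/* same lock the release path holds while it clears private_data */
	spin_lock(&release_lock);
	mos_parport = pp->private_data;
	if (mos_parport == NULL) {
		/* mos7720_release() already ran; the device is gone */
		spin_unlock(&release_lock);
		return;
	}
	/* ...queue the data-register write via mos_parport here... */
	spin_unlock(&release_lock);
}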
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 2fda1c0..f8424d1 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -26,7 +26,6 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 84d0eda..e280ad8 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -42,35 +42,14 @@
#include <linux/bitops.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
+#include "usb-wwan.h"
/* Function prototypes */
static int option_probe(struct usb_serial *serial,
const struct usb_device_id *id);
-static int option_open(struct tty_struct *tty, struct usb_serial_port *port);
-static void option_close(struct usb_serial_port *port);
-static void option_dtr_rts(struct usb_serial_port *port, int on);
-
-static int option_startup(struct usb_serial *serial);
-static void option_disconnect(struct usb_serial *serial);
-static void option_release(struct usb_serial *serial);
-static int option_write_room(struct tty_struct *tty);
-
+static int option_send_setup(struct usb_serial_port *port);
static void option_instat_callback(struct urb *urb);
-static int option_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count);
-static int option_chars_in_buffer(struct tty_struct *tty);
-static void option_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
-static int option_tiocmget(struct tty_struct *tty, struct file *file);
-static int option_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear);
-static int option_send_setup(struct usb_serial_port *port);
-#ifdef CONFIG_PM
-static int option_suspend(struct usb_serial *serial, pm_message_t message);
-static int option_resume(struct usb_serial *serial);
-#endif
-
/* Vendor and product IDs */
#define OPTION_VENDOR_ID 0x0AF0
#define OPTION_PRODUCT_COLT 0x5000
@@ -380,6 +359,10 @@ static int option_resume(struct usb_serial *serial);
#define CINTERION_VENDOR_ID 0x0681
+/* Olivetti products */
+#define OLIVETTI_VENDOR_ID 0x0b3c
+#define OLIVETTI_PRODUCT_OLICARD100 0xc000
+
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -675,6 +658,180 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1060, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1061, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1062, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1063, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1064, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1065, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1066, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1067, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1068, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1069, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1070, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1071, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1072, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1073, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1074, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1075, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1076, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1077, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1078, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1079, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1080, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1081, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1082, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1083, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1084, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1085, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1086, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1087, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1088, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1089, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1090, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1091, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1092, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1093, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1094, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1095, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1096, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1097, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1098, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1099, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1100, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1101, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1102, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1103, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1104, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1105, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1106, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1107, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1108, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1109, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1110, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1111, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1112, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1113, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1114, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1115, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1116, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1117, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1118, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1119, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1120, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1121, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1122, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1123, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1124, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1125, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1126, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1127, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1128, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1129, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1130, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1131, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1132, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1133, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1134, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1135, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1136, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1137, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1138, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1139, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1140, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1141, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1142, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1143, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1144, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1145, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1146, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1147, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1148, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1149, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1150, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1151, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1152, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1153, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1154, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1155, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1156, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1157, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1158, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1159, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1160, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1161, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1162, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1163, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1164, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1165, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1166, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1167, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1168, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1260, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1261, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1262, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1263, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1264, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1265, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1266, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1279, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1280, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1281, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1282, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1283, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1284, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1285, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1286, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1287, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1288, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1289, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1290, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1291, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1292, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1293, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1294, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1295, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1296, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1297, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
@@ -726,6 +883,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
{ USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
+
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
@@ -757,22 +916,22 @@ static struct usb_serial_driver option_1port_device = {
.id_table = option_ids,
.num_ports = 1,
.probe = option_probe,
- .open = option_open,
- .close = option_close,
- .dtr_rts = option_dtr_rts,
- .write = option_write,
- .write_room = option_write_room,
- .chars_in_buffer = option_chars_in_buffer,
- .set_termios = option_set_termios,
- .tiocmget = option_tiocmget,
- .tiocmset = option_tiocmset,
- .attach = option_startup,
- .disconnect = option_disconnect,
- .release = option_release,
+ .open = usb_wwan_open,
+ .close = usb_wwan_close,
+ .dtr_rts = usb_wwan_dtr_rts,
+ .write = usb_wwan_write,
+ .write_room = usb_wwan_write_room,
+ .chars_in_buffer = usb_wwan_chars_in_buffer,
+ .set_termios = usb_wwan_set_termios,
+ .tiocmget = usb_wwan_tiocmget,
+ .tiocmset = usb_wwan_tiocmset,
+ .attach = usb_wwan_startup,
+ .disconnect = usb_wwan_disconnect,
+ .release = usb_wwan_release,
.read_int_callback = option_instat_callback,
#ifdef CONFIG_PM
- .suspend = option_suspend,
- .resume = option_resume,
+ .suspend = usb_wwan_suspend,
+ .resume = usb_wwan_resume,
#endif
};
@@ -785,13 +944,6 @@ static int debug;
#define IN_BUFLEN 4096
#define OUT_BUFLEN 4096
-struct option_intf_private {
- spinlock_t susp_lock;
- unsigned int suspended:1;
- int in_flight;
- struct option_blacklist_info *blacklist_info;
-};
-
struct option_port_private {
/* Input endpoints and buffer for this port */
struct urb *in_urbs[N_IN_URB];
@@ -848,8 +1000,7 @@ module_exit(option_exit);
static int option_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
- struct option_intf_private *data;
-
+ struct usb_wwan_intf_private *data;
/* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */
if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
@@ -862,11 +1013,13 @@ static int option_probe(struct usb_serial *serial,
serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
return -ENODEV;
- data = serial->private = kzalloc(sizeof(struct option_intf_private), GFP_KERNEL);
+ data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
+
if (!data)
return -ENOMEM;
+ data->send_setup = option_send_setup;
spin_lock_init(&data->susp_lock);
- data->blacklist_info = (struct option_blacklist_info*) id->driver_info;
+ data->private = (void *)id->driver_info;
return 0;
}
@@ -887,194 +1040,6 @@ static enum option_blacklist_reason is_blacklisted(const u8 ifnum,
return OPTION_BLACKLIST_NONE;
}
-static void option_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
-{
- dbg("%s", __func__);
- /* Doesn't support option setting */
- tty_termios_copy_hw(tty->termios, old_termios);
- option_send_setup(port);
-}
-
-static int option_tiocmget(struct tty_struct *tty, struct file *file)
-{
- struct usb_serial_port *port = tty->driver_data;
- unsigned int value;
- struct option_port_private *portdata;
-
- portdata = usb_get_serial_port_data(port);
-
- value = ((portdata->rts_state) ? TIOCM_RTS : 0) |
- ((portdata->dtr_state) ? TIOCM_DTR : 0) |
- ((portdata->cts_state) ? TIOCM_CTS : 0) |
- ((portdata->dsr_state) ? TIOCM_DSR : 0) |
- ((portdata->dcd_state) ? TIOCM_CAR : 0) |
- ((portdata->ri_state) ? TIOCM_RNG : 0);
-
- return value;
-}
-
-static int option_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct option_port_private *portdata;
-
- portdata = usb_get_serial_port_data(port);
-
- /* FIXME: what locks portdata fields ? */
- if (set & TIOCM_RTS)
- portdata->rts_state = 1;
- if (set & TIOCM_DTR)
- portdata->dtr_state = 1;
-
- if (clear & TIOCM_RTS)
- portdata->rts_state = 0;
- if (clear & TIOCM_DTR)
- portdata->dtr_state = 0;
- return option_send_setup(port);
-}
-
-/* Write */
-static int option_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- struct option_port_private *portdata;
- struct option_intf_private *intfdata;
- int i;
- int left, todo;
- struct urb *this_urb = NULL; /* spurious */
- int err;
- unsigned long flags;
-
- portdata = usb_get_serial_port_data(port);
- intfdata = port->serial->private;
-
- dbg("%s: write (%d chars)", __func__, count);
-
- i = 0;
- left = count;
- for (i = 0; left > 0 && i < N_OUT_URB; i++) {
- todo = left;
- if (todo > OUT_BUFLEN)
- todo = OUT_BUFLEN;
-
- this_urb = portdata->out_urbs[i];
- if (test_and_set_bit(i, &portdata->out_busy)) {
- if (time_before(jiffies,
- portdata->tx_start_time[i] + 10 * HZ))
- continue;
- usb_unlink_urb(this_urb);
- continue;
- }
- dbg("%s: endpoint %d buf %d", __func__,
- usb_pipeendpoint(this_urb->pipe), i);
-
- err = usb_autopm_get_interface_async(port->serial->interface);
- if (err < 0)
- break;
-
- /* send the data */
- memcpy(this_urb->transfer_buffer, buf, todo);
- this_urb->transfer_buffer_length = todo;
-
- spin_lock_irqsave(&intfdata->susp_lock, flags);
- if (intfdata->suspended) {
- usb_anchor_urb(this_urb, &portdata->delayed);
- spin_unlock_irqrestore(&intfdata->susp_lock, flags);
- } else {
- intfdata->in_flight++;
- spin_unlock_irqrestore(&intfdata->susp_lock, flags);
- err = usb_submit_urb(this_urb, GFP_ATOMIC);
- if (err) {
- dbg("usb_submit_urb %p (write bulk) failed "
- "(%d)", this_urb, err);
- clear_bit(i, &portdata->out_busy);
- spin_lock_irqsave(&intfdata->susp_lock, flags);
- intfdata->in_flight--;
- spin_unlock_irqrestore(&intfdata->susp_lock, flags);
- continue;
- }
- }
-
- portdata->tx_start_time[i] = jiffies;
- buf += todo;
- left -= todo;
- }
-
- count -= left;
- dbg("%s: wrote (did %d)", __func__, count);
- return count;
-}
-
-static void option_indat_callback(struct urb *urb)
-{
- int err;
- int endpoint;
- struct usb_serial_port *port;
- struct tty_struct *tty;
- unsigned char *data = urb->transfer_buffer;
- int status = urb->status;
-
- dbg("%s: %p", __func__, urb);
-
- endpoint = usb_pipeendpoint(urb->pipe);
- port = urb->context;
-
- if (status) {
- dbg("%s: nonzero status: %d on endpoint %02x.",
- __func__, status, endpoint);
- } else {
- tty = tty_port_tty_get(&port->port);
- if (urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
- } else
- dbg("%s: empty read urb received", __func__);
- tty_kref_put(tty);
-
- /* Resubmit urb so we continue receiving */
- if (status != -ESHUTDOWN) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err && err != -EPERM)
- printk(KERN_ERR "%s: resubmit read urb failed. "
- "(%d)", __func__, err);
- else
- usb_mark_last_busy(port->serial->dev);
- }
-
- }
- return;
-}
-
-static void option_outdat_callback(struct urb *urb)
-{
- struct usb_serial_port *port;
- struct option_port_private *portdata;
- struct option_intf_private *intfdata;
- int i;
-
- dbg("%s", __func__);
-
- port = urb->context;
- intfdata = port->serial->private;
-
- usb_serial_port_softint(port);
- usb_autopm_put_interface_async(port->serial->interface);
- portdata = usb_get_serial_port_data(port);
- spin_lock(&intfdata->susp_lock);
- intfdata->in_flight--;
- spin_unlock(&intfdata->susp_lock);
-
- for (i = 0; i < N_OUT_URB; ++i) {
- if (portdata->out_urbs[i] == urb) {
- smp_mb__before_clear_bit();
- clear_bit(i, &portdata->out_busy);
- break;
- }
- }
-}
-
static void option_instat_callback(struct urb *urb)
{
int err;
@@ -1131,183 +1096,6 @@ static void option_instat_callback(struct urb *urb)
}
}
-static int option_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct option_port_private *portdata;
- int i;
- int data_len = 0;
- struct urb *this_urb;
-
- portdata = usb_get_serial_port_data(port);
-
- for (i = 0; i < N_OUT_URB; i++) {
- this_urb = portdata->out_urbs[i];
- if (this_urb && !test_bit(i, &portdata->out_busy))
- data_len += OUT_BUFLEN;
- }
-
- dbg("%s: %d", __func__, data_len);
- return data_len;
-}
-
-static int option_chars_in_buffer(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct option_port_private *portdata;
- int i;
- int data_len = 0;
- struct urb *this_urb;
-
- portdata = usb_get_serial_port_data(port);
-
- for (i = 0; i < N_OUT_URB; i++) {
- this_urb = portdata->out_urbs[i];
- /* FIXME: This locking is insufficient as this_urb may
- go unused during the test */
- if (this_urb && test_bit(i, &portdata->out_busy))
- data_len += this_urb->transfer_buffer_length;
- }
- dbg("%s: %d", __func__, data_len);
- return data_len;
-}
-
-static int option_open(struct tty_struct *tty, struct usb_serial_port *port)
-{
- struct option_port_private *portdata;
- struct option_intf_private *intfdata;
- struct usb_serial *serial = port->serial;
- int i, err;
- struct urb *urb;
-
- portdata = usb_get_serial_port_data(port);
- intfdata = serial->private;
-
- dbg("%s", __func__);
-
- /* Start reading from the IN endpoint */
- for (i = 0; i < N_IN_URB; i++) {
- urb = portdata->in_urbs[i];
- if (!urb)
- continue;
- err = usb_submit_urb(urb, GFP_KERNEL);
- if (err) {
- dbg("%s: submit urb %d failed (%d) %d",
- __func__, i, err,
- urb->transfer_buffer_length);
- }
- }
-
- option_send_setup(port);
-
- serial->interface->needs_remote_wakeup = 1;
- spin_lock_irq(&intfdata->susp_lock);
- portdata->opened = 1;
- spin_unlock_irq(&intfdata->susp_lock);
- usb_autopm_put_interface(serial->interface);
-
- return 0;
-}
-
-static void option_dtr_rts(struct usb_serial_port *port, int on)
-{
- struct usb_serial *serial = port->serial;
- struct option_port_private *portdata;
-
- dbg("%s", __func__);
- portdata = usb_get_serial_port_data(port);
- mutex_lock(&serial->disc_mutex);
- portdata->rts_state = on;
- portdata->dtr_state = on;
- if (serial->dev)
- option_send_setup(port);
- mutex_unlock(&serial->disc_mutex);
-}
-
-
-static void option_close(struct usb_serial_port *port)
-{
- int i;
- struct usb_serial *serial = port->serial;
- struct option_port_private *portdata;
- struct option_intf_private *intfdata = port->serial->private;
-
- dbg("%s", __func__);
- portdata = usb_get_serial_port_data(port);
-
- if (serial->dev) {
- /* Stop reading/writing urbs */
- spin_lock_irq(&intfdata->susp_lock);
- portdata->opened = 0;
- spin_unlock_irq(&intfdata->susp_lock);
-
- for (i = 0; i < N_IN_URB; i++)
- usb_kill_urb(portdata->in_urbs[i]);
- for (i = 0; i < N_OUT_URB; i++)
- usb_kill_urb(portdata->out_urbs[i]);
- usb_autopm_get_interface(serial->interface);
- serial->interface->needs_remote_wakeup = 0;
- }
-}
-
-/* Helper functions used by option_setup_urbs */
-static struct urb *option_setup_urb(struct usb_serial *serial, int endpoint,
- int dir, void *ctx, char *buf, int len,
- void (*callback)(struct urb *))
-{
- struct urb *urb;
-
- if (endpoint == -1)
- return NULL; /* endpoint not needed */
-
- urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
- if (urb == NULL) {
- dbg("%s: alloc for endpoint %d failed.", __func__, endpoint);
- return NULL;
- }
-
- /* Fill URB using supplied data. */
- usb_fill_bulk_urb(urb, serial->dev,
- usb_sndbulkpipe(serial->dev, endpoint) | dir,
- buf, len, callback, ctx);
-
- return urb;
-}
-
-/* Setup urbs */
-static void option_setup_urbs(struct usb_serial *serial)
-{
- int i, j;
- struct usb_serial_port *port;
- struct option_port_private *portdata;
-
- dbg("%s", __func__);
-
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- portdata = usb_get_serial_port_data(port);
-
- /* Do indat endpoints first */
- for (j = 0; j < N_IN_URB; ++j) {
- portdata->in_urbs[j] = option_setup_urb(serial,
- port->bulk_in_endpointAddress,
- USB_DIR_IN, port,
- portdata->in_buffer[j],
- IN_BUFLEN, option_indat_callback);
- }
-
- /* outdat endpoints */
- for (j = 0; j < N_OUT_URB; ++j) {
- portdata->out_urbs[j] = option_setup_urb(serial,
- port->bulk_out_endpointAddress,
- USB_DIR_OUT, port,
- portdata->out_buffer[j],
- OUT_BUFLEN, option_outdat_callback);
- }
- }
-}
-
-
/** send RTS/DTR state to the port.
*
* This is exactly the same as SET_CONTROL_LINE_STATE from the PSTN
@@ -1316,15 +1104,16 @@ static void option_setup_urbs(struct usb_serial *serial)
static int option_send_setup(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
- struct option_intf_private *intfdata =
- (struct option_intf_private *) serial->private;
+ struct usb_wwan_intf_private *intfdata =
+ (struct usb_wwan_intf_private *) serial->private;
struct option_port_private *portdata;
int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
int val = 0;
dbg("%s", __func__);
- if (is_blacklisted(ifNum, intfdata->blacklist_info) ==
- OPTION_BLACKLIST_SENDSETUP) {
+ if (is_blacklisted(ifNum,
+ (struct option_blacklist_info *) intfdata->private)
+ == OPTION_BLACKLIST_SENDSETUP) {
dbg("No send_setup on blacklisted interface #%d\n", ifNum);
return -EIO;
}
@@ -1341,224 +1130,6 @@ static int option_send_setup(struct usb_serial_port *port)
0x22, 0x21, val, ifNum, NULL, 0, USB_CTRL_SET_TIMEOUT);
}
-static int option_startup(struct usb_serial *serial)
-{
- int i, j, err;
- struct usb_serial_port *port;
- struct option_port_private *portdata;
- u8 *buffer;
-
- dbg("%s", __func__);
-
- /* Now setup per port private data */
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
- if (!portdata) {
- dbg("%s: kmalloc for option_port_private (%d) failed!.",
- __func__, i);
- return 1;
- }
- init_usb_anchor(&portdata->delayed);
-
- for (j = 0; j < N_IN_URB; j++) {
- buffer = (u8 *)__get_free_page(GFP_KERNEL);
- if (!buffer)
- goto bail_out_error;
- portdata->in_buffer[j] = buffer;
- }
-
- for (j = 0; j < N_OUT_URB; j++) {
- buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
- if (!buffer)
- goto bail_out_error2;
- portdata->out_buffer[j] = buffer;
- }
-
- usb_set_serial_port_data(port, portdata);
-
- if (!port->interrupt_in_urb)
- continue;
- err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
- if (err)
- dbg("%s: submit irq_in urb failed %d",
- __func__, err);
- }
- option_setup_urbs(serial);
- return 0;
-
-bail_out_error2:
- for (j = 0; j < N_OUT_URB; j++)
- kfree(portdata->out_buffer[j]);
-bail_out_error:
- for (j = 0; j < N_IN_URB; j++)
- if (portdata->in_buffer[j])
- free_page((unsigned long)portdata->in_buffer[j]);
- kfree(portdata);
- return 1;
-}
-
-static void stop_read_write_urbs(struct usb_serial *serial)
-{
- int i, j;
- struct usb_serial_port *port;
- struct option_port_private *portdata;
-
- /* Stop reading/writing urbs */
- for (i = 0; i < serial->num_ports; ++i) {
- port = serial->port[i];
- portdata = usb_get_serial_port_data(port);
- for (j = 0; j < N_IN_URB; j++)
- usb_kill_urb(portdata->in_urbs[j]);
- for (j = 0; j < N_OUT_URB; j++)
- usb_kill_urb(portdata->out_urbs[j]);
- }
-}
-
-static void option_disconnect(struct usb_serial *serial)
-{
- dbg("%s", __func__);
-
- stop_read_write_urbs(serial);
-}
-
-static void option_release(struct usb_serial *serial)
-{
- int i, j;
- struct usb_serial_port *port;
- struct option_port_private *portdata;
-
- dbg("%s", __func__);
-
- /* Now free them */
- for (i = 0; i < serial->num_ports; ++i) {
- port = serial->port[i];
- portdata = usb_get_serial_port_data(port);
-
- for (j = 0; j < N_IN_URB; j++) {
- if (portdata->in_urbs[j]) {
- usb_free_urb(portdata->in_urbs[j]);
- free_page((unsigned long)
- portdata->in_buffer[j]);
- portdata->in_urbs[j] = NULL;
- }
- }
- for (j = 0; j < N_OUT_URB; j++) {
- if (portdata->out_urbs[j]) {
- usb_free_urb(portdata->out_urbs[j]);
- kfree(portdata->out_buffer[j]);
- portdata->out_urbs[j] = NULL;
- }
- }
- }
-
- /* Now free per port private data */
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- kfree(usb_get_serial_port_data(port));
- }
-}
-
-#ifdef CONFIG_PM
-static int option_suspend(struct usb_serial *serial, pm_message_t message)
-{
- struct option_intf_private *intfdata = serial->private;
- int b;
-
- dbg("%s entered", __func__);
-
- if (message.event & PM_EVENT_AUTO) {
- spin_lock_irq(&intfdata->susp_lock);
- b = intfdata->in_flight;
- spin_unlock_irq(&intfdata->susp_lock);
-
- if (b)
- return -EBUSY;
- }
-
- spin_lock_irq(&intfdata->susp_lock);
- intfdata->suspended = 1;
- spin_unlock_irq(&intfdata->susp_lock);
- stop_read_write_urbs(serial);
-
- return 0;
-}
-
-static void play_delayed(struct usb_serial_port *port)
-{
- struct option_intf_private *data;
- struct option_port_private *portdata;
- struct urb *urb;
- int err;
-
- portdata = usb_get_serial_port_data(port);
- data = port->serial->private;
- while ((urb = usb_get_from_anchor(&portdata->delayed))) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (!err)
- data->in_flight++;
- }
-}
-
-static int option_resume(struct usb_serial *serial)
-{
- int i, j;
- struct usb_serial_port *port;
- struct option_intf_private *intfdata = serial->private;
- struct option_port_private *portdata;
- struct urb *urb;
- int err = 0;
-
- dbg("%s entered", __func__);
- /* get the interrupt URBs resubmitted unconditionally */
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- if (!port->interrupt_in_urb) {
- dbg("%s: No interrupt URB for port %d", __func__, i);
- continue;
- }
- err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
- dbg("Submitted interrupt URB for port %d (result %d)", i, err);
- if (err < 0) {
- err("%s: Error %d for interrupt URB of port%d",
- __func__, err, i);
- goto err_out;
- }
- }
-
- for (i = 0; i < serial->num_ports; i++) {
- /* walk all ports */
- port = serial->port[i];
- portdata = usb_get_serial_port_data(port);
-
- /* skip closed ports */
- spin_lock_irq(&intfdata->susp_lock);
- if (!portdata->opened) {
- spin_unlock_irq(&intfdata->susp_lock);
- continue;
- }
-
- for (j = 0; j < N_IN_URB; j++) {
- urb = portdata->in_urbs[j];
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0) {
- err("%s: Error %d for bulk URB %d",
- __func__, err, i);
- spin_unlock_irq(&intfdata->susp_lock);
- goto err_out;
- }
- }
- play_delayed(port);
- spin_unlock_irq(&intfdata->susp_lock);
- }
- spin_lock_irq(&intfdata->susp_lock);
- intfdata->suspended = 0;
- spin_unlock_irq(&intfdata->susp_lock);
-err_out:
- return err;
-}
-#endif
-
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
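The option.c hunks above strip the driver down to its ID tables plus option_send_setup() and delegate all tty and URB handling to the shared usb-wwan layer. A condensed sketch of that wiring, using only the usb_wwan_* entry points and usb_wwan_intf_private fields visible in this diff (the skeleton_ names are placeholders), looks like:

#include "usb-wwan.h"

static int skeleton_probe(struct usb_serial *serial,
			  const struct usb_device_id *id)
{
	struct usb_wwan_intf_private *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	spin_lock_init(&data->susp_lock);
	data->send_setup = option_send_setup;	  /* driver-specific DTR/RTS hook */
	data->private = (void *)id->driver_info; /* per-device blacklist info */
	serial->private = data;
	return 0;
}

static struct usb_serial_driver skeleton_1port_device = {
	/* .driver, .id_table, .num_ports, .probe as before */
	.open		 = usb_wwan_open,
	.close		 = usb_wwan_close,
	.dtr_rts	 = usb_wwan_dtr_rts,
	.write		 = usb_wwan_write,
	.write_room	 = usb_wwan_write_room,
	.chars_in_buffer = usb_wwan_chars_in_buffer,
	.attach		 = usb_wwan_startup,
	.disconnect	 = usb_wwan_disconnect,
	.release	 = usb_wwan_release,
};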
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index deeacde..e199b0f 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -51,12 +51,13 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
+#include <linux/kfifo.h>
#include "oti6858.h"
#define OTI6858_DESCRIPTION \
"Ours Technology Inc. OTi-6858 USB to serial adapter driver"
#define OTI6858_AUTHOR "Tomasz Michal Lukaszewski <FIXME@FIXME>"
-#define OTI6858_VERSION "0.1"
+#define OTI6858_VERSION "0.2"
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(OTI6858_VENDOR_ID, OTI6858_PRODUCT_ID) },
@@ -75,18 +76,6 @@ static struct usb_driver oti6858_driver = {
static int debug;
-
-/* buffering code, copied from pl2303 driver */
-#define PL2303_BUF_SIZE 1024
-#define PL2303_TMP_BUF_SIZE 1024
-
-struct oti6858_buf {
- unsigned int buf_size;
- char *buf_buf;
- char *buf_get;
- char *buf_put;
-};
-
/* requests */
#define OTI6858_REQ_GET_STATUS (USB_DIR_IN | USB_TYPE_VENDOR | 0x00)
#define OTI6858_REQ_T_GET_STATUS 0x01
@@ -161,18 +150,6 @@ static int oti6858_tiocmset(struct tty_struct *tty, struct file *file,
static int oti6858_startup(struct usb_serial *serial);
static void oti6858_release(struct usb_serial *serial);
-/* functions operating on buffers */
-static struct oti6858_buf *oti6858_buf_alloc(unsigned int size);
-static void oti6858_buf_free(struct oti6858_buf *pb);
-static void oti6858_buf_clear(struct oti6858_buf *pb);
-static unsigned int oti6858_buf_data_avail(struct oti6858_buf *pb);
-static unsigned int oti6858_buf_space_avail(struct oti6858_buf *pb);
-static unsigned int oti6858_buf_put(struct oti6858_buf *pb, const char *buf,
- unsigned int count);
-static unsigned int oti6858_buf_get(struct oti6858_buf *pb, char *buf,
- unsigned int count);
-
-
/* device info */
static struct usb_serial_driver oti6858_device = {
.driver = {
@@ -201,7 +178,6 @@ static struct usb_serial_driver oti6858_device = {
struct oti6858_private {
spinlock_t lock;
- struct oti6858_buf *buf;
struct oti6858_control_pkt status;
struct {
@@ -295,7 +271,7 @@ static void setup_line(struct work_struct *work)
}
}
-void send_data(struct work_struct *work)
+static void send_data(struct work_struct *work)
{
struct oti6858_private *priv = container_of(work,
struct oti6858_private, delayed_write_work.work);
@@ -314,9 +290,12 @@ void send_data(struct work_struct *work)
return;
}
priv->flags.write_urb_in_use = 1;
-
- count = oti6858_buf_data_avail(priv->buf);
spin_unlock_irqrestore(&priv->lock, flags);
+
+ spin_lock_irqsave(&port->lock, flags);
+ count = kfifo_len(&port->write_fifo);
+ spin_unlock_irqrestore(&port->lock, flags);
+
if (count > port->bulk_out_size)
count = port->bulk_out_size;
@@ -350,10 +329,9 @@ void send_data(struct work_struct *work)
return;
}
- spin_lock_irqsave(&priv->lock, flags);
- oti6858_buf_get(priv->buf, port->write_urb->transfer_buffer, count);
- spin_unlock_irqrestore(&priv->lock, flags);
-
+ count = kfifo_out_locked(&port->write_fifo,
+ port->write_urb->transfer_buffer,
+ count, &port->lock);
port->write_urb->transfer_buffer_length = count;
port->write_urb->dev = port->serial->dev;
result = usb_submit_urb(port->write_urb, GFP_NOIO);
@@ -376,11 +354,6 @@ static int oti6858_startup(struct usb_serial *serial)
priv = kzalloc(sizeof(struct oti6858_private), GFP_KERNEL);
if (!priv)
break;
- priv->buf = oti6858_buf_alloc(PL2303_BUF_SIZE);
- if (priv->buf == NULL) {
- kfree(priv);
- break;
- }
spin_lock_init(&priv->lock);
init_waitqueue_head(&priv->intr_wait);
@@ -397,7 +370,6 @@ static int oti6858_startup(struct usb_serial *serial)
for (--i; i >= 0; --i) {
priv = usb_get_serial_port_data(serial->port[i]);
- oti6858_buf_free(priv->buf);
kfree(priv);
usb_set_serial_port_data(serial->port[i], NULL);
}
@@ -407,17 +379,12 @@ static int oti6858_startup(struct usb_serial *serial)
static int oti6858_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
- struct oti6858_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
-
dbg("%s(port = %d, count = %d)", __func__, port->number, count);
if (!count)
return count;
- spin_lock_irqsave(&priv->lock, flags);
- count = oti6858_buf_put(priv->buf, buf, count);
- spin_unlock_irqrestore(&priv->lock, flags);
+ count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
return count;
}
@@ -425,15 +392,14 @@ static int oti6858_write(struct tty_struct *tty, struct usb_serial_port *port,
static int oti6858_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct oti6858_private *priv = usb_get_serial_port_data(port);
int room = 0;
unsigned long flags;
dbg("%s(port = %d)", __func__, port->number);
- spin_lock_irqsave(&priv->lock, flags);
- room = oti6858_buf_space_avail(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&port->lock, flags);
+ room = kfifo_avail(&port->write_fifo);
+ spin_unlock_irqrestore(&port->lock, flags);
return room;
}
@@ -441,15 +407,14 @@ static int oti6858_write_room(struct tty_struct *tty)
static int oti6858_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct oti6858_private *priv = usb_get_serial_port_data(port);
int chars = 0;
unsigned long flags;
dbg("%s(port = %d)", __func__, port->number);
- spin_lock_irqsave(&priv->lock, flags);
- chars = oti6858_buf_data_avail(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&port->lock, flags);
+ chars = kfifo_len(&port->write_fifo);
+ spin_unlock_irqrestore(&port->lock, flags);
return chars;
}
@@ -640,10 +605,10 @@ static void oti6858_close(struct usb_serial_port *port)
dbg("%s(port = %d)", __func__, port->number);
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&port->lock, flags);
/* clear out any remaining data in the buffer */
- oti6858_buf_clear(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
+ kfifo_reset_out(&port->write_fifo);
+ spin_unlock_irqrestore(&port->lock, flags);
dbg("%s(): after buf_clear()", __func__);
@@ -785,18 +750,12 @@ static int oti6858_ioctl(struct tty_struct *tty, struct file *file,
static void oti6858_release(struct usb_serial *serial)
{
- struct oti6858_private *priv;
int i;
dbg("%s()", __func__);
- for (i = 0; i < serial->num_ports; ++i) {
- priv = usb_get_serial_port_data(serial->port[i]);
- if (priv) {
- oti6858_buf_free(priv->buf);
- kfree(priv);
- }
- }
+ for (i = 0; i < serial->num_ports; ++i)
+ kfree(usb_get_serial_port_data(serial->port[i]));
}
static void oti6858_read_int_callback(struct urb *urb)
@@ -889,10 +848,14 @@ static void oti6858_read_int_callback(struct urb *urb)
}
} else if (!transient) {
unsigned long flags;
+ int count;
+
+ spin_lock_irqsave(&port->lock, flags);
+ count = kfifo_len(&port->write_fifo);
+ spin_unlock_irqrestore(&port->lock, flags);
spin_lock_irqsave(&priv->lock, flags);
- if (priv->flags.write_urb_in_use == 0
- && oti6858_buf_data_avail(priv->buf) != 0) {
+ if (priv->flags.write_urb_in_use == 0 && count != 0) {
schedule_delayed_work(&priv->delayed_write_work, 0);
resubmit = 0;
}
@@ -1014,165 +977,6 @@ static void oti6858_write_bulk_callback(struct urb *urb)
}
}
-
-/*
- * oti6858_buf_alloc
- *
- * Allocate a circular buffer and all associated memory.
- */
-static struct oti6858_buf *oti6858_buf_alloc(unsigned int size)
-{
- struct oti6858_buf *pb;
-
- if (size == 0)
- return NULL;
-
- pb = kmalloc(sizeof(struct oti6858_buf), GFP_KERNEL);
- if (pb == NULL)
- return NULL;
-
- pb->buf_buf = kmalloc(size, GFP_KERNEL);
- if (pb->buf_buf == NULL) {
- kfree(pb);
- return NULL;
- }
-
- pb->buf_size = size;
- pb->buf_get = pb->buf_put = pb->buf_buf;
-
- return pb;
-}
-
-/*
- * oti6858_buf_free
- *
- * Free the buffer and all associated memory.
- */
-static void oti6858_buf_free(struct oti6858_buf *pb)
-{
- if (pb) {
- kfree(pb->buf_buf);
- kfree(pb);
- }
-}
-
-/*
- * oti6858_buf_clear
- *
- * Clear out all data in the circular buffer.
- */
-static void oti6858_buf_clear(struct oti6858_buf *pb)
-{
- if (pb != NULL) {
- /* equivalent to a get of all data available */
- pb->buf_get = pb->buf_put;
- }
-}
-
-/*
- * oti6858_buf_data_avail
- *
- * Return the number of bytes of data available in the circular
- * buffer.
- */
-static unsigned int oti6858_buf_data_avail(struct oti6858_buf *pb)
-{
- if (pb == NULL)
- return 0;
- return (pb->buf_size + pb->buf_put - pb->buf_get) % pb->buf_size;
-}
-
-/*
- * oti6858_buf_space_avail
- *
- * Return the number of bytes of space available in the circular
- * buffer.
- */
-static unsigned int oti6858_buf_space_avail(struct oti6858_buf *pb)
-{
- if (pb == NULL)
- return 0;
- return (pb->buf_size + pb->buf_get - pb->buf_put - 1) % pb->buf_size;
-}
-
-/*
- * oti6858_buf_put
- *
- * Copy data data from a user buffer and put it into the circular buffer.
- * Restrict to the amount of space available.
- *
- * Return the number of bytes copied.
- */
-static unsigned int oti6858_buf_put(struct oti6858_buf *pb, const char *buf,
- unsigned int count)
-{
- unsigned int len;
-
- if (pb == NULL)
- return 0;
-
- len = oti6858_buf_space_avail(pb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = pb->buf_buf + pb->buf_size - pb->buf_put;
- if (count > len) {
- memcpy(pb->buf_put, buf, len);
- memcpy(pb->buf_buf, buf+len, count - len);
- pb->buf_put = pb->buf_buf + count - len;
- } else {
- memcpy(pb->buf_put, buf, count);
- if (count < len)
- pb->buf_put += count;
- else /* count == len */
- pb->buf_put = pb->buf_buf;
- }
-
- return count;
-}
-
-/*
- * oti6858_buf_get
- *
- * Get data from the circular buffer and copy to the given buffer.
- * Restrict to the amount of data available.
- *
- * Return the number of bytes copied.
- */
-static unsigned int oti6858_buf_get(struct oti6858_buf *pb, char *buf,
- unsigned int count)
-{
- unsigned int len;
-
- if (pb == NULL)
- return 0;
-
- len = oti6858_buf_data_avail(pb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = pb->buf_buf + pb->buf_size - pb->buf_get;
- if (count > len) {
- memcpy(buf, pb->buf_get, len);
- memcpy(buf+len, pb->buf_buf, count - len);
- pb->buf_get = pb->buf_buf + count - len;
- } else {
- memcpy(buf, pb->buf_get, count);
- if (count < len)
- pb->buf_get += count;
- else /* count == len */
- pb->buf_get = pb->buf_buf;
- }
-
- return count;
-}
-
/* module description and (de)initialization */
static int __init oti6858_init(void)
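The oti6858.c hunks above drop the hand-rolled pl2303-style ring buffer in favour of the kfifo already embedded in struct usb_serial_port (port->write_fifo), guarded by port->lock. A minimal sketch of that locked-kfifo pattern follows; the fifo_* helper names are illustrative, while the kfifo calls and fields are the ones used in the diff.

#include <linux/kfifo.h>
#include <linux/usb/serial.h>

/* enqueue data from write(); returns how much was actually buffered */
static int fifo_write(struct usb_serial_port *port,
		      const unsigned char *buf, int count)
{
	return kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
}

/* pull up to one URB worth of data out before submitting the write URB */
static int fifo_fill_urb(struct usb_serial_port *port)
{
	int count = kfifo_out_locked(&port->write_fifo,
				     port->write_urb->transfer_buffer,
				     port->bulk_out_size, &port->lock);

	port->write_urb->transfer_buffer_length = count;
	return count;
}

/* buffered-byte count for chars_in_buffer(); kfifo_avail() gives write_room() */
static int fifo_level(struct usb_serial_port *port)
{
	unsigned long flags;
	int chars;

	spin_lock_irqsave(&port->lock, flags);
	chars = kfifo_len(&port->write_fifo);
	spin_unlock_irqrestore(&port->lock, flags);
	return chars;
}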
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index c28b160..6b60018 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -40,16 +40,6 @@ static int debug;
#define PL2303_CLOSING_WAIT (30*HZ)
-#define PL2303_BUF_SIZE 1024
-#define PL2303_TMP_BUF_SIZE 1024
-
-struct pl2303_buf {
- unsigned int buf_size;
- char *buf_buf;
- char *buf_get;
- char *buf_put;
-};
-
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
@@ -157,173 +147,12 @@ enum pl2303_type {
struct pl2303_private {
spinlock_t lock;
- struct pl2303_buf *buf;
- int write_urb_in_use;
wait_queue_head_t delta_msr_wait;
u8 line_control;
u8 line_status;
enum pl2303_type type;
};
-/*
- * pl2303_buf_alloc
- *
- * Allocate a circular buffer and all associated memory.
- */
-static struct pl2303_buf *pl2303_buf_alloc(unsigned int size)
-{
- struct pl2303_buf *pb;
-
- if (size == 0)
- return NULL;
-
- pb = kmalloc(sizeof(struct pl2303_buf), GFP_KERNEL);
- if (pb == NULL)
- return NULL;
-
- pb->buf_buf = kmalloc(size, GFP_KERNEL);
- if (pb->buf_buf == NULL) {
- kfree(pb);
- return NULL;
- }
-
- pb->buf_size = size;
- pb->buf_get = pb->buf_put = pb->buf_buf;
-
- return pb;
-}
-
-/*
- * pl2303_buf_free
- *
- * Free the buffer and all associated memory.
- */
-static void pl2303_buf_free(struct pl2303_buf *pb)
-{
- if (pb) {
- kfree(pb->buf_buf);
- kfree(pb);
- }
-}
-
-/*
- * pl2303_buf_clear
- *
- * Clear out all data in the circular buffer.
- */
-static void pl2303_buf_clear(struct pl2303_buf *pb)
-{
- if (pb != NULL)
- pb->buf_get = pb->buf_put;
- /* equivalent to a get of all data available */
-}
-
-/*
- * pl2303_buf_data_avail
- *
- * Return the number of bytes of data available in the circular
- * buffer.
- */
-static unsigned int pl2303_buf_data_avail(struct pl2303_buf *pb)
-{
- if (pb == NULL)
- return 0;
-
- return (pb->buf_size + pb->buf_put - pb->buf_get) % pb->buf_size;
-}
-
-/*
- * pl2303_buf_space_avail
- *
- * Return the number of bytes of space available in the circular
- * buffer.
- */
-static unsigned int pl2303_buf_space_avail(struct pl2303_buf *pb)
-{
- if (pb == NULL)
- return 0;
-
- return (pb->buf_size + pb->buf_get - pb->buf_put - 1) % pb->buf_size;
-}
-
-/*
- * pl2303_buf_put
- *
- * Copy data data from a user buffer and put it into the circular buffer.
- * Restrict to the amount of space available.
- *
- * Return the number of bytes copied.
- */
-static unsigned int pl2303_buf_put(struct pl2303_buf *pb, const char *buf,
- unsigned int count)
-{
- unsigned int len;
-
- if (pb == NULL)
- return 0;
-
- len = pl2303_buf_space_avail(pb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = pb->buf_buf + pb->buf_size - pb->buf_put;
- if (count > len) {
- memcpy(pb->buf_put, buf, len);
- memcpy(pb->buf_buf, buf+len, count - len);
- pb->buf_put = pb->buf_buf + count - len;
- } else {
- memcpy(pb->buf_put, buf, count);
- if (count < len)
- pb->buf_put += count;
- else /* count == len */
- pb->buf_put = pb->buf_buf;
- }
-
- return count;
-}
-
-/*
- * pl2303_buf_get
- *
- * Get data from the circular buffer and copy to the given buffer.
- * Restrict to the amount of data available.
- *
- * Return the number of bytes copied.
- */
-static unsigned int pl2303_buf_get(struct pl2303_buf *pb, char *buf,
- unsigned int count)
-{
- unsigned int len;
-
- if (pb == NULL)
- return 0;
-
- len = pl2303_buf_data_avail(pb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = pb->buf_buf + pb->buf_size - pb->buf_get;
- if (count > len) {
- memcpy(buf, pb->buf_get, len);
- memcpy(buf+len, pb->buf_buf, count - len);
- pb->buf_get = pb->buf_buf + count - len;
- } else {
- memcpy(buf, pb->buf_get, count);
- if (count < len)
- pb->buf_get += count;
- else /* count == len */
- pb->buf_get = pb->buf_buf;
- }
-
- return count;
-}
-
static int pl2303_vendor_read(__u16 value, __u16 index,
struct usb_serial *serial, unsigned char *buf)
{
@@ -372,11 +201,6 @@ static int pl2303_startup(struct usb_serial *serial)
if (!priv)
goto cleanup;
spin_lock_init(&priv->lock);
- priv->buf = pl2303_buf_alloc(PL2303_BUF_SIZE);
- if (priv->buf == NULL) {
- kfree(priv);
- goto cleanup;
- }
init_waitqueue_head(&priv->delta_msr_wait);
priv->type = type;
usb_set_serial_port_data(serial->port[i], priv);
@@ -404,7 +228,6 @@ cleanup:
kfree(buf);
for (--i; i >= 0; --i) {
priv = usb_get_serial_port_data(serial->port[i]);
- pl2303_buf_free(priv->buf);
kfree(priv);
usb_set_serial_port_data(serial->port[i], NULL);
}
@@ -422,102 +245,6 @@ static int set_control_lines(struct usb_device *dev, u8 value)
return retval;
}
-static void pl2303_send(struct usb_serial_port *port)
-{
- int count, result;
- struct pl2303_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- if (priv->write_urb_in_use) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return;
- }
-
- count = pl2303_buf_get(priv->buf, port->write_urb->transfer_buffer,
- port->bulk_out_size);
-
- if (count == 0) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return;
- }
-
- priv->write_urb_in_use = 1;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- usb_serial_debug_data(debug, &port->dev, __func__, count,
- port->write_urb->transfer_buffer);
-
- port->write_urb->transfer_buffer_length = count;
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result) {
- dev_err(&port->dev, "%s - failed submitting write urb,"
- " error %d\n", __func__, result);
- priv->write_urb_in_use = 0;
- /* TODO: reschedule pl2303_send */
- }
-
- usb_serial_port_softint(port);
-}
-
-static int pl2303_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- struct pl2303_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
-
- dbg("%s - port %d, %d bytes", __func__, port->number, count);
-
- if (!count)
- return count;
-
- spin_lock_irqsave(&priv->lock, flags);
- count = pl2303_buf_put(priv->buf, buf, count);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- pl2303_send(port);
-
- return count;
-}
-
-static int pl2303_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct pl2303_private *priv = usb_get_serial_port_data(port);
- int room = 0;
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&priv->lock, flags);
- room = pl2303_buf_space_avail(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- dbg("%s - returns %d", __func__, room);
- return room;
-}
-
-static int pl2303_chars_in_buffer(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct pl2303_private *priv = usb_get_serial_port_data(port);
- int chars = 0;
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&priv->lock, flags);
- chars = pl2303_buf_data_avail(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- dbg("%s - returns %d", __func__, chars);
- return chars;
-}
-
static void pl2303_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
@@ -729,22 +456,10 @@ static void pl2303_dtr_rts(struct usb_serial_port *port, int on)
static void pl2303_close(struct usb_serial_port *port)
{
- struct pl2303_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
-
dbg("%s - port %d", __func__, port->number);
- spin_lock_irqsave(&priv->lock, flags);
- /* clear out any remaining data in the buffer */
- pl2303_buf_clear(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* shutdown our urbs */
- dbg("%s - shutting down urbs", __func__);
- usb_kill_urb(port->write_urb);
- usb_kill_urb(port->read_urb);
+ usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
-
}
static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
@@ -770,10 +485,8 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
pl2303_set_termios(tty, port, &tmp_termios);
dbg("%s - submitting read urb", __func__);
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
+ result = usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
if (result) {
- dev_err(&port->dev, "%s - failed submitting read urb,"
- " error %d\n", __func__, result);
pl2303_close(port);
return -EPROTO;
}
@@ -953,10 +666,7 @@ static void pl2303_release(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i) {
priv = usb_get_serial_port_data(serial->port[i]);
- if (priv) {
- pl2303_buf_free(priv->buf);
- kfree(priv);
- }
+ kfree(priv);
}
}
@@ -1037,13 +747,31 @@ exit:
__func__, retval);
}
-static void pl2303_push_data(struct tty_struct *tty,
- struct usb_serial_port *port, struct urb *urb,
- u8 line_status)
+static void pl2303_process_read_urb(struct urb *urb)
{
+ struct usb_serial_port *port = urb->context;
+ struct pl2303_private *priv = usb_get_serial_port_data(port);
+ struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
- /* get tty_flag from status */
char tty_flag = TTY_NORMAL;
+ unsigned long flags;
+ u8 line_status;
+ int i;
+
+ /* update line status */
+ spin_lock_irqsave(&priv->lock, flags);
+ line_status = priv->line_status;
+ priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ wake_up_interruptible(&priv->delta_msr_wait);
+
+ if (!urb->actual_length)
+ return;
+
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
+
/* break takes precedence over parity, */
/* which takes precedence over framing errors */
if (line_status & UART_BREAK_ERROR)
@@ -1058,107 +786,17 @@ static void pl2303_push_data(struct tty_struct *tty,
if (line_status & UART_OVERRUN_ERROR)
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- if (tty_flag == TTY_NORMAL && !(port->console && port->sysrq))
- tty_insert_flip_string(tty, data, urb->actual_length);
- else {
- int i;
+ if (port->port.console && port->sysrq) {
for (i = 0; i < urb->actual_length; ++i)
if (!usb_serial_handle_sysrq_char(tty, port, data[i]))
tty_insert_flip_char(tty, data[i], tty_flag);
+ } else {
+ tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ urb->actual_length);
}
- tty_flip_buffer_push(tty);
-}
-
-static void pl2303_read_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct pl2303_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
- unsigned long flags;
- int result;
- int status = urb->status;
- u8 line_status;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - urb status = %d", __func__, status);
- if (status == -EPROTO) {
- /* PL2303 mysteriously fails with -EPROTO reschedule
- * the read */
- dbg("%s - caught -EPROTO, resubmitting the urb",
- __func__);
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev, "%s - failed"
- " resubmitting read urb, error %d\n",
- __func__, result);
- return;
- }
- dbg("%s - unable to handle the error, exiting.", __func__);
- return;
- }
-
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, urb->transfer_buffer);
-
- spin_lock_irqsave(&priv->lock, flags);
- line_status = priv->line_status;
- priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
- spin_unlock_irqrestore(&priv->lock, flags);
- wake_up_interruptible(&priv->delta_msr_wait);
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- pl2303_push_data(tty, port, urb, line_status);
- }
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
- /* Schedule the next read _if_ we are still open */
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result && result != -EPERM)
- dev_err(&urb->dev->dev, "%s - failed resubmitting"
- " read urb, error %d\n", __func__, result);
-}
-
-static void pl2303_write_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct pl2303_private *priv = usb_get_serial_port_data(port);
- int result;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
-
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __func__,
- status);
- priv->write_urb_in_use = 0;
- return;
- default:
- /* error in the urb, so we have to resubmit it */
- dbg("%s - Overflow in write", __func__);
- dbg("%s - nonzero write bulk status received: %d", __func__,
- status);
- port->write_urb->transfer_buffer_length = 1;
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev, "%s - failed resubmitting write"
- " urb, error %d\n", __func__, result);
- else
- return;
- }
-
- priv->write_urb_in_use = 0;
-
- /* send any buffered data */
- pl2303_send(port);
}
/* All of the device info needed for the PL2303 SIO serial converter */
@@ -1170,21 +808,19 @@ static struct usb_serial_driver pl2303_device = {
.id_table = id_table,
.usb_driver = &pl2303_driver,
.num_ports = 1,
+ .bulk_in_size = 256,
+ .bulk_out_size = 256,
.open = pl2303_open,
.close = pl2303_close,
.dtr_rts = pl2303_dtr_rts,
.carrier_raised = pl2303_carrier_raised,
- .write = pl2303_write,
.ioctl = pl2303_ioctl,
.break_ctl = pl2303_break_ctl,
.set_termios = pl2303_set_termios,
.tiocmget = pl2303_tiocmget,
.tiocmset = pl2303_tiocmset,
- .read_bulk_callback = pl2303_read_bulk_callback,
+ .process_read_urb = pl2303_process_read_urb,
.read_int_callback = pl2303_read_int_callback,
- .write_bulk_callback = pl2303_write_bulk_callback,
- .write_room = pl2303_write_room,
- .chars_in_buffer = pl2303_chars_in_buffer,
.attach = pl2303_startup,
.release = pl2303_release,
};
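
The pl2303 hunks above drop the driver's private write ring buffer and its read/write bulk callbacks in favour of the shared usb-serial write path, keeping only a process_read_urb hook for line-status handling. A minimal sketch of that hook's contract, built only from helpers visible in the hunks (the function name is illustrative, not the driver's code):

#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static void example_process_read_urb(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;	/* set up by the core */
	struct tty_struct *tty;

	if (!urb->actual_length)
		return;

	tty = tty_port_tty_get(&port->port);
	if (!tty)
		return;

	/* pl2303 additionally maps UART status bits to TTY_* flags here */
	tty_insert_flip_string(tty, urb->transfer_buffer, urb->actual_length);
	tty_flip_buffer_push(tty);
	tty_kref_put(tty);
}
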
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 23c09b3..a871645 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -5,7 +5,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
*/
#define BENQ_VENDOR_ID 0x04a5
@@ -137,5 +137,5 @@
#define SANWA_PRODUCT_ID 0x0001
/* ADLINK ND-6530 RS232,RS485 and RS422 adapter */
-#define ADLINK_VENDOR_ID 0x0b63
-#define ADLINK_ND6530_PRODUCT_ID 0x6530
+#define ADLINK_VENDOR_ID 0x0b63
+#define ADLINK_ND6530_PRODUCT_ID 0x6530
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
index 7e3bea2..214a3e5 100644
--- a/drivers/usb/serial/qcaux.c
+++ b/drivers/usb/serial/qcaux.c
@@ -50,6 +50,10 @@
#define SANYO_VENDOR_ID 0x0474
#define SANYO_PRODUCT_KATANA_LX 0x0754 /* SCP-3800 (Katana LX) */
+/* Samsung devices */
+#define SAMSUNG_VENDOR_ID 0x04e8
+#define SAMSUNG_PRODUCT_U520 0x6640 /* SCH-U520 */
+
static struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) },
@@ -61,6 +65,7 @@ static struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 53a2d5a..04bb759 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -15,6 +15,8 @@
#include <linux/tty_flip.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
+#include <linux/slab.h>
+#include "usb-wwan.h"
#define DRIVER_AUTHOR "Qualcomm Inc"
#define DRIVER_DESC "Qualcomm USB Serial driver"
@@ -76,6 +78,8 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
{USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
+ {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */
+ {USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -92,6 +96,8 @@ static struct usb_driver qcdriver = {
static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
{
+ struct usb_wwan_intf_private *data;
+ struct usb_host_interface *intf = serial->interface->cur_altsetting;
int retval = -ENODEV;
__u8 nintf;
__u8 ifnum;
@@ -100,33 +106,45 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
nintf = serial->dev->actconfig->desc.bNumInterfaces;
dbg("Num Interfaces = %d", nintf);
- ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
+ ifnum = intf->desc.bInterfaceNumber;
dbg("This Interface = %d", ifnum);
+ data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_init(&data->susp_lock);
+
switch (nintf) {
case 1:
/* QDL mode */
- if (serial->interface->num_altsetting == 2) {
- struct usb_host_interface *intf;
-
+ /* Gobi 2000 has a single altsetting, older ones have two */
+ if (serial->interface->num_altsetting == 2)
intf = &serial->interface->altsetting[1];
- if (intf->desc.bNumEndpoints == 2) {
- if (usb_endpoint_is_bulk_in(&intf->endpoint[0].desc) &&
- usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) {
- dbg("QDL port found");
- retval = usb_set_interface(serial->dev, ifnum, 1);
- if (retval < 0) {
- dev_err(&serial->dev->dev,
- "Could not set interface, error %d\n",
- retval);
- retval = -ENODEV;
- }
- return retval;
- }
+ else if (serial->interface->num_altsetting > 2)
+ break;
+
+ if (intf->desc.bNumEndpoints == 2 &&
+ usb_endpoint_is_bulk_in(&intf->endpoint[0].desc) &&
+ usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) {
+ dbg("QDL port found");
+
+ if (serial->interface->num_altsetting == 1)
+ return 0;
+
+ retval = usb_set_interface(serial->dev, ifnum, 1);
+ if (retval < 0) {
+ dev_err(&serial->dev->dev,
+ "Could not set interface, error %d\n",
+ retval);
+ retval = -ENODEV;
}
+ return retval;
}
break;
+ case 3:
case 4:
/* Composite mode */
if (ifnum == 2) {
@@ -161,6 +179,18 @@ static struct usb_serial_driver qcdevice = {
.usb_driver = &qcdriver,
.num_ports = 1,
.probe = qcprobe,
+ .open = usb_wwan_open,
+ .close = usb_wwan_close,
+ .write = usb_wwan_write,
+ .write_room = usb_wwan_write_room,
+ .chars_in_buffer = usb_wwan_chars_in_buffer,
+ .attach = usb_wwan_startup,
+ .disconnect = usb_wwan_disconnect,
+ .release = usb_wwan_release,
+#ifdef CONFIG_PM
+ .suspend = usb_wwan_suspend,
+ .resume = usb_wwan_resume,
+#endif
};
static int __init qcinit(void)
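
The qcserial hunks above hand the data path over to the new usb_wwan core; the only driver-specific work left in probe is allocating the per-interface state those callbacks expect. A sketch of that allocation pattern, assuming a driver built on usb-wwan.h (function name illustrative):

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "usb-wwan.h"

static int example_wwan_probe(struct usb_serial *serial,
			      const struct usb_device_id *id)
{
	struct usb_wwan_intf_private *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	spin_lock_init(&data->susp_lock);
	serial->private = data;		/* read back by the usb_wwan_* calls */

	return 0;
}
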
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 43a0cad..a36e231 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -1,6 +1,7 @@
/*
* Safe Encapsulated USB Serial Driver
*
+ * Copyright (C) 2010 Johan Hovold <jhovold@gmail.com>
* Copyright (C) 2001 Lineo
* Copyright (C) 2001 Hewlett-Packard
*
@@ -84,8 +85,8 @@ static int debug;
static int safe = 1;
static int padded = CONFIG_USB_SERIAL_SAFE_PADDED;
-#define DRIVER_VERSION "v0.0b"
-#define DRIVER_AUTHOR "sl@lineo.com, tbr@lineo.com"
+#define DRIVER_VERSION "v0.1"
+#define DRIVER_AUTHOR "sl@lineo.com, tbr@lineo.com, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "USB Safe Encapsulated Serial"
MODULE_AUTHOR(DRIVER_AUTHOR);
@@ -212,191 +213,80 @@ static __u16 __inline__ fcs_compute10(unsigned char *sp, int len, __u16 fcs)
return fcs;
}
-static void safe_read_bulk_callback(struct urb *urb)
+static void safe_process_read_urb(struct urb *urb)
{
- struct usb_serial_port *port = urb->context;
+ struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
unsigned char length = urb->actual_length;
+ int actual_length;
struct tty_struct *tty;
- int result;
- int status = urb->status;
+ __u16 fcs;
- dbg("%s", __func__);
-
- if (status) {
- dbg("%s - nonzero read bulk status received: %d",
- __func__, status);
+ if (!length)
return;
- }
- dbg("safe_read_bulk_callback length: %d",
- port->read_urb->actual_length);
-#ifdef ECHO_RCV
- {
- int i;
- unsigned char *cp = port->read_urb->transfer_buffer;
- for (i = 0; i < port->read_urb->actual_length; i++) {
- if ((i % 32) == 0)
- printk("\nru[%02x] ", i);
- printk("%02x ", *cp++);
- }
- printk("\n");
- }
-#endif
tty = tty_port_tty_get(&port->port);
- if (safe) {
- __u16 fcs;
- fcs = fcs_compute10(data, length, CRC10_INITFCS);
- if (!fcs) {
- int actual_length = data[length - 2] >> 2;
- if (actual_length <= (length - 2)) {
- dev_info(&urb->dev->dev, "%s - actual: %d\n",
- __func__, actual_length);
- tty_insert_flip_string(tty,
- data, actual_length);
- tty_flip_buffer_push(tty);
- } else {
- dev_err(&port->dev,
- "%s - inconsistent lengths %d:%d\n",
- __func__, actual_length, length);
- }
- } else {
- dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
- }
- } else {
- tty_insert_flip_string(tty, data, length);
- tty_flip_buffer_push(tty);
- }
- tty_kref_put(tty);
-
- /* Continue trying to always read */
- usb_fill_bulk_urb(urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- urb->transfer_buffer, urb->transfer_buffer_length,
- safe_read_bulk_callback, port);
-
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&port->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- /* FIXME: Need a mechanism to retry later if this happens */
-}
-
-static int safe_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- unsigned char *data;
- int result;
- int i;
- int packet_length;
-
- dbg("safe_write port: %p %d urb: %p count: %d",
- port, port->number, port->write_urb, count);
-
- if (!port->write_urb) {
- dbg("%s - write urb NULL", __func__);
- return 0;
- }
-
- dbg("safe_write write_urb: %d transfer_buffer_length",
- port->write_urb->transfer_buffer_length);
-
- if (!port->write_urb->transfer_buffer_length) {
- dbg("%s - write urb transfer_buffer_length zero", __func__);
- return 0;
- }
- if (count == 0) {
- dbg("%s - write request of 0 bytes", __func__);
- return 0;
- }
- spin_lock_bh(&port->lock);
- if (port->write_urb_busy) {
- spin_unlock_bh(&port->lock);
- dbg("%s - already writing", __func__);
- return 0;
- }
- port->write_urb_busy = 1;
- spin_unlock_bh(&port->lock);
-
- packet_length = port->bulk_out_size; /* get max packetsize */
-
- i = packet_length - (safe ? 2 : 0); /* get bytes to send */
- count = (count > i) ? i : count;
-
-
- /* get the data into the transfer buffer */
- data = port->write_urb->transfer_buffer;
- memset(data, '0', packet_length);
-
- memcpy(data, buf, count);
-
- if (safe) {
- __u16 fcs;
-
- /* pad if necessary */
- if (!padded)
- packet_length = count + 2;
- /* set count */
- data[packet_length - 2] = count << 2;
- data[packet_length - 1] = 0;
+ if (!tty)
+ return;
- /* compute fcs and insert into trailer */
- fcs = fcs_compute10(data, packet_length, CRC10_INITFCS);
- data[packet_length - 2] |= fcs >> 8;
- data[packet_length - 1] |= fcs & 0xff;
+ if (!safe)
+ goto out;
- /* set length to send */
- port->write_urb->transfer_buffer_length = packet_length;
- } else {
- port->write_urb->transfer_buffer_length = count;
+ fcs = fcs_compute10(data, length, CRC10_INITFCS);
+ if (fcs) {
+ dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
+ goto err;
}
- usb_serial_debug_data(debug, &port->dev, __func__, count,
- port->write_urb->transfer_buffer);
-#ifdef ECHO_TX
- {
- int i;
- unsigned char *cp = port->write_urb->transfer_buffer;
- for (i = 0; i < port->write_urb->transfer_buffer_length; i++) {
- if ((i % 32) == 0)
- printk("\nsu[%02x] ", i);
- printk("%02x ", *cp++);
- }
- printk("\n");
- }
-#endif
- port->write_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->write_urb, GFP_KERNEL);
- if (result) {
- port->write_urb_busy = 0;
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, result);
- return 0;
+ actual_length = data[length - 2] >> 2;
+ if (actual_length > (length - 2)) {
+ dev_err(&port->dev, "%s - inconsistent lengths %d:%d\n",
+ __func__, actual_length, length);
+ goto err;
}
- dbg("%s urb: %p submitted", __func__, port->write_urb);
-
- return count;
+ dev_info(&urb->dev->dev, "%s - actual: %d\n", __func__, actual_length);
+ length = actual_length;
+out:
+ tty_insert_flip_string(tty, data, length);
+ tty_flip_buffer_push(tty);
+err:
+ tty_kref_put(tty);
}
-static int safe_write_room(struct tty_struct *tty)
+static int safe_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size)
{
- struct usb_serial_port *port = tty->driver_data;
- int room = 0; /* Default: no room */
- unsigned long flags;
+ unsigned char *buf = dest;
+ int count;
+ int trailer_len;
+ int pkt_len;
+ __u16 fcs;
+
+ trailer_len = safe ? 2 : 0;
+
+ count = kfifo_out_locked(&port->write_fifo, buf, size - trailer_len,
+ &port->lock);
+ if (!safe)
+ return count;
+
+ /* pad if necessary */
+ if (padded) {
+ pkt_len = size;
+ memset(buf + count, '0', pkt_len - count - trailer_len);
+ } else {
+ pkt_len = count + trailer_len;
+ }
- dbg("%s", __func__);
+ /* set count */
+ buf[pkt_len - 2] = count << 2;
+ buf[pkt_len - 1] = 0;
- spin_lock_irqsave(&port->lock, flags);
- if (port->write_urb_busy)
- room = port->bulk_out_size - (safe ? 2 : 0);
- spin_unlock_irqrestore(&port->lock, flags);
+ /* compute fcs and insert into trailer */
+ fcs = fcs_compute10(buf, pkt_len, CRC10_INITFCS);
+ buf[pkt_len - 2] |= fcs >> 8;
+ buf[pkt_len - 1] |= fcs & 0xff;
- if (room)
- dbg("safe_write_room returns %d", room);
- return room;
+ return pkt_len;
}
static int safe_startup(struct usb_serial *serial)
@@ -421,9 +311,8 @@ static struct usb_serial_driver safe_device = {
.id_table = id_table,
.usb_driver = &safe_driver,
.num_ports = 1,
- .write = safe_write,
- .write_room = safe_write_room,
- .read_bulk_callback = safe_read_bulk_callback,
+ .process_read_urb = safe_process_read_urb,
+ .prepare_write_buffer = safe_prepare_write_buffer,
.attach = safe_startup,
};
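
safe_serial now frames outgoing data in prepare_write_buffer instead of owning the whole write path: the core hands the hook its bulk-out buffer and expects back the number of bytes to transmit. A minimal sketch of that contract without the CRC10 trailer (names illustrative):

#include <linux/kfifo.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static int example_prepare_write_buffer(struct usb_serial_port *port,
					 void *dest, size_t size)
{
	/* drain up to 'size' bytes queued by the tty layer into the URB buffer */
	return kfifo_out_locked(&port->write_fifo, dest, size, &port->lock);
}
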
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 5d39191..329d311 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -1,6 +1,7 @@
/*
* spcp8x5 USB to serial adaptor driver
*
+ * Copyright (C) 2010 Johan Hovold (jhovold@gmail.com)
* Copyright (C) 2006 Linxb (xubin.lin@worldplus.com.cn)
* Copyright (C) 2006 S1 Corp.
*
@@ -29,7 +30,7 @@
/* Version Information */
-#define DRIVER_VERSION "v0.04"
+#define DRIVER_VERSION "v0.10"
#define DRIVER_DESC "SPCP8x5 USB to serial adaptor driver"
static int debug;
@@ -64,11 +65,6 @@ struct spcp8x5_usb_ctrl_arg {
u16 length;
};
-/* wait 30s before close */
-#define SPCP8x5_CLOSING_WAIT (30*HZ)
-
-#define SPCP8x5_BUF_SIZE 1024
-
/* spcp8x5 spec register define */
#define MCR_CONTROL_LINE_RTS 0x02
@@ -155,133 +151,6 @@ enum spcp8x5_type {
SPCP835_TYPE,
};
-/* 1st in 1st out buffer 4 driver */
-struct ringbuf {
- unsigned int buf_size;
- char *buf_buf;
- char *buf_get;
- char *buf_put;
-};
-
-/* alloc the ring buf and alloc the buffer itself */
-static inline struct ringbuf *alloc_ringbuf(unsigned int size)
-{
- struct ringbuf *pb;
-
- if (size == 0)
- return NULL;
-
- pb = kmalloc(sizeof(*pb), GFP_KERNEL);
- if (pb == NULL)
- return NULL;
-
- pb->buf_buf = kmalloc(size, GFP_KERNEL);
- if (pb->buf_buf == NULL) {
- kfree(pb);
- return NULL;
- }
-
- pb->buf_size = size;
- pb->buf_get = pb->buf_put = pb->buf_buf;
-
- return pb;
-}
-
-/* free the ring buf and the buffer itself */
-static inline void free_ringbuf(struct ringbuf *pb)
-{
- if (pb != NULL) {
- kfree(pb->buf_buf);
- kfree(pb);
- }
-}
-
-/* clear pipo , juest repoint the pointer here */
-static inline void clear_ringbuf(struct ringbuf *pb)
-{
- if (pb != NULL)
- pb->buf_get = pb->buf_put;
-}
-
-/* get the number of data in the pipo */
-static inline unsigned int ringbuf_avail_data(struct ringbuf *pb)
-{
- if (pb == NULL)
- return 0;
- return (pb->buf_size + pb->buf_put - pb->buf_get) % pb->buf_size;
-}
-
-/* get the number of space in the pipo */
-static inline unsigned int ringbuf_avail_space(struct ringbuf *pb)
-{
- if (pb == NULL)
- return 0;
- return (pb->buf_size + pb->buf_get - pb->buf_put - 1) % pb->buf_size;
-}
-
-/* put count data into pipo */
-static unsigned int put_ringbuf(struct ringbuf *pb, const char *buf,
- unsigned int count)
-{
- unsigned int len;
-
- if (pb == NULL)
- return 0;
-
- len = ringbuf_avail_space(pb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = pb->buf_buf + pb->buf_size - pb->buf_put;
- if (count > len) {
- memcpy(pb->buf_put, buf, len);
- memcpy(pb->buf_buf, buf+len, count - len);
- pb->buf_put = pb->buf_buf + count - len;
- } else {
- memcpy(pb->buf_put, buf, count);
- if (count < len)
- pb->buf_put += count;
- else /* count == len */
- pb->buf_put = pb->buf_buf;
- }
- return count;
-}
-
-/* get count data from pipo */
-static unsigned int get_ringbuf(struct ringbuf *pb, char *buf,
- unsigned int count)
-{
- unsigned int len;
-
- if (pb == NULL || buf == NULL)
- return 0;
-
- len = ringbuf_avail_data(pb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = pb->buf_buf + pb->buf_size - pb->buf_get;
- if (count > len) {
- memcpy(buf, pb->buf_get, len);
- memcpy(buf+len, pb->buf_buf, count - len);
- pb->buf_get = pb->buf_buf + count - len;
- } else {
- memcpy(buf, pb->buf_get, count);
- if (count < len)
- pb->buf_get += count;
- else /* count == len */
- pb->buf_get = pb->buf_buf;
- }
-
- return count;
-}
-
static struct usb_driver spcp8x5_driver = {
.name = "spcp8x5",
.probe = usb_serial_probe,
@@ -293,8 +162,6 @@ static struct usb_driver spcp8x5_driver = {
struct spcp8x5_private {
spinlock_t lock;
- struct ringbuf *buf;
- int write_urb_in_use;
enum spcp8x5_type type;
wait_queue_head_t delta_msr_wait;
u8 line_control;
@@ -330,24 +197,15 @@ static int spcp8x5_startup(struct usb_serial *serial)
goto cleanup;
spin_lock_init(&priv->lock);
- priv->buf = alloc_ringbuf(SPCP8x5_BUF_SIZE);
- if (priv->buf == NULL)
- goto cleanup2;
-
init_waitqueue_head(&priv->delta_msr_wait);
priv->type = type;
usb_set_serial_port_data(serial->port[i] , priv);
-
}
return 0;
-
-cleanup2:
- kfree(priv);
cleanup:
for (--i; i >= 0; --i) {
priv = usb_get_serial_port_data(serial->port[i]);
- free_ringbuf(priv->buf);
kfree(priv);
usb_set_serial_port_data(serial->port[i] , NULL);
}
@@ -358,15 +216,9 @@ cleanup:
static void spcp8x5_release(struct usb_serial *serial)
{
int i;
- struct spcp8x5_private *priv;
- for (i = 0; i < serial->num_ports; i++) {
- priv = usb_get_serial_port_data(serial->port[i]);
- if (priv) {
- free_ringbuf(priv->buf);
- kfree(priv);
- }
- }
+ for (i = 0; i < serial->num_ports; i++)
+ kfree(usb_get_serial_port_data(serial->port[i]));
}
/* set the modem control line of the device.
@@ -470,33 +322,6 @@ static void spcp8x5_dtr_rts(struct usb_serial_port *port, int on)
spcp8x5_set_ctrlLine(port->serial->dev, control , priv->type);
}
-/* close the serial port. We should wait for data sending to device 1st and
- * then kill all urb. */
-static void spcp8x5_close(struct usb_serial_port *port)
-{
- struct spcp8x5_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
- int result;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&priv->lock, flags);
- /* clear out any remaining data in the buffer */
- clear_ringbuf(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* kill urb */
- if (port->write_urb != NULL) {
- result = usb_unlink_urb(port->write_urb);
- if (result)
- dev_dbg(&port->dev,
- "usb_unlink_urb(write_urb) = %d\n", result);
- }
- result = usb_unlink_urb(port->read_urb);
- if (result)
- dev_dbg(&port->dev, "usb_unlink_urb(read_urb) = %d\n", result);
-}
-
static void spcp8x5_init_termios(struct tty_struct *tty)
{
/* for the 1st time call this function */
@@ -620,7 +445,7 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
}
/* open the serial port. do some usb system call. set termios and get the line
- * status of the device. then submit the read urb */
+ * status of the device. */
static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct ktermios tmp_termios;
@@ -655,52 +480,21 @@ static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
priv->line_status = status & 0xf0 ;
spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - submitting read urb", __func__);
- port->read_urb->dev = serial->dev;
- ret = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (ret) {
- spcp8x5_close(port);
- return -EPROTO;
- }
port->port.drain_delay = 256;
- return 0;
+
+ return usb_serial_generic_open(tty, port);
}
-/* bulk read call back function. check the status of the urb. if transfer
- * failed return. then update the status and the tty send data to tty subsys.
- * submit urb again.
- */
-static void spcp8x5_read_bulk_callback(struct urb *urb)
+static void spcp8x5_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
- int result = urb->status;
u8 status;
char tty_flag;
- dev_dbg(&port->dev, "start, result = %d, urb->actual_length = %d\n,",
- result, urb->actual_length);
-
- /* check the urb status */
- if (result) {
- if (result == -EPROTO) {
- /* spcp8x5 mysteriously fails with -EPROTO */
- /* reschedule the read */
- urb->dev = port->serial->dev;
- result = usb_submit_urb(urb , GFP_ATOMIC);
- if (result)
- dev_dbg(&port->dev,
- "failed submitting read urb %d\n",
- result);
- return;
- }
- dev_dbg(&port->dev, "unable to handle the error, exiting.\n");
- return;
- }
-
/* get tty_flag from status */
tty_flag = TTY_NORMAL;
@@ -711,141 +505,33 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
/* wake up the wait for termios */
wake_up_interruptible(&priv->delta_msr_wait);
- /* break takes precedence over parity, which takes precedence over
- * framing errors */
- if (status & UART_BREAK_ERROR)
- tty_flag = TTY_BREAK;
- else if (status & UART_PARITY_ERROR)
- tty_flag = TTY_PARITY;
- else if (status & UART_FRAME_ERROR)
- tty_flag = TTY_FRAME;
- dev_dbg(&port->dev, "tty_flag = %d\n", tty_flag);
-
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- /* overrun is special, not associated with a char */
- if (status & UART_OVERRUN_ERROR)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- tty_insert_flip_string_fixed_flag(tty, data,
- urb->actual_length, tty_flag);
- tty_flip_buffer_push(tty);
- }
- tty_kref_put(tty);
-
- /* Schedule the next read */
- urb->dev = port->serial->dev;
- result = usb_submit_urb(urb , GFP_ATOMIC);
- if (result)
- dev_dbg(&port->dev, "failed submitting read urb %d\n", result);
-}
-
-/* get data from ring buffer and then write to usb bus */
-static void spcp8x5_send(struct usb_serial_port *port)
-{
- int count, result;
- struct spcp8x5_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- if (priv->write_urb_in_use) {
- dev_dbg(&port->dev, "write urb still used\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ if (!urb->actual_length)
return;
- }
-
- /* send the 1st urb for writting */
- memset(port->write_urb->transfer_buffer , 0x00 , port->bulk_out_size);
- count = get_ringbuf(priv->buf, port->write_urb->transfer_buffer,
- port->bulk_out_size);
- if (count == 0) {
- spin_unlock_irqrestore(&priv->lock, flags);
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
return;
- }
-
- /* update the urb status */
- priv->write_urb_in_use = 1;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- port->write_urb->transfer_buffer_length = count;
- port->write_urb->dev = port->serial->dev;
-
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result) {
- dev_dbg(&port->dev, "failed submitting write urb, error %d\n",
- result);
- priv->write_urb_in_use = 0;
- /* TODO: reschedule spcp8x5_send */
- }
-
-
- schedule_work(&port->work);
-}
-/* this is the call back function for write urb. NOTE we should not sleep in
- * this routine. check the urb return code and then submit the write urb again
- * to hold the write loop */
-static void spcp8x5_write_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct spcp8x5_private *priv = usb_get_serial_port_data(port);
- int result;
- int status = urb->status;
+ if (status & UART_STATE_TRANSIENT_MASK) {
+ /* break takes precedence over parity, which takes precedence
+ * over framing errors */
+ if (status & UART_BREAK_ERROR)
+ tty_flag = TTY_BREAK;
+ else if (status & UART_PARITY_ERROR)
+ tty_flag = TTY_PARITY;
+ else if (status & UART_FRAME_ERROR)
+ tty_flag = TTY_FRAME;
+ dev_dbg(&port->dev, "tty_flag = %d\n", tty_flag);
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dev_dbg(&port->dev, "urb shutting down with status: %d\n",
- status);
- priv->write_urb_in_use = 0;
- return;
- default:
- /* error in the urb, so we have to resubmit it */
- dbg("%s - Overflow in write", __func__);
- dbg("%s - nonzero write bulk status received: %d",
- __func__, status);
- port->write_urb->transfer_buffer_length = 1;
- port->write_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result)
- dev_dbg(&port->dev,
- "failed resubmitting write urb %d\n", result);
- else
- return;
+ /* overrun is special, not associated with a char */
+ if (status & UART_OVERRUN_ERROR)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
- priv->write_urb_in_use = 0;
-
- /* send any buffered data */
- spcp8x5_send(port);
-}
-
-/* write data to ring buffer. and then start the write transfer */
-static int spcp8x5_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- struct spcp8x5_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
-
- dev_dbg(&port->dev, "%d bytes\n", count);
-
- if (!count)
- return count;
-
- spin_lock_irqsave(&priv->lock, flags);
- count = put_ringbuf(priv->buf, buf, count);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- spcp8x5_send(port);
-
- return count;
+ tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
}
static int spcp8x5_wait_modem_info(struct usb_serial_port *port,
@@ -953,36 +639,6 @@ static int spcp8x5_tiocmget(struct tty_struct *tty, struct file *file)
return result;
}
-/* get the avail space room in ring buffer */
-static int spcp8x5_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct spcp8x5_private *priv = usb_get_serial_port_data(port);
- int room = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- room = ringbuf_avail_space(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return room;
-}
-
-/* get the number of avail data in write ring buffer */
-static int spcp8x5_chars_in_buffer(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct spcp8x5_private *priv = usb_get_serial_port_data(port);
- int chars = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- chars = ringbuf_avail_data(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return chars;
-}
-
/* All of the device info needed for the spcp8x5 SIO serial converter */
static struct usb_serial_driver spcp8x5_device = {
.driver = {
@@ -992,21 +648,16 @@ static struct usb_serial_driver spcp8x5_device = {
.id_table = id_table,
.num_ports = 1,
.open = spcp8x5_open,
- .close = spcp8x5_close,
.dtr_rts = spcp8x5_dtr_rts,
.carrier_raised = spcp8x5_carrier_raised,
- .write = spcp8x5_write,
.set_termios = spcp8x5_set_termios,
.init_termios = spcp8x5_init_termios,
.ioctl = spcp8x5_ioctl,
.tiocmget = spcp8x5_tiocmget,
.tiocmset = spcp8x5_tiocmset,
- .write_room = spcp8x5_write_room,
- .read_bulk_callback = spcp8x5_read_bulk_callback,
- .write_bulk_callback = spcp8x5_write_bulk_callback,
- .chars_in_buffer = spcp8x5_chars_in_buffer,
.attach = spcp8x5_startup,
.release = spcp8x5_release,
+ .process_read_urb = spcp8x5_process_read_urb,
};
static int __init spcp8x5_init(void)
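
As with pl2303, the spcp8x5 hunks above delete the private ring buffer and delegate close, write_room and chars_in_buffer to the generic implementations; open keeps only the device setup and then tail-calls the generic open. A sketch of that split (names illustrative):

#include <linux/tty.h>
#include <linux/usb/serial.h>

static int example_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	/* vendor-specific line and modem-control setup would run here */
	port->port.drain_delay = 256;

	/* the generic open submits the read URB and owns it from now on */
	return usb_serial_generic_open(tty, port);
}
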
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index e1bfda3..90979a1 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -30,7 +30,7 @@
#include <linux/spinlock.h>
#include <linux/ioctl.h>
#include <linux/serial.h>
-#include <linux/circ_buf.h>
+#include <linux/kfifo.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
@@ -40,7 +40,7 @@
/* Defines */
-#define TI_DRIVER_VERSION "v0.9"
+#define TI_DRIVER_VERSION "v0.10"
#define TI_DRIVER_AUTHOR "Al Borchers <alborchers@steinerpoint.com>"
#define TI_DRIVER_DESC "TI USB 3410/5052 Serial Driver"
@@ -82,7 +82,7 @@ struct ti_port {
spinlock_t tp_lock;
int tp_read_urb_state;
int tp_write_urb_in_use;
- struct circ_buf *tp_write_buf;
+ struct kfifo write_fifo;
};
struct ti_device {
@@ -144,15 +144,6 @@ static int ti_write_byte(struct ti_device *tdev, unsigned long addr,
static int ti_download_firmware(struct ti_device *tdev);
-/* circular buffer */
-static struct circ_buf *ti_buf_alloc(void);
-static void ti_buf_free(struct circ_buf *cb);
-static void ti_buf_clear(struct circ_buf *cb);
-static int ti_buf_data_avail(struct circ_buf *cb);
-static int ti_buf_space_avail(struct circ_buf *cb);
-static int ti_buf_put(struct circ_buf *cb, const char *buf, int count);
-static int ti_buf_get(struct circ_buf *cb, char *buf, int count);
-
/* Data */
@@ -450,8 +441,8 @@ static int ti_startup(struct usb_serial *serial)
tport->tp_closing_wait = closing_wait;
init_waitqueue_head(&tport->tp_msr_wait);
init_waitqueue_head(&tport->tp_write_wait);
- tport->tp_write_buf = ti_buf_alloc();
- if (tport->tp_write_buf == NULL) {
+ if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE,
+ GFP_KERNEL)) {
dev_err(&dev->dev, "%s - out of memory\n", __func__);
kfree(tport);
status = -ENOMEM;
@@ -468,7 +459,7 @@ static int ti_startup(struct usb_serial *serial)
free_tports:
for (--i; i >= 0; --i) {
tport = usb_get_serial_port_data(serial->port[i]);
- ti_buf_free(tport->tp_write_buf);
+ kfifo_free(&tport->write_fifo);
kfree(tport);
usb_set_serial_port_data(serial->port[i], NULL);
}
@@ -490,7 +481,7 @@ static void ti_release(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i) {
tport = usb_get_serial_port_data(serial->port[i]);
if (tport) {
- ti_buf_free(tport->tp_write_buf);
+ kfifo_free(&tport->write_fifo);
kfree(tport);
}
}
@@ -701,7 +692,6 @@ static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
struct ti_port *tport = usb_get_serial_port_data(port);
- unsigned long flags;
dbg("%s - port %d", __func__, port->number);
@@ -713,10 +703,8 @@ static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
if (tport == NULL || !tport->tp_is_open)
return -ENODEV;
- spin_lock_irqsave(&tport->tp_lock, flags);
- count = ti_buf_put(tport->tp_write_buf, data, count);
- spin_unlock_irqrestore(&tport->tp_lock, flags);
-
+ count = kfifo_in_locked(&tport->write_fifo, data, count,
+ &tport->tp_lock);
ti_send(tport);
return count;
@@ -736,7 +724,7 @@ static int ti_write_room(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&tport->tp_lock, flags);
- room = ti_buf_space_avail(tport->tp_write_buf);
+ room = kfifo_avail(&tport->write_fifo);
spin_unlock_irqrestore(&tport->tp_lock, flags);
dbg("%s - returns %d", __func__, room);
@@ -757,7 +745,7 @@ static int ti_chars_in_buffer(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&tport->tp_lock, flags);
- chars = ti_buf_data_avail(tport->tp_write_buf);
+ chars = kfifo_len(&tport->write_fifo);
spin_unlock_irqrestore(&tport->tp_lock, flags);
dbg("%s - returns %d", __func__, chars);
@@ -1309,7 +1297,7 @@ static void ti_send(struct ti_port *tport)
if (tport->tp_write_urb_in_use)
goto unlock;
- count = ti_buf_get(tport->tp_write_buf,
+ count = kfifo_out(&tport->write_fifo,
port->write_urb->transfer_buffer,
port->bulk_out_size);
@@ -1504,7 +1492,7 @@ static void ti_drain(struct ti_port *tport, unsigned long timeout, int flush)
add_wait_queue(&tport->tp_write_wait, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- if (ti_buf_data_avail(tport->tp_write_buf) == 0
+ if (kfifo_len(&tport->write_fifo) == 0
|| timeout == 0 || signal_pending(current)
|| tdev->td_urb_error
|| port->serial->disconnected) /* disconnect */
@@ -1518,7 +1506,7 @@ static void ti_drain(struct ti_port *tport, unsigned long timeout, int flush)
/* flush any remaining data in the buffer */
if (flush)
- ti_buf_clear(tport->tp_write_buf);
+ kfifo_reset_out(&tport->write_fifo);
spin_unlock_irq(&tport->tp_lock);
@@ -1761,142 +1749,3 @@ static int ti_download_firmware(struct ti_device *tdev)
return 0;
}
-
-
-/* Circular Buffer Functions */
-
-/*
- * ti_buf_alloc
- *
- * Allocate a circular buffer and all associated memory.
- */
-
-static struct circ_buf *ti_buf_alloc(void)
-{
- struct circ_buf *cb;
-
- cb = kmalloc(sizeof(struct circ_buf), GFP_KERNEL);
- if (cb == NULL)
- return NULL;
-
- cb->buf = kmalloc(TI_WRITE_BUF_SIZE, GFP_KERNEL);
- if (cb->buf == NULL) {
- kfree(cb);
- return NULL;
- }
-
- ti_buf_clear(cb);
-
- return cb;
-}
-
-
-/*
- * ti_buf_free
- *
- * Free the buffer and all associated memory.
- */
-
-static void ti_buf_free(struct circ_buf *cb)
-{
- kfree(cb->buf);
- kfree(cb);
-}
-
-
-/*
- * ti_buf_clear
- *
- * Clear out all data in the circular buffer.
- */
-
-static void ti_buf_clear(struct circ_buf *cb)
-{
- cb->head = cb->tail = 0;
-}
-
-
-/*
- * ti_buf_data_avail
- *
- * Return the number of bytes of data available in the circular
- * buffer.
- */
-
-static int ti_buf_data_avail(struct circ_buf *cb)
-{
- return CIRC_CNT(cb->head, cb->tail, TI_WRITE_BUF_SIZE);
-}
-
-
-/*
- * ti_buf_space_avail
- *
- * Return the number of bytes of space available in the circular
- * buffer.
- */
-
-static int ti_buf_space_avail(struct circ_buf *cb)
-{
- return CIRC_SPACE(cb->head, cb->tail, TI_WRITE_BUF_SIZE);
-}
-
-
-/*
- * ti_buf_put
- *
- * Copy data data from a user buffer and put it into the circular buffer.
- * Restrict to the amount of space available.
- *
- * Return the number of bytes copied.
- */
-
-static int ti_buf_put(struct circ_buf *cb, const char *buf, int count)
-{
- int c, ret = 0;
-
- while (1) {
- c = CIRC_SPACE_TO_END(cb->head, cb->tail, TI_WRITE_BUF_SIZE);
- if (count < c)
- c = count;
- if (c <= 0)
- break;
- memcpy(cb->buf + cb->head, buf, c);
- cb->head = (cb->head + c) & (TI_WRITE_BUF_SIZE-1);
- buf += c;
- count -= c;
- ret += c;
- }
-
- return ret;
-}
-
-
-/*
- * ti_buf_get
- *
- * Get data from the circular buffer and copy to the given buffer.
- * Restrict to the amount of data available.
- *
- * Return the number of bytes copied.
- */
-
-static int ti_buf_get(struct circ_buf *cb, char *buf, int count)
-{
- int c, ret = 0;
-
- while (1) {
- c = CIRC_CNT_TO_END(cb->head, cb->tail, TI_WRITE_BUF_SIZE);
- if (count < c)
- c = count;
- if (c <= 0)
- break;
- memcpy(buf, cb->buf + cb->tail, c);
- cb->tail = (cb->tail + c) & (TI_WRITE_BUF_SIZE-1);
- buf += c;
- count -= c;
- ret += c;
- }
-
- return ret;
-}
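
The ti_usb change swaps the hand-rolled circ_buf helpers deleted above for the in-kernel kfifo, so allocation, queueing and accounting become one-liners. A sketch mapping the removed helpers to their kfifo equivalents, assuming the fifo was set up with kfifo_alloc as in ti_startup:

#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>

static int example_fifo_write(struct kfifo *fifo, spinlock_t *lock,
			      const unsigned char *data, int count)
{
	/* ti_buf_put(): copy in as much as fits, under the lock */
	count = kfifo_in_locked(fifo, data, count, lock);

	/* ti_buf_space_avail() / ti_buf_data_avail() */
	pr_debug("room %u, queued %u\n", kfifo_avail(fifo), kfifo_len(fifo));

	return count;
}
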
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 3873660..941c2d4 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -289,7 +289,7 @@ static void serial_down(struct tty_port *tport)
* The console is magical. Do not hang up the console hardware
* or there will be tears.
*/
- if (port->console)
+ if (port->port.console)
return;
if (drv->close)
drv->close(port);
@@ -328,7 +328,7 @@ static void serial_cleanup(struct tty_struct *tty)
/* The console is magical. Do not hang up the console hardware
* or there will be tears.
*/
- if (port->console)
+ if (port->port.console)
return;
dbg("%s - port %d", __func__, port->number);
@@ -548,8 +548,12 @@ static void usb_serial_port_work(struct work_struct *work)
static void kill_traffic(struct usb_serial_port *port)
{
+ int i;
+
usb_kill_urb(port->read_urb);
usb_kill_urb(port->write_urb);
+ for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
+ usb_kill_urb(port->write_urbs[i]);
/*
* This is tricky.
* Some drivers submit the read_urb in the
@@ -568,6 +572,7 @@ static void kill_traffic(struct usb_serial_port *port)
static void port_release(struct device *dev)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
+ int i;
dbg ("%s - %s", __func__, dev_name(dev));
@@ -582,6 +587,10 @@ static void port_release(struct device *dev)
usb_free_urb(port->write_urb);
usb_free_urb(port->interrupt_in_urb);
usb_free_urb(port->interrupt_out_urb);
+ for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) {
+ usb_free_urb(port->write_urbs[i]);
+ kfree(port->bulk_out_buffers[i]);
+ }
kfifo_free(&port->write_fifo);
kfree(port->bulk_in_buffer);
kfree(port->bulk_out_buffer);
@@ -901,7 +910,9 @@ int usb_serial_probe(struct usb_interface *interface,
dev_err(&interface->dev, "No free urbs available\n");
goto probe_error;
}
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ buffer_size = serial->type->bulk_in_size;
+ if (!buffer_size)
+ buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
port->bulk_in_size = buffer_size;
port->bulk_in_endpointAddress = endpoint->bEndpointAddress;
port->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
@@ -918,6 +929,8 @@ int usb_serial_probe(struct usb_interface *interface,
}
for (i = 0; i < num_bulk_out; ++i) {
+ int j;
+
endpoint = bulk_out_endpoint[i];
port = serial->port[i];
port->write_urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -927,7 +940,9 @@ int usb_serial_probe(struct usb_interface *interface,
}
if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
goto probe_error;
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ buffer_size = serial->type->bulk_out_size;
+ if (!buffer_size)
+ buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
port->bulk_out_size = buffer_size;
port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
port->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
@@ -941,6 +956,28 @@ int usb_serial_probe(struct usb_interface *interface,
endpoint->bEndpointAddress),
port->bulk_out_buffer, buffer_size,
serial->type->write_bulk_callback, port);
+ for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j) {
+ set_bit(j, &port->write_urbs_free);
+ port->write_urbs[j] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!port->write_urbs[j]) {
+ dev_err(&interface->dev,
+ "No free urbs available\n");
+ goto probe_error;
+ }
+ port->bulk_out_buffers[j] = kmalloc(buffer_size,
+ GFP_KERNEL);
+ if (!port->bulk_out_buffers[j]) {
+ dev_err(&interface->dev,
+ "Couldn't allocate bulk_out_buffer\n");
+ goto probe_error;
+ }
+ usb_fill_bulk_urb(port->write_urbs[j], dev,
+ usb_sndbulkpipe(dev,
+ endpoint->bEndpointAddress),
+ port->bulk_out_buffers[j], buffer_size,
+ serial->type->write_bulk_callback,
+ port);
+ }
}
if (serial->type->read_int_callback) {
@@ -1294,6 +1331,8 @@ static void fixup_generic(struct usb_serial_driver *device)
set_to_generic_if_null(device, write_bulk_callback);
set_to_generic_if_null(device, disconnect);
set_to_generic_if_null(device, release);
+ set_to_generic_if_null(device, process_read_urb);
+ set_to_generic_if_null(device, prepare_write_buffer);
}
int usb_serial_register(struct usb_serial_driver *driver)
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
new file mode 100644
index 0000000..2be298a
--- /dev/null
+++ b/drivers/usb/serial/usb-wwan.h
@@ -0,0 +1,67 @@
+/*
+ * Definitions for USB serial mobile broadband cards
+ */
+
+#ifndef __LINUX_USB_USB_WWAN
+#define __LINUX_USB_USB_WWAN
+
+extern void usb_wwan_dtr_rts(struct usb_serial_port *port, int on);
+extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port);
+extern void usb_wwan_close(struct usb_serial_port *port);
+extern int usb_wwan_startup(struct usb_serial *serial);
+extern void usb_wwan_disconnect(struct usb_serial *serial);
+extern void usb_wwan_release(struct usb_serial *serial);
+extern int usb_wwan_write_room(struct tty_struct *tty);
+extern void usb_wwan_set_termios(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ struct ktermios *old);
+extern int usb_wwan_tiocmget(struct tty_struct *tty, struct file *file);
+extern int usb_wwan_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear);
+extern int usb_wwan_send_setup(struct usb_serial_port *port);
+extern int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
+ const unsigned char *buf, int count);
+extern int usb_wwan_chars_in_buffer(struct tty_struct *tty);
+#ifdef CONFIG_PM
+extern int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message);
+extern int usb_wwan_resume(struct usb_serial *serial);
+#endif
+
+/* per port private data */
+
+#define N_IN_URB 4
+#define N_OUT_URB 4
+#define IN_BUFLEN 4096
+#define OUT_BUFLEN 4096
+
+struct usb_wwan_intf_private {
+ spinlock_t susp_lock;
+ unsigned int suspended:1;
+ int in_flight;
+ int (*send_setup) (struct usb_serial_port *port);
+ void *private;
+};
+
+struct usb_wwan_port_private {
+ /* Input endpoints and buffer for this port */
+ struct urb *in_urbs[N_IN_URB];
+ u8 *in_buffer[N_IN_URB];
+ /* Output endpoints and buffer for this port */
+ struct urb *out_urbs[N_OUT_URB];
+ u8 *out_buffer[N_OUT_URB];
+ unsigned long out_busy; /* Bit vector of URBs in use */
+ int opened;
+ struct usb_anchor delayed;
+
+ /* Settings for the port */
+ int rts_state; /* Handshaking pins (outputs) */
+ int dtr_state;
+ int cts_state; /* Handshaking pins (inputs) */
+ int dsr_state;
+ int dcd_state;
+ int ri_state;
+
+ unsigned long tx_start_time[N_OUT_URB];
+};
+
+#endif /* __LINUX_USB_USB_WWAN */
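
The new header above is the interface the qcserial hunks bind against: usb_wwan_intf_private carries the interface-wide suspend bookkeeping, usb_wwan_port_private the per-port URB and buffer pools, and the exported usb_wwan_* functions are meant to be plugged straight into a usb_serial_driver. A sketch of that wiring (driver name illustrative, id_table omitted):

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "usb-wwan.h"

static struct usb_serial_driver example_wwan_device = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "example_wwan",
	},
	.num_ports	 = 1,
	.open		 = usb_wwan_open,
	.close		 = usb_wwan_close,
	.write		 = usb_wwan_write,
	.write_room	 = usb_wwan_write_room,
	.chars_in_buffer = usb_wwan_chars_in_buffer,
	.attach		 = usb_wwan_startup,
	.disconnect	 = usb_wwan_disconnect,
	.release	 = usb_wwan_release,
#ifdef CONFIG_PM
	.suspend	 = usb_wwan_suspend,
	.resume		 = usb_wwan_resume,
#endif
};
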
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 28026b4..f2ed6a3 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -16,7 +16,6 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-#define URB_DEBUG_MAX_IN_FLIGHT_URBS 4000
#define USB_DEBUG_MAX_PACKET_SIZE 8
#define USB_DEBUG_BRK_SIZE 8
static char USB_DEBUG_BRK[USB_DEBUG_BRK_SIZE] = {
@@ -44,12 +43,6 @@ static struct usb_driver debug_driver = {
.no_dynamic_id = 1,
};
-static int usb_debug_open(struct tty_struct *tty, struct usb_serial_port *port)
-{
- port->bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE;
- return usb_serial_generic_open(tty, port);
-}
-
/* This HW really does not support a serial break, so one will be
* emulated when ever the break state is set to true.
*/
@@ -69,7 +62,7 @@ static void usb_debug_read_bulk_callback(struct urb *urb)
memcmp(urb->transfer_buffer, USB_DEBUG_BRK,
USB_DEBUG_BRK_SIZE) == 0) {
usb_serial_handle_break(port);
- usb_serial_generic_resubmit_read_urb(port, GFP_ATOMIC);
+ usb_serial_generic_submit_read_urb(port, GFP_ATOMIC);
return;
}
@@ -83,8 +76,7 @@ static struct usb_serial_driver debug_device = {
},
.id_table = id_table,
.num_ports = 1,
- .open = usb_debug_open,
- .max_in_flight_urbs = URB_DEBUG_MAX_IN_FLIGHT_URBS,
+ .bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE,
.break_ctl = usb_debug_break_ctl,
.read_bulk_callback = usb_debug_read_bulk_callback,
};
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
new file mode 100644
index 0000000..0c70b4a
--- /dev/null
+++ b/drivers/usb/serial/usb_wwan.c
@@ -0,0 +1,665 @@
+/*
+ USB Driver layer for GSM modems
+
+ Copyright (C) 2005 Matthias Urlichs <smurf@smurf.noris.de>
+
+ This driver is free software; you can redistribute it and/or modify
+ it under the terms of Version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ Portions copied from the Keyspan driver by Hugh Blemings <hugh@blemings.org>
+
+ History: see the git log.
+
+ Work sponsored by: Sigos GmbH, Germany <info@sigos.de>
+
+ This driver exists because the "normal" serial driver doesn't work too well
+ with GSM modems. Issues:
+ - data loss -- one single Receive URB is not nearly enough
+ - controlling the baud rate doesn't make sense
+*/
+
+#define DRIVER_VERSION "v0.7.2"
+#define DRIVER_AUTHOR "Matthias Urlichs <smurf@smurf.noris.de>"
+#define DRIVER_DESC "USB Driver for GSM modems"
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+#include "usb-wwan.h"
+
+static int debug;
+
+void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct usb_serial *serial = port->serial;
+ struct usb_wwan_port_private *portdata;
+
+ struct usb_wwan_intf_private *intfdata;
+
+ dbg("%s", __func__);
+
+ intfdata = port->serial->private;
+
+ if (!intfdata->send_setup)
+ return;
+
+ portdata = usb_get_serial_port_data(port);
+ mutex_lock(&serial->disc_mutex);
+ portdata->rts_state = on;
+ portdata->dtr_state = on;
+ if (serial->dev)
+ intfdata->send_setup(port);
+ mutex_unlock(&serial->disc_mutex);
+}
+EXPORT_SYMBOL(usb_wwan_dtr_rts);
+
+void usb_wwan_set_termios(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ struct ktermios *old_termios)
+{
+ struct usb_wwan_intf_private *intfdata = port->serial->private;
+
+ dbg("%s", __func__);
+
+ /* Doesn't support option setting */
+ tty_termios_copy_hw(tty->termios, old_termios);
+
+ if (intfdata->send_setup)
+ intfdata->send_setup(port);
+}
+EXPORT_SYMBOL(usb_wwan_set_termios);
+
+int usb_wwan_tiocmget(struct tty_struct *tty, struct file *file)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ unsigned int value;
+ struct usb_wwan_port_private *portdata;
+
+ portdata = usb_get_serial_port_data(port);
+
+ value = ((portdata->rts_state) ? TIOCM_RTS : 0) |
+ ((portdata->dtr_state) ? TIOCM_DTR : 0) |
+ ((portdata->cts_state) ? TIOCM_CTS : 0) |
+ ((portdata->dsr_state) ? TIOCM_DSR : 0) |
+ ((portdata->dcd_state) ? TIOCM_CAR : 0) |
+ ((portdata->ri_state) ? TIOCM_RNG : 0);
+
+ return value;
+}
+EXPORT_SYMBOL(usb_wwan_tiocmget);
+
+int usb_wwan_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct usb_wwan_port_private *portdata;
+ struct usb_wwan_intf_private *intfdata;
+
+ portdata = usb_get_serial_port_data(port);
+ intfdata = port->serial->private;
+
+ if (!intfdata->send_setup)
+ return -EINVAL;
+
+ /* FIXME: what locks portdata fields ? */
+ if (set & TIOCM_RTS)
+ portdata->rts_state = 1;
+ if (set & TIOCM_DTR)
+ portdata->dtr_state = 1;
+
+ if (clear & TIOCM_RTS)
+ portdata->rts_state = 0;
+ if (clear & TIOCM_DTR)
+ portdata->dtr_state = 0;
+ return intfdata->send_setup(port);
+}
+EXPORT_SYMBOL(usb_wwan_tiocmset);
+
+/* Write */
+int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
+ const unsigned char *buf, int count)
+{
+ struct usb_wwan_port_private *portdata;
+ struct usb_wwan_intf_private *intfdata;
+ int i;
+ int left, todo;
+ struct urb *this_urb = NULL; /* spurious */
+ int err;
+ unsigned long flags;
+
+ portdata = usb_get_serial_port_data(port);
+ intfdata = port->serial->private;
+
+ dbg("%s: write (%d chars)", __func__, count);
+
+ i = 0;
+ left = count;
+ for (i = 0; left > 0 && i < N_OUT_URB; i++) {
+ todo = left;
+ if (todo > OUT_BUFLEN)
+ todo = OUT_BUFLEN;
+
+ this_urb = portdata->out_urbs[i];
+ if (test_and_set_bit(i, &portdata->out_busy)) {
+ if (time_before(jiffies,
+ portdata->tx_start_time[i] + 10 * HZ))
+ continue;
+ usb_unlink_urb(this_urb);
+ continue;
+ }
+ dbg("%s: endpoint %d buf %d", __func__,
+ usb_pipeendpoint(this_urb->pipe), i);
+
+ err = usb_autopm_get_interface_async(port->serial->interface);
+ if (err < 0)
+ break;
+
+ /* send the data */
+ memcpy(this_urb->transfer_buffer, buf, todo);
+ this_urb->transfer_buffer_length = todo;
+
+ spin_lock_irqsave(&intfdata->susp_lock, flags);
+ if (intfdata->suspended) {
+ usb_anchor_urb(this_urb, &portdata->delayed);
+ spin_unlock_irqrestore(&intfdata->susp_lock, flags);
+ } else {
+ intfdata->in_flight++;
+ spin_unlock_irqrestore(&intfdata->susp_lock, flags);
+ err = usb_submit_urb(this_urb, GFP_ATOMIC);
+ if (err) {
+ dbg("usb_submit_urb %p (write bulk) failed "
+ "(%d)", this_urb, err);
+ clear_bit(i, &portdata->out_busy);
+ spin_lock_irqsave(&intfdata->susp_lock, flags);
+ intfdata->in_flight--;
+ spin_unlock_irqrestore(&intfdata->susp_lock,
+ flags);
+ continue;
+ }
+ }
+
+ portdata->tx_start_time[i] = jiffies;
+ buf += todo;
+ left -= todo;
+ }
+
+ count -= left;
+ dbg("%s: wrote (did %d)", __func__, count);
+ return count;
+}
+EXPORT_SYMBOL(usb_wwan_write);
+
+static void usb_wwan_indat_callback(struct urb *urb)
+{
+ int err;
+ int endpoint;
+ struct usb_serial_port *port;
+ struct tty_struct *tty;
+ unsigned char *data = urb->transfer_buffer;
+ int status = urb->status;
+
+ dbg("%s: %p", __func__, urb);
+
+ endpoint = usb_pipeendpoint(urb->pipe);
+ port = urb->context;
+
+ if (status) {
+ dbg("%s: nonzero status: %d on endpoint %02x.",
+ __func__, status, endpoint);
+ } else {
+ tty = tty_port_tty_get(&port->port);
+ if (urb->actual_length) {
+ tty_insert_flip_string(tty, data, urb->actual_length);
+ tty_flip_buffer_push(tty);
+ } else
+ dbg("%s: empty read urb received", __func__);
+ tty_kref_put(tty);
+
+ /* Resubmit urb so we continue receiving */
+ if (status != -ESHUTDOWN) {
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err && err != -EPERM)
+ printk(KERN_ERR "%s: resubmit read urb failed. "
+ "(%d)", __func__, err);
+ else
+ usb_mark_last_busy(port->serial->dev);
+ }
+
+ }
+ return;
+}
+
+static void usb_wwan_outdat_callback(struct urb *urb)
+{
+ struct usb_serial_port *port;
+ struct usb_wwan_port_private *portdata;
+ struct usb_wwan_intf_private *intfdata;
+ int i;
+
+ dbg("%s", __func__);
+
+ port = urb->context;
+ intfdata = port->serial->private;
+
+ usb_serial_port_softint(port);
+ usb_autopm_put_interface_async(port->serial->interface);
+ portdata = usb_get_serial_port_data(port);
+ spin_lock(&intfdata->susp_lock);
+ intfdata->in_flight--;
+ spin_unlock(&intfdata->susp_lock);
+
+ for (i = 0; i < N_OUT_URB; ++i) {
+ if (portdata->out_urbs[i] == urb) {
+ smp_mb__before_clear_bit();
+ clear_bit(i, &portdata->out_busy);
+ break;
+ }
+ }
+}
+
+int usb_wwan_write_room(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct usb_wwan_port_private *portdata;
+ int i;
+ int data_len = 0;
+ struct urb *this_urb;
+
+ portdata = usb_get_serial_port_data(port);
+
+ for (i = 0; i < N_OUT_URB; i++) {
+ this_urb = portdata->out_urbs[i];
+ if (this_urb && !test_bit(i, &portdata->out_busy))
+ data_len += OUT_BUFLEN;
+ }
+
+ dbg("%s: %d", __func__, data_len);
+ return data_len;
+}
+EXPORT_SYMBOL(usb_wwan_write_room);
+
+int usb_wwan_chars_in_buffer(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct usb_wwan_port_private *portdata;
+ int i;
+ int data_len = 0;
+ struct urb *this_urb;
+
+ portdata = usb_get_serial_port_data(port);
+
+ for (i = 0; i < N_OUT_URB; i++) {
+ this_urb = portdata->out_urbs[i];
+ /* FIXME: This locking is insufficient as this_urb may
+ go unused during the test */
+ if (this_urb && test_bit(i, &portdata->out_busy))
+ data_len += this_urb->transfer_buffer_length;
+ }
+ dbg("%s: %d", __func__, data_len);
+ return data_len;
+}
+EXPORT_SYMBOL(usb_wwan_chars_in_buffer);
+
+int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
+{
+ struct usb_wwan_port_private *portdata;
+ struct usb_wwan_intf_private *intfdata;
+ struct usb_serial *serial = port->serial;
+ int i, err;
+ struct urb *urb;
+
+ portdata = usb_get_serial_port_data(port);
+ intfdata = serial->private;
+
+ dbg("%s", __func__);
+
+ /* Start reading from the IN endpoint */
+ for (i = 0; i < N_IN_URB; i++) {
+ urb = portdata->in_urbs[i];
+ if (!urb)
+ continue;
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err) {
+ dbg("%s: submit urb %d failed (%d) %d",
+ __func__, i, err, urb->transfer_buffer_length);
+ }
+ }
+
+ if (intfdata->send_setup)
+ intfdata->send_setup(port);
+
+ serial->interface->needs_remote_wakeup = 1;
+ spin_lock_irq(&intfdata->susp_lock);
+ portdata->opened = 1;
+ spin_unlock_irq(&intfdata->susp_lock);
+ usb_autopm_put_interface(serial->interface);
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_wwan_open);
+
+void usb_wwan_close(struct usb_serial_port *port)
+{
+ int i;
+ struct usb_serial *serial = port->serial;
+ struct usb_wwan_port_private *portdata;
+ struct usb_wwan_intf_private *intfdata = port->serial->private;
+
+ dbg("%s", __func__);
+ portdata = usb_get_serial_port_data(port);
+
+ if (serial->dev) {
+ /* Stop reading/writing urbs */
+ spin_lock_irq(&intfdata->susp_lock);
+ portdata->opened = 0;
+ spin_unlock_irq(&intfdata->susp_lock);
+
+ for (i = 0; i < N_IN_URB; i++)
+ usb_kill_urb(portdata->in_urbs[i]);
+ for (i = 0; i < N_OUT_URB; i++)
+ usb_kill_urb(portdata->out_urbs[i]);
+ usb_autopm_get_interface(serial->interface);
+ serial->interface->needs_remote_wakeup = 0;
+ }
+}
+EXPORT_SYMBOL(usb_wwan_close);
+
+/* Helper functions used by usb_wwan_setup_urbs */
+static struct urb *usb_wwan_setup_urb(struct usb_serial *serial, int endpoint,
+ int dir, void *ctx, char *buf, int len,
+ void (*callback) (struct urb *))
+{
+ struct urb *urb;
+
+ if (endpoint == -1)
+ return NULL; /* endpoint not needed */
+
+ urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
+ if (urb == NULL) {
+ dbg("%s: alloc for endpoint %d failed.", __func__, endpoint);
+ return NULL;
+ }
+
+ /* Fill URB using supplied data. */
+ usb_fill_bulk_urb(urb, serial->dev,
+ usb_sndbulkpipe(serial->dev, endpoint) | dir,
+ buf, len, callback, ctx);
+
+ return urb;
+}
+
+/* Setup urbs */
+static void usb_wwan_setup_urbs(struct usb_serial *serial)
+{
+ int i, j;
+ struct usb_serial_port *port;
+ struct usb_wwan_port_private *portdata;
+
+ dbg("%s", __func__);
+
+ for (i = 0; i < serial->num_ports; i++) {
+ port = serial->port[i];
+ portdata = usb_get_serial_port_data(port);
+
+ /* Do indat endpoints first */
+ for (j = 0; j < N_IN_URB; ++j) {
+ portdata->in_urbs[j] = usb_wwan_setup_urb(serial,
+ port->bulk_in_endpointAddress, USB_DIR_IN,
+ port, portdata->in_buffer[j], IN_BUFLEN,
+ usb_wwan_indat_callback);
+ }
+
+ /* outdat endpoints */
+ for (j = 0; j < N_OUT_URB; ++j) {
+ portdata->out_urbs[j] = usb_wwan_setup_urb(serial,
+ port->bulk_out_endpointAddress, USB_DIR_OUT,
+ port, portdata->out_buffer[j], OUT_BUFLEN,
+ usb_wwan_outdat_callback);
+ }
+ }
+}
+
+int usb_wwan_startup(struct usb_serial *serial)
+{
+ int i, j, err;
+ struct usb_serial_port *port;
+ struct usb_wwan_port_private *portdata;
+ u8 *buffer;
+
+ dbg("%s", __func__);
+
+ /* Now setup per port private data */
+ for (i = 0; i < serial->num_ports; i++) {
+ port = serial->port[i];
+ portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
+ if (!portdata) {
+ dbg("%s: kmalloc for usb_wwan_port_private (%d) failed!.",
+ __func__, i);
+ return 1;
+ }
+ init_usb_anchor(&portdata->delayed);
+
+ for (j = 0; j < N_IN_URB; j++) {
+ buffer = (u8 *) __get_free_page(GFP_KERNEL);
+ if (!buffer)
+ goto bail_out_error;
+ portdata->in_buffer[j] = buffer;
+ }
+
+ for (j = 0; j < N_OUT_URB; j++) {
+ buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
+ if (!buffer)
+ goto bail_out_error2;
+ portdata->out_buffer[j] = buffer;
+ }
+
+ usb_set_serial_port_data(port, portdata);
+
+ if (!port->interrupt_in_urb)
+ continue;
+ err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
+ if (err)
+ dbg("%s: submit irq_in urb failed %d", __func__, err);
+ }
+ usb_wwan_setup_urbs(serial);
+ return 0;
+
+bail_out_error2:
+ for (j = 0; j < N_OUT_URB; j++)
+ kfree(portdata->out_buffer[j]);
+bail_out_error:
+ for (j = 0; j < N_IN_URB; j++)
+ if (portdata->in_buffer[j])
+ free_page((unsigned long)portdata->in_buffer[j]);
+ kfree(portdata);
+ return 1;
+}
+EXPORT_SYMBOL(usb_wwan_startup);
+
+static void stop_read_write_urbs(struct usb_serial *serial)
+{
+ int i, j;
+ struct usb_serial_port *port;
+ struct usb_wwan_port_private *portdata;
+
+ /* Stop reading/writing urbs */
+ for (i = 0; i < serial->num_ports; ++i) {
+ port = serial->port[i];
+ portdata = usb_get_serial_port_data(port);
+ for (j = 0; j < N_IN_URB; j++)
+ usb_kill_urb(portdata->in_urbs[j]);
+ for (j = 0; j < N_OUT_URB; j++)
+ usb_kill_urb(portdata->out_urbs[j]);
+ }
+}
+
+void usb_wwan_disconnect(struct usb_serial *serial)
+{
+ dbg("%s", __func__);
+
+ stop_read_write_urbs(serial);
+}
+EXPORT_SYMBOL(usb_wwan_disconnect);
+
+void usb_wwan_release(struct usb_serial *serial)
+{
+ int i, j;
+ struct usb_serial_port *port;
+ struct usb_wwan_port_private *portdata;
+
+ dbg("%s", __func__);
+
+ /* Now free them */
+ for (i = 0; i < serial->num_ports; ++i) {
+ port = serial->port[i];
+ portdata = usb_get_serial_port_data(port);
+
+ for (j = 0; j < N_IN_URB; j++) {
+ usb_free_urb(portdata->in_urbs[j]);
+ free_page((unsigned long)
+ portdata->in_buffer[j]);
+ portdata->in_urbs[j] = NULL;
+ }
+ for (j = 0; j < N_OUT_URB; j++) {
+ usb_free_urb(portdata->out_urbs[j]);
+ kfree(portdata->out_buffer[j]);
+ portdata->out_urbs[j] = NULL;
+ }
+ }
+
+ /* Now free per port private data */
+ for (i = 0; i < serial->num_ports; i++) {
+ port = serial->port[i];
+ kfree(usb_get_serial_port_data(port));
+ }
+}
+EXPORT_SYMBOL(usb_wwan_release);
+
+#ifdef CONFIG_PM
+int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
+{
+ struct usb_wwan_intf_private *intfdata = serial->private;
+ int b;
+
+ dbg("%s entered", __func__);
+
+ if (message.event & PM_EVENT_AUTO) {
+ spin_lock_irq(&intfdata->susp_lock);
+ b = intfdata->in_flight;
+ spin_unlock_irq(&intfdata->susp_lock);
+
+ if (b)
+ return -EBUSY;
+ }
+
+ spin_lock_irq(&intfdata->susp_lock);
+ intfdata->suspended = 1;
+ spin_unlock_irq(&intfdata->susp_lock);
+ stop_read_write_urbs(serial);
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_wwan_suspend);
+
+static void play_delayed(struct usb_serial_port *port)
+{
+ struct usb_wwan_intf_private *data;
+ struct usb_wwan_port_private *portdata;
+ struct urb *urb;
+ int err;
+
+ portdata = usb_get_serial_port_data(port);
+ data = port->serial->private;
+ while ((urb = usb_get_from_anchor(&portdata->delayed))) {
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (!err)
+ data->in_flight++;
+ }
+}
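play_delayed() drains URBs that were parked on the per-port anchor while the interface was suspended. A hedged sketch of the parking side follows; the helper name and call site are illustrative, while the field names match the structures used in this file:

static int example_submit_or_delay(struct usb_wwan_intf_private *intfdata,
				   struct usb_wwan_port_private *portdata,
				   struct urb *urb)
{
	int err = 0;

	spin_lock_irq(&intfdata->susp_lock);
	if (intfdata->suspended) {
		/* device is asleep: park the URB for play_delayed() */
		usb_anchor_urb(urb, &portdata->delayed);
	} else {
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (!err)
			intfdata->in_flight++;	/* blocks autosuspend in usb_wwan_suspend() */
	}
	spin_unlock_irq(&intfdata->susp_lock);
	return err;
}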
+
+int usb_wwan_resume(struct usb_serial *serial)
+{
+ int i, j;
+ struct usb_serial_port *port;
+ struct usb_wwan_intf_private *intfdata = serial->private;
+ struct usb_wwan_port_private *portdata;
+ struct urb *urb;
+ int err = 0;
+
+ dbg("%s entered", __func__);
+ /* get the interrupt URBs resubmitted unconditionally */
+ for (i = 0; i < serial->num_ports; i++) {
+ port = serial->port[i];
+ if (!port->interrupt_in_urb) {
+ dbg("%s: No interrupt URB for port %d", __func__, i);
+ continue;
+ }
+ err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+ dbg("Submitted interrupt URB for port %d (result %d)", i, err);
+ if (err < 0) {
+ err("%s: Error %d for interrupt URB of port%d",
+ __func__, err, i);
+ goto err_out;
+ }
+ }
+
+ for (i = 0; i < serial->num_ports; i++) {
+ /* walk all ports */
+ port = serial->port[i];
+ portdata = usb_get_serial_port_data(port);
+
+ /* skip closed ports */
+ spin_lock_irq(&intfdata->susp_lock);
+ if (!portdata->opened) {
+ spin_unlock_irq(&intfdata->susp_lock);
+ continue;
+ }
+
+ for (j = 0; j < N_IN_URB; j++) {
+ urb = portdata->in_urbs[j];
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ err("%s: Error %d for bulk URB %d",
+ __func__, err, i);
+ spin_unlock_irq(&intfdata->susp_lock);
+ goto err_out;
+ }
+ }
+ play_delayed(port);
+ spin_unlock_irq(&intfdata->susp_lock);
+ }
+ spin_lock_irq(&intfdata->susp_lock);
+ intfdata->suspended = 0;
+ spin_unlock_irq(&intfdata->susp_lock);
+err_out:
+ return err;
+}
+EXPORT_SYMBOL(usb_wwan_resume);
+#endif
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+module_param(debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug messages");
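For context, a hardware-specific driver built on these helpers wires the exported usb_wwan_* functions into its usb_serial_driver. The sketch below is abbreviated (fields such as .description and .usb_driver are omitted), and the driver name and ID table are placeholders; usb_wwan_write is assumed to be exported earlier in this file alongside the functions shown above:

static struct usb_serial_driver example_wwan_device = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "example-wwan",
	},
	.id_table = example_id_table,	/* device-specific IDs, assumed defined elsewhere */
	.num_ports = 1,
	.open = usb_wwan_open,
	.close = usb_wwan_close,
	.write = usb_wwan_write,
	.write_room = usb_wwan_write_room,
	.chars_in_buffer = usb_wwan_chars_in_buffer,
	.attach = usb_wwan_startup,
	.disconnect = usb_wwan_disconnect,
	.release = usb_wwan_release,
#ifdef CONFIG_PM
	.suspend = usb_wwan_suspend,
	.resume = usb_wwan_resume,
#endif
};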
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 0949427..eb76aae 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -38,17 +38,9 @@
/* function prototypes for a handspring visor */
static int visor_open(struct tty_struct *tty, struct usb_serial_port *port);
static void visor_close(struct usb_serial_port *port);
-static int visor_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count);
-static int visor_write_room(struct tty_struct *tty);
-static void visor_throttle(struct tty_struct *tty);
-static void visor_unthrottle(struct tty_struct *tty);
static int visor_probe(struct usb_serial *serial,
const struct usb_device_id *id);
static int visor_calc_num_ports(struct usb_serial *serial);
-static void visor_release(struct usb_serial *serial);
-static void visor_write_bulk_callback(struct urb *urb);
-static void visor_read_bulk_callback(struct urb *urb);
static void visor_read_int_callback(struct urb *urb);
static int clie_3_5_startup(struct usb_serial *serial);
static int treo_attach(struct usb_serial *serial);
@@ -194,18 +186,14 @@ static struct usb_serial_driver handspring_device = {
.usb_driver = &visor_driver,
.id_table = id_table,
.num_ports = 2,
+ .bulk_out_size = 256,
.open = visor_open,
.close = visor_close,
- .throttle = visor_throttle,
- .unthrottle = visor_unthrottle,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
.attach = treo_attach,
.probe = visor_probe,
.calc_num_ports = visor_calc_num_ports,
- .release = visor_release,
- .write = visor_write,
- .write_room = visor_write_room,
- .write_bulk_callback = visor_write_bulk_callback,
- .read_bulk_callback = visor_read_bulk_callback,
.read_int_callback = visor_read_int_callback,
};
@@ -219,18 +207,14 @@ static struct usb_serial_driver clie_5_device = {
.usb_driver = &visor_driver,
.id_table = clie_id_5_table,
.num_ports = 2,
+ .bulk_out_size = 256,
.open = visor_open,
.close = visor_close,
- .throttle = visor_throttle,
- .unthrottle = visor_unthrottle,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
.attach = clie_5_attach,
.probe = visor_probe,
.calc_num_ports = visor_calc_num_ports,
- .release = visor_release,
- .write = visor_write,
- .write_room = visor_write_room,
- .write_bulk_callback = visor_write_bulk_callback,
- .read_bulk_callback = visor_read_bulk_callback,
.read_int_callback = visor_read_int_callback,
};
@@ -244,39 +228,19 @@ static struct usb_serial_driver clie_3_5_device = {
.usb_driver = &visor_driver,
.id_table = clie_id_3_5_table,
.num_ports = 1,
+ .bulk_out_size = 256,
.open = visor_open,
.close = visor_close,
- .throttle = visor_throttle,
- .unthrottle = visor_unthrottle,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
.attach = clie_3_5_startup,
- .write = visor_write,
- .write_room = visor_write_room,
- .write_bulk_callback = visor_write_bulk_callback,
- .read_bulk_callback = visor_read_bulk_callback,
};
-struct visor_private {
- spinlock_t lock;
- int bytes_in;
- int bytes_out;
- int outstanding_urbs;
- unsigned char throttled;
- unsigned char actually_throttled;
-};
-
-/* number of outstanding urbs to prevent userspace DoS from happening */
-#define URB_UPPER_LIMIT 42
-
-static int stats;
-
/******************************************************************************
* Handspring Visor specific driver functions
******************************************************************************/
static int visor_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- struct usb_serial *serial = port->serial;
- struct visor_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
int result = 0;
dbg("%s - port %d", __func__, port->number);
@@ -287,26 +251,10 @@ static int visor_open(struct tty_struct *tty, struct usb_serial_port *port)
return -ENODEV;
}
- spin_lock_irqsave(&priv->lock, flags);
- priv->bytes_in = 0;
- priv->bytes_out = 0;
- priv->throttled = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
-
/* Start reading from the device */
- usb_fill_bulk_urb(port->read_urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- visor_read_bulk_callback, port);
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result) {
- dev_err(&port->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, result);
+ result = usb_serial_generic_open(tty, port);
+ if (result)
goto exit;
- }
if (port->interrupt_in_urb) {
dbg("%s - adding interrupt input for treo", __func__);
@@ -323,13 +271,12 @@ exit:
static void visor_close(struct usb_serial_port *port)
{
- struct visor_private *priv = usb_get_serial_port_data(port);
unsigned char *transfer_buffer;
dbg("%s - port %d", __func__, port->number);
/* shutdown our urbs */
- usb_kill_urb(port->read_urb);
+ usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
mutex_lock(&port->serial->disc_mutex);
@@ -346,192 +293,6 @@ static void visor_close(struct usb_serial_port *port)
}
}
mutex_unlock(&port->serial->disc_mutex);
-
- if (stats)
- dev_info(&port->dev, "Bytes In = %d Bytes Out = %d\n",
- priv->bytes_in, priv->bytes_out);
-}
-
-
-static int visor_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- struct visor_private *priv = usb_get_serial_port_data(port);
- struct usb_serial *serial = port->serial;
- struct urb *urb;
- unsigned char *buffer;
- unsigned long flags;
- int status;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&priv->lock, flags);
- if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
- spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - write limit hit", __func__);
- return 0;
- }
- priv->outstanding_urbs++;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- buffer = kmalloc(count, GFP_ATOMIC);
- if (!buffer) {
- dev_err(&port->dev, "out of memory\n");
- count = -ENOMEM;
- goto error_no_buffer;
- }
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- dev_err(&port->dev, "no more free urbs\n");
- count = -ENOMEM;
- goto error_no_urb;
- }
-
- memcpy(buffer, buf, count);
-
- usb_serial_debug_data(debug, &port->dev, __func__, count, buffer);
-
- usb_fill_bulk_urb(urb, serial->dev,
- usb_sndbulkpipe(serial->dev,
- port->bulk_out_endpointAddress),
- buffer, count,
- visor_write_bulk_callback, port);
-
- /* send it down the pipe */
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- dev_err(&port->dev,
- "%s - usb_submit_urb(write bulk) failed with status = %d\n",
- __func__, status);
- count = status;
- goto error;
- } else {
- spin_lock_irqsave(&priv->lock, flags);
- priv->bytes_out += count;
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
- /* we are done with this urb, so let the host driver
- * really free it when it is finished with it */
- usb_free_urb(urb);
-
- return count;
-error:
- usb_free_urb(urb);
-error_no_urb:
- kfree(buffer);
-error_no_buffer:
- spin_lock_irqsave(&priv->lock, flags);
- --priv->outstanding_urbs;
- spin_unlock_irqrestore(&priv->lock, flags);
- return count;
-}
-
-
-static int visor_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct visor_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- /*
- * We really can take anything the user throws at us
- * but let's pick a nice big number to tell the tty
- * layer that we have lots of free space, unless we don't.
- */
-
- spin_lock_irqsave(&priv->lock, flags);
- if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) {
- spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - write limit hit", __func__);
- return 0;
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 2048;
-}
-
-
-static void visor_write_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct visor_private *priv = usb_get_serial_port_data(port);
- int status = urb->status;
- unsigned long flags;
-
- /* free up the transfer buffer, as usb_free_urb() does not do this */
- kfree(urb->transfer_buffer);
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status)
- dbg("%s - nonzero write bulk status received: %d",
- __func__, status);
-
- spin_lock_irqsave(&priv->lock, flags);
- --priv->outstanding_urbs;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- usb_serial_port_softint(port);
-}
-
-
-static void visor_read_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct visor_private *priv = usb_get_serial_port_data(port);
- unsigned char *data = urb->transfer_buffer;
- int status = urb->status;
- struct tty_struct *tty;
- int result;
- int available_room = 0;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - nonzero read bulk status received: %d",
- __func__, status);
- return;
- }
-
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
-
- if (urb->actual_length) {
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- tty_insert_flip_string(tty, data,
- urb->actual_length);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
- spin_lock(&priv->lock);
- if (tty)
- priv->bytes_in += available_room;
-
- } else {
- spin_lock(&priv->lock);
- }
-
- /* Continue trying to always read if we should */
- if (!priv->throttled) {
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- visor_read_bulk_callback, port);
- result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- if (result)
- dev_err(&port->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- } else
- priv->actually_throttled = 1;
- spin_unlock(&priv->lock);
}
static void visor_read_int_callback(struct urb *urb)
@@ -575,41 +336,6 @@ exit:
__func__, result);
}
-static void visor_throttle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct visor_private *priv = usb_get_serial_port_data(port);
-
- dbg("%s - port %d", __func__, port->number);
- spin_lock_irq(&priv->lock);
- priv->throttled = 1;
- spin_unlock_irq(&priv->lock);
-}
-
-
-static void visor_unthrottle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct visor_private *priv = usb_get_serial_port_data(port);
- int result, was_throttled;
-
- dbg("%s - port %d", __func__, port->number);
- spin_lock_irq(&priv->lock);
- priv->throttled = 0;
- was_throttled = priv->actually_throttled;
- priv->actually_throttled = 0;
- spin_unlock_irq(&priv->lock);
-
- if (was_throttled) {
- port->read_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result)
- dev_err(&port->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, result);
- }
-}
-
static int palm_os_3_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
@@ -777,28 +503,6 @@ static int visor_calc_num_ports(struct usb_serial *serial)
return num_ports;
}
-static int generic_startup(struct usb_serial *serial)
-{
- struct usb_serial_port **ports = serial->port;
- struct visor_private *priv;
- int i;
-
- for (i = 0; i < serial->num_ports; ++i) {
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- while (i-- != 0) {
- priv = usb_get_serial_port_data(ports[i]);
- usb_set_serial_port_data(ports[i], NULL);
- kfree(priv);
- }
- return -ENOMEM;
- }
- spin_lock_init(&priv->lock);
- usb_set_serial_port_data(ports[i], priv);
- }
- return 0;
-}
-
static int clie_3_5_startup(struct usb_serial *serial)
{
struct device *dev = &serial->dev->dev;
@@ -849,7 +553,7 @@ static int clie_3_5_startup(struct usb_serial *serial)
goto out;
}
- result = generic_startup(serial);
+ result = 0;
out:
kfree(data);
@@ -867,7 +571,7 @@ static int treo_attach(struct usb_serial *serial)
(le16_to_cpu(serial->dev->descriptor.idVendor)
== KYOCERA_VENDOR_ID)) ||
(serial->num_interrupt_in == 0))
- goto generic_startup;
+ return 0;
dbg("%s", __func__);
@@ -897,8 +601,7 @@ static int treo_attach(struct usb_serial *serial)
COPY_PORT(serial->port[1], swap_port);
kfree(swap_port);
-generic_startup:
- return generic_startup(serial);
+ return 0;
}
static int clie_5_attach(struct usb_serial *serial)
@@ -921,20 +624,7 @@ static int clie_5_attach(struct usb_serial *serial)
serial->port[0]->bulk_out_endpointAddress =
serial->port[1]->bulk_out_endpointAddress;
- return generic_startup(serial);
-}
-
-static void visor_release(struct usb_serial *serial)
-{
- struct visor_private *priv;
- int i;
-
- dbg("%s", __func__);
-
- for (i = 0; i < serial->num_ports; i++) {
- priv = usb_get_serial_port_data(serial->port[i]);
- kfree(priv);
- }
+ return 0;
}
static int __init visor_init(void)
@@ -1018,8 +708,6 @@ MODULE_LICENSE("GPL");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
-module_param(stats, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(stats, "Enables statistics or not");
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "User specified vendor ID");
diff --git a/drivers/usb/serial/visor.h b/drivers/usb/serial/visor.h
index 57229cf..88db4d0 100644
--- a/drivers/usb/serial/visor.h
+++ b/drivers/usb/serial/visor.h
@@ -9,8 +9,9 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * See Documentation/usb/usb-serial.txt for more information on using this driver
- *
+ * See Documentation/usb/usb-serial.txt for more information on using this
+ * driver.
+ *
*/
#ifndef __LINUX_USB_SERIAL_VISOR_H
@@ -65,7 +66,7 @@
#define ACEECA_MEZ1000_ID 0x0001
#define KYOCERA_VENDOR_ID 0x0C88
-#define KYOCERA_7135_ID 0x0021
+#define KYOCERA_7135_ID 0x0021
#define FOSSIL_VENDOR_ID 0x0E67
#define FOSSIL_ABACUS_ID 0x0002
@@ -145,7 +146,7 @@ struct visor_connection_info {
* The maximum number of connections currently supported is 2
*/
struct palm_ext_connection_info {
- __u8 num_ports;
+ __u8 num_ports;
__u8 endpoint_numbers_different;
__le16 reserved1;
struct {
diff --git a/drivers/usb/serial/zio.c b/drivers/usb/serial/zio.c
new file mode 100644
index 0000000..f579672
--- /dev/null
+++ b/drivers/usb/serial/zio.c
@@ -0,0 +1,64 @@
+/*
+ * ZIO Motherboard USB driver
+ *
+ * Copyright (C) 2010 Zilogic Systems <code@zilogic.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+#include <linux/uaccess.h>
+
+static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1CBE, 0x0103) },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver zio_driver = {
+ .name = "zio",
+ .probe = usb_serial_probe,
+ .disconnect = usb_serial_disconnect,
+ .id_table = id_table,
+ .no_dynamic_id = 1,
+};
+
+static struct usb_serial_driver zio_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "zio",
+ },
+ .id_table = id_table,
+ .usb_driver = &zio_driver,
+ .num_ports = 1,
+};
+
+static int __init zio_init(void)
+{
+ int retval;
+
+ retval = usb_serial_register(&zio_device);
+ if (retval)
+ return retval;
+ retval = usb_register(&zio_driver);
+ if (retval)
+ usb_serial_deregister(&zio_device);
+ return retval;
+}
+
+static void __exit zio_exit(void)
+{
+ usb_deregister(&zio_driver);
+ usb_serial_deregister(&zio_device);
+}
+
+module_init(zio_init);
+module_exit(zio_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index fdba2f6..e9cbc14 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -490,13 +490,13 @@ static int isd200_action( struct us_data *us, int action,
void* pointer, int value )
{
union ata_cdb ata;
- struct scsi_device srb_dev;
+ /* static to keep this large struct off the limited kernel stack */
+ static struct scsi_device srb_dev;
struct isd200_info *info = (struct isd200_info *)us->extra;
struct scsi_cmnd *srb = &info->srb;
int status;
memset(&ata, 0, sizeof(ata));
- memset(&srb_dev, 0, sizeof(srb_dev));
srb->cmnd = info->cmnd;
srb->device = &srb_dev;
++srb->serial_number;
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 198bb3e..1943be5 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -201,8 +201,8 @@ static int onetouch_connect_input(struct us_data *ss)
if (!onetouch || !input_dev)
goto fail1;
- onetouch->data = usb_buffer_alloc(udev, ONETOUCH_PKT_LEN,
- GFP_KERNEL, &onetouch->data_dma);
+ onetouch->data = usb_alloc_coherent(udev, ONETOUCH_PKT_LEN,
+ GFP_KERNEL, &onetouch->data_dma);
if (!onetouch->data)
goto fail1;
@@ -264,8 +264,8 @@ static int onetouch_connect_input(struct us_data *ss)
return 0;
fail3: usb_free_urb(onetouch->irq);
- fail2: usb_buffer_free(udev, ONETOUCH_PKT_LEN,
- onetouch->data, onetouch->data_dma);
+ fail2: usb_free_coherent(udev, ONETOUCH_PKT_LEN,
+ onetouch->data, onetouch->data_dma);
fail1: kfree(onetouch);
input_free_device(input_dev);
return error;
@@ -279,8 +279,8 @@ static void onetouch_release_input(void *onetouch_)
usb_kill_urb(onetouch->irq);
input_unregister_device(onetouch->dev);
usb_free_urb(onetouch->irq);
- usb_buffer_free(onetouch->udev, ONETOUCH_PKT_LEN,
- onetouch->data, onetouch->data_dma);
+ usb_free_coherent(onetouch->udev, ONETOUCH_PKT_LEN,
+ onetouch->data, onetouch->data_dma);
}
}
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index f253ede..4471642 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -147,11 +147,9 @@ static int usb_stor_msg_common(struct us_data *us, int timeout)
* hasn't been mapped for DMA. Yes, this is clunky, but it's
* easier than always having the caller tell us whether the
* transfer buffer has already been mapped. */
- us->current_urb->transfer_flags = URB_NO_SETUP_DMA_MAP;
if (us->current_urb->transfer_buffer == us->iobuf)
us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
us->current_urb->transfer_dma = us->iobuf_dma;
- us->current_urb->setup_dma = us->cr_dma;
/* submit the URB */
status = usb_submit_urb(us->current_urb, GFP_NOIO);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ccf1dbb..2c897ee 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -365,15 +365,6 @@ UNUSUAL_DEV( 0x04cb, 0x0100, 0x0000, 0x2210,
"FinePix 1400Zoom",
US_SC_UFI, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY | US_FL_SINGLE_LUN),
-/* Reported by Peter Wächtler <pwaechtler@loewe-komp.de>
- * The device needs the flags only.
- */
-UNUSUAL_DEV( 0x04ce, 0x0002, 0x0074, 0x0074,
- "ScanLogic",
- "SL11R-IDE",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_INQUIRY),
-
/* Reported by Ondrej Zary <linux@rainbow-software.org>
* The device reports one sector more and breaks when that sector is accessed
*/
@@ -1853,6 +1844,21 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+/* Reported by Hans de Goede <hdegoede@redhat.com>
+ * These Appotech controllers are found in picture frames; they provide a
+ * (buggy) emulation of a CD-ROM drive which contains the Windows software.
+ * Uploading of pictures happens over the corresponding /dev/sg device. */
+UNUSUAL_DEV( 0x1908, 0x1315, 0x0000, 0x0000,
+ "BUILDWIN",
+ "Photo Frame",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_BAD_SENSE ),
+UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0000,
+ "BUILDWIN",
+ "Photo Frame",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_BAD_SENSE ),
+
UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
"ST",
"2A",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index bbeeb92..a7d0bf9 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -407,15 +407,14 @@ static int associate_dev(struct us_data *us, struct usb_interface *intf)
/* Store our private data in the interface */
usb_set_intfdata(intf, us);
- /* Allocate the device-related DMA-mapped buffers */
- us->cr = usb_buffer_alloc(us->pusb_dev, sizeof(*us->cr),
- GFP_KERNEL, &us->cr_dma);
+ /* Allocate the control/setup and DMA-mapped buffers */
+ us->cr = kmalloc(sizeof(*us->cr), GFP_KERNEL);
if (!us->cr) {
US_DEBUGP("usb_ctrlrequest allocation failed\n");
return -ENOMEM;
}
- us->iobuf = usb_buffer_alloc(us->pusb_dev, US_IOBUF_SIZE,
+ us->iobuf = usb_alloc_coherent(us->pusb_dev, US_IOBUF_SIZE,
GFP_KERNEL, &us->iobuf_dma);
if (!us->iobuf) {
US_DEBUGP("I/O buffer allocation failed\n");
@@ -499,9 +498,6 @@ static void adjust_quirks(struct us_data *us)
}
}
us->fflags = (us->fflags & ~mask) | f;
- dev_info(&us->pusb_intf->dev, "Quirks match for "
- "vid %04x pid %04x: %x\n",
- vid, pid, f);
}
/* Get the unusual_devs entries and the string descriptors */
@@ -511,6 +507,7 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
struct usb_device *dev = us->pusb_dev;
struct usb_interface_descriptor *idesc =
&us->pusb_intf->cur_altsetting->desc;
+ struct device *pdev = &us->pusb_intf->dev;
/* Store the entries */
us->unusual_dev = unusual_dev;
@@ -524,7 +521,7 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
adjust_quirks(us);
if (us->fflags & US_FL_IGNORE_DEVICE) {
- printk(KERN_INFO USB_STORAGE "device ignored\n");
+ dev_info(pdev, "device ignored\n");
return -ENODEV;
}
@@ -535,6 +532,12 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
if (dev->speed != USB_SPEED_HIGH)
us->fflags &= ~US_FL_GO_SLOW;
+ if (us->fflags)
+ dev_info(pdev, "Quirks match for vid %04x pid %04x: %lx\n",
+ le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct),
+ us->fflags);
+
/* Log a message if a non-generic unusual_dev entry contains an
* unnecessary subclass or protocol override. This may stimulate
* reports from users that will help us remove unneeded entries
@@ -555,20 +558,20 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
us->protocol == idesc->bInterfaceProtocol)
msg += 2;
if (msg >= 0 && !(us->fflags & US_FL_NEED_OVERRIDE))
- printk(KERN_NOTICE USB_STORAGE "This device "
- "(%04x,%04x,%04x S %02x P %02x)"
- " has %s in unusual_devs.h (kernel"
- " %s)\n"
- " Please send a copy of this message to "
- "<linux-usb@vger.kernel.org> and "
- "<usb-storage@lists.one-eyed-alien.net>\n",
- le16_to_cpu(ddesc->idVendor),
- le16_to_cpu(ddesc->idProduct),
- le16_to_cpu(ddesc->bcdDevice),
- idesc->bInterfaceSubClass,
- idesc->bInterfaceProtocol,
- msgs[msg],
- utsname()->release);
+ dev_notice(pdev, "This device "
+ "(%04x,%04x,%04x S %02x P %02x)"
+ " has %s in unusual_devs.h (kernel"
+ " %s)\n"
+ " Please send a copy of this message to "
+ "<linux-usb@vger.kernel.org> and "
+ "<usb-storage@lists.one-eyed-alien.net>\n",
+ le16_to_cpu(ddesc->idVendor),
+ le16_to_cpu(ddesc->idProduct),
+ le16_to_cpu(ddesc->bcdDevice),
+ idesc->bInterfaceSubClass,
+ idesc->bInterfaceProtocol,
+ msgs[msg],
+ utsname()->release);
}
return 0;
@@ -718,8 +721,8 @@ static int usb_stor_acquire_resources(struct us_data *us)
/* Start up our control thread */
th = kthread_run(usb_stor_control_thread, us, "usb-storage");
if (IS_ERR(th)) {
- printk(KERN_WARNING USB_STORAGE
- "Unable to start control thread\n");
+ dev_warn(&us->pusb_intf->dev,
+ "Unable to start control thread\n");
return PTR_ERR(th);
}
us->ctl_thread = th;
@@ -757,13 +760,9 @@ static void dissociate_dev(struct us_data *us)
{
US_DEBUGP("-- %s\n", __func__);
- /* Free the device-related DMA-mapped buffers */
- if (us->cr)
- usb_buffer_free(us->pusb_dev, sizeof(*us->cr), us->cr,
- us->cr_dma);
- if (us->iobuf)
- usb_buffer_free(us->pusb_dev, US_IOBUF_SIZE, us->iobuf,
- us->iobuf_dma);
+ /* Free the buffers */
+ kfree(us->cr);
+ usb_free_coherent(us->pusb_dev, US_IOBUF_SIZE, us->iobuf, us->iobuf_dma);
/* Remove our private data from the interface */
usb_set_intfdata(us->pusb_intf, NULL);
@@ -816,13 +815,14 @@ static void release_everything(struct us_data *us)
static int usb_stor_scan_thread(void * __us)
{
struct us_data *us = (struct us_data *)__us;
+ struct device *dev = &us->pusb_intf->dev;
- dev_dbg(&us->pusb_intf->dev, "device found\n");
+ dev_dbg(dev, "device found\n");
set_freezable();
/* Wait for the timeout to expire or for a disconnect */
if (delay_use > 0) {
- dev_dbg(&us->pusb_intf->dev, "waiting for device to settle "
+ dev_dbg(dev, "waiting for device to settle "
"before scanning\n");
wait_event_freezable_timeout(us->delay_wait,
test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
@@ -840,7 +840,7 @@ static int usb_stor_scan_thread(void * __us)
mutex_unlock(&us->dev_mutex);
}
scsi_scan_host(us_to_host(us));
- dev_dbg(&us->pusb_intf->dev, "scan complete\n");
+ dev_dbg(dev, "scan complete\n");
/* Should we unbind if no devices were detected? */
}
@@ -876,8 +876,8 @@ int usb_stor_probe1(struct us_data **pus,
*/
host = scsi_host_alloc(&usb_stor_host_template, sizeof(*us));
if (!host) {
- printk(KERN_WARNING USB_STORAGE
- "Unable to allocate the scsi host\n");
+ dev_warn(&intf->dev,
+ "Unable to allocate the scsi host\n");
return -ENOMEM;
}
@@ -925,6 +925,7 @@ int usb_stor_probe2(struct us_data *us)
{
struct task_struct *th;
int result;
+ struct device *dev = &us->pusb_intf->dev;
/* Make sure the transport and protocol have both been set */
if (!us->transport || !us->proto_handler) {
@@ -949,18 +950,18 @@ int usb_stor_probe2(struct us_data *us)
goto BadDevice;
snprintf(us->scsi_name, sizeof(us->scsi_name), "usb-storage %s",
dev_name(&us->pusb_intf->dev));
- result = scsi_add_host(us_to_host(us), &us->pusb_intf->dev);
+ result = scsi_add_host(us_to_host(us), dev);
if (result) {
- printk(KERN_WARNING USB_STORAGE
- "Unable to add the scsi host\n");
+ dev_warn(dev,
+ "Unable to add the scsi host\n");
goto BadDevice;
}
/* Start up the thread for delayed SCSI-device scanning */
th = kthread_create(usb_stor_scan_thread, us, "usb-stor-scan");
if (IS_ERR(th)) {
- printk(KERN_WARNING USB_STORAGE
- "Unable to start the device-scanning thread\n");
+ dev_warn(dev,
+ "Unable to start the device-scanning thread\n");
complete(&us->scanning_done);
quiesce_and_remove_host(us);
result = PTR_ERR(th);
@@ -1046,12 +1047,12 @@ static int __init usb_stor_init(void)
{
int retval;
- printk(KERN_INFO "Initializing USB Mass Storage driver...\n");
+ pr_info("Initializing USB Mass Storage driver...\n");
/* register the driver, return usb_register return code if error */
retval = usb_register(&usb_storage_driver);
if (retval == 0) {
- printk(KERN_INFO "USB Mass Storage support registered.\n");
+ pr_info("USB Mass Storage support registered.\n");
usb_usual_set_present(USB_US_TYPE_STOR);
}
return retval;
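These hunks switch usb-storage from raw printk() with the hand-rolled USB_STORAGE prefix to the dev_*() helpers, which automatically prefix each message with the driver and device name. A minimal before/after sketch, with the interface pointer assumed in scope:

/* old style: manual prefix string, no indication of which device */
printk(KERN_WARNING USB_STORAGE "Unable to start control thread\n");

/* new style: message is prefixed like "usb-storage 1-2:1.0: ..." */
dev_warn(&intf->dev, "Unable to start control thread\n");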
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 6971713..89d3bff 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -139,8 +139,7 @@ struct us_data {
struct usb_ctrlrequest *cr; /* control requests */
struct usb_sg_request current_sg; /* scatter-gather req. */
unsigned char *iobuf; /* I/O buffer */
- dma_addr_t cr_dma; /* buffer DMA addresses */
- dma_addr_t iobuf_dma;
+ dma_addr_t iobuf_dma; /* buffer DMA addresses */
struct task_struct *ctl_thread; /* the control thread */
/* mutual exclusion and synchronization structures */
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 6152278..d110588 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -387,8 +387,8 @@ static void skel_write_bulk_callback(struct urb *urb)
}
/* free up our allocated buffer */
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
up(&dev->limit_sem);
}
@@ -442,8 +442,8 @@ static ssize_t skel_write(struct file *file, const char *user_buffer,
goto error;
}
- buf = usb_buffer_alloc(dev->udev, writesize, GFP_KERNEL,
- &urb->transfer_dma);
+ buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL,
+ &urb->transfer_dma);
if (!buf) {
retval = -ENOMEM;
goto error;
@@ -491,7 +491,7 @@ error_unanchor:
usb_unanchor_urb(urb);
error:
if (urb) {
- usb_buffer_free(dev->udev, writesize, buf, urb->transfer_dma);
+ usb_free_coherent(dev->udev, writesize, buf, urb->transfer_dma);
usb_free_urb(urb);
}
up(&dev->limit_sem);
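usb_buffer_alloc()/usb_buffer_free() are renamed to usb_alloc_coherent()/usb_free_coherent() throughout these hunks. A hedged sketch of the pairing: the allocator returns both a CPU pointer and a DMA address, and the URB is flagged so the core does not map the buffer again (function and variable names here are placeholders, not part of any of the drivers above):

static int example_submit_coherent(struct usb_device *udev, unsigned int pipe,
				   size_t len, usb_complete_t done, void *ctx)
{
	struct urb *urb;
	void *buf;
	int ret;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return -ENOMEM;

	buf = usb_alloc_coherent(udev, len, GFP_KERNEL, &urb->transfer_dma);
	if (!buf) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	usb_fill_bulk_urb(urb, udev, pipe, buf, len, done, ctx);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;	/* transfer_dma already set */

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		usb_free_coherent(udev, len, buf, urb->transfer_dma);
		usb_free_urb(urb);
	}
	return ret;
}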
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 608d61a..84b744c 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -474,8 +474,6 @@ static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
struct wa_xfer_ctl *xfer_ctl =
container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
- BUG_ON(xfer->urb->transfer_flags & URB_NO_SETUP_DMA_MAP
- && xfer->urb->setup_packet == NULL);
memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
sizeof(xfer_ctl->baSetupData));
break;
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
index 759cda5..3d94c42 100644
--- a/drivers/usb/wusbcore/wusbhc.h
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -58,9 +58,7 @@
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
-/* FIXME: Yes, I know: BAD--it's not my fault the USB HC iface is not
- * public */
-#include <linux/../../drivers/usb/core/hcd.h>
+#include <linux/usb/hcd.h>
#include <linux/uwb.h>
#include <linux/usb/wusb.h>
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9777583..aa88911 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -642,7 +642,7 @@ static struct miscdevice vhost_net_misc = {
&vhost_net_fops,
};
-int vhost_net_init(void)
+static int vhost_net_init(void)
{
int r = vhost_init();
if (r)
@@ -659,7 +659,7 @@ err_init:
}
module_init(vhost_net_init);
-void vhost_net_exit(void)
+static void vhost_net_exit(void)
{
misc_deregister(&vhost_net_misc);
vhost_cleanup();
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 49fa953..750effe 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -715,8 +715,8 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
return 0;
}
-int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
- struct iovec iov[], int iov_size)
+static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
+ struct iovec iov[], int iov_size)
{
const struct vhost_memory_region *reg;
struct vhost_memory *mem;
@@ -741,7 +741,7 @@ int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
_iov = iov + ret;
size = reg->memory_size - addr + reg->guest_phys_addr;
_iov->iov_len = min((u64)len, size);
- _iov->iov_base = (void *)(unsigned long)
+ _iov->iov_base = (void __user *)(unsigned long)
(reg->userspace_addr + addr - reg->guest_phys_addr);
s += size;
addr += size;
@@ -995,7 +995,7 @@ void vhost_discard_vq_desc(struct vhost_virtqueue *vq)
* want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
- struct vring_used_elem *used;
+ struct vring_used_elem __user *used;
/* The virtqueue contains a ring of used buffers. Get a pointer to the
* next entry in that used ring. */
@@ -1019,7 +1019,8 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
smp_wmb();
/* Log used ring entry write. */
log_write(vq->log_base,
- vq->log_addr + ((void *)used - (void *)vq->used),
+ vq->log_addr +
+ ((void __user *)used - (void __user *)vq->used),
sizeof *used);
/* Log used index update. */
log_write(vq->log_base,
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 6e16244..fd55c27 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1511,6 +1511,7 @@ config FB_VIA
select FB_CFB_IMAGEBLIT
select I2C_ALGOBIT
select I2C
+ select GPIOLIB
help
This is the frame buffer device driver for Graphics chips of VIA
UniChrome (Pro) Family (CLE266,PM800/CN400,P4M800CE/P4M800Pro/
@@ -1520,6 +1521,21 @@ config FB_VIA
To compile this driver as a module, choose M here: the
module will be called viafb.
+
+if FB_VIA
+
+config FB_VIA_DIRECT_PROCFS
+ bool "direct hardware access via procfs (DEPRECATED)(DANGEROUS)"
+ depends on FB_VIA
+ default n
+ help
+ Allow direct hardware access to some output registers via procfs.
+ This is dangerous but may provide the only chance to get the
+ correct output device configuration.
+ Its use is strongly discouraged.
+
+endif
+
config FB_NEOMAGIC
tristate "NeoMagic display support"
depends on FB && PCI
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 6c37e8e..3c1e13e 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -2099,7 +2099,7 @@ static ssize_t radeon_show_one_edid(char *buf, loff_t off, size_t count, const u
}
-static ssize_t radeon_show_edid1(struct kobject *kobj,
+static ssize_t radeon_show_edid1(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -2112,7 +2112,7 @@ static ssize_t radeon_show_edid1(struct kobject *kobj,
}
-static ssize_t radeon_show_edid2(struct kobject *kobj,
+static ssize_t radeon_show_edid2(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/drivers/video/via/Makefile b/drivers/video/via/Makefile
index eeed238..d496adb 100644
--- a/drivers/video/via/Makefile
+++ b/drivers/video/via/Makefile
@@ -4,4 +4,6 @@
obj-$(CONFIG_FB_VIA) += viafb.o
-viafb-y :=viafbdev.o hw.o via_i2c.o dvi.o lcd.o ioctl.o accel.o via_utility.o vt1636.o global.o tblDPASetting.o viamode.o tbl1636.o
+viafb-y :=viafbdev.o hw.o via_i2c.o dvi.o lcd.o ioctl.o accel.o \
+ via_utility.o vt1636.o global.o tblDPASetting.o viamode.o tbl1636.o \
+ via-core.o via-gpio.o via_modesetting.o
diff --git a/drivers/video/via/accel.c b/drivers/video/via/accel.c
index d5077df..e44893e 100644
--- a/drivers/video/via/accel.c
+++ b/drivers/video/via/accel.c
@@ -18,14 +18,45 @@
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
#include "global.h"
+/*
+ * Figure out an appropriate bytes-per-pixel setting.
+ */
+static int viafb_set_bpp(void __iomem *engine, u8 bpp)
+{
+ u32 gemode;
+
+ /* Preserve the reserved bits */
+ /* Lowest 2 bits to zero gives us no rotation */
+ gemode = readl(engine + VIA_REG_GEMODE) & 0xfffffcfc;
+ switch (bpp) {
+ case 8:
+ gemode |= VIA_GEM_8bpp;
+ break;
+ case 16:
+ gemode |= VIA_GEM_16bpp;
+ break;
+ case 32:
+ gemode |= VIA_GEM_32bpp;
+ break;
+ default:
+ printk(KERN_WARNING "viafb_set_bpp: Unsupported bpp %d\n", bpp);
+ return -EINVAL;
+ }
+ writel(gemode, engine + VIA_REG_GEMODE);
+ return 0;
+}
+
+
static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
u8 dst_bpp, u32 dst_addr, u32 dst_pitch, u32 dst_x, u32 dst_y,
u32 *src_mem, u32 src_addr, u32 src_pitch, u32 src_x, u32 src_y,
u32 fg_color, u32 bg_color, u8 fill_rop)
{
u32 ge_cmd = 0, tmp, i;
+ int ret;
if (!op || op > 3) {
printk(KERN_WARNING "hw_bitblt_1: Invalid operation: %d\n", op);
@@ -59,22 +90,9 @@ static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
}
}
- switch (dst_bpp) {
- case 8:
- tmp = 0x00000000;
- break;
- case 16:
- tmp = 0x00000100;
- break;
- case 32:
- tmp = 0x00000300;
- break;
- default:
- printk(KERN_WARNING "hw_bitblt_1: Unsupported bpp %d\n",
- dst_bpp);
- return -EINVAL;
- }
- writel(tmp, engine + 0x04);
+ ret = viafb_set_bpp(engine, dst_bpp);
+ if (ret)
+ return ret;
if (op != VIA_BITBLT_FILL) {
if (src_x & (op == VIA_BITBLT_MONO ? 0xFFFF8000 : 0xFFFFF000)
@@ -171,6 +189,7 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
u32 fg_color, u32 bg_color, u8 fill_rop)
{
u32 ge_cmd = 0, tmp, i;
+ int ret;
if (!op || op > 3) {
printk(KERN_WARNING "hw_bitblt_2: Invalid operation: %d\n", op);
@@ -204,22 +223,9 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
}
}
- switch (dst_bpp) {
- case 8:
- tmp = 0x00000000;
- break;
- case 16:
- tmp = 0x00000100;
- break;
- case 32:
- tmp = 0x00000300;
- break;
- default:
- printk(KERN_WARNING "hw_bitblt_2: Unsupported bpp %d\n",
- dst_bpp);
- return -EINVAL;
- }
- writel(tmp, engine + 0x04);
+ ret = viafb_set_bpp(engine, dst_bpp);
+ if (ret)
+ return ret;
if (op == VIA_BITBLT_FILL)
tmp = 0;
@@ -312,17 +318,29 @@ int viafb_init_engine(struct fb_info *info)
{
struct viafb_par *viapar = info->par;
void __iomem *engine;
+ int highest_reg, i;
u32 vq_start_addr, vq_end_addr, vq_start_low, vq_end_low, vq_high,
vq_len, chip_name = viapar->shared->chip_info.gfx_chip_name;
- engine = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
- viapar->shared->engine_mmio = engine;
+ engine = viapar->shared->vdev->engine_mmio;
if (!engine) {
printk(KERN_WARNING "viafb_init_accel: ioremap failed, "
"hardware acceleration disabled\n");
return -ENOMEM;
}
+ /* Initialize registers to reset the 2D engine */
+ switch (viapar->shared->chip_info.twod_engine) {
+ case VIA_2D_ENG_M1:
+ highest_reg = 0x5c;
+ break;
+ default:
+ highest_reg = 0x40;
+ break;
+ }
+ for (i = 0; i <= highest_reg; i += 4)
+ writel(0x0, engine + i);
+
switch (chip_name) {
case UNICHROME_CLE266:
case UNICHROME_K400:
@@ -352,13 +370,28 @@ int viafb_init_engine(struct fb_info *info)
viapar->shared->vq_vram_addr = viapar->fbmem_free;
viapar->fbmem_used += VQ_SIZE;
- /* Init 2D engine reg to reset 2D engine */
- writel(0x0, engine + VIA_REG_KEYCONTROL);
+#if defined(CONFIG_FB_VIA_CAMERA) || defined(CONFIG_FB_VIA_CAMERA_MODULE)
+ /*
+ * Set aside a chunk of framebuffer memory for the camera
+ * driver. Someday this driver probably needs a proper allocator
+ * for fbmem; for now, we just have to do this before the
+ * framebuffer initializes itself.
+ *
+ * As for the size: the engine can handle three frames,
+ * 16 bits deep, up to VGA resolution.
+ */
+ viapar->shared->vdev->camera_fbmem_size = 3*VGA_HEIGHT*VGA_WIDTH*2;
+ viapar->fbmem_free -= viapar->shared->vdev->camera_fbmem_size;
+ viapar->fbmem_used += viapar->shared->vdev->camera_fbmem_size;
+ viapar->shared->vdev->camera_fbmem_offset = viapar->fbmem_free;
+#endif
/* Init AGP and VQ regs */
switch (chip_name) {
case UNICHROME_K8M890:
case UNICHROME_P4M900:
+ case UNICHROME_VX800:
+ case UNICHROME_VX855:
writel(0x00100000, engine + VIA_REG_CR_TRANSET);
writel(0x680A0000, engine + VIA_REG_CR_TRANSPACE);
writel(0x02000000, engine + VIA_REG_CR_TRANSPACE);
@@ -393,6 +426,8 @@ int viafb_init_engine(struct fb_info *info)
switch (chip_name) {
case UNICHROME_K8M890:
case UNICHROME_P4M900:
+ case UNICHROME_VX800:
+ case UNICHROME_VX855:
vq_start_low |= 0x20000000;
vq_end_low |= 0x20000000;
vq_high |= 0x20000000;
@@ -446,7 +481,7 @@ void viafb_show_hw_cursor(struct fb_info *info, int Status)
struct viafb_par *viapar = info->par;
u32 temp, iga_path = viapar->iga_path;
- temp = readl(viapar->shared->engine_mmio + VIA_REG_CURSOR_MODE);
+ temp = readl(viapar->shared->vdev->engine_mmio + VIA_REG_CURSOR_MODE);
switch (Status) {
case HW_Cursor_ON:
temp |= 0x1;
@@ -463,23 +498,33 @@ void viafb_show_hw_cursor(struct fb_info *info, int Status)
default:
temp &= 0x7FFFFFFF;
}
- writel(temp, viapar->shared->engine_mmio + VIA_REG_CURSOR_MODE);
+ writel(temp, viapar->shared->vdev->engine_mmio + VIA_REG_CURSOR_MODE);
}
void viafb_wait_engine_idle(struct fb_info *info)
{
struct viafb_par *viapar = info->par;
int loop = 0;
-
- while (!(readl(viapar->shared->engine_mmio + VIA_REG_STATUS) &
- VIA_VR_QUEUE_BUSY) && (loop < MAXLOOP)) {
- loop++;
- cpu_relax();
+ u32 mask;
+ void __iomem *engine = viapar->shared->vdev->engine_mmio;
+
+ switch (viapar->shared->chip_info.twod_engine) {
+ case VIA_2D_ENG_H5:
+ case VIA_2D_ENG_M1:
+ mask = VIA_CMD_RGTR_BUSY_M1 | VIA_2D_ENG_BUSY_M1 |
+ VIA_3D_ENG_BUSY_M1;
+ break;
+ default:
+ while (!(readl(engine + VIA_REG_STATUS) &
+ VIA_VR_QUEUE_BUSY) && (loop < MAXLOOP)) {
+ loop++;
+ cpu_relax();
+ }
+ mask = VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY;
+ break;
}
- while ((readl(viapar->shared->engine_mmio + VIA_REG_STATUS) &
- (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY)) &&
- (loop < MAXLOOP)) {
+ while ((readl(engine + VIA_REG_STATUS) & mask) && (loop < MAXLOOP)) {
loop++;
cpu_relax();
}
diff --git a/drivers/video/via/accel.h b/drivers/video/via/accel.h
index 615c84a..2c122d2 100644
--- a/drivers/video/via/accel.h
+++ b/drivers/video/via/accel.h
@@ -67,6 +67,34 @@
/* from 0x100 to 0x1ff */
#define VIA_REG_COLORPAT 0x100
+/* defines for VIA 2D registers for vt3353/3409 (M1 engine) */
+#define VIA_REG_GECMD_M1 0x000
+#define VIA_REG_GEMODE_M1 0x004
+#define VIA_REG_GESTATUS_M1 0x004 /* same as VIA_REG_GEMODE */
+#define VIA_REG_PITCH_M1 0x008 /* pitch of src and dst */
+#define VIA_REG_DIMENSION_M1 0x00C /* width and height */
+#define VIA_REG_DSTPOS_M1 0x010
+#define VIA_REG_LINE_XY_M1 0x010
+#define VIA_REG_DSTBASE_M1 0x014
+#define VIA_REG_SRCPOS_M1 0x018
+#define VIA_REG_LINE_K1K2_M1 0x018
+#define VIA_REG_SRCBASE_M1 0x01C
+#define VIA_REG_PATADDR_M1 0x020
+#define VIA_REG_MONOPAT0_M1 0x024
+#define VIA_REG_MONOPAT1_M1 0x028
+#define VIA_REG_OFFSET_M1 0x02C
+#define VIA_REG_LINE_ERROR_M1 0x02C
+#define VIA_REG_CLIPTL_M1 0x040 /* top and left of clipping */
+#define VIA_REG_CLIPBR_M1 0x044 /* bottom and right of clipping */
+#define VIA_REG_KEYCONTROL_M1 0x048 /* color key control */
+#define VIA_REG_FGCOLOR_M1 0x04C
+#define VIA_REG_DSTCOLORKEY_M1 0x04C /* same as VIA_REG_FGCOLOR_M1 */
+#define VIA_REG_BGCOLOR_M1 0x050
+#define VIA_REG_SRCCOLORKEY_M1 0x050 /* same as VIA_REG_BGCOLOR_M1 */
+#define VIA_REG_MONOPATFGC_M1 0x058 /* FG color of the monochrome pattern */
+#define VIA_REG_MONOPATBGC_M1 0x05C /* BG color of the monochrome pattern */
+#define VIA_REG_COLORPAT_M1 0x100 /* from 0x100 to 0x1ff */
+
/* VIA_REG_PITCH(0x38): Pitch Setting */
#define VIA_PITCH_ENABLE 0x80000000
@@ -157,6 +185,18 @@
/* Virtual Queue is busy */
#define VIA_VR_QUEUE_BUSY 0x00020000
+/* VIA_REG_STATUS(0x400): Engine Status for H5 */
+#define VIA_CMD_RGTR_BUSY_H5 0x00000010 /* Command Regulator is busy */
+#define VIA_2D_ENG_BUSY_H5 0x00000002 /* 2D Engine is busy */
+#define VIA_3D_ENG_BUSY_H5 0x00001FE1 /* 3D Engine is busy */
+#define VIA_VR_QUEUE_BUSY_H5 0x00000004 /* Virtual Queue is busy */
+
+/* VIA_REG_STATUS(0x400): Engine Status for VT3353/3409 */
+#define VIA_CMD_RGTR_BUSY_M1 0x00000010 /* Command Regulator is busy */
+#define VIA_2D_ENG_BUSY_M1 0x00000002 /* 2D Engine is busy */
+#define VIA_3D_ENG_BUSY_M1 0x00001FE1 /* 3D Engine is busy */
+#define VIA_VR_QUEUE_BUSY_M1 0x00000004 /* Virtual Queue is busy */
+
#define MAXLOOP 0xFFFFFF
#define VIA_BITBLT_COLOR 1
diff --git a/drivers/video/via/chip.h b/drivers/video/via/chip.h
index 8c06bd3..d9b6e06 100644
--- a/drivers/video/via/chip.h
+++ b/drivers/video/via/chip.h
@@ -121,9 +121,17 @@ struct lvds_chip_information {
int i2c_port;
};
+/* The type of 2D engine */
+enum via_2d_engine {
+ VIA_2D_ENG_H2,
+ VIA_2D_ENG_H5,
+ VIA_2D_ENG_M1,
+};
+
struct chip_information {
int gfx_chip_name;
int gfx_chip_revision;
+ enum via_2d_engine twod_engine;
struct tmds_chip_information tmds_chip_info;
struct lvds_chip_information lvds_chip_info;
struct lvds_chip_information lvds_chip_info2;
diff --git a/drivers/video/via/dvi.c b/drivers/video/via/dvi.c
index abe59b8..39b040b 100644
--- a/drivers/video/via/dvi.c
+++ b/drivers/video/via/dvi.c
@@ -18,6 +18,8 @@
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
+#include <linux/via_i2c.h>
#include "global.h"
static void tmds_register_write(int index, u8 data);
@@ -96,7 +98,7 @@ int viafb_tmds_trasmitter_identify(void)
viaparinfo->chip_info->tmds_chip_info.tmds_chip_name = VT1632_TMDS;
viaparinfo->chip_info->
tmds_chip_info.tmds_chip_slave_addr = VT1632_TMDS_I2C_ADDR;
- viaparinfo->chip_info->tmds_chip_info.i2c_port = I2CPORTINDEX;
+ viaparinfo->chip_info->tmds_chip_info.i2c_port = VIA_PORT_31;
if (check_tmds_chip(VT1632_DEVICE_ID_REG, VT1632_DEVICE_ID) != FAIL) {
/*
* Currently only support 12bits,dual edge,add 24bits mode later
@@ -110,7 +112,7 @@ int viafb_tmds_trasmitter_identify(void)
viaparinfo->chip_info->tmds_chip_info.i2c_port);
return OK;
} else {
- viaparinfo->chip_info->tmds_chip_info.i2c_port = GPIOPORTINDEX;
+ viaparinfo->chip_info->tmds_chip_info.i2c_port = VIA_PORT_2C;
if (check_tmds_chip(VT1632_DEVICE_ID_REG, VT1632_DEVICE_ID)
!= FAIL) {
tmds_register_write(0x08, 0x3b);
@@ -160,32 +162,26 @@ int viafb_tmds_trasmitter_identify(void)
static void tmds_register_write(int index, u8 data)
{
- viaparinfo->shared->i2c_stuff.i2c_port =
- viaparinfo->chip_info->tmds_chip_info.i2c_port;
-
- viafb_i2c_writebyte(viaparinfo->chip_info->tmds_chip_info.
- tmds_chip_slave_addr, index,
- data);
+ viafb_i2c_writebyte(viaparinfo->chip_info->tmds_chip_info.i2c_port,
+ viaparinfo->chip_info->tmds_chip_info.tmds_chip_slave_addr,
+ index, data);
}
static int tmds_register_read(int index)
{
u8 data;
- viaparinfo->shared->i2c_stuff.i2c_port =
- viaparinfo->chip_info->tmds_chip_info.i2c_port;
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->
- tmds_chip_info.tmds_chip_slave_addr,
- (u8) index, &data);
+ viafb_i2c_readbyte(viaparinfo->chip_info->tmds_chip_info.i2c_port,
+ (u8) viaparinfo->chip_info->tmds_chip_info.tmds_chip_slave_addr,
+ (u8) index, &data);
return data;
}
static int tmds_register_read_bytes(int index, u8 *buff, int buff_len)
{
- viaparinfo->shared->i2c_stuff.i2c_port =
- viaparinfo->chip_info->tmds_chip_info.i2c_port;
- viafb_i2c_readbytes((u8) viaparinfo->chip_info->tmds_chip_info.
- tmds_chip_slave_addr, (u8) index, buff, buff_len);
+ viafb_i2c_readbytes(viaparinfo->chip_info->tmds_chip_info.i2c_port,
+ (u8) viaparinfo->chip_info->tmds_chip_info.tmds_chip_slave_addr,
+ (u8) index, buff, buff_len);
return 0;
}
@@ -541,9 +537,10 @@ void viafb_dvi_enable(void)
else
data = 0x37;
viafb_i2c_writebyte(viaparinfo->chip_info->
- tmds_chip_info.
- tmds_chip_slave_addr,
- 0x08, data);
+ tmds_chip_info.i2c_port,
+ viaparinfo->chip_info->
+ tmds_chip_info.tmds_chip_slave_addr,
+ 0x08, data);
}
}
}
diff --git a/drivers/video/via/global.h b/drivers/video/via/global.h
index 8d95d5f..28221a0 100644
--- a/drivers/video/via/global.h
+++ b/drivers/video/via/global.h
@@ -41,7 +41,6 @@
#include "share.h"
#include "dvi.h"
#include "viamode.h"
-#include "via_i2c.h"
#include "hw.h"
#include "lcd.h"
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
index f2583b1..b996803 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/via/hw.c
@@ -19,6 +19,7 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
#include "global.h"
static struct pll_map pll_value[] = {
@@ -62,6 +63,7 @@ static struct pll_map pll_value[] = {
CX700_52_977M, VX855_52_977M},
{CLK_56_250M, CLE266_PLL_56_250M, K800_PLL_56_250M,
CX700_56_250M, VX855_56_250M},
+ {CLK_57_275M, 0, 0, 0, VX855_57_275M},
{CLK_60_466M, CLE266_PLL_60_466M, K800_PLL_60_466M,
CX700_60_466M, VX855_60_466M},
{CLK_61_500M, CLE266_PLL_61_500M, K800_PLL_61_500M,
@@ -525,8 +527,7 @@ static void dvi_patch_skew_dvp_low(void);
static void set_dvi_output_path(int set_iga, int output_interface);
static void set_lcd_output_path(int set_iga, int output_interface);
static void load_fix_bit_crtc_reg(void);
-static void init_gfx_chip_info(struct pci_dev *pdev,
- const struct pci_device_id *pdi);
+static void init_gfx_chip_info(int chip_type);
static void init_tmds_chip_info(void);
static void init_lvds_chip_info(void);
static void device_screen_off(void);
@@ -537,18 +538,6 @@ static void device_on(void);
static void enable_second_display_channel(void);
static void disable_second_display_channel(void);
-void viafb_write_reg(u8 index, u16 io_port, u8 data)
-{
- outb(index, io_port);
- outb(data, io_port + 1);
- /*DEBUG_MSG(KERN_INFO "\nIndex=%2d Value=%2d", index, data); */
-}
-u8 viafb_read_reg(int io_port, u8 index)
-{
- outb(index, io_port);
- return inb(io_port + 1);
-}
-
void viafb_lock_crt(void)
{
viafb_write_reg_mask(CR11, VIACR, BIT7, BIT7);
@@ -560,16 +549,6 @@ void viafb_unlock_crt(void)
viafb_write_reg_mask(CR47, VIACR, 0, BIT0);
}
-void viafb_write_reg_mask(u8 index, int io_port, u8 data, u8 mask)
-{
- u8 tmp;
-
- outb(index, io_port);
- tmp = inb(io_port + 1);
- outb((data & mask) | (tmp & (~mask)), io_port + 1);
- /*DEBUG_MSG(KERN_INFO "\nIndex=%2d Value=%2d", index, tmp); */
-}
-
void write_dac_reg(u8 index, u8 r, u8 g, u8 b)
{
outb(index, LUT_INDEX_WRITE);
@@ -646,102 +625,6 @@ void viafb_set_iga_path(void)
}
}
-void viafb_set_primary_address(u32 addr)
-{
- DEBUG_MSG(KERN_DEBUG "viafb_set_primary_address(0x%08X)\n", addr);
- viafb_write_reg(CR0D, VIACR, addr & 0xFF);
- viafb_write_reg(CR0C, VIACR, (addr >> 8) & 0xFF);
- viafb_write_reg(CR34, VIACR, (addr >> 16) & 0xFF);
- viafb_write_reg_mask(CR48, VIACR, (addr >> 24) & 0x1F, 0x1F);
-}
-
-void viafb_set_secondary_address(u32 addr)
-{
- DEBUG_MSG(KERN_DEBUG "viafb_set_secondary_address(0x%08X)\n", addr);
- /* secondary display supports only quadword aligned memory */
- viafb_write_reg_mask(CR62, VIACR, (addr >> 2) & 0xFE, 0xFE);
- viafb_write_reg(CR63, VIACR, (addr >> 10) & 0xFF);
- viafb_write_reg(CR64, VIACR, (addr >> 18) & 0xFF);
- viafb_write_reg_mask(CRA3, VIACR, (addr >> 26) & 0x07, 0x07);
-}
-
-void viafb_set_primary_pitch(u32 pitch)
-{
- DEBUG_MSG(KERN_DEBUG "viafb_set_primary_pitch(0x%08X)\n", pitch);
- /* spec does not say that first adapter skips 3 bits but old
- * code did it and seems to be reasonable in analogy to 2nd adapter
- */
- pitch = pitch >> 3;
- viafb_write_reg(0x13, VIACR, pitch & 0xFF);
- viafb_write_reg_mask(0x35, VIACR, (pitch >> (8 - 5)) & 0xE0, 0xE0);
-}
-
-void viafb_set_secondary_pitch(u32 pitch)
-{
- DEBUG_MSG(KERN_DEBUG "viafb_set_secondary_pitch(0x%08X)\n", pitch);
- pitch = pitch >> 3;
- viafb_write_reg(0x66, VIACR, pitch & 0xFF);
- viafb_write_reg_mask(0x67, VIACR, (pitch >> 8) & 0x03, 0x03);
- viafb_write_reg_mask(0x71, VIACR, (pitch >> (10 - 7)) & 0x80, 0x80);
-}
-
-void viafb_set_primary_color_depth(u8 depth)
-{
- u8 value;
-
- DEBUG_MSG(KERN_DEBUG "viafb_set_primary_color_depth(%d)\n", depth);
- switch (depth) {
- case 8:
- value = 0x00;
- break;
- case 15:
- value = 0x04;
- break;
- case 16:
- value = 0x14;
- break;
- case 24:
- value = 0x0C;
- break;
- case 30:
- value = 0x08;
- break;
- default:
- printk(KERN_WARNING "viafb_set_primary_color_depth: "
- "Unsupported depth: %d\n", depth);
- return;
- }
-
- viafb_write_reg_mask(0x15, VIASR, value, 0x1C);
-}
-
-void viafb_set_secondary_color_depth(u8 depth)
-{
- u8 value;
-
- DEBUG_MSG(KERN_DEBUG "viafb_set_secondary_color_depth(%d)\n", depth);
- switch (depth) {
- case 8:
- value = 0x00;
- break;
- case 16:
- value = 0x40;
- break;
- case 24:
- value = 0xC0;
- break;
- case 30:
- value = 0x80;
- break;
- default:
- printk(KERN_WARNING "viafb_set_secondary_color_depth: "
- "Unsupported depth: %d\n", depth);
- return;
- }
-
- viafb_write_reg_mask(0x67, VIACR, value, 0xC0);
-}
-
static void set_color_register(u8 index, u8 red, u8 green, u8 blue)
{
outb(0xFF, 0x3C6); /* bit mask of palette */
@@ -1126,16 +1009,12 @@ void viafb_load_reg(int timing_value, int viafb_load_reg_num,
void viafb_write_regx(struct io_reg RegTable[], int ItemNum)
{
int i;
- unsigned char RegTemp;
/*DEBUG_MSG(KERN_INFO "Table Size : %x!!\n",ItemNum ); */
- for (i = 0; i < ItemNum; i++) {
- outb(RegTable[i].index, RegTable[i].port);
- RegTemp = inb(RegTable[i].port + 1);
- RegTemp = (RegTemp & (~RegTable[i].mask)) | RegTable[i].value;
- outb(RegTemp, RegTable[i].port + 1);
- }
+ for (i = 0; i < ItemNum; i++)
+ via_write_reg_mask(RegTable[i].port, RegTable[i].index,
+ RegTable[i].value, RegTable[i].mask);
}
void viafb_load_fetch_count_reg(int h_addr, int bpp_byte, int set_iga)
@@ -1516,8 +1395,6 @@ u32 viafb_get_clk_value(int clk)
/* Set VCLK*/
void viafb_set_vclock(u32 CLK, int set_iga)
{
- unsigned char RegTemp;
-
/* H.W. Reset : ON */
viafb_write_reg_mask(CR17, VIACR, 0x00, BIT7);
@@ -1590,8 +1467,7 @@ void viafb_set_vclock(u32 CLK, int set_iga)
}
/* Fire! */
- RegTemp = inb(VIARMisc);
- outb(RegTemp | (BIT2 + BIT3), VIAWMisc);
+ via_write_misc_reg_mask(0x0C, 0x0C); /* select external clock */
}
void viafb_load_crtc_timing(struct display_timing device_timing,
@@ -1835,6 +1711,7 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
int index = 0;
int h_addr, v_addr;
u32 pll_D_N;
+ u8 polarity = 0;
for (i = 0; i < video_mode->mode_array; i++) {
index = i;
@@ -1863,20 +1740,11 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
v_addr = crt_reg.ver_addr;
/* update polarity for CRT timing */
- if (crt_table[index].h_sync_polarity == NEGATIVE) {
- if (crt_table[index].v_sync_polarity == NEGATIVE)
- outb((inb(VIARMisc) & (~(BIT6 + BIT7))) |
- (BIT6 + BIT7), VIAWMisc);
- else
- outb((inb(VIARMisc) & (~(BIT6 + BIT7))) | (BIT6),
- VIAWMisc);
- } else {
- if (crt_table[index].v_sync_polarity == NEGATIVE)
- outb((inb(VIARMisc) & (~(BIT6 + BIT7))) | (BIT7),
- VIAWMisc);
- else
- outb((inb(VIARMisc) & (~(BIT6 + BIT7))), VIAWMisc);
- }
+ if (crt_table[index].h_sync_polarity == NEGATIVE)
+ polarity |= BIT6;
+ if (crt_table[index].v_sync_polarity == NEGATIVE)
+ polarity |= BIT7;
+ via_write_misc_reg_mask(polarity, BIT6 | BIT7);
if (set_iga == IGA1) {
viafb_unlock_crt();
@@ -1910,10 +1778,9 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
}
-void viafb_init_chip_info(struct pci_dev *pdev,
- const struct pci_device_id *pdi)
+void viafb_init_chip_info(int chip_type)
{
- init_gfx_chip_info(pdev, pdi);
+ init_gfx_chip_info(chip_type);
init_tmds_chip_info();
init_lvds_chip_info();
@@ -1980,12 +1847,11 @@ void viafb_update_device_setting(int hres, int vres,
}
}
-static void init_gfx_chip_info(struct pci_dev *pdev,
- const struct pci_device_id *pdi)
+static void init_gfx_chip_info(int chip_type)
{
u8 tmp;
- viaparinfo->chip_info->gfx_chip_name = pdi->driver_data;
+ viaparinfo->chip_info->gfx_chip_name = chip_type;
/* Check revision of CLE266 Chip */
if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) {
@@ -2016,6 +1882,21 @@ static void init_gfx_chip_info(struct pci_dev *pdev,
CX700_REVISION_700;
}
}
+
+ /* Determine which 2D engine we have */
+ switch (viaparinfo->chip_info->gfx_chip_name) {
+ case UNICHROME_VX800:
+ case UNICHROME_VX855:
+ viaparinfo->chip_info->twod_engine = VIA_2D_ENG_M1;
+ break;
+ case UNICHROME_K8M890:
+ case UNICHROME_P4M900:
+ viaparinfo->chip_info->twod_engine = VIA_2D_ENG_H5;
+ break;
+ default:
+ viaparinfo->chip_info->twod_engine = VIA_2D_ENG_H2;
+ break;
+ }
}
static void init_tmds_chip_info(void)
@@ -2232,13 +2113,11 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
/* Fill VPIT Parameters */
/* Write Misc Register */
- outb(VPIT.Misc, VIAWMisc);
+ outb(VPIT.Misc, VIA_MISC_REG_WRITE);
/* Write Sequencer */
- for (i = 1; i <= StdSR; i++) {
- outb(i, VIASR);
- outb(VPIT.SR[i - 1], VIASR + 1);
- }
+ for (i = 1; i <= StdSR; i++)
+ via_write_reg(VIASR, i, VPIT.SR[i - 1]);
viafb_write_reg_mask(0x15, VIASR, 0xA2, 0xA2);
viafb_set_iga_path();
@@ -2247,10 +2126,8 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
viafb_fill_crtc_timing(crt_timing, vmode_tbl, video_bpp / 8, IGA1);
/* Write Graphic Controller */
- for (i = 0; i < StdGR; i++) {
- outb(i, VIAGR);
- outb(VPIT.GR[i], VIAGR + 1);
- }
+ for (i = 0; i < StdGR; i++)
+ via_write_reg(VIAGR, i, VPIT.GR[i]);
/* Write Attribute Controller */
for (i = 0; i < StdAR; i++) {
@@ -2277,11 +2154,11 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
}
}
- viafb_set_primary_pitch(viafbinfo->fix.line_length);
- viafb_set_secondary_pitch(viafb_dual_fb ? viafbinfo1->fix.line_length
+ via_set_primary_pitch(viafbinfo->fix.line_length);
+ via_set_secondary_pitch(viafb_dual_fb ? viafbinfo1->fix.line_length
: viafbinfo->fix.line_length);
- viafb_set_primary_color_depth(viaparinfo->depth);
- viafb_set_secondary_color_depth(viafb_dual_fb ? viaparinfo1->depth
+ via_set_primary_color_depth(viaparinfo->depth);
+ via_set_secondary_color_depth(viafb_dual_fb ? viaparinfo1->depth
: viaparinfo->depth);
/* Update Refresh Rate Setting */
@@ -2473,108 +2350,6 @@ static void disable_second_display_channel(void)
viafb_write_reg_mask(CR6A, VIACR, BIT6, BIT6);
}
-int viafb_get_fb_size_from_pci(void)
-{
- unsigned long configid, deviceid, FBSize = 0;
- int VideoMemSize;
- int DeviceFound = false;
-
- for (configid = 0x80000000; configid < 0x80010800; configid += 0x100) {
- outl(configid, (unsigned long)0xCF8);
- deviceid = (inl((unsigned long)0xCFC) >> 16) & 0xffff;
-
- switch (deviceid) {
- case CLE266:
- case KM400:
- outl(configid + 0xE0, (unsigned long)0xCF8);
- FBSize = inl((unsigned long)0xCFC);
- DeviceFound = true; /* Found device id */
- break;
-
- case CN400_FUNCTION3:
- case CN700_FUNCTION3:
- case CX700_FUNCTION3:
- case KM800_FUNCTION3:
- case KM890_FUNCTION3:
- case P4M890_FUNCTION3:
- case P4M900_FUNCTION3:
- case VX800_FUNCTION3:
- case VX855_FUNCTION3:
- /*case CN750_FUNCTION3: */
- outl(configid + 0xA0, (unsigned long)0xCF8);
- FBSize = inl((unsigned long)0xCFC);
- DeviceFound = true; /* Found device id */
- break;
-
- default:
- break;
- }
-
- if (DeviceFound)
- break;
- }
-
- DEBUG_MSG(KERN_INFO "Device ID = %lx\n", deviceid);
-
- FBSize = FBSize & 0x00007000;
- DEBUG_MSG(KERN_INFO "FB Size = %x\n", FBSize);
-
- if (viaparinfo->chip_info->gfx_chip_name < UNICHROME_CX700) {
- switch (FBSize) {
- case 0x00004000:
- VideoMemSize = (16 << 20); /*16M */
- break;
-
- case 0x00005000:
- VideoMemSize = (32 << 20); /*32M */
- break;
-
- case 0x00006000:
- VideoMemSize = (64 << 20); /*64M */
- break;
-
- default:
- VideoMemSize = (32 << 20); /*32M */
- break;
- }
- } else {
- switch (FBSize) {
- case 0x00001000:
- VideoMemSize = (8 << 20); /*8M */
- break;
-
- case 0x00002000:
- VideoMemSize = (16 << 20); /*16M */
- break;
-
- case 0x00003000:
- VideoMemSize = (32 << 20); /*32M */
- break;
-
- case 0x00004000:
- VideoMemSize = (64 << 20); /*64M */
- break;
-
- case 0x00005000:
- VideoMemSize = (128 << 20); /*128M */
- break;
-
- case 0x00006000:
- VideoMemSize = (256 << 20); /*256M */
- break;
-
- case 0x00007000: /* Only on VX855/875 */
- VideoMemSize = (512 << 20); /*512M */
- break;
-
- default:
- VideoMemSize = (32 << 20); /*32M */
- break;
- }
- }
-
- return VideoMemSize;
-}
void viafb_set_dpa_gfx(int output_interface, struct GFX_DPA_SETTING\
*p_gfx_dpa_setting)
diff --git a/drivers/video/via/hw.h b/drivers/video/via/hw.h
index 12ef32d..a109de3 100644
--- a/drivers/video/via/hw.h
+++ b/drivers/video/via/hw.h
@@ -24,6 +24,11 @@
#include "viamode.h"
#include "global.h"
+#include "via_modesetting.h"
+
+#define viafb_read_reg(p, i) via_read_reg(p, i)
+#define viafb_write_reg(i, p, d) via_write_reg(p, i, d)
+#define viafb_write_reg_mask(i, p, d, m) via_write_reg_mask(p, i, d, m)
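+
+/*
+ * Note the argument order: the legacy viafb_* accessors took the
+ * register index first (e.g. viafb_write_reg(CR0D, VIACR, val)),
+ * while the new via_* helpers take the I/O port first
+ * (via_write_reg(VIACR, 0x0D, val)).  The macros above only swap
+ * the arguments around.
+ */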
/***************************************************
* Definition IGA1 Design Method of CRTC Registers *
@@ -823,8 +828,8 @@ struct iga2_crtc_timing {
};
/* device ID */
-#define CLE266 0x3123
-#define KM400 0x3205
+#define CLE266_FUNCTION3 0x3123
+#define KM400_FUNCTION3 0x3205
#define CN400_FUNCTION2 0x2259
#define CN400_FUNCTION3 0x3259
/* support VT3314 chipset */
@@ -870,7 +875,6 @@ extern int viafb_LCD_ON;
extern int viafb_DVI_ON;
extern int viafb_hotplug;
-void viafb_write_reg_mask(u8 index, int io_port, u8 data, u8 mask);
void viafb_set_output_path(int device, int set_iga,
int output_interface);
@@ -885,8 +889,6 @@ void viafb_crt_disable(void);
void viafb_crt_enable(void);
void init_ad9389(void);
/* Access I/O Function */
-void viafb_write_reg(u8 index, u16 io_port, u8 data);
-u8 viafb_read_reg(int io_port, u8 index);
void viafb_lock_crt(void);
void viafb_unlock_crt(void);
void viafb_load_fetch_count_reg(int h_addr, int bpp_byte, int set_iga);
@@ -900,20 +902,14 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
struct VideoModeTable *vmode_tbl1, int video_bpp1);
void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh,
struct VideoModeTable *vmode_tbl);
-void viafb_init_chip_info(struct pci_dev *pdev,
- const struct pci_device_id *pdi);
+void viafb_init_chip_info(int chip_type);
void viafb_init_dac(int set_iga);
int viafb_get_pixclock(int hres, int vres, int vmode_refresh);
int viafb_get_refresh(int hres, int vres, u32 float_refresh);
void viafb_update_device_setting(int hres, int vres, int bpp,
int vmode_refresh, int flag);
-int viafb_get_fb_size_from_pci(void);
void viafb_set_iga_path(void);
-void viafb_set_primary_address(u32 addr);
-void viafb_set_secondary_address(u32 addr);
-void viafb_set_primary_pitch(u32 pitch);
-void viafb_set_secondary_pitch(u32 pitch);
void viafb_set_primary_color_register(u8 index, u8 red, u8 green, u8 blue);
void viafb_set_secondary_color_register(u8 index, u8 red, u8 green, u8 blue);
void viafb_get_fb_info(unsigned int *fb_base, unsigned int *fb_len);
diff --git a/drivers/video/via/ioctl.h b/drivers/video/via/ioctl.h
index de89980..c430fa2 100644
--- a/drivers/video/via/ioctl.h
+++ b/drivers/video/via/ioctl.h
@@ -75,7 +75,7 @@
/*SAMM operation flag*/
#define OP_SAMM 0x80
-#define LCD_PANEL_ID_MAXIMUM 22
+#define LCD_PANEL_ID_MAXIMUM 23
#define STATE_ON 0x1
#define STATE_OFF 0x0
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index 1b1ccdc..2ab0f15 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -18,7 +18,8 @@
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-
+#include <linux/via-core.h>
+#include <linux/via_i2c.h>
#include "global.h"
#include "lcdtbl.h"
@@ -172,18 +173,16 @@ static bool lvds_identify_integratedlvds(void)
int viafb_lvds_trasmitter_identify(void)
{
- viaparinfo->shared->i2c_stuff.i2c_port = I2CPORTINDEX;
- if (viafb_lvds_identify_vt1636()) {
- viaparinfo->chip_info->lvds_chip_info.i2c_port = I2CPORTINDEX;
+ if (viafb_lvds_identify_vt1636(VIA_PORT_31)) {
+ viaparinfo->chip_info->lvds_chip_info.i2c_port = VIA_PORT_31;
DEBUG_MSG(KERN_INFO
- "Found VIA VT1636 LVDS on port i2c 0x31 \n");
+ "Found VIA VT1636 LVDS on port i2c 0x31\n");
} else {
- viaparinfo->shared->i2c_stuff.i2c_port = GPIOPORTINDEX;
- if (viafb_lvds_identify_vt1636()) {
+ if (viafb_lvds_identify_vt1636(VIA_PORT_2C)) {
viaparinfo->chip_info->lvds_chip_info.i2c_port =
- GPIOPORTINDEX;
+ VIA_PORT_2C;
DEBUG_MSG(KERN_INFO
- "Found VIA VT1636 LVDS on port gpio 0x2c \n");
+ "Found VIA VT1636 LVDS on port gpio 0x2c\n");
}
}
@@ -398,6 +397,15 @@ static void fp_id_to_vindex(int panel_id)
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
+ case 0x17:
+ /* OLPC XO-1.5 panel */
+ viaparinfo->lvds_setting_info->lcd_panel_hres = 1200;
+ viaparinfo->lvds_setting_info->lcd_panel_vres = 900;
+ viaparinfo->lvds_setting_info->lcd_panel_id =
+ LCD_PANEL_IDD_1200X900;
+ viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
+ viaparinfo->lvds_setting_info->LCDDithering = 0;
+ break;
default:
viaparinfo->lvds_setting_info->lcd_panel_hres = 800;
viaparinfo->lvds_setting_info->lcd_panel_vres = 600;
@@ -412,9 +420,8 @@ static int lvds_register_read(int index)
{
u8 data;
- viaparinfo->shared->i2c_stuff.i2c_port = GPIOPORTINDEX;
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->
- lvds_chip_info.lvds_chip_slave_addr,
+ viafb_i2c_readbyte(VIA_PORT_2C,
+ (u8) viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr,
(u8) index, &data);
return data;
}
diff --git a/drivers/video/via/lcd.h b/drivers/video/via/lcd.h
index 071f47c..9762ec6 100644
--- a/drivers/video/via/lcd.h
+++ b/drivers/video/via/lcd.h
@@ -60,6 +60,8 @@
#define LCD_PANEL_IDB_1360X768 0x0B
/* Resolution: 480x640, Channel: single, Dithering: Enable */
#define LCD_PANEL_IDC_480X640 0x0C
+/* Resolution: 1200x900, Channel: single, Dithering: Disable */
+#define LCD_PANEL_IDD_1200X900 0x0D
extern int viafb_LCD2_ON;
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index d55aaa7..7f0de7f 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -43,16 +43,9 @@
/* Video Memory Size */
#define VIDEO_MEMORY_SIZE_16M 0x1000000
-/* standard VGA IO port
-*/
-#define VIARMisc 0x3CC
-#define VIAWMisc 0x3C2
-#define VIAStatus 0x3DA
-#define VIACR 0x3D4
-#define VIASR 0x3C4
-#define VIAGR 0x3CE
-#define VIAAR 0x3C0
-
+/*
+ * Lengths of the VPIT structure arrays.
+ */
#define StdCR 0x19
#define StdSR 0x04
#define StdGR 0x09
@@ -570,6 +563,10 @@
#define M1200X720_R60_HSP NEGATIVE
#define M1200X720_R60_VSP POSITIVE
+/* 1200x900@60 Sync Polarity (DCON) */
+#define M1200X900_R60_HSP NEGATIVE
+#define M1200X900_R60_VSP NEGATIVE
+
/* 1280x600@60 Sync Polarity (GTF Mode) */
#define M1280x600_R60_HSP NEGATIVE
#define M1280x600_R60_VSP POSITIVE
@@ -651,6 +648,7 @@
#define CLK_52_406M 52406000
#define CLK_52_977M 52977000
#define CLK_56_250M 56250000
+#define CLK_57_275M 57275000
#define CLK_60_466M 60466000
#define CLK_61_500M 61500000
#define CLK_65_000M 65000000
@@ -939,6 +937,7 @@
#define VX855_52_406M 0x00580C03
#define VX855_52_977M 0x00940C05
#define VX855_56_250M 0x009D0C05
+#define VX855_57_275M 0x009D8C85 /* Used by XO panel */
#define VX855_60_466M 0x00A90C05
#define VX855_61_500M 0x00AC0C05
#define VX855_65_000M 0x006D0C03
@@ -1065,6 +1064,7 @@
#define RES_1600X1200_60HZ_PIXCLOCK 6172
#define RES_1600X1200_75HZ_PIXCLOCK 4938
#define RES_1280X720_60HZ_PIXCLOCK 13426
+#define RES_1200X900_60HZ_PIXCLOCK 17459
#define RES_1920X1080_60HZ_PIXCLOCK 5787
#define RES_1400X1050_60HZ_PIXCLOCK 8214
#define RES_1400X1050_75HZ_PIXCLOCK 6410
diff --git a/drivers/video/via/via-core.c b/drivers/video/via/via-core.c
new file mode 100644
index 0000000..e8cfe83
--- /dev/null
+++ b/drivers/video/via/via-core.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
+ * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
+ */
+
+/*
+ * Core code for the Via multifunction framebuffer device.
+ */
+#include <linux/via-core.h>
+#include <linux/via_i2c.h>
+#include <linux/via-gpio.h>
+#include "global.h"
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+/*
+ * The default port config.
+ */
+static struct via_port_cfg adap_configs[] = {
+ [VIA_PORT_26] = { VIA_PORT_I2C, VIA_MODE_OFF, VIASR, 0x26 },
+ [VIA_PORT_31] = { VIA_PORT_I2C, VIA_MODE_I2C, VIASR, 0x31 },
+ [VIA_PORT_25] = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
+ [VIA_PORT_2C] = { VIA_PORT_GPIO, VIA_MODE_I2C, VIASR, 0x2c },
+ [VIA_PORT_3D] = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x3d },
+ { 0, 0, 0, 0 }
+};
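+
+/*
+ * Rough meaning of the entries above, as used elsewhere in this
+ * patch: port 0x31 is the i2c bus probed for the VT1636 LVDS
+ * transmitter, 0x2c is a GPIO port bit-banged as a fallback i2c
+ * bus, and 0x25/0x3d carry the VGPIO pins exported through
+ * gpiolib; port 0x26 is left disabled by default.
+ */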
+
+/*
+ * We currently only support one viafb device (will there ever be
+ * more than one?), so just declare it globally here.
+ */
+static struct viafb_dev global_dev;
+
+
+/*
+ * Basic register access; spinlock required.
+ */
+static inline void viafb_mmio_write(int reg, u32 v)
+{
+ iowrite32(v, global_dev.engine_mmio + reg);
+}
+
+static inline int viafb_mmio_read(int reg)
+{
+ return ioread32(global_dev.engine_mmio + reg);
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Interrupt management. We have a single IRQ line for a lot of
+ * different functions, so we need to share it. The design here
+ * is that we don't want to reimplement the shared IRQ code here;
+ * we also want to avoid having contention for a single handler thread.
+ * So each subdev driver which needs interrupts just requests
+ * them directly from the kernel. We just have what's needed for
+ * overall access to the interrupt control register.
+ */
+
+/*
+ * Which interrupts are enabled now?
+ */
+static u32 viafb_enabled_ints;
+
+static void viafb_int_init(void)
+{
+ viafb_enabled_ints = 0;
+
+ viafb_mmio_write(VDE_INTERRUPT, 0);
+}
+
+/*
+ * Allow subdevs to ask for specific interrupts to be enabled. These
+ * functions must be called with reg_lock held
+ */
+void viafb_irq_enable(u32 mask)
+{
+ viafb_enabled_ints |= mask;
+ viafb_mmio_write(VDE_INTERRUPT, viafb_enabled_ints | VDE_I_ENABLE);
+}
+EXPORT_SYMBOL_GPL(viafb_irq_enable);
+
+void viafb_irq_disable(u32 mask)
+{
+ viafb_enabled_ints &= ~mask;
+ if (viafb_enabled_ints == 0)
+ viafb_mmio_write(VDE_INTERRUPT, 0); /* Disable entirely */
+ else
+ viafb_mmio_write(VDE_INTERRUPT,
+ viafb_enabled_ints | VDE_I_ENABLE);
+}
+EXPORT_SYMBOL_GPL(viafb_irq_disable);
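+
+/*
+ * Minimal usage sketch for a subdev (illustrative only; the chosen
+ * mask and surrounding code are assumptions, not part of this patch):
+ *
+ *	spin_lock_irqsave(&vdev->reg_lock, flags);
+ *	viafb_irq_enable(VDE_I_DMA0TDEN);
+ *	spin_unlock_irqrestore(&vdev->reg_lock, flags);
+ *
+ * where vdev is the viafb_dev handed to the subdev via platform_data.
+ */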
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Access to the DMA engine. This currently provides what the camera
+ * driver needs (i.e. outgoing only) but is easily expandable if need
+ * be.
+ */
+
+/*
+ * There are four DMA channels in the vx855. For now, we only
+ * use one of them, though. Most of the time, the DMA channel
+ * will be idle, so we keep the IRQ handler unregistered except
+ * when some subsystem has indicated an interest.
+ */
+static int viafb_dma_users;
+static DECLARE_COMPLETION(viafb_dma_completion);
+/*
+ * This mutex protects viafb_dma_users and our global interrupt
+ * registration state; it also serializes access to the DMA
+ * engine.
+ */
+static DEFINE_MUTEX(viafb_dma_lock);
+
+/*
+ * The VX855 DMA descriptor (used for s/g transfers) looks
+ * like this.
+ */
+struct viafb_vx855_dma_descr {
+ u32 addr_low; /* Low part of phys addr */
+ u32 addr_high; /* High 12 bits of addr */
+ u32 fb_offset; /* Offset into FB memory */
+ u32 seg_size; /* Size, 16-byte units */
+ u32 tile_mode; /* "tile mode" setting */
+ u32 next_desc_low; /* Next descriptor addr */
+ u32 next_desc_high;
+ u32 pad; /* Fill out to 64 bytes */
+};
+
+/*
+ * Flags added to the "next descriptor low" pointers
+ */
+#define VIAFB_DMA_MAGIC 0x01 /* ??? Just has to be there */
+#define VIAFB_DMA_FINAL_SEGMENT 0x02 /* Final segment */
+
+/*
+ * The completion IRQ handler.
+ */
+static irqreturn_t viafb_dma_irq(int irq, void *data)
+{
+ int csr;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&global_dev.reg_lock);
+ csr = viafb_mmio_read(VDMA_CSR0);
+ if (csr & VDMA_C_DONE) {
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
+ complete(&viafb_dma_completion);
+ ret = IRQ_HANDLED;
+ }
+ spin_unlock(&global_dev.reg_lock);
+ return ret;
+}
+
+/*
+ * Indicate a need for DMA functionality.
+ */
+int viafb_request_dma(void)
+{
+ int ret = 0;
+
+ /*
+ * Only VX855 is supported currently.
+ */
+ if (global_dev.chip_type != UNICHROME_VX855)
+ return -ENODEV;
+ /*
+ * Note the new user and set up our interrupt handler
+ * if need be.
+ */
+ mutex_lock(&viafb_dma_lock);
+ viafb_dma_users++;
+ if (viafb_dma_users == 1) {
+ ret = request_irq(global_dev.pdev->irq, viafb_dma_irq,
+ IRQF_SHARED, "via-dma", &viafb_dma_users);
+ if (ret)
+ viafb_dma_users--;
+ else
+ viafb_irq_enable(VDE_I_DMA0TDEN);
+ }
+ mutex_unlock(&viafb_dma_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(viafb_request_dma);
+
+void viafb_release_dma(void)
+{
+ mutex_lock(&viafb_dma_lock);
+ viafb_dma_users--;
+ if (viafb_dma_users == 0) {
+ viafb_irq_disable(VDE_I_DMA0TDEN);
+ free_irq(global_dev.pdev->irq, &viafb_dma_users);
+ }
+ mutex_unlock(&viafb_dma_lock);
+}
+EXPORT_SYMBOL_GPL(viafb_release_dma);
+
+
+#if 0
+/*
+ * Copy a single buffer from FB memory, synchronously. This code works
+ * but is not currently used.
+ */
+void viafb_dma_copy_out(unsigned int offset, dma_addr_t paddr, int len)
+{
+ unsigned long flags;
+ int csr;
+
+ mutex_lock(&viafb_dma_lock);
+ init_completion(&viafb_dma_completion);
+ /*
+ * Program the controller.
+ */
+ spin_lock_irqsave(&global_dev.reg_lock, flags);
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
+ /* Enable ints; must happen after CSR0 write! */
+ viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE);
+ viafb_mmio_write(VDMA_MARL0, (int) (paddr & 0xfffffff0));
+ viafb_mmio_write(VDMA_MARH0, (int) ((paddr >> 28) & 0xfff));
+ /* Data sheet suggests DAR0 should be <<4, but it lies */
+ viafb_mmio_write(VDMA_DAR0, offset);
+ viafb_mmio_write(VDMA_DQWCR0, len >> 4);
+ viafb_mmio_write(VDMA_TMR0, 0);
+ viafb_mmio_write(VDMA_DPRL0, 0);
+ viafb_mmio_write(VDMA_DPRH0, 0);
+ viafb_mmio_write(VDMA_PMR0, 0);
+ csr = viafb_mmio_read(VDMA_CSR0);
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
+ spin_unlock_irqrestore(&global_dev.reg_lock, flags);
+ /*
+ * Now we just wait until the interrupt handler says
+ * we're done.
+ */
+ wait_for_completion_interruptible(&viafb_dma_completion);
+ viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
+ mutex_unlock(&viafb_dma_lock);
+}
+EXPORT_SYMBOL_GPL(viafb_dma_copy_out);
+#endif
+
+/*
+ * Do a scatter/gather DMA copy from FB memory. You must have done
+ * a successful call to viafb_request_dma() first.
+ */
+int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg)
+{
+ struct viafb_vx855_dma_descr *descr;
+ void *descrpages;
+ dma_addr_t descr_handle;
+ unsigned long flags;
+ int i;
+ struct scatterlist *sgentry;
+ dma_addr_t nextdesc;
+
+ /*
+ * Get a place to put the descriptors.
+ */
+ descrpages = dma_alloc_coherent(&global_dev.pdev->dev,
+ nsg*sizeof(struct viafb_vx855_dma_descr),
+ &descr_handle, GFP_KERNEL);
+ if (descrpages == NULL) {
+ dev_err(&global_dev.pdev->dev, "Unable to get descr page.\n");
+ return -ENOMEM;
+ }
+ mutex_lock(&viafb_dma_lock);
+ /*
+ * Fill them in.
+ */
+ descr = descrpages;
+ nextdesc = descr_handle + sizeof(struct viafb_vx855_dma_descr);
+ for_each_sg(sg, sgentry, nsg, i) {
+ dma_addr_t paddr = sg_dma_address(sgentry);
+ descr->addr_low = paddr & 0xfffffff0;
+ descr->addr_high = ((u64) paddr >> 32) & 0x0fff;
+ descr->fb_offset = offset;
+ descr->seg_size = sg_dma_len(sgentry) >> 4;
+ descr->tile_mode = 0;
+ descr->next_desc_low = (nextdesc&0xfffffff0) | VIAFB_DMA_MAGIC;
+ descr->next_desc_high = ((u64) nextdesc >> 32) & 0x0fff;
+ descr->pad = 0xffffffff; /* VIA driver does this */
+ offset += sg_dma_len(sgentry);
+ nextdesc += sizeof(struct viafb_vx855_dma_descr);
+ descr++;
+ }
+ descr[-1].next_desc_low = VIAFB_DMA_FINAL_SEGMENT|VIAFB_DMA_MAGIC;
+ /*
+ * Program the engine.
+ */
+ spin_lock_irqsave(&global_dev.reg_lock, flags);
+ init_completion(&viafb_dma_completion);
+ viafb_mmio_write(VDMA_DQWCR0, 0);
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
+ viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE | VDMA_MR_CHAIN);
+ viafb_mmio_write(VDMA_DPRL0, descr_handle | VIAFB_DMA_MAGIC);
+ viafb_mmio_write(VDMA_DPRH0,
+ (((u64)descr_handle >> 32) & 0x0fff) | 0xf0000);
+ (void) viafb_mmio_read(VDMA_CSR0);
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
+ spin_unlock_irqrestore(&global_dev.reg_lock, flags);
+ /*
+ * Now we just wait until the interrupt handler says
+ * we're done. Except that, actually, we need to wait a little
+ * longer: the interrupts seem to jump the gun a little and we
+ * get corrupted frames sometimes.
+ */
+ wait_for_completion_timeout(&viafb_dma_completion, 1);
+ msleep(1);
+ if ((viafb_mmio_read(VDMA_CSR0)&VDMA_C_DONE) == 0)
+ printk(KERN_ERR "VIA DMA timeout!\n");
+ /*
+ * Clean up and we're done.
+ */
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
+ viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
+ mutex_unlock(&viafb_dma_lock);
+ dma_free_coherent(&global_dev.pdev->dev,
+ nsg*sizeof(struct viafb_vx855_dma_descr), descrpages,
+ descr_handle);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(viafb_dma_copy_out_sg);
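+
+/*
+ * Expected calling sequence, as a sketch (illustrative only; the
+ * scatterlist is assumed to have been mapped with dma_map_sg()
+ * beforehand, and fb_offset/sglist/nsg are placeholder names):
+ *
+ *	if (viafb_request_dma() == 0) {
+ *		viafb_dma_copy_out_sg(fb_offset, sglist, nsg);
+ *		viafb_release_dma();
+ *	}
+ */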
+
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Figure out how big our framebuffer memory is. Kind of ugly,
+ * but evidently we can't trust the information found in the
+ * fbdev configuration area.
+ */
+static u16 via_function3[] = {
+ CLE266_FUNCTION3, KM400_FUNCTION3, CN400_FUNCTION3, CN700_FUNCTION3,
+ CX700_FUNCTION3, KM800_FUNCTION3, KM890_FUNCTION3, P4M890_FUNCTION3,
+ P4M900_FUNCTION3, VX800_FUNCTION3, VX855_FUNCTION3,
+};
+
+/* Get the BIOS-configured framebuffer size from PCI configuration space
+ * of function 3 in the respective chipset */
+static int viafb_get_fb_size_from_pci(int chip_type)
+{
+ int i;
+ u8 offset = 0;
+ u32 FBSize;
+ u32 VideoMemSize;
+
+ /* search for the "FUNCTION3" device in this chipset */
+ for (i = 0; i < ARRAY_SIZE(via_function3); i++) {
+ struct pci_dev *pdev;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_VIA, via_function3[i],
+ NULL);
+ if (!pdev)
+ continue;
+
+ DEBUG_MSG(KERN_INFO "Device ID = %x\n", pdev->device);
+
+ switch (pdev->device) {
+ case CLE266_FUNCTION3:
+ case KM400_FUNCTION3:
+ offset = 0xE0;
+ break;
+ case CN400_FUNCTION3:
+ case CN700_FUNCTION3:
+ case CX700_FUNCTION3:
+ case KM800_FUNCTION3:
+ case KM890_FUNCTION3:
+ case P4M890_FUNCTION3:
+ case P4M900_FUNCTION3:
+ case VX800_FUNCTION3:
+ case VX855_FUNCTION3:
+ /*case CN750_FUNCTION3: */
+ offset = 0xA0;
+ break;
+ }
+
+ if (!offset)
+ break;
+
+ pci_read_config_dword(pdev, offset, &FBSize);
+ pci_dev_put(pdev);
+ }
+
+ if (!offset) {
+ printk(KERN_ERR "cannot determine framebuffer size\n");
+ return -EIO;
+ }
+
+ FBSize = FBSize & 0x00007000;
+ DEBUG_MSG(KERN_INFO "FB Size = %x\n", FBSize);
+
+ if (chip_type < UNICHROME_CX700) {
+ switch (FBSize) {
+ case 0x00004000:
+ VideoMemSize = (16 << 20); /*16M */
+ break;
+
+ case 0x00005000:
+ VideoMemSize = (32 << 20); /*32M */
+ break;
+
+ case 0x00006000:
+ VideoMemSize = (64 << 20); /*64M */
+ break;
+
+ default:
+ VideoMemSize = (32 << 20); /*32M */
+ break;
+ }
+ } else {
+ switch (FBSize) {
+ case 0x00001000:
+ VideoMemSize = (8 << 20); /*8M */
+ break;
+
+ case 0x00002000:
+ VideoMemSize = (16 << 20); /*16M */
+ break;
+
+ case 0x00003000:
+ VideoMemSize = (32 << 20); /*32M */
+ break;
+
+ case 0x00004000:
+ VideoMemSize = (64 << 20); /*64M */
+ break;
+
+ case 0x00005000:
+ VideoMemSize = (128 << 20); /*128M */
+ break;
+
+ case 0x00006000:
+ VideoMemSize = (256 << 20); /*256M */
+ break;
+
+ case 0x00007000: /* Only on VX855/875 */
+ VideoMemSize = (512 << 20); /*512M */
+ break;
+
+ default:
+ VideoMemSize = (32 << 20); /*32M */
+ break;
+ }
+ }
+
+ return VideoMemSize;
+}
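+
+/*
+ * Worked example (illustrative): the dword is read from config offset
+ * 0xE0 on CLE266/KM400 and 0xA0 on the later function-3 devices; a
+ * masked value of 0x00004000 then decodes to 64 MB on CX700 or newer
+ * chips but to only 16 MB on the older ones.
+ */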
+
+
+/*
+ * Figure out and map our MMIO regions.
+ */
+static int __devinit via_pci_setup_mmio(struct viafb_dev *vdev)
+{
+ int ret;
+ /*
+ * Hook up to the device registers. Note that we soldier
+ * on if it fails; the framebuffer can operate (without
+ * acceleration) without this region.
+ */
+ vdev->engine_start = pci_resource_start(vdev->pdev, 1);
+ vdev->engine_len = pci_resource_len(vdev->pdev, 1);
+ vdev->engine_mmio = ioremap_nocache(vdev->engine_start,
+ vdev->engine_len);
+ if (vdev->engine_mmio == NULL)
+ dev_err(&vdev->pdev->dev,
+ "Unable to map engine MMIO; operation will be "
+ "slow and crippled.\n");
+ /*
+ * Map in framebuffer memory. For now, failure here is
+ * fatal. Unfortunately, in the absence of significant
+ * vmalloc space, failure here is also entirely plausible.
+ * Eventually we want to move away from mapping this
+ * entire region.
+ */
+ vdev->fbmem_start = pci_resource_start(vdev->pdev, 0);
+ ret = vdev->fbmem_len = viafb_get_fb_size_from_pci(vdev->chip_type);
+ if (ret < 0)
+ goto out_unmap;
+ vdev->fbmem = ioremap_nocache(vdev->fbmem_start, vdev->fbmem_len);
+ if (vdev->fbmem == NULL) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+ return 0;
+out_unmap:
+ iounmap(vdev->engine_mmio);
+ return ret;
+}
+
+static void __devexit via_pci_teardown_mmio(struct viafb_dev *vdev)
+{
+ iounmap(vdev->fbmem);
+ iounmap(vdev->engine_mmio);
+}
+
+/*
+ * Create our subsidiary devices.
+ */
+static struct viafb_subdev_info {
+ char *name;
+ struct platform_device *platdev;
+} viafb_subdevs[] = {
+ {
+ .name = "viafb-gpio",
+ },
+ {
+ .name = "viafb-i2c",
+ }
+};
+#define N_SUBDEVS ARRAY_SIZE(viafb_subdevs)
+
+static int __devinit via_create_subdev(struct viafb_dev *vdev,
+ struct viafb_subdev_info *info)
+{
+ int ret;
+
+ info->platdev = platform_device_alloc(info->name, -1);
+ if (!info->platdev) {
+ dev_err(&vdev->pdev->dev, "Unable to allocate pdev %s\n",
+ info->name);
+ return -ENOMEM;
+ }
+ info->platdev->dev.parent = &vdev->pdev->dev;
+ info->platdev->dev.platform_data = vdev;
+ ret = platform_device_add(info->platdev);
+ if (ret) {
+ dev_err(&vdev->pdev->dev, "Unable to add pdev %s\n",
+ info->name);
+ platform_device_put(info->platdev);
+ info->platdev = NULL;
+ }
+ return ret;
+}
+
+static int __devinit via_setup_subdevs(struct viafb_dev *vdev)
+{
+ int i;
+
+ /*
+ * Ignore return values. Even if some of the devices
+ * fail to be created, we'll still be able to use some
+ * of the rest.
+ */
+ for (i = 0; i < N_SUBDEVS; i++)
+ via_create_subdev(vdev, viafb_subdevs + i);
+ return 0;
+}
+
+static void __devexit via_teardown_subdevs(void)
+{
+ int i;
+
+ for (i = 0; i < N_SUBDEVS; i++)
+ if (viafb_subdevs[i].platdev) {
+ viafb_subdevs[i].platdev->dev.platform_data = NULL;
+ platform_device_unregister(viafb_subdevs[i].platdev);
+ }
+}
+
+
+static int __devinit via_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ /*
+ * Global device initialization.
+ */
+ memset(&global_dev, 0, sizeof(global_dev));
+ global_dev.pdev = pdev;
+ global_dev.chip_type = ent->driver_data;
+ global_dev.port_cfg = adap_configs;
+ spin_lock_init(&global_dev.reg_lock);
+ ret = via_pci_setup_mmio(&global_dev);
+ if (ret)
+ goto out_disable;
+ /*
+ * Set up interrupts and create our subdevices. Continue even if
+ * some things fail.
+ */
+ viafb_int_init();
+ via_setup_subdevs(&global_dev);
+ /*
+ * Set up the framebuffer device
+ */
+ ret = via_fb_pci_probe(&global_dev);
+ if (ret)
+ goto out_subdevs;
+ return 0;
+
+out_subdevs:
+ via_teardown_subdevs();
+ via_pci_teardown_mmio(&global_dev);
+out_disable:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void __devexit via_pci_remove(struct pci_dev *pdev)
+{
+ via_teardown_subdevs();
+ via_fb_pci_remove(pdev);
+ via_pci_teardown_mmio(&global_dev);
+ pci_disable_device(pdev);
+}
+
+
+static struct pci_device_id via_pci_table[] __devinitdata = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
+ .driver_data = UNICHROME_CLE266 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
+ .driver_data = UNICHROME_PM800 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID),
+ .driver_data = UNICHROME_K400 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K800_DID),
+ .driver_data = UNICHROME_K800 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID),
+ .driver_data = UNICHROME_CN700 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID),
+ .driver_data = UNICHROME_K8M890 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CX700_DID),
+ .driver_data = UNICHROME_CX700 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID),
+ .driver_data = UNICHROME_P4M900 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN750_DID),
+ .driver_data = UNICHROME_CN750 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX800_DID),
+ .driver_data = UNICHROME_VX800 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
+ .driver_data = UNICHROME_VX855 },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, via_pci_table);
+
+static struct pci_driver via_driver = {
+ .name = "viafb",
+ .id_table = via_pci_table,
+ .probe = via_pci_probe,
+ .remove = __devexit_p(via_pci_remove),
+};
+
+static int __init via_core_init(void)
+{
+ int ret;
+
+ ret = viafb_init();
+ if (ret)
+ return ret;
+ viafb_i2c_init();
+ viafb_gpio_init();
+ return pci_register_driver(&via_driver);
+}
+
+static void __exit via_core_exit(void)
+{
+ pci_unregister_driver(&via_driver);
+ viafb_gpio_exit();
+ viafb_i2c_exit();
+ viafb_exit();
+}
+
+module_init(via_core_init);
+module_exit(via_core_exit);
diff --git a/drivers/video/via/via-gpio.c b/drivers/video/via/via-gpio.c
new file mode 100644
index 0000000..595516a
--- /dev/null
+++ b/drivers/video/via/via-gpio.c
@@ -0,0 +1,285 @@
+/*
+ * Support for viafb GPIO ports.
+ *
+ * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
+ * Distributable under version 2 of the GNU General Public License.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/via-core.h>
+#include <linux/via-gpio.h>
+
+/*
+ * The ports we know about. Note that the port-25 gpios are not
+ * mentioned in the datasheet.
+ */
+
+struct viafb_gpio {
+ char *vg_name; /* Data sheet name */
+ u16 vg_io_port;
+ u8 vg_port_index;
+ int vg_mask_shift;
+};
+
+static struct viafb_gpio viafb_all_gpios[] = {
+ {
+ .vg_name = "VGPIO0", /* Guess - not in datasheet */
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x25,
+ .vg_mask_shift = 1
+ },
+ {
+ .vg_name = "VGPIO1",
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x25,
+ .vg_mask_shift = 0
+ },
+ {
+ .vg_name = "VGPIO2", /* aka DISPCLKI0 */
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x2c,
+ .vg_mask_shift = 1
+ },
+ {
+ .vg_name = "VGPIO3", /* aka DISPCLKO0 */
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x2c,
+ .vg_mask_shift = 0
+ },
+ {
+ .vg_name = "VGPIO4", /* DISPCLKI1 */
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x3d,
+ .vg_mask_shift = 1
+ },
+ {
+ .vg_name = "VGPIO5", /* DISPCLKO1 */
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x3d,
+ .vg_mask_shift = 0
+ },
+};
+
+#define VIAFB_NUM_GPIOS ARRAY_SIZE(viafb_all_gpios)
+
+/*
+ * This structure controls the active GPIOs, which may be a subset
+ * of those which are known.
+ */
+
+struct viafb_gpio_cfg {
+ struct gpio_chip gpio_chip;
+ struct viafb_dev *vdev;
+ struct viafb_gpio *active_gpios[VIAFB_NUM_GPIOS];
+ char *gpio_names[VIAFB_NUM_GPIOS];
+};
+
+/*
+ * GPIO access functions
+ */
+static void via_gpio_set(struct gpio_chip *chip, unsigned int nr,
+ int value)
+{
+ struct viafb_gpio_cfg *cfg = container_of(chip,
+ struct viafb_gpio_cfg,
+ gpio_chip);
+ u8 reg;
+ struct viafb_gpio *gpio;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfg->vdev->reg_lock, flags);
+ gpio = cfg->active_gpios[nr];
+ reg = via_read_reg(VIASR, gpio->vg_port_index);
+ reg |= 0x40 << gpio->vg_mask_shift; /* output enable */
+ if (value)
+ reg |= 0x10 << gpio->vg_mask_shift;
+ else
+ reg &= ~(0x10 << gpio->vg_mask_shift);
+ via_write_reg(VIASR, gpio->vg_port_index, reg);
+ spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
+}
+
+static int via_gpio_dir_out(struct gpio_chip *chip, unsigned int nr,
+ int value)
+{
+ via_gpio_set(chip, nr, value);
+ return 0;
+}
+
+/*
+ * Set the input direction. I'm not sure this is right; we should
+ * be able to do input without disabling output.
+ */
+static int via_gpio_dir_input(struct gpio_chip *chip, unsigned int nr)
+{
+ struct viafb_gpio_cfg *cfg = container_of(chip,
+ struct viafb_gpio_cfg,
+ gpio_chip);
+ struct viafb_gpio *gpio;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfg->vdev->reg_lock, flags);
+ gpio = cfg->active_gpios[nr];
+ via_write_reg_mask(VIASR, gpio->vg_port_index, 0,
+ 0x40 << gpio->vg_mask_shift);
+ spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
+ return 0;
+}
+
+static int via_gpio_get(struct gpio_chip *chip, unsigned int nr)
+{
+ struct viafb_gpio_cfg *cfg = container_of(chip,
+ struct viafb_gpio_cfg,
+ gpio_chip);
+ u8 reg;
+ struct viafb_gpio *gpio;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfg->vdev->reg_lock, flags);
+ gpio = cfg->active_gpios[nr];
+ reg = via_read_reg(VIASR, gpio->vg_port_index);
+ spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
+ return reg & (0x04 << gpio->vg_mask_shift);
+}
+
+
+static struct viafb_gpio_cfg gpio_config = {
+ .gpio_chip = {
+ .label = "VIAFB onboard GPIO",
+ .owner = THIS_MODULE,
+ .direction_output = via_gpio_dir_out,
+ .set = via_gpio_set,
+ .direction_input = via_gpio_dir_input,
+ .get = via_gpio_get,
+ .base = -1,
+ .ngpio = 0,
+ .can_sleep = 0
+ }
+};
+
+/*
+ * Manage the software enable bit.
+ */
+static void viafb_gpio_enable(struct viafb_gpio *gpio)
+{
+ via_write_reg_mask(VIASR, gpio->vg_port_index, 0x02, 0x02);
+}
+
+static void viafb_gpio_disable(struct viafb_gpio *gpio)
+{
+ via_write_reg_mask(VIASR, gpio->vg_port_index, 0, 0x02);
+}
+
+/*
+ * Look up a specific gpio and return the number it was assigned.
+ */
+int viafb_gpio_lookup(const char *name)
+{
+ int i;
+
+ for (i = 0; i < gpio_config.gpio_chip.ngpio; i++)
+ if (!strcmp(name, gpio_config.active_gpios[i]->vg_name))
+ return gpio_config.gpio_chip.base + i;
+ return -1;
+}
+EXPORT_SYMBOL_GPL(viafb_gpio_lookup);
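+
+/*
+ * Usage sketch (illustrative; the pin name and consumer label are
+ * assumptions): translate a data-sheet name into a GPIO number and
+ * then use the normal gpiolib calls.
+ *
+ *	int gpio = viafb_gpio_lookup("VGPIO3");
+ *
+ *	if (gpio >= 0 && !gpio_request(gpio, "panel-power"))
+ *		gpio_direction_output(gpio, 1);
+ */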
+
+/*
+ * Platform device stuff.
+ */
+static __devinit int viafb_gpio_probe(struct platform_device *platdev)
+{
+ struct viafb_dev *vdev = platdev->dev.platform_data;
+ struct via_port_cfg *port_cfg = vdev->port_cfg;
+ int i, ngpio = 0, ret;
+ struct viafb_gpio *gpio;
+ unsigned long flags;
+
+ /*
+ * Set up entries for all GPIOs which have been configured to
+ * operate as such (as opposed to as i2c ports).
+ */
+ for (i = 0; i < VIAFB_NUM_PORTS; i++) {
+ if (port_cfg[i].mode != VIA_MODE_GPIO)
+ continue;
+ for (gpio = viafb_all_gpios;
+ gpio < viafb_all_gpios + VIAFB_NUM_GPIOS; gpio++)
+ if (gpio->vg_port_index == port_cfg[i].ioport_index) {
+ gpio_config.active_gpios[ngpio] = gpio;
+ gpio_config.gpio_names[ngpio] = gpio->vg_name;
+ ngpio++;
+ }
+ }
+ gpio_config.gpio_chip.ngpio = ngpio;
+ gpio_config.gpio_chip.names = gpio_config.gpio_names;
+ gpio_config.vdev = vdev;
+ if (ngpio == 0) {
+ printk(KERN_INFO "viafb: no GPIOs configured\n");
+ return 0;
+ }
+ /*
+ * Enable the ports. They come in pairs, with a single
+ * enable bit for both.
+ */
+ spin_lock_irqsave(&gpio_config.vdev->reg_lock, flags);
+ for (i = 0; i < ngpio; i += 2)
+ viafb_gpio_enable(gpio_config.active_gpios[i]);
+ spin_unlock_irqrestore(&gpio_config.vdev->reg_lock, flags);
+ /*
+ * Get registered.
+ */
+ gpio_config.gpio_chip.base = -1; /* Dynamic */
+ ret = gpiochip_add(&gpio_config.gpio_chip);
+ if (ret) {
+ printk(KERN_ERR "viafb: failed to add gpios (%d)\n", ret);
+ gpio_config.gpio_chip.ngpio = 0;
+ }
+ return ret;
+}
+
+
+static int viafb_gpio_remove(struct platform_device *platdev)
+{
+ unsigned long flags;
+ int ret = 0, i;
+
+ /*
+ * Get unregistered.
+ */
+ if (gpio_config.gpio_chip.ngpio > 0) {
+ ret = gpiochip_remove(&gpio_config.gpio_chip);
+ if (ret) { /* Somebody still using it? */
+ printk(KERN_ERR "Viafb: GPIO remove failed\n");
+ return ret;
+ }
+ }
+ /*
+ * Disable the ports.
+ */
+ spin_lock_irqsave(&gpio_config.vdev->reg_lock, flags);
+ for (i = 0; i < gpio_config.gpio_chip.ngpio; i += 2)
+ viafb_gpio_disable(gpio_config.active_gpios[i]);
+ gpio_config.gpio_chip.ngpio = 0;
+ spin_unlock_irqrestore(&gpio_config.vdev->reg_lock, flags);
+ return ret;
+}
+
+static struct platform_driver via_gpio_driver = {
+ .driver = {
+ .name = "viafb-gpio",
+ },
+ .probe = viafb_gpio_probe,
+ .remove = viafb_gpio_remove,
+};
+
+int viafb_gpio_init(void)
+{
+ return platform_driver_register(&via_gpio_driver);
+}
+
+void viafb_gpio_exit(void)
+{
+ platform_driver_unregister(&via_gpio_driver);
+}
diff --git a/drivers/video/via/via_i2c.c b/drivers/video/via/via_i2c.c
index 15543e9..da9e4ca 100644
--- a/drivers/video/via/via_i2c.c
+++ b/drivers/video/via/via_i2c.c
@@ -1,5 +1,5 @@
/*
- * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
* This program is free software; you can redistribute it and/or
@@ -19,77 +19,106 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include "global.h"
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/via-core.h>
+#include <linux/via_i2c.h>
+
+/*
+ * There can only be one set of these, so there's no point in having
+ * them be dynamically allocated...
+ */
+#define VIAFB_NUM_I2C 5
+static struct via_i2c_stuff via_i2c_par[VIAFB_NUM_I2C];
+struct viafb_dev *i2c_vdev; /* Passed in from core */
static void via_i2c_setscl(void *data, int state)
{
u8 val;
- struct via_i2c_stuff *via_i2c_chan = (struct via_i2c_stuff *)data;
+ struct via_port_cfg *adap_data = data;
+ unsigned long flags;
- val = viafb_read_reg(VIASR, via_i2c_chan->i2c_port) & 0xF0;
+ spin_lock_irqsave(&i2c_vdev->reg_lock, flags);
+ val = via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0xF0;
if (state)
val |= 0x20;
else
val &= ~0x20;
- switch (via_i2c_chan->i2c_port) {
- case I2CPORTINDEX:
+ switch (adap_data->type) {
+ case VIA_PORT_I2C:
val |= 0x01;
break;
- case GPIOPORTINDEX:
+ case VIA_PORT_GPIO:
val |= 0x80;
break;
default:
- DEBUG_MSG("via_i2c: specify wrong i2c port.\n");
+ printk(KERN_ERR "viafb_i2c: wrong i2c port type specified\n");
}
- viafb_write_reg(via_i2c_chan->i2c_port, VIASR, val);
+ via_write_reg(adap_data->io_port, adap_data->ioport_index, val);
+ spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags);
}
static int via_i2c_getscl(void *data)
{
- struct via_i2c_stuff *via_i2c_chan = (struct via_i2c_stuff *)data;
+ struct via_port_cfg *adap_data = data;
+ unsigned long flags;
+ int ret = 0;
- if (viafb_read_reg(VIASR, via_i2c_chan->i2c_port) & 0x08)
- return 1;
- return 0;
+ spin_lock_irqsave(&i2c_vdev->reg_lock, flags);
+ if (via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0x08)
+ ret = 1;
+ spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags);
+ return ret;
}
static int via_i2c_getsda(void *data)
{
- struct via_i2c_stuff *via_i2c_chan = (struct via_i2c_stuff *)data;
+ struct via_port_cfg *adap_data = data;
+ unsigned long flags;
+ int ret = 0;
- if (viafb_read_reg(VIASR, via_i2c_chan->i2c_port) & 0x04)
- return 1;
- return 0;
+ spin_lock_irqsave(&i2c_vdev->reg_lock, flags);
+ if (via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0x04)
+ ret = 1;
+ spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags);
+ return ret;
}
static void via_i2c_setsda(void *data, int state)
{
u8 val;
- struct via_i2c_stuff *via_i2c_chan = (struct via_i2c_stuff *)data;
+ struct via_port_cfg *adap_data = data;
+ unsigned long flags;
- val = viafb_read_reg(VIASR, via_i2c_chan->i2c_port) & 0xF0;
+ spin_lock_irqsave(&i2c_vdev->reg_lock, flags);
+ val = via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0xF0;
if (state)
val |= 0x10;
else
val &= ~0x10;
- switch (via_i2c_chan->i2c_port) {
- case I2CPORTINDEX:
+ switch (adap_data->type) {
+ case VIA_PORT_I2C:
val |= 0x01;
break;
- case GPIOPORTINDEX:
+ case VIA_PORT_GPIO:
val |= 0x40;
break;
default:
- DEBUG_MSG("via_i2c: specify wrong i2c port.\n");
+ printk(KERN_ERR "viafb_i2c: wrong i2c port type specified\n");
}
- viafb_write_reg(via_i2c_chan->i2c_port, VIASR, val);
+ via_write_reg(adap_data->io_port, adap_data->ioport_index, val);
+ spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags);
}
-int viafb_i2c_readbyte(u8 slave_addr, u8 index, u8 *pdata)
+int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata)
{
u8 mm1[] = {0x00};
struct i2c_msg msgs[2];
+ if (!via_i2c_par[adap].is_active)
+ return -ENODEV;
*pdata = 0;
msgs[0].flags = 0;
msgs[1].flags = I2C_M_RD;
@@ -97,81 +126,144 @@ int viafb_i2c_readbyte(u8 slave_addr, u8 index, u8 *pdata)
mm1[0] = index;
msgs[0].len = 1; msgs[1].len = 1;
msgs[0].buf = mm1; msgs[1].buf = pdata;
- i2c_transfer(&viaparinfo->shared->i2c_stuff.adapter, msgs, 2);
-
- return 0;
+ return i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
}
-int viafb_i2c_writebyte(u8 slave_addr, u8 index, u8 data)
+int viafb_i2c_writebyte(u8 adap, u8 slave_addr, u8 index, u8 data)
{
u8 msg[2] = { index, data };
struct i2c_msg msgs;
+ if (!via_i2c_par[adap].is_active)
+ return -ENODEV;
msgs.flags = 0;
msgs.addr = slave_addr / 2;
msgs.len = 2;
msgs.buf = msg;
- return i2c_transfer(&viaparinfo->shared->i2c_stuff.adapter, &msgs, 1);
+ return i2c_transfer(&via_i2c_par[adap].adapter, &msgs, 1);
}
-int viafb_i2c_readbytes(u8 slave_addr, u8 index, u8 *buff, int buff_len)
+int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len)
{
u8 mm1[] = {0x00};
struct i2c_msg msgs[2];
+ if (!via_i2c_par[adap].is_active)
+ return -ENODEV;
msgs[0].flags = 0;
msgs[1].flags = I2C_M_RD;
msgs[0].addr = msgs[1].addr = slave_addr / 2;
mm1[0] = index;
msgs[0].len = 1; msgs[1].len = buff_len;
msgs[0].buf = mm1; msgs[1].buf = buff;
- i2c_transfer(&viaparinfo->shared->i2c_stuff.adapter, msgs, 2);
- return 0;
+ return i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
}
-int viafb_create_i2c_bus(void *viapar)
+/*
+ * Allow other viafb subdevices to look up a specific adapter
+ * by port name.
+ */
+struct i2c_adapter *viafb_find_i2c_adapter(enum viafb_i2c_adap which)
{
- int ret;
- struct via_i2c_stuff *i2c_stuff =
- &((struct viafb_par *)viapar)->shared->i2c_stuff;
-
- strcpy(i2c_stuff->adapter.name, "via_i2c");
- i2c_stuff->i2c_port = 0x0;
- i2c_stuff->adapter.owner = THIS_MODULE;
- i2c_stuff->adapter.id = 0x01FFFF;
- i2c_stuff->adapter.class = 0;
- i2c_stuff->adapter.algo_data = &i2c_stuff->algo;
- i2c_stuff->adapter.dev.parent = NULL;
- i2c_stuff->algo.setsda = via_i2c_setsda;
- i2c_stuff->algo.setscl = via_i2c_setscl;
- i2c_stuff->algo.getsda = via_i2c_getsda;
- i2c_stuff->algo.getscl = via_i2c_getscl;
- i2c_stuff->algo.udelay = 40;
- i2c_stuff->algo.timeout = 20;
- i2c_stuff->algo.data = i2c_stuff;
-
- i2c_set_adapdata(&i2c_stuff->adapter, i2c_stuff);
+ struct via_i2c_stuff *stuff = &via_i2c_par[which];
- /* Raise SCL and SDA */
- i2c_stuff->i2c_port = I2CPORTINDEX;
- via_i2c_setsda(i2c_stuff, 1);
- via_i2c_setscl(i2c_stuff, 1);
+ return &stuff->adapter;
+}
+EXPORT_SYMBOL_GPL(viafb_find_i2c_adapter);
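+
+/*
+ * Usage sketch (illustrative; the message setup is an assumption):
+ * a subdev can fetch the adapter for a known port and talk to a
+ * client with the regular i2c core calls.
+ *
+ *	struct i2c_adapter *adap = viafb_find_i2c_adapter(VIA_PORT_31);
+ *	ret = i2c_transfer(adap, msgs, 2);
+ */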
- i2c_stuff->i2c_port = GPIOPORTINDEX;
- via_i2c_setsda(i2c_stuff, 1);
- via_i2c_setscl(i2c_stuff, 1);
- udelay(20);
- ret = i2c_bit_add_bus(&i2c_stuff->adapter);
- if (ret == 0)
- DEBUG_MSG("I2C bus %s registered.\n", i2c_stuff->adapter.name);
+static int create_i2c_bus(struct i2c_adapter *adapter,
+ struct i2c_algo_bit_data *algo,
+ struct via_port_cfg *adap_cfg,
+ struct pci_dev *pdev)
+{
+ algo->setsda = via_i2c_setsda;
+ algo->setscl = via_i2c_setscl;
+ algo->getsda = via_i2c_getsda;
+ algo->getscl = via_i2c_getscl;
+ algo->udelay = 40;
+ algo->timeout = 20;
+ algo->data = adap_cfg;
+
+ sprintf(adapter->name, "viafb i2c io_port idx 0x%02x",
+ adap_cfg->ioport_index);
+ adapter->owner = THIS_MODULE;
+ adapter->id = 0x01FFFF;
+ adapter->class = I2C_CLASS_DDC;
+ adapter->algo_data = algo;
+ if (pdev)
+ adapter->dev.parent = &pdev->dev;
else
- DEBUG_MSG("Failed to register I2C bus %s.\n",
- i2c_stuff->adapter.name);
- return ret;
+ adapter->dev.parent = NULL;
+ /* i2c_set_adapdata(adapter, adap_cfg); */
+
+ /* Raise SCL and SDA */
+ via_i2c_setsda(adap_cfg, 1);
+ via_i2c_setscl(adap_cfg, 1);
+ udelay(20);
+
+ return i2c_bit_add_bus(adapter);
+}
+
+static int viafb_i2c_probe(struct platform_device *platdev)
+{
+ int i, ret;
+ struct via_port_cfg *configs;
+
+ i2c_vdev = platdev->dev.platform_data;
+ configs = i2c_vdev->port_cfg;
+
+ for (i = 0; i < VIAFB_NUM_PORTS; i++) {
+ struct via_port_cfg *adap_cfg = configs++;
+ struct via_i2c_stuff *i2c_stuff = &via_i2c_par[i];
+
+ i2c_stuff->is_active = 0;
+ if (adap_cfg->type == 0 || adap_cfg->mode != VIA_MODE_I2C)
+ continue;
+ ret = create_i2c_bus(&i2c_stuff->adapter,
+ &i2c_stuff->algo, adap_cfg,
+ NULL); /* FIXME: PCIDEV */
+ if (ret < 0) {
+ printk(KERN_ERR "viafb: cannot create i2c bus %u:%d\n",
+ i, ret);
+ continue; /* Still try to make the rest */
+ }
+ i2c_stuff->is_active = 1;
+ }
+
+ return 0;
+}
+
+static int viafb_i2c_remove(struct platform_device *platdev)
+{
+ int i;
+
+ for (i = 0; i < VIAFB_NUM_PORTS; i++) {
+ struct via_i2c_stuff *i2c_stuff = &via_i2c_par[i];
+ /*
+ * Only remove those entries in the array that we've
+ * actually used (and thus initialized algo_data)
+ */
+ if (i2c_stuff->is_active)
+ i2c_del_adapter(&i2c_stuff->adapter);
+ }
+ return 0;
+}
+
+static struct platform_driver via_i2c_driver = {
+ .driver = {
+ .name = "viafb-i2c",
+ },
+ .probe = viafb_i2c_probe,
+ .remove = viafb_i2c_remove,
+};
+
+int viafb_i2c_init(void)
+{
+ return platform_driver_register(&via_i2c_driver);
}
-void viafb_delete_i2c_buss(void *par)
+void viafb_i2c_exit(void)
{
- i2c_del_adapter(&((struct viafb_par *)par)->shared->i2c_stuff.adapter);
+ platform_driver_unregister(&via_i2c_driver);
}
diff --git a/drivers/video/via/via_modesetting.c b/drivers/video/via/via_modesetting.c
new file mode 100644
index 0000000..3cddcff
--- /dev/null
+++ b/drivers/video/via/via_modesetting.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
+ * Copyright 2010 Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation;
+ * either version 2, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
+ * the implied warranty of MERCHANTABILITY or FITNESS FOR
+ * A PARTICULAR PURPOSE.See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+/*
+ * basic modesetting functions
+ */
+
+#include <linux/kernel.h>
+#include <linux/via-core.h>
+#include "via_modesetting.h"
+#include "share.h"
+#include "debug.h"
+
+void via_set_primary_address(u32 addr)
+{
+ DEBUG_MSG(KERN_DEBUG "via_set_primary_address(0x%08X)\n", addr);
+ via_write_reg(VIACR, 0x0D, addr & 0xFF);
+ via_write_reg(VIACR, 0x0C, (addr >> 8) & 0xFF);
+ via_write_reg(VIACR, 0x34, (addr >> 16) & 0xFF);
+ via_write_reg_mask(VIACR, 0x48, (addr >> 24) & 0x1F, 0x1F);
+}
+
+void via_set_secondary_address(u32 addr)
+{
+ DEBUG_MSG(KERN_DEBUG "via_set_secondary_address(0x%08X)\n", addr);
+ /* secondary display supports only quadword aligned memory */
+ via_write_reg_mask(VIACR, 0x62, (addr >> 2) & 0xFE, 0xFE);
+ via_write_reg(VIACR, 0x63, (addr >> 10) & 0xFF);
+ via_write_reg(VIACR, 0x64, (addr >> 18) & 0xFF);
+ via_write_reg_mask(VIACR, 0xA3, (addr >> 26) & 0x07, 0x07);
+}
+
+void via_set_primary_pitch(u32 pitch)
+{
+ DEBUG_MSG(KERN_DEBUG "via_set_primary_pitch(0x%08X)\n", pitch);
+ /* spec does not say that first adapter skips 3 bits but old
+ * code did it and seems to be reasonable in analogy to 2nd adapter
+ */
+ pitch = pitch >> 3;
+ via_write_reg(VIACR, 0x13, pitch & 0xFF);
+ via_write_reg_mask(VIACR, 0x35, (pitch >> (8 - 5)) & 0xE0, 0xE0);
+}
+
+void via_set_secondary_pitch(u32 pitch)
+{
+ DEBUG_MSG(KERN_DEBUG "via_set_secondary_pitch(0x%08X)\n", pitch);
+ pitch = pitch >> 3;
+ via_write_reg(VIACR, 0x66, pitch & 0xFF);
+ via_write_reg_mask(VIACR, 0x67, (pitch >> 8) & 0x03, 0x03);
+ via_write_reg_mask(VIACR, 0x71, (pitch >> (10 - 7)) & 0x80, 0x80);
+}
+
+void via_set_primary_color_depth(u8 depth)
+{
+ u8 value;
+
+ DEBUG_MSG(KERN_DEBUG "via_set_primary_color_depth(%d)\n", depth);
+ switch (depth) {
+ case 8:
+ value = 0x00;
+ break;
+ case 15:
+ value = 0x04;
+ break;
+ case 16:
+ value = 0x14;
+ break;
+ case 24:
+ value = 0x0C;
+ break;
+ case 30:
+ value = 0x08;
+ break;
+ default:
+ printk(KERN_WARNING "via_set_primary_color_depth: "
+ "Unsupported depth: %d\n", depth);
+ return;
+ }
+
+ via_write_reg_mask(VIASR, 0x15, value, 0x1C);
+}
+
+void via_set_secondary_color_depth(u8 depth)
+{
+ u8 value;
+
+ DEBUG_MSG(KERN_DEBUG "via_set_secondary_color_depth(%d)\n", depth);
+ switch (depth) {
+ case 8:
+ value = 0x00;
+ break;
+ case 16:
+ value = 0x40;
+ break;
+ case 24:
+ value = 0xC0;
+ break;
+ case 30:
+ value = 0x80;
+ break;
+ default:
+ printk(KERN_WARNING "via_set_secondary_color_depth: "
+ "Unsupported depth: %d\n", depth);
+ return;
+ }
+
+ via_write_reg_mask(VIACR, 0x67, value, 0xC0);
+}
diff --git a/drivers/video/via/via_i2c.h b/drivers/video/via/via_modesetting.h
index 3a13242..ae35cfd 100644
--- a/drivers/video/via/via_i2c.h
+++ b/drivers/video/via/via_modesetting.h
@@ -1,46 +1,38 @@
/*
* Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
-
+ * Copyright 2010 Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation;
* either version 2, or (at your option) any later version.
-
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE.See the GNU General Public License
* for more details.
-
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#ifndef __VIA_I2C_H__
-#define __VIA_I2C_H__
+/*
+ * basic modesetting functions
+ */
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
+#ifndef __VIA_MODESETTING_H__
+#define __VIA_MODESETTING_H__
-struct via_i2c_stuff {
- u16 i2c_port; /* GPIO or I2C port */
- struct i2c_adapter adapter;
- struct i2c_algo_bit_data algo;
-};
+#include <linux/types.h>
-#define I2CPORT 0x3c4
-#define I2CPORTINDEX 0x31
-#define GPIOPORT 0x3C4
-#define GPIOPORTINDEX 0x2C
-#define I2C_BUS 1
-#define GPIO_BUS 2
-#define DELAYPORT 0x3C3
+void via_set_primary_address(u32 addr);
+void via_set_secondary_address(u32 addr);
+void via_set_primary_pitch(u32 pitch);
+void via_set_secondary_pitch(u32 pitch);
+void via_set_primary_color_depth(u8 depth);
+void via_set_secondary_color_depth(u8 depth);
-int viafb_i2c_readbyte(u8 slave_addr, u8 index, u8 *pdata);
-int viafb_i2c_writebyte(u8 slave_addr, u8 index, u8 data);
-int viafb_i2c_readbytes(u8 slave_addr, u8 index, u8 *buff, int buff_len);
-int viafb_create_i2c_bus(void *par);
-void viafb_delete_i2c_buss(void *par);
-#endif /* __VIA_I2C_H__ */
+#endif /* __VIA_MODESETTING_H__ */
diff --git a/drivers/video/via/via_utility.c b/drivers/video/via/via_utility.c
index aefdeee..d05ccb6 100644
--- a/drivers/video/via/via_utility.c
+++ b/drivers/video/via/via_utility.c
@@ -19,6 +19,7 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
#include "global.h"
void viafb_get_device_support_state(u32 *support_state)
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 777b38a..2bc40e6 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -1,5 +1,5 @@
/*
- * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
* This program is free software; you can redistribute it and/or
@@ -23,8 +23,9 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
-#define _MASTER_FILE
+#include <linux/via-core.h>
+#define _MASTER_FILE
#include "global.h"
static char *viafb_name = "Via";
@@ -221,7 +222,7 @@ static int viafb_check_var(struct fb_var_screeninfo *var,
/* Adjust var according to our driver's own table */
viafb_fill_var_timing_info(var, viafb_refresh, vmode_entry);
if (info->var.accel_flags & FB_ACCELF_TEXT &&
- !ppar->shared->engine_mmio)
+ !ppar->shared->vdev->engine_mmio)
info->var.accel_flags = 0;
return 0;
@@ -317,12 +318,12 @@ static int viafb_pan_display(struct fb_var_screeninfo *var,
DEBUG_MSG(KERN_DEBUG "viafb_pan_display, address = %d\n", vram_addr);
if (!viafb_dual_fb) {
- viafb_set_primary_address(vram_addr);
- viafb_set_secondary_address(vram_addr);
+ via_set_primary_address(vram_addr);
+ via_set_secondary_address(vram_addr);
} else if (viapar->iga_path == IGA1)
- viafb_set_primary_address(vram_addr);
+ via_set_primary_address(vram_addr);
else
- viafb_set_secondary_address(vram_addr);
+ via_set_secondary_address(vram_addr);
return 0;
}
@@ -696,7 +697,7 @@ static void viafb_fillrect(struct fb_info *info,
rop = 0xF0;
DEBUG_MSG(KERN_DEBUG "viafb 2D engine: fillrect\n");
- if (shared->hw_bitblt(shared->engine_mmio, VIA_BITBLT_FILL,
+ if (shared->hw_bitblt(shared->vdev->engine_mmio, VIA_BITBLT_FILL,
rect->width, rect->height, info->var.bits_per_pixel,
viapar->vram_addr, info->fix.line_length, rect->dx, rect->dy,
NULL, 0, 0, 0, 0, fg_color, 0, rop))
@@ -718,7 +719,7 @@ static void viafb_copyarea(struct fb_info *info,
return;
DEBUG_MSG(KERN_DEBUG "viafb 2D engine: copyarea\n");
- if (shared->hw_bitblt(shared->engine_mmio, VIA_BITBLT_COLOR,
+ if (shared->hw_bitblt(shared->vdev->engine_mmio, VIA_BITBLT_COLOR,
area->width, area->height, info->var.bits_per_pixel,
viapar->vram_addr, info->fix.line_length, area->dx, area->dy,
NULL, viapar->vram_addr, info->fix.line_length,
@@ -755,7 +756,7 @@ static void viafb_imageblit(struct fb_info *info,
op = VIA_BITBLT_COLOR;
DEBUG_MSG(KERN_DEBUG "viafb 2D engine: imageblit\n");
- if (shared->hw_bitblt(shared->engine_mmio, op,
+ if (shared->hw_bitblt(shared->vdev->engine_mmio, op,
image->width, image->height, info->var.bits_per_pixel,
viapar->vram_addr, info->fix.line_length, image->dx, image->dy,
(u32 *)image->data, 0, 0, 0, 0, fg_color, bg_color, 0))
@@ -765,7 +766,7 @@ static void viafb_imageblit(struct fb_info *info,
static int viafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct viafb_par *viapar = info->par;
- void __iomem *engine = viapar->shared->engine_mmio;
+ void __iomem *engine = viapar->shared->vdev->engine_mmio;
u32 temp, xx, yy, bg_color = 0, fg_color = 0,
chip_name = viapar->shared->chip_info.gfx_chip_name;
int i, j = 0, cur_size = 64;
@@ -1018,8 +1019,8 @@ static void viafb_set_device(struct device_t active_dev)
viafb_SAMM_ON = active_dev.samm;
viafb_primary_dev = active_dev.primary_dev;
- viafb_set_primary_address(0);
- viafb_set_secondary_address(viafb_SAMM_ON ? viafb_second_offset : 0);
+ via_set_primary_address(0);
+ via_set_secondary_address(viafb_SAMM_ON ? viafb_second_offset : 0);
viafb_set_iga_path();
}
@@ -1165,8 +1166,9 @@ static int apply_device_setting(struct viafb_ioctl_setting setting_info,
if (viafb_SAMM_ON)
viafb_primary_dev = setting_info.primary_device;
- viafb_set_primary_address(0);
- viafb_set_secondary_address(viafb_SAMM_ON ? viafb_second_offset : 0);
+ via_set_primary_address(0);
+ via_set_secondary_address(viafb_SAMM_ON ?
+ viafb_second_offset : 0);
viafb_set_iga_path();
}
need_set_mode = 1;
@@ -1325,6 +1327,8 @@ static void parse_dvi_port(void)
output_interface);
}
+#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
+
/*
* The proc filesystem read/write function, a simple proc implement to
* get/set the value of DPA DVP0, DVP0DataDriving, DVP0ClockDriving, DVP1,
@@ -1701,16 +1705,21 @@ static void viafb_init_proc(struct proc_dir_entry **viafb_entry)
}
static void viafb_remove_proc(struct proc_dir_entry *viafb_entry)
{
- /* no problem if it was not registered */
+ struct chip_information *chip_info = &viaparinfo->shared->chip_info;
+
remove_proc_entry("dvp0", viafb_entry);/* parent dir */
remove_proc_entry("dvp1", viafb_entry);
remove_proc_entry("dfph", viafb_entry);
remove_proc_entry("dfpl", viafb_entry);
- remove_proc_entry("vt1636", viafb_entry);
- remove_proc_entry("vt1625", viafb_entry);
+ if (chip_info->lvds_chip_info.lvds_chip_name == VT1636_LVDS
+ || chip_info->lvds_chip_info2.lvds_chip_name == VT1636_LVDS)
+ remove_proc_entry("vt1636", viafb_entry);
+
remove_proc_entry("viafb", NULL);
}
+#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
+
static int parse_mode(const char *str, u32 *xres, u32 *yres)
{
char *ptr;
@@ -1732,12 +1741,13 @@ static int parse_mode(const char *str, u32 *xres, u32 *yres)
return 0;
}
-static int __devinit via_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+
+int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
{
u32 default_xres, default_yres;
struct VideoModeTable *vmode_entry;
struct fb_var_screeninfo default_var;
+ int rc;
u32 viafb_par_length;
DEBUG_MSG(KERN_INFO "VIAFB PCI Probe!!\n");
@@ -1749,14 +1759,15 @@ static int __devinit via_pci_probe(struct pci_dev *pdev,
*/
viafbinfo = framebuffer_alloc(viafb_par_length +
ALIGN(sizeof(struct viafb_shared), BITS_PER_LONG/8),
- &pdev->dev);
+ &vdev->pdev->dev);
if (!viafbinfo) {
printk(KERN_ERR"Could not allocate memory for viafb_info.\n");
- return -ENODEV;
+ return -ENOMEM;
}
viaparinfo = (struct viafb_par *)viafbinfo->par;
viaparinfo->shared = viafbinfo->par + viafb_par_length;
+ viaparinfo->shared->vdev = vdev;
viaparinfo->vram_addr = 0;
viaparinfo->tmds_setting_info = &viaparinfo->shared->tmds_setting_info;
viaparinfo->lvds_setting_info = &viaparinfo->shared->lvds_setting_info;
@@ -1774,23 +1785,20 @@ static int __devinit via_pci_probe(struct pci_dev *pdev,
if (!viafb_SAMM_ON)
viafb_dual_fb = 0;
- /* Set up I2C bus stuff */
- viafb_create_i2c_bus(viaparinfo);
-
- viafb_init_chip_info(pdev, ent);
- viaparinfo->fbmem = pci_resource_start(pdev, 0);
- viaparinfo->memsize = viafb_get_fb_size_from_pci();
+ viafb_init_chip_info(vdev->chip_type);
+ /*
+ * The framebuffer will have been successfully mapped by
+ * the core (or we'd not be here), but we still need to
+ * set up our own accounting.
+ */
+ viaparinfo->fbmem = vdev->fbmem_start;
+ viaparinfo->memsize = vdev->fbmem_len;
viaparinfo->fbmem_free = viaparinfo->memsize;
viaparinfo->fbmem_used = 0;
- viafbinfo->screen_base = ioremap_nocache(viaparinfo->fbmem,
- viaparinfo->memsize);
- if (!viafbinfo->screen_base) {
- printk(KERN_INFO "ioremap failed\n");
- return -ENOMEM;
- }
+ viafbinfo->screen_base = vdev->fbmem;
- viafbinfo->fix.mmio_start = pci_resource_start(pdev, 1);
- viafbinfo->fix.mmio_len = pci_resource_len(pdev, 1);
+ viafbinfo->fix.mmio_start = vdev->engine_start;
+ viafbinfo->fix.mmio_len = vdev->engine_len;
viafbinfo->node = 0;
viafbinfo->fbops = &viafb_ops;
viafbinfo->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
@@ -1858,12 +1866,13 @@ static int __devinit via_pci_probe(struct pci_dev *pdev,
viafbinfo->var = default_var;
if (viafb_dual_fb) {
- viafbinfo1 = framebuffer_alloc(viafb_par_length, &pdev->dev);
+ viafbinfo1 = framebuffer_alloc(viafb_par_length,
+ &vdev->pdev->dev);
if (!viafbinfo1) {
printk(KERN_ERR
"allocate the second framebuffer struct error\n");
- framebuffer_release(viafbinfo);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_fb_release;
}
viaparinfo1 = viafbinfo1->par;
memcpy(viaparinfo1, viaparinfo, viafb_par_length);
@@ -1914,48 +1923,66 @@ static int __devinit via_pci_probe(struct pci_dev *pdev,
viaparinfo->depth = fb_get_color_depth(&viafbinfo->var,
&viafbinfo->fix);
default_var.activate = FB_ACTIVATE_NOW;
- fb_alloc_cmap(&viafbinfo->cmap, 256, 0);
+ rc = fb_alloc_cmap(&viafbinfo->cmap, 256, 0);
+ if (rc)
+ goto out_fb1_release;
if (viafb_dual_fb && (viafb_primary_dev == LCD_Device)
&& (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)) {
- if (register_framebuffer(viafbinfo1) < 0)
- return -EINVAL;
+ rc = register_framebuffer(viafbinfo1);
+ if (rc)
+ goto out_dealloc_cmap;
}
- if (register_framebuffer(viafbinfo) < 0)
- return -EINVAL;
+ rc = register_framebuffer(viafbinfo);
+ if (rc)
+ goto out_fb1_unreg_lcd_cle266;
if (viafb_dual_fb && ((viafb_primary_dev != LCD_Device)
|| (viaparinfo->chip_info->gfx_chip_name !=
UNICHROME_CLE266))) {
- if (register_framebuffer(viafbinfo1) < 0)
- return -EINVAL;
+ rc = register_framebuffer(viafbinfo1);
+ if (rc)
+ goto out_fb_unreg;
}
DEBUG_MSG(KERN_INFO "fb%d: %s frame buffer device %dx%d-%dbpp\n",
viafbinfo->node, viafbinfo->fix.id, default_var.xres,
default_var.yres, default_var.bits_per_pixel);
+#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
viafb_init_proc(&viaparinfo->shared->proc_entry);
+#endif
viafb_init_dac(IGA2);
return 0;
+
+out_fb_unreg:
+ unregister_framebuffer(viafbinfo);
+out_fb1_unreg_lcd_cle266:
+ if (viafb_dual_fb && (viafb_primary_dev == LCD_Device)
+ && (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266))
+ unregister_framebuffer(viafbinfo1);
+out_dealloc_cmap:
+ fb_dealloc_cmap(&viafbinfo->cmap);
+out_fb1_release:
+ if (viafbinfo1)
+ framebuffer_release(viafbinfo1);
+out_fb_release:
+ framebuffer_release(viafbinfo);
+ return rc;
}
-static void __devexit via_pci_remove(struct pci_dev *pdev)
+void __devexit via_fb_pci_remove(struct pci_dev *pdev)
{
DEBUG_MSG(KERN_INFO "via_pci_remove!\n");
fb_dealloc_cmap(&viafbinfo->cmap);
unregister_framebuffer(viafbinfo);
if (viafb_dual_fb)
unregister_framebuffer(viafbinfo1);
- iounmap((void *)viafbinfo->screen_base);
- iounmap(viaparinfo->shared->engine_mmio);
-
- viafb_delete_i2c_buss(viaparinfo);
-
+#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
+ viafb_remove_proc(viaparinfo->shared->proc_entry);
+#endif
framebuffer_release(viafbinfo);
if (viafb_dual_fb)
framebuffer_release(viafbinfo1);
-
- viafb_remove_proc(viaparinfo->shared->proc_entry);
}
#ifndef MODULE
@@ -2031,41 +2058,10 @@ static int __init viafb_setup(char *options)
}
#endif
-static struct pci_device_id viafb_pci_table[] __devinitdata = {
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
- .driver_data = UNICHROME_CLE266 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
- .driver_data = UNICHROME_PM800 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID),
- .driver_data = UNICHROME_K400 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K800_DID),
- .driver_data = UNICHROME_K800 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID),
- .driver_data = UNICHROME_CN700 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID),
- .driver_data = UNICHROME_K8M890 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CX700_DID),
- .driver_data = UNICHROME_CX700 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID),
- .driver_data = UNICHROME_P4M900 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN750_DID),
- .driver_data = UNICHROME_CN750 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX800_DID),
- .driver_data = UNICHROME_VX800 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
- .driver_data = UNICHROME_VX855 },
- { }
-};
-MODULE_DEVICE_TABLE(pci, viafb_pci_table);
-
-static struct pci_driver viafb_driver = {
- .name = "viafb",
- .id_table = viafb_pci_table,
- .probe = via_pci_probe,
- .remove = __devexit_p(via_pci_remove),
-};
-
-static int __init viafb_init(void)
+/*
+ * These are called out of via-core for now.
+ */
+int __init viafb_init(void)
{
u32 dummy;
#ifndef MODULE
@@ -2084,13 +2080,12 @@ static int __init viafb_init(void)
printk(KERN_INFO
"VIA Graphics Intergration Chipset framebuffer %d.%d initializing\n",
VERSION_MAJOR, VERSION_MINOR);
- return pci_register_driver(&viafb_driver);
+ return 0;
}
-static void __exit viafb_exit(void)
+void __exit viafb_exit(void)
{
DEBUG_MSG(KERN_INFO "viafb_exit!\n");
- pci_unregister_driver(&viafb_driver);
}
static struct fb_ops viafb_ops = {
@@ -2110,8 +2105,6 @@ static struct fb_ops viafb_ops = {
.fb_sync = viafb_sync,
};
-module_init(viafb_init);
-module_exit(viafb_exit);
#ifdef MODULE
module_param(viafb_mode, charp, S_IRUSR);
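
The reworked probe path above replaces the early returns of the old via_pci_probe() with a single unwind ladder: every resource acquired so far has a matching label, and a failure jumps to the label that releases everything already taken, in reverse order, before returning rc. A stripped-down sketch of the same idiom (the acquire/release names are hypothetical stand-ins, not the driver's functions):

/* Sketch of the goto-unwind error handling used in via_fb_pci_probe(). */
static int acquire_a(void) { return 0; }   /* analogous to framebuffer_alloc()    */
static int acquire_b(void) { return 0; }   /* analogous to fb_alloc_cmap()        */
static int acquire_c(void) { return 0; }   /* analogous to register_framebuffer() */
static void release_a(void) { }
static void release_b(void) { }

static int sketch_probe(void)
{
	int rc;

	rc = acquire_a();
	if (rc)
		return rc;

	rc = acquire_b();
	if (rc)
		goto out_release_a;

	rc = acquire_c();
	if (rc)
		goto out_release_b;

	return 0;

out_release_b:
	release_b();
out_release_a:
	release_a();
	return rc;
}
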
diff --git a/drivers/video/via/viafbdev.h b/drivers/video/via/viafbdev.h
index 61b5953..52a35fa 100644
--- a/drivers/video/via/viafbdev.h
+++ b/drivers/video/via/viafbdev.h
@@ -24,12 +24,12 @@
#include <linux/proc_fs.h>
#include <linux/fb.h>
+#include <linux/spinlock.h>
#include "ioctl.h"
#include "share.h"
#include "chip.h"
#include "hw.h"
-#include "via_i2c.h"
#define VERSION_MAJOR 2
#define VERSION_KERNEL 6 /* For kernel 2.6 */
@@ -37,11 +37,11 @@
#define VERSION_OS 0 /* 0: for 32 bits OS, 1: for 64 bits OS */
#define VERSION_MINOR 4
+#define VIAFB_NUM_I2C 5
+
struct viafb_shared {
struct proc_dir_entry *proc_entry; /*viafb proc entry */
-
- /* I2C stuff */
- struct via_i2c_stuff i2c_stuff;
+ struct viafb_dev *vdev; /* Global dev info */
/* All the information will be needed to set engine */
struct tmds_setting_information tmds_setting_info;
@@ -51,7 +51,6 @@ struct viafb_shared {
struct chip_information chip_info;
/* hardware acceleration stuff */
- void __iomem *engine_mmio;
u32 cursor_vram_addr;
u32 vq_vram_addr; /* virtual queue address in video ram */
int (*hw_bitblt)(void __iomem *engine, u8 op, u32 width, u32 height,
@@ -99,4 +98,9 @@ u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information
void viafb_gpio_i2c_write_mask_lvds(struct lvds_setting_information
*plvds_setting_info, struct lvds_chip_information
*plvds_chip_info, struct IODATA io_data);
+int via_fb_pci_probe(struct viafb_dev *vdev);
+void via_fb_pci_remove(struct pci_dev *pdev);
+/* Temporary */
+int viafb_init(void);
+void viafb_exit(void);
#endif /* __VIAFBDEV_H__ */
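
With engine_mmio and the I2C bookkeeping gone from viafb_shared, the acceleration paths reach the 2D engine registers through the device-global viafb_dev owned by the new via core, as the shared->vdev->engine_mmio dereferences in the viafbdev.c hunks show. A small accessor sketch of that pointer chain (struct viafb_dev itself is defined in <linux/via-core.h> and is not part of this section):

/* Sketch: how the fb layer now finds the 2D engine MMIO base. */
#include <linux/fb.h>
#include "viafbdev.h"

static void __iomem *sketch_engine_mmio(struct fb_info *info)
{
	struct viafb_par *viapar = info->par;

	return viapar->shared->vdev->engine_mmio;
}
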
diff --git a/drivers/video/via/viamode.c b/drivers/video/via/viamode.c
index af50e24..2dbad3c 100644
--- a/drivers/video/via/viamode.c
+++ b/drivers/video/via/viamode.c
@@ -19,6 +19,7 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
#include "global.h"
struct res_map_refresh res_map_refresh_tbl[] = {
/*hres, vres, vclock, vmode_refresh*/
@@ -66,6 +67,7 @@ struct res_map_refresh res_map_refresh_tbl[] = {
{1088, 612, RES_1088X612_60HZ_PIXCLOCK, 60},
{1152, 720, RES_1152X720_60HZ_PIXCLOCK, 60},
{1200, 720, RES_1200X720_60HZ_PIXCLOCK, 60},
+ {1200, 900, RES_1200X900_60HZ_PIXCLOCK, 60},
{1280, 600, RES_1280X600_60HZ_PIXCLOCK, 60},
{1280, 720, RES_1280X720_50HZ_PIXCLOCK, 50},
{1280, 768, RES_1280X768_50HZ_PIXCLOCK, 50},
@@ -759,6 +761,16 @@ struct crt_mode_table CRTM1200x720[] = {
{1568, 1200, 1200, 368, 1256, 128, 746, 720, 720, 26, 721, 3} }
};
+/* 1200x900 (DCON) */
+struct crt_mode_table DCON1200x900[] = {
+ /* r_rate, vclk, hsp, vsp */
+ {REFRESH_60, CLK_57_275M, M1200X900_R60_HSP, M1200X900_R60_VSP,
+ /* The correct htotal is 1240, but this doesn't raster on VX855. */
+ /* Via suggested changing to a multiple of 16, hence 1264. */
+ /* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
+ {1264, 1200, 1200, 64, 1211, 32, 912, 900, 900, 12, 901, 10} }
+};
+
/* 1280x600 (GTF) */
struct crt_mode_table CRTM1280x600[] = {
/* r_rate, vclk, hsp, vsp */
@@ -937,6 +949,9 @@ struct VideoModeTable viafb_modes[] = {
/* Display : 1200x720 (GTF) */
{CRTM1200x720, ARRAY_SIZE(CRTM1200x720)},
+ /* Display : 1200x900 (DCON) */
+ {DCON1200x900, ARRAY_SIZE(DCON1200x900)},
+
/* Display : 1280x600 (GTF) */
{CRTM1280x600, ARRAY_SIZE(CRTM1280x600)},
diff --git a/drivers/video/via/vt1636.c b/drivers/video/via/vt1636.c
index a6b3749..d65bf1a 100644
--- a/drivers/video/via/vt1636.c
+++ b/drivers/video/via/vt1636.c
@@ -19,6 +19,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
+#include <linux/via_i2c.h>
#include "global.h"
u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information
@@ -27,9 +29,8 @@ u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information
{
u8 data;
- viaparinfo->shared->i2c_stuff.i2c_port = plvds_chip_info->i2c_port;
- viafb_i2c_readbyte(plvds_chip_info->lvds_chip_slave_addr, index, &data);
-
+ viafb_i2c_readbyte(plvds_chip_info->i2c_port,
+ plvds_chip_info->lvds_chip_slave_addr, index, &data);
return data;
}
@@ -39,14 +40,13 @@ void viafb_gpio_i2c_write_mask_lvds(struct lvds_setting_information
{
int index, data;
- viaparinfo->shared->i2c_stuff.i2c_port = plvds_chip_info->i2c_port;
-
index = io_data.Index;
data = viafb_gpio_i2c_read_lvds(plvds_setting_info, plvds_chip_info,
index);
data = (data & (~io_data.Mask)) | io_data.Data;
- viafb_i2c_writebyte(plvds_chip_info->lvds_chip_slave_addr, index, data);
+ viafb_i2c_writebyte(plvds_chip_info->i2c_port,
+ plvds_chip_info->lvds_chip_slave_addr, index, data);
}
void viafb_init_lvds_vt1636(struct lvds_setting_information
@@ -159,7 +159,7 @@ void viafb_disable_lvds_vt1636(struct lvds_setting_information
}
}
-bool viafb_lvds_identify_vt1636(void)
+bool viafb_lvds_identify_vt1636(u8 i2c_adapter)
{
u8 Buffer[2];
@@ -167,26 +167,20 @@ bool viafb_lvds_identify_vt1636(void)
/* Sense VT1636 LVDS Transmiter */
viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr =
- VT1636_LVDS_I2C_ADDR;
+ VT1636_LVDS_I2C_ADDR;
/* Check vendor ID first: */
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->lvds_chip_info.
- lvds_chip_slave_addr,
- 0x00, &Buffer[0]);
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->lvds_chip_info.
- lvds_chip_slave_addr,
- 0x01, &Buffer[1]);
+ if (viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR,
+ 0x00, &Buffer[0]))
+ return false;
+ viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR, 0x01, &Buffer[1]);
if (!((Buffer[0] == 0x06) && (Buffer[1] == 0x11)))
return false;
/* Check Chip ID: */
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->lvds_chip_info.
- lvds_chip_slave_addr,
- 0x02, &Buffer[0]);
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->lvds_chip_info.
- lvds_chip_slave_addr,
- 0x03, &Buffer[1]);
+ viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR, 0x02, &Buffer[0]);
+ viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR, 0x03, &Buffer[1]);
if ((Buffer[0] == 0x45) && (Buffer[1] == 0x33)) {
viaparinfo->chip_info->lvds_chip_info.lvds_chip_name =
VT1636_LVDS;
diff --git a/drivers/video/via/vt1636.h b/drivers/video/via/vt1636.h
index 2a150c5..4c1314e 100644
--- a/drivers/video/via/vt1636.h
+++ b/drivers/video/via/vt1636.h
@@ -22,7 +22,7 @@
#ifndef _VT1636_H_
#define _VT1636_H_
#include "chip.h"
-bool viafb_lvds_identify_vt1636(void);
+bool viafb_lvds_identify_vt1636(u8 i2c_adapter);
void viafb_init_lvds_vt1636(struct lvds_setting_information
*plvds_setting_info, struct lvds_chip_information *plvds_chip_info);
void viafb_enable_lvds_vt1636(struct lvds_setting_information
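
The VT1636 identify path no longer stashes the GPIO/I2C port in shared state before each transfer; viafb_i2c_readbyte() and viafb_i2c_writebyte() now take the adapter number as their first argument and return a status, which viafb_lvds_identify_vt1636() checks on the first read so a missing transmitter fails cleanly. A minimal caller sketch (the adapter index is an assumption; the declarations are taken to live in <linux/via_i2c.h> as included by vt1636.c above):

/* Sketch: one register read with the reworked viafb I2C helper. */
#include <linux/via_i2c.h>

static int sketch_read_vt1636_vendor_lo(u8 i2c_adapter, u8 *vendor_lo)
{
	/* adapter, slave address, register index, result buffer */
	return viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR,
				  0x00, vendor_lo);
}
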
diff --git a/drivers/w1/slaves/w1_ds2431.c b/drivers/w1/slaves/w1_ds2431.c
index 2c6c0cf..84e2410 100644
--- a/drivers/w1/slaves/w1_ds2431.c
+++ b/drivers/w1/slaves/w1_ds2431.c
@@ -96,7 +96,7 @@ static int w1_f2d_readblock(struct w1_slave *sl, int off, int count, char *buf)
return -1;
}
-static ssize_t w1_f2d_read_bin(struct kobject *kobj,
+static ssize_t w1_f2d_read_bin(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -202,7 +202,7 @@ retry:
return 0;
}
-static ssize_t w1_f2d_write_bin(struct kobject *kobj,
+static ssize_t w1_f2d_write_bin(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index d2bf321..0f7b8f9 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -92,7 +92,7 @@ static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data,
}
#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
-static ssize_t w1_f23_read_bin(struct kobject *kobj,
+static ssize_t w1_f23_read_bin(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -206,7 +206,7 @@ static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data)
return 0;
}
-static ssize_t w1_f23_write_bin(struct kobject *kobj,
+static ssize_t w1_f23_write_bin(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
index 6e15334..483d451 100644
--- a/drivers/w1/slaves/w1_ds2760.c
+++ b/drivers/w1/slaves/w1_ds2760.c
@@ -97,7 +97,7 @@ int w1_ds2760_recall_eeprom(struct device *dev, int addr)
return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_RECALL_DATA);
}
-static ssize_t w1_ds2760_read_bin(struct kobject *kobj,
+static ssize_t w1_ds2760_read_bin(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index ad5897d..2839e28 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -120,7 +120,7 @@ static struct device_attribute w1_slave_attr_id =
/* Default family */
-static ssize_t w1_default_write(struct kobject *kobj,
+static ssize_t w1_default_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -139,7 +139,7 @@ out_up:
return count;
}
-static ssize_t w1_default_read(struct kobject *kobj,
+static ssize_t w1_default_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
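
The w1 hunks here (and the zorro one below) are mechanical fallout of a sysfs interface change in this release: binary attribute read/write callbacks gain a leading struct file * parameter. Every driver that registers a bin_attribute has to update its prototypes even if it never looks at the new argument, roughly as in this sketch (attribute name and contents are placeholders):

/* Sketch: the updated sysfs bin_attribute callback signature. */
#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static ssize_t sketch_read_bin(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	/* filp is the new parameter; existing drivers may simply ignore it */
	return 0;
}

static struct bin_attribute sketch_attr = {
	.attr	= { .name = "sketch", .mode = S_IRUGO },
	.size	= 0,
	.read	= sketch_read_bin,
};
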
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 8e4eacc..748a74b 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -600,8 +600,8 @@ static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd)
{
usb_free_urb(usb_pcwd->intr_urb);
if (usb_pcwd->intr_buffer != NULL)
- usb_buffer_free(usb_pcwd->udev, usb_pcwd->intr_size,
- usb_pcwd->intr_buffer, usb_pcwd->intr_dma);
+ usb_free_coherent(usb_pcwd->udev, usb_pcwd->intr_size,
+ usb_pcwd->intr_buffer, usb_pcwd->intr_dma);
kfree(usb_pcwd);
}
@@ -671,7 +671,7 @@ static int usb_pcwd_probe(struct usb_interface *interface,
le16_to_cpu(endpoint->wMaxPacketSize) : 8);
/* set up the memory buffer's */
- usb_pcwd->intr_buffer = usb_buffer_alloc(udev, usb_pcwd->intr_size,
+ usb_pcwd->intr_buffer = usb_alloc_coherent(udev, usb_pcwd->intr_size,
GFP_ATOMIC, &usb_pcwd->intr_dma);
if (!usb_pcwd->intr_buffer) {
printk(KERN_ERR PFX "Out of memory\n");
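
The pcwd_usb change is another rename from this cycle: usb_buffer_alloc()/usb_buffer_free() became usb_alloc_coherent()/usb_free_coherent() with unchanged arguments, still returning a DMA-coherent buffer plus its bus address. A hedged pairing sketch mirroring the driver's usage above (GFP flag chosen for illustration):

/* Sketch: allocate and release a DMA-coherent USB transfer buffer. */
#include <linux/usb.h>

static void *sketch_alloc(struct usb_device *udev, size_t size,
			  dma_addr_t *dma)
{
	return usb_alloc_coherent(udev, size, GFP_KERNEL, dma);
}

static void sketch_free(struct usb_device *udev, size_t size,
			void *buf, dma_addr_t dma)
{
	usb_free_coherent(udev, size, buf, dma);
}
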
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index eb924e0..26f7184 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -49,7 +49,7 @@ static ssize_t zorro_show_resource(struct device *dev, struct device_attribute *
static DEVICE_ATTR(resource, S_IRUGO, zorro_show_resource, NULL);
-static ssize_t zorro_read_config(struct kobject *kobj,
+static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{